Diffstat (limited to 'src/test')
-rw-r--r--  src/test/icu/t/010_database.pl | 24
-rw-r--r--  src/test/ldap/t/001_auth.pl | 3
-rw-r--r--  src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl | 3
-rw-r--r--  src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl | 3
-rw-r--r--  src/test/modules/test_misc/t/002_tablespace.pl | 34
-rw-r--r--  src/test/modules/test_oat_hooks/test_oat_hooks.c | 1797
-rw-r--r--  src/test/modules/test_pg_dump/t/001_base.pl | 2
-rw-r--r--  src/test/perl/PostgreSQL/Test/Cluster.pm | 106
-rw-r--r--  src/test/perl/PostgreSQL/Test/SimpleTee.pm | 8
-rw-r--r--  src/test/perl/PostgreSQL/Test/Utils.pm | 17
-rw-r--r--  src/test/perl/PostgreSQL/Version.pm | 16
-rw-r--r--  src/test/recovery/t/001_stream_rep.pl | 3
-rw-r--r--  src/test/recovery/t/002_archiving.pl | 2
-rw-r--r--  src/test/recovery/t/006_logical_decoding.pl | 80
-rw-r--r--  src/test/recovery/t/013_crash_restart.pl | 20
-rw-r--r--  src/test/recovery/t/014_unlogged_reinit.pl | 3
-rw-r--r--  src/test/recovery/t/019_replslot_limit.pl | 10
-rw-r--r--  src/test/recovery/t/022_crash_temp_files.pl | 26
-rw-r--r--  src/test/recovery/t/027_stream_regress.pl | 54
-rw-r--r--  src/test/recovery/t/029_stats_restart.pl | 3
-rw-r--r--  src/test/recovery/t/031_recovery_conflict.pl | 9
-rw-r--r--  src/test/recovery/t/032_relfilenode_reuse.pl | 82
-rw-r--r--  src/test/regress/regress.c | 6
-rw-r--r--  src/test/ssl/t/001_ssltests.pl | 105
-rw-r--r--  src/test/ssl/t/002_scram.pl | 11
-rw-r--r--  src/test/ssl/t/003_sslinfo.pl | 103
-rw-r--r--  src/test/ssl/t/SSL/Backend/OpenSSL.pm | 13
-rw-r--r--  src/test/ssl/t/SSL/Server.pm | 29
-rw-r--r--  src/test/subscription/t/001_rep_changes.pl | 22
-rw-r--r--  src/test/subscription/t/007_ddl.pl | 12
-rw-r--r--  src/test/subscription/t/013_partition.pl | 42
-rw-r--r--  src/test/subscription/t/021_twophase.pl | 142
-rw-r--r--  src/test/subscription/t/022_twophase_cascade.pl | 209
-rw-r--r--  src/test/subscription/t/023_twophase_stream.pl | 132
-rw-r--r--  src/test/subscription/t/024_add_drop_pub.pl | 3
-rw-r--r--  src/test/subscription/t/025_rep_changes_for_schema.pl | 25
-rw-r--r--  src/test/subscription/t/027_nosuperuser.pl | 167
-rw-r--r--  src/test/subscription/t/028_row_filter.pl | 11
-rw-r--r--  src/test/subscription/t/031_column_list.pl | 486
39 files changed, 2567 insertions(+), 1256 deletions(-)
diff --git a/src/test/icu/t/010_database.pl b/src/test/icu/t/010_database.pl
index 07a1084b09d..7035ff3c209 100644
--- a/src/test/icu/t/010_database.pl
+++ b/src/test/icu/t/010_database.pl
@@ -16,30 +16,34 @@ $node1->init;
$node1->start;
$node1->safe_psql('postgres',
- q{CREATE DATABASE dbicu LOCALE_PROVIDER icu LOCALE 'C' ICU_LOCALE 'en@colCaseFirst=upper' ENCODING 'UTF8' TEMPLATE template0});
+ q{CREATE DATABASE dbicu LOCALE_PROVIDER icu LOCALE 'C' ICU_LOCALE 'en@colCaseFirst=upper' ENCODING 'UTF8' TEMPLATE template0}
+);
-$node1->safe_psql('dbicu',
-q{
+$node1->safe_psql(
+ 'dbicu',
+ q{
CREATE COLLATION upperfirst (provider = icu, locale = 'en@colCaseFirst=upper');
CREATE TABLE icu (def text, en text COLLATE "en-x-icu", upfirst text COLLATE upperfirst);
INSERT INTO icu VALUES ('a', 'a', 'a'), ('b', 'b', 'b'), ('A', 'A', 'A'), ('B', 'B', 'B');
});
-is($node1->safe_psql('dbicu', q{SELECT def FROM icu ORDER BY def}),
+is( $node1->safe_psql('dbicu', q{SELECT def FROM icu ORDER BY def}),
qq(A
a
B
b),
'sort by database default locale');
-is($node1->safe_psql('dbicu', q{SELECT def FROM icu ORDER BY def COLLATE "en-x-icu"}),
+is( $node1->safe_psql(
+ 'dbicu', q{SELECT def FROM icu ORDER BY def COLLATE "en-x-icu"}),
qq(a
A
b
B),
'sort by explicit collation standard');
-is($node1->safe_psql('dbicu', q{SELECT def FROM icu ORDER BY en COLLATE upperfirst}),
+is( $node1->safe_psql(
+ 'dbicu', q{SELECT def FROM icu ORDER BY en COLLATE upperfirst}),
qq(A
a
B
@@ -51,8 +55,12 @@ b),
my ($ret, $stdout, $stderr) = $node1->psql('postgres',
q{CREATE DATABASE dbicu LOCALE_PROVIDER icu TEMPLATE template0});
-isnt($ret, 0, "ICU locale must be specified for ICU provider: exit code not 0");
-like($stderr, qr/ERROR: ICU locale must be specified/, "ICU locale must be specified for ICU provider: error message");
+isnt($ret, 0,
+ "ICU locale must be specified for ICU provider: exit code not 0");
+like(
+ $stderr,
+ qr/ERROR: ICU locale must be specified/,
+ "ICU locale must be specified for ICU provider: error message");
done_testing();
diff --git a/src/test/ldap/t/001_auth.pl b/src/test/ldap/t/001_auth.pl
index b342146e556..86dff8bd1f3 100644
--- a/src/test/ldap/t/001_auth.pl
+++ b/src/test/ldap/t/001_auth.pl
@@ -46,7 +46,8 @@ elsif ($^O eq 'openbsd')
}
else
{
- plan skip_all => "ldap tests not supported on $^O or dependencies not installed";
+ plan skip_all =>
+ "ldap tests not supported on $^O or dependencies not installed";
}
# make your own edits here
diff --git a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
index cc79d96d473..4cb1170438a 100644
--- a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
+++ b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
@@ -28,7 +28,8 @@ for my $testname (@tests)
pipeline_abort transaction disallowed_in_pipeline)) > 0;
# For a bunch of tests, generate a libpq trace file too.
- my $traceout = "$PostgreSQL::Test::Utils::tmp_check/traces/$testname.trace";
+ my $traceout =
+ "$PostgreSQL::Test::Utils::tmp_check/traces/$testname.trace";
if ($cmptrace)
{
push @extraargs, "-t", $traceout;
diff --git a/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl b/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl
index 0429861b16a..5be5ac39eb6 100644
--- a/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl
+++ b/src/test/modules/ssl_passphrase_callback/t/001_testfunc.pl
@@ -63,7 +63,8 @@ like(
$node->append_conf('postgresql.conf', "ssl_passphrase.passphrase = 'blurfl'");
# try to start the server again
-my $ret = PostgreSQL::Test::Utils::system_log('pg_ctl', '-D', $node->data_dir, '-l',
+my $ret =
+ PostgreSQL::Test::Utils::system_log('pg_ctl', '-D', $node->data_dir, '-l',
$node->logfile, 'start');
diff --git a/src/test/modules/test_misc/t/002_tablespace.pl b/src/test/modules/test_misc/t/002_tablespace.pl
index 04e54394c12..95cd2b7b65f 100644
--- a/src/test/modules/test_misc/t/002_tablespace.pl
+++ b/src/test/modules/test_misc/t/002_tablespace.pl
@@ -13,9 +13,9 @@ $node->init;
$node->start;
# Create a couple of directories to use as tablespaces.
-my $basedir = $node->basedir();
+my $basedir = $node->basedir();
my $TS1_LOCATION = "$basedir/ts1";
-$TS1_LOCATION =~ s/\/\.\//\//g; # collapse foo/./bar to foo/bar
+$TS1_LOCATION =~ s/\/\.\//\//g; # collapse foo/./bar to foo/bar
mkdir($TS1_LOCATION);
my $TS2_LOCATION = "$basedir/ts2";
$TS2_LOCATION =~ s/\/\.\//\//g;
@@ -34,13 +34,11 @@ $result = $node->psql('postgres',
ok($result != 0, 'clobber tablespace with absolute path');
# Create table in it
-$result = $node->psql('postgres',
- "CREATE TABLE t () TABLESPACE regress_ts1");
+$result = $node->psql('postgres', "CREATE TABLE t () TABLESPACE regress_ts1");
ok($result == 0, 'create table in tablespace with absolute path');
# Can't drop a tablespace that still has a table in it
-$result = $node->psql('postgres',
- "DROP TABLESPACE regress_ts1");
+$result = $node->psql('postgres', "DROP TABLESPACE regress_ts1");
ok($result != 0, 'drop tablespace with absolute path');
# Drop the table
@@ -60,32 +58,28 @@ $result = $node->psql('postgres',
"CREATE TABLESPACE regress_ts2 LOCATION '$TS2_LOCATION'");
ok($result == 0, 'create tablespace 2 with absolute path');
$result = $node->psql('postgres',
- "SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts3 LOCATION ''");
+ "SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts3 LOCATION ''"
+);
ok($result == 0, 'create tablespace 3 with in-place directory');
$result = $node->psql('postgres',
- "SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts4 LOCATION ''");
+ "SET allow_in_place_tablespaces=on; CREATE TABLESPACE regress_ts4 LOCATION ''"
+);
ok($result == 0, 'create tablespace 4 with in-place directory');
# Create a table and test moving between absolute and in-place tablespaces
-$result = $node->psql('postgres',
- "CREATE TABLE t () TABLESPACE regress_ts1");
+$result = $node->psql('postgres', "CREATE TABLE t () TABLESPACE regress_ts1");
ok($result == 0, 'create table in tablespace 1');
-$result = $node->psql('postgres',
- "ALTER TABLE t SET tablespace regress_ts2");
+$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts2");
ok($result == 0, 'move table abs->abs');
-$result = $node->psql('postgres',
- "ALTER TABLE t SET tablespace regress_ts3");
+$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts3");
ok($result == 0, 'move table abs->in-place');
-$result = $node->psql('postgres',
- "ALTER TABLE t SET tablespace regress_ts4");
+$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts4");
ok($result == 0, 'move table in-place->in-place');
-$result = $node->psql('postgres',
- "ALTER TABLE t SET tablespace regress_ts1");
+$result = $node->psql('postgres', "ALTER TABLE t SET tablespace regress_ts1");
ok($result == 0, 'move table in-place->abs');
# Drop everything
-$result = $node->psql('postgres',
- "DROP TABLE t");
+$result = $node->psql('postgres', "DROP TABLE t");
ok($result == 0, 'create table in tablespace 1');
$result = $node->psql('postgres', "DROP TABLESPACE regress_ts1");
ok($result == 0, 'drop tablespace 1');
diff --git a/src/test/modules/test_oat_hooks/test_oat_hooks.c b/src/test/modules/test_oat_hooks/test_oat_hooks.c
index 6f9838f93b5..7ef272cc7ae 100644
--- a/src/test/modules/test_oat_hooks/test_oat_hooks.c
+++ b/src/test/modules/test_oat_hooks/test_oat_hooks.c
@@ -234,9 +234,9 @@ static void
emit_audit_message(const char *type, const char *hook, char *action, char *objName)
{
/*
- * Ensure that audit messages are not duplicated by only emitting them from
- * a leader process, not a worker process. This makes the test results
- * deterministic even if run with force_parallel_mode = regress.
+ * Ensure that audit messages are not duplicated by only emitting them
+ * from a leader process, not a worker process. This makes the test
+ * results deterministic even if run with force_parallel_mode = regress.
*/
if (REGRESS_audit && !IsParallelWorker())
{
@@ -285,7 +285,7 @@ REGRESS_object_access_hook_str(ObjectAccessType access, Oid classId, const char
if (next_object_access_hook_str)
{
- (*next_object_access_hook_str)(access, classId, objName, subId, arg);
+ (*next_object_access_hook_str) (access, classId, objName, subId, arg);
}
switch (access)
@@ -325,7 +325,7 @@ REGRESS_object_access_hook_str(ObjectAccessType access, Oid classId, const char
}
static void
-REGRESS_object_access_hook (ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg)
+REGRESS_object_access_hook(ObjectAccessType access, Oid classId, Oid objectId, int subId, void *arg)
{
audit_attempt("object access",
accesstype_to_string(access, 0),
@@ -340,7 +340,7 @@ REGRESS_object_access_hook (ObjectAccessType access, Oid classId, Oid objectId,
/* Forward to next hook in the chain */
if (next_object_access_hook)
- (*next_object_access_hook)(access, classId, objectId, subId, arg);
+ (*next_object_access_hook) (access, classId, objectId, subId, arg);
audit_success("object access",
accesstype_to_string(access, 0),
@@ -381,18 +381,18 @@ REGRESS_exec_check_perms(List *rangeTabls, bool do_abort)
static void
REGRESS_utility_command(PlannedStmt *pstmt,
- const char *queryString,
- bool readOnlyTree,
- ProcessUtilityContext context,
- ParamListInfo params,
- QueryEnvironment *queryEnv,
- DestReceiver *dest,
- QueryCompletion *qc)
+ const char *queryString,
+ bool readOnlyTree,
+ ProcessUtilityContext context,
+ ParamListInfo params,
+ QueryEnvironment *queryEnv,
+ DestReceiver *dest,
+ QueryCompletion *qc)
{
Node *parsetree = pstmt->utilityStmt;
const char *action;
- NodeTag tag = nodeTag(parsetree);
+ NodeTag tag = nodeTag(parsetree);
switch (tag)
{
@@ -441,438 +441,1302 @@ nodetag_to_string(NodeTag tag)
{
switch (tag)
{
- case T_Invalid: return "Invalid"; break;
- case T_IndexInfo: return "IndexInfo"; break;
- case T_ExprContext: return "ExprContext"; break;
- case T_ProjectionInfo: return "ProjectionInfo"; break;
- case T_JunkFilter: return "JunkFilter"; break;
- case T_OnConflictSetState: return "OnConflictSetState"; break;
- case T_ResultRelInfo: return "ResultRelInfo"; break;
- case T_EState: return "EState"; break;
- case T_TupleTableSlot: return "TupleTableSlot"; break;
- case T_Plan: return "Plan"; break;
- case T_Result: return "Result"; break;
- case T_ProjectSet: return "ProjectSet"; break;
- case T_ModifyTable: return "ModifyTable"; break;
- case T_Append: return "Append"; break;
- case T_MergeAppend: return "MergeAppend"; break;
- case T_RecursiveUnion: return "RecursiveUnion"; break;
- case T_BitmapAnd: return "BitmapAnd"; break;
- case T_BitmapOr: return "BitmapOr"; break;
- case T_Scan: return "Scan"; break;
- case T_SeqScan: return "SeqScan"; break;
- case T_SampleScan: return "SampleScan"; break;
- case T_IndexScan: return "IndexScan"; break;
- case T_IndexOnlyScan: return "IndexOnlyScan"; break;
- case T_BitmapIndexScan: return "BitmapIndexScan"; break;
- case T_BitmapHeapScan: return "BitmapHeapScan"; break;
- case T_TidScan: return "TidScan"; break;
- case T_TidRangeScan: return "TidRangeScan"; break;
- case T_SubqueryScan: return "SubqueryScan"; break;
- case T_FunctionScan: return "FunctionScan"; break;
- case T_ValuesScan: return "ValuesScan"; break;
- case T_TableFuncScan: return "TableFuncScan"; break;
- case T_CteScan: return "CteScan"; break;
- case T_NamedTuplestoreScan: return "NamedTuplestoreScan"; break;
- case T_WorkTableScan: return "WorkTableScan"; break;
- case T_ForeignScan: return "ForeignScan"; break;
- case T_CustomScan: return "CustomScan"; break;
- case T_Join: return "Join"; break;
- case T_NestLoop: return "NestLoop"; break;
- case T_MergeJoin: return "MergeJoin"; break;
- case T_HashJoin: return "HashJoin"; break;
- case T_Material: return "Material"; break;
- case T_Memoize: return "Memoize"; break;
- case T_Sort: return "Sort"; break;
- case T_IncrementalSort: return "IncrementalSort"; break;
- case T_Group: return "Group"; break;
- case T_Agg: return "Agg"; break;
- case T_WindowAgg: return "WindowAgg"; break;
- case T_Unique: return "Unique"; break;
- case T_Gather: return "Gather"; break;
- case T_GatherMerge: return "GatherMerge"; break;
- case T_Hash: return "Hash"; break;
- case T_SetOp: return "SetOp"; break;
- case T_LockRows: return "LockRows"; break;
- case T_Limit: return "Limit"; break;
- case T_NestLoopParam: return "NestLoopParam"; break;
- case T_PlanRowMark: return "PlanRowMark"; break;
- case T_PartitionPruneInfo: return "PartitionPruneInfo"; break;
- case T_PartitionedRelPruneInfo: return "PartitionedRelPruneInfo"; break;
- case T_PartitionPruneStepOp: return "PartitionPruneStepOp"; break;
- case T_PartitionPruneStepCombine: return "PartitionPruneStepCombine"; break;
- case T_PlanInvalItem: return "PlanInvalItem"; break;
- case T_PlanState: return "PlanState"; break;
- case T_ResultState: return "ResultState"; break;
- case T_ProjectSetState: return "ProjectSetState"; break;
- case T_ModifyTableState: return "ModifyTableState"; break;
- case T_AppendState: return "AppendState"; break;
- case T_MergeAppendState: return "MergeAppendState"; break;
- case T_RecursiveUnionState: return "RecursiveUnionState"; break;
- case T_BitmapAndState: return "BitmapAndState"; break;
- case T_BitmapOrState: return "BitmapOrState"; break;
- case T_ScanState: return "ScanState"; break;
- case T_SeqScanState: return "SeqScanState"; break;
- case T_SampleScanState: return "SampleScanState"; break;
- case T_IndexScanState: return "IndexScanState"; break;
- case T_IndexOnlyScanState: return "IndexOnlyScanState"; break;
- case T_BitmapIndexScanState: return "BitmapIndexScanState"; break;
- case T_BitmapHeapScanState: return "BitmapHeapScanState"; break;
- case T_TidScanState: return "TidScanState"; break;
- case T_TidRangeScanState: return "TidRangeScanState"; break;
- case T_SubqueryScanState: return "SubqueryScanState"; break;
- case T_FunctionScanState: return "FunctionScanState"; break;
- case T_TableFuncScanState: return "TableFuncScanState"; break;
- case T_ValuesScanState: return "ValuesScanState"; break;
- case T_CteScanState: return "CteScanState"; break;
- case T_NamedTuplestoreScanState: return "NamedTuplestoreScanState"; break;
- case T_WorkTableScanState: return "WorkTableScanState"; break;
- case T_ForeignScanState: return "ForeignScanState"; break;
- case T_CustomScanState: return "CustomScanState"; break;
- case T_JoinState: return "JoinState"; break;
- case T_NestLoopState: return "NestLoopState"; break;
- case T_MergeJoinState: return "MergeJoinState"; break;
- case T_HashJoinState: return "HashJoinState"; break;
- case T_MaterialState: return "MaterialState"; break;
- case T_MemoizeState: return "MemoizeState"; break;
- case T_SortState: return "SortState"; break;
- case T_IncrementalSortState: return "IncrementalSortState"; break;
- case T_GroupState: return "GroupState"; break;
- case T_AggState: return "AggState"; break;
- case T_WindowAggState: return "WindowAggState"; break;
- case T_UniqueState: return "UniqueState"; break;
- case T_GatherState: return "GatherState"; break;
- case T_GatherMergeState: return "GatherMergeState"; break;
- case T_HashState: return "HashState"; break;
- case T_SetOpState: return "SetOpState"; break;
- case T_LockRowsState: return "LockRowsState"; break;
- case T_LimitState: return "LimitState"; break;
- case T_Alias: return "Alias"; break;
- case T_RangeVar: return "RangeVar"; break;
- case T_TableFunc: return "TableFunc"; break;
- case T_Var: return "Var"; break;
- case T_Const: return "Const"; break;
- case T_Param: return "Param"; break;
- case T_Aggref: return "Aggref"; break;
- case T_GroupingFunc: return "GroupingFunc"; break;
- case T_WindowFunc: return "WindowFunc"; break;
- case T_SubscriptingRef: return "SubscriptingRef"; break;
- case T_FuncExpr: return "FuncExpr"; break;
- case T_NamedArgExpr: return "NamedArgExpr"; break;
- case T_OpExpr: return "OpExpr"; break;
- case T_DistinctExpr: return "DistinctExpr"; break;
- case T_NullIfExpr: return "NullIfExpr"; break;
- case T_ScalarArrayOpExpr: return "ScalarArrayOpExpr"; break;
- case T_BoolExpr: return "BoolExpr"; break;
- case T_SubLink: return "SubLink"; break;
- case T_SubPlan: return "SubPlan"; break;
- case T_AlternativeSubPlan: return "AlternativeSubPlan"; break;
- case T_FieldSelect: return "FieldSelect"; break;
- case T_FieldStore: return "FieldStore"; break;
- case T_RelabelType: return "RelabelType"; break;
- case T_CoerceViaIO: return "CoerceViaIO"; break;
- case T_ArrayCoerceExpr: return "ArrayCoerceExpr"; break;
- case T_ConvertRowtypeExpr: return "ConvertRowtypeExpr"; break;
- case T_CollateExpr: return "CollateExpr"; break;
- case T_CaseExpr: return "CaseExpr"; break;
- case T_CaseWhen: return "CaseWhen"; break;
- case T_CaseTestExpr: return "CaseTestExpr"; break;
- case T_ArrayExpr: return "ArrayExpr"; break;
- case T_RowExpr: return "RowExpr"; break;
- case T_RowCompareExpr: return "RowCompareExpr"; break;
- case T_CoalesceExpr: return "CoalesceExpr"; break;
- case T_MinMaxExpr: return "MinMaxExpr"; break;
- case T_SQLValueFunction: return "SQLValueFunction"; break;
- case T_XmlExpr: return "XmlExpr"; break;
- case T_NullTest: return "NullTest"; break;
- case T_BooleanTest: return "BooleanTest"; break;
- case T_CoerceToDomain: return "CoerceToDomain"; break;
- case T_CoerceToDomainValue: return "CoerceToDomainValue"; break;
- case T_SetToDefault: return "SetToDefault"; break;
- case T_CurrentOfExpr: return "CurrentOfExpr"; break;
- case T_NextValueExpr: return "NextValueExpr"; break;
- case T_InferenceElem: return "InferenceElem"; break;
- case T_TargetEntry: return "TargetEntry"; break;
- case T_RangeTblRef: return "RangeTblRef"; break;
- case T_JoinExpr: return "JoinExpr"; break;
- case T_FromExpr: return "FromExpr"; break;
- case T_OnConflictExpr: return "OnConflictExpr"; break;
- case T_IntoClause: return "IntoClause"; break;
- case T_ExprState: return "ExprState"; break;
- case T_WindowFuncExprState: return "WindowFuncExprState"; break;
- case T_SetExprState: return "SetExprState"; break;
- case T_SubPlanState: return "SubPlanState"; break;
- case T_DomainConstraintState: return "DomainConstraintState"; break;
- case T_PlannerInfo: return "PlannerInfo"; break;
- case T_PlannerGlobal: return "PlannerGlobal"; break;
- case T_RelOptInfo: return "RelOptInfo"; break;
- case T_IndexOptInfo: return "IndexOptInfo"; break;
- case T_ForeignKeyOptInfo: return "ForeignKeyOptInfo"; break;
- case T_ParamPathInfo: return "ParamPathInfo"; break;
- case T_Path: return "Path"; break;
- case T_IndexPath: return "IndexPath"; break;
- case T_BitmapHeapPath: return "BitmapHeapPath"; break;
- case T_BitmapAndPath: return "BitmapAndPath"; break;
- case T_BitmapOrPath: return "BitmapOrPath"; break;
- case T_TidPath: return "TidPath"; break;
- case T_TidRangePath: return "TidRangePath"; break;
- case T_SubqueryScanPath: return "SubqueryScanPath"; break;
- case T_ForeignPath: return "ForeignPath"; break;
- case T_CustomPath: return "CustomPath"; break;
- case T_NestPath: return "NestPath"; break;
- case T_MergePath: return "MergePath"; break;
- case T_HashPath: return "HashPath"; break;
- case T_AppendPath: return "AppendPath"; break;
- case T_MergeAppendPath: return "MergeAppendPath"; break;
- case T_GroupResultPath: return "GroupResultPath"; break;
- case T_MaterialPath: return "MaterialPath"; break;
- case T_MemoizePath: return "MemoizePath"; break;
- case T_UniquePath: return "UniquePath"; break;
- case T_GatherPath: return "GatherPath"; break;
- case T_GatherMergePath: return "GatherMergePath"; break;
- case T_ProjectionPath: return "ProjectionPath"; break;
- case T_ProjectSetPath: return "ProjectSetPath"; break;
- case T_SortPath: return "SortPath"; break;
- case T_IncrementalSortPath: return "IncrementalSortPath"; break;
- case T_GroupPath: return "GroupPath"; break;
- case T_UpperUniquePath: return "UpperUniquePath"; break;
- case T_AggPath: return "AggPath"; break;
- case T_GroupingSetsPath: return "GroupingSetsPath"; break;
- case T_MinMaxAggPath: return "MinMaxAggPath"; break;
- case T_WindowAggPath: return "WindowAggPath"; break;
- case T_SetOpPath: return "SetOpPath"; break;
- case T_RecursiveUnionPath: return "RecursiveUnionPath"; break;
- case T_LockRowsPath: return "LockRowsPath"; break;
- case T_ModifyTablePath: return "ModifyTablePath"; break;
- case T_LimitPath: return "LimitPath"; break;
- case T_EquivalenceClass: return "EquivalenceClass"; break;
- case T_EquivalenceMember: return "EquivalenceMember"; break;
- case T_PathKey: return "PathKey"; break;
- case T_PathTarget: return "PathTarget"; break;
- case T_RestrictInfo: return "RestrictInfo"; break;
- case T_IndexClause: return "IndexClause"; break;
- case T_PlaceHolderVar: return "PlaceHolderVar"; break;
- case T_SpecialJoinInfo: return "SpecialJoinInfo"; break;
- case T_AppendRelInfo: return "AppendRelInfo"; break;
- case T_RowIdentityVarInfo: return "RowIdentityVarInfo"; break;
- case T_PlaceHolderInfo: return "PlaceHolderInfo"; break;
- case T_MinMaxAggInfo: return "MinMaxAggInfo"; break;
- case T_PlannerParamItem: return "PlannerParamItem"; break;
- case T_RollupData: return "RollupData"; break;
- case T_GroupingSetData: return "GroupingSetData"; break;
- case T_StatisticExtInfo: return "StatisticExtInfo"; break;
- case T_AllocSetContext: return "AllocSetContext"; break;
- case T_SlabContext: return "SlabContext"; break;
- case T_GenerationContext: return "GenerationContext"; break;
- case T_Integer: return "Integer"; break;
- case T_Float: return "Float"; break;
- case T_Boolean: return "Boolean"; break;
- case T_String: return "String"; break;
- case T_BitString: return "BitString"; break;
- case T_List: return "List"; break;
- case T_IntList: return "IntList"; break;
- case T_OidList: return "OidList"; break;
- case T_ExtensibleNode: return "ExtensibleNode"; break;
- case T_RawStmt: return "RawStmt"; break;
- case T_Query: return "Query"; break;
- case T_PlannedStmt: return "PlannedStmt"; break;
- case T_InsertStmt: return "InsertStmt"; break;
- case T_DeleteStmt: return "DeleteStmt"; break;
- case T_UpdateStmt: return "UpdateStmt"; break;
- case T_SelectStmt: return "SelectStmt"; break;
- case T_ReturnStmt: return "ReturnStmt"; break;
- case T_PLAssignStmt: return "PLAssignStmt"; break;
- case T_AlterTableStmt: return "AlterTableStmt"; break;
- case T_AlterTableCmd: return "AlterTableCmd"; break;
- case T_AlterDomainStmt: return "AlterDomainStmt"; break;
- case T_SetOperationStmt: return "SetOperationStmt"; break;
- case T_GrantStmt: return "GrantStmt"; break;
- case T_GrantRoleStmt: return "GrantRoleStmt"; break;
- case T_AlterDefaultPrivilegesStmt: return "AlterDefaultPrivilegesStmt"; break;
- case T_ClosePortalStmt: return "ClosePortalStmt"; break;
- case T_ClusterStmt: return "ClusterStmt"; break;
- case T_CopyStmt: return "CopyStmt"; break;
- case T_CreateStmt: return "CreateStmt"; break;
- case T_DefineStmt: return "DefineStmt"; break;
- case T_DropStmt: return "DropStmt"; break;
- case T_TruncateStmt: return "TruncateStmt"; break;
- case T_CommentStmt: return "CommentStmt"; break;
- case T_FetchStmt: return "FetchStmt"; break;
- case T_IndexStmt: return "IndexStmt"; break;
- case T_CreateFunctionStmt: return "CreateFunctionStmt"; break;
- case T_AlterFunctionStmt: return "AlterFunctionStmt"; break;
- case T_DoStmt: return "DoStmt"; break;
- case T_RenameStmt: return "RenameStmt"; break;
- case T_RuleStmt: return "RuleStmt"; break;
- case T_NotifyStmt: return "NotifyStmt"; break;
- case T_ListenStmt: return "ListenStmt"; break;
- case T_UnlistenStmt: return "UnlistenStmt"; break;
- case T_TransactionStmt: return "TransactionStmt"; break;
- case T_ViewStmt: return "ViewStmt"; break;
- case T_LoadStmt: return "LoadStmt"; break;
- case T_CreateDomainStmt: return "CreateDomainStmt"; break;
- case T_CreatedbStmt: return "CreatedbStmt"; break;
- case T_DropdbStmt: return "DropdbStmt"; break;
- case T_VacuumStmt: return "VacuumStmt"; break;
- case T_ExplainStmt: return "ExplainStmt"; break;
- case T_CreateTableAsStmt: return "CreateTableAsStmt"; break;
- case T_CreateSeqStmt: return "CreateSeqStmt"; break;
- case T_AlterSeqStmt: return "AlterSeqStmt"; break;
- case T_VariableSetStmt: return "VariableSetStmt"; break;
- case T_VariableShowStmt: return "VariableShowStmt"; break;
- case T_DiscardStmt: return "DiscardStmt"; break;
- case T_CreateTrigStmt: return "CreateTrigStmt"; break;
- case T_CreatePLangStmt: return "CreatePLangStmt"; break;
- case T_CreateRoleStmt: return "CreateRoleStmt"; break;
- case T_AlterRoleStmt: return "AlterRoleStmt"; break;
- case T_DropRoleStmt: return "DropRoleStmt"; break;
- case T_LockStmt: return "LockStmt"; break;
- case T_ConstraintsSetStmt: return "ConstraintsSetStmt"; break;
- case T_ReindexStmt: return "ReindexStmt"; break;
- case T_CheckPointStmt: return "CheckPointStmt"; break;
- case T_CreateSchemaStmt: return "CreateSchemaStmt"; break;
- case T_AlterDatabaseStmt: return "AlterDatabaseStmt"; break;
- case T_AlterDatabaseRefreshCollStmt: return "AlterDatabaseRefreshCollStmt"; break;
- case T_AlterDatabaseSetStmt: return "AlterDatabaseSetStmt"; break;
- case T_AlterRoleSetStmt: return "AlterRoleSetStmt"; break;
- case T_CreateConversionStmt: return "CreateConversionStmt"; break;
- case T_CreateCastStmt: return "CreateCastStmt"; break;
- case T_CreateOpClassStmt: return "CreateOpClassStmt"; break;
- case T_CreateOpFamilyStmt: return "CreateOpFamilyStmt"; break;
- case T_AlterOpFamilyStmt: return "AlterOpFamilyStmt"; break;
- case T_PrepareStmt: return "PrepareStmt"; break;
- case T_ExecuteStmt: return "ExecuteStmt"; break;
- case T_DeallocateStmt: return "DeallocateStmt"; break;
- case T_DeclareCursorStmt: return "DeclareCursorStmt"; break;
- case T_CreateTableSpaceStmt: return "CreateTableSpaceStmt"; break;
- case T_DropTableSpaceStmt: return "DropTableSpaceStmt"; break;
- case T_AlterObjectDependsStmt: return "AlterObjectDependsStmt"; break;
- case T_AlterObjectSchemaStmt: return "AlterObjectSchemaStmt"; break;
- case T_AlterOwnerStmt: return "AlterOwnerStmt"; break;
- case T_AlterOperatorStmt: return "AlterOperatorStmt"; break;
- case T_AlterTypeStmt: return "AlterTypeStmt"; break;
- case T_DropOwnedStmt: return "DropOwnedStmt"; break;
- case T_ReassignOwnedStmt: return "ReassignOwnedStmt"; break;
- case T_CompositeTypeStmt: return "CompositeTypeStmt"; break;
- case T_CreateEnumStmt: return "CreateEnumStmt"; break;
- case T_CreateRangeStmt: return "CreateRangeStmt"; break;
- case T_AlterEnumStmt: return "AlterEnumStmt"; break;
- case T_AlterTSDictionaryStmt: return "AlterTSDictionaryStmt"; break;
- case T_AlterTSConfigurationStmt: return "AlterTSConfigurationStmt"; break;
- case T_CreateFdwStmt: return "CreateFdwStmt"; break;
- case T_AlterFdwStmt: return "AlterFdwStmt"; break;
- case T_CreateForeignServerStmt: return "CreateForeignServerStmt"; break;
- case T_AlterForeignServerStmt: return "AlterForeignServerStmt"; break;
- case T_CreateUserMappingStmt: return "CreateUserMappingStmt"; break;
- case T_AlterUserMappingStmt: return "AlterUserMappingStmt"; break;
- case T_DropUserMappingStmt: return "DropUserMappingStmt"; break;
- case T_AlterTableSpaceOptionsStmt: return "AlterTableSpaceOptionsStmt"; break;
- case T_AlterTableMoveAllStmt: return "AlterTableMoveAllStmt"; break;
- case T_SecLabelStmt: return "SecLabelStmt"; break;
- case T_CreateForeignTableStmt: return "CreateForeignTableStmt"; break;
- case T_ImportForeignSchemaStmt: return "ImportForeignSchemaStmt"; break;
- case T_CreateExtensionStmt: return "CreateExtensionStmt"; break;
- case T_AlterExtensionStmt: return "AlterExtensionStmt"; break;
- case T_AlterExtensionContentsStmt: return "AlterExtensionContentsStmt"; break;
- case T_CreateEventTrigStmt: return "CreateEventTrigStmt"; break;
- case T_AlterEventTrigStmt: return "AlterEventTrigStmt"; break;
- case T_RefreshMatViewStmt: return "RefreshMatViewStmt"; break;
- case T_ReplicaIdentityStmt: return "ReplicaIdentityStmt"; break;
- case T_AlterSystemStmt: return "AlterSystemStmt"; break;
- case T_CreatePolicyStmt: return "CreatePolicyStmt"; break;
- case T_AlterPolicyStmt: return "AlterPolicyStmt"; break;
- case T_CreateTransformStmt: return "CreateTransformStmt"; break;
- case T_CreateAmStmt: return "CreateAmStmt"; break;
- case T_CreatePublicationStmt: return "CreatePublicationStmt"; break;
- case T_AlterPublicationStmt: return "AlterPublicationStmt"; break;
- case T_CreateSubscriptionStmt: return "CreateSubscriptionStmt"; break;
- case T_AlterSubscriptionStmt: return "AlterSubscriptionStmt"; break;
- case T_DropSubscriptionStmt: return "DropSubscriptionStmt"; break;
- case T_CreateStatsStmt: return "CreateStatsStmt"; break;
- case T_AlterCollationStmt: return "AlterCollationStmt"; break;
- case T_CallStmt: return "CallStmt"; break;
- case T_AlterStatsStmt: return "AlterStatsStmt"; break;
- case T_A_Expr: return "A_Expr"; break;
- case T_ColumnRef: return "ColumnRef"; break;
- case T_ParamRef: return "ParamRef"; break;
- case T_A_Const: return "A_Const"; break;
- case T_FuncCall: return "FuncCall"; break;
- case T_A_Star: return "A_Star"; break;
- case T_A_Indices: return "A_Indices"; break;
- case T_A_Indirection: return "A_Indirection"; break;
- case T_A_ArrayExpr: return "A_ArrayExpr"; break;
- case T_ResTarget: return "ResTarget"; break;
- case T_MultiAssignRef: return "MultiAssignRef"; break;
- case T_TypeCast: return "TypeCast"; break;
- case T_CollateClause: return "CollateClause"; break;
- case T_SortBy: return "SortBy"; break;
- case T_WindowDef: return "WindowDef"; break;
- case T_RangeSubselect: return "RangeSubselect"; break;
- case T_RangeFunction: return "RangeFunction"; break;
- case T_RangeTableSample: return "RangeTableSample"; break;
- case T_RangeTableFunc: return "RangeTableFunc"; break;
- case T_RangeTableFuncCol: return "RangeTableFuncCol"; break;
- case T_TypeName: return "TypeName"; break;
- case T_ColumnDef: return "ColumnDef"; break;
- case T_IndexElem: return "IndexElem"; break;
- case T_StatsElem: return "StatsElem"; break;
- case T_Constraint: return "Constraint"; break;
- case T_DefElem: return "DefElem"; break;
- case T_RangeTblEntry: return "RangeTblEntry"; break;
- case T_RangeTblFunction: return "RangeTblFunction"; break;
- case T_TableSampleClause: return "TableSampleClause"; break;
- case T_WithCheckOption: return "WithCheckOption"; break;
- case T_SortGroupClause: return "SortGroupClause"; break;
- case T_GroupingSet: return "GroupingSet"; break;
- case T_WindowClause: return "WindowClause"; break;
- case T_ObjectWithArgs: return "ObjectWithArgs"; break;
- case T_AccessPriv: return "AccessPriv"; break;
- case T_CreateOpClassItem: return "CreateOpClassItem"; break;
- case T_TableLikeClause: return "TableLikeClause"; break;
- case T_FunctionParameter: return "FunctionParameter"; break;
- case T_LockingClause: return "LockingClause"; break;
- case T_RowMarkClause: return "RowMarkClause"; break;
- case T_XmlSerialize: return "XmlSerialize"; break;
- case T_WithClause: return "WithClause"; break;
- case T_InferClause: return "InferClause"; break;
- case T_OnConflictClause: return "OnConflictClause"; break;
- case T_CTESearchClause: return "CTESearchClause"; break;
- case T_CTECycleClause: return "CTECycleClause"; break;
- case T_CommonTableExpr: return "CommonTableExpr"; break;
- case T_RoleSpec: return "RoleSpec"; break;
- case T_TriggerTransition: return "TriggerTransition"; break;
- case T_PartitionElem: return "PartitionElem"; break;
- case T_PartitionSpec: return "PartitionSpec"; break;
- case T_PartitionBoundSpec: return "PartitionBoundSpec"; break;
- case T_PartitionRangeDatum: return "PartitionRangeDatum"; break;
- case T_PartitionCmd: return "PartitionCmd"; break;
- case T_VacuumRelation: return "VacuumRelation"; break;
- case T_PublicationObjSpec: return "PublicationObjSpec"; break;
- case T_PublicationTable: return "PublicationTable"; break;
- case T_IdentifySystemCmd: return "IdentifySystemCmd"; break;
- case T_BaseBackupCmd: return "BaseBackupCmd"; break;
- case T_CreateReplicationSlotCmd: return "CreateReplicationSlotCmd"; break;
- case T_DropReplicationSlotCmd: return "DropReplicationSlotCmd"; break;
- case T_ReadReplicationSlotCmd: return "ReadReplicationSlotCmd"; break;
- case T_StartReplicationCmd: return "StartReplicationCmd"; break;
- case T_TimeLineHistoryCmd: return "TimeLineHistoryCmd"; break;
- case T_TriggerData: return "TriggerData"; break;
- case T_EventTriggerData: return "EventTriggerData"; break;
- case T_ReturnSetInfo: return "ReturnSetInfo"; break;
- case T_WindowObjectData: return "WindowObjectData"; break;
- case T_TIDBitmap: return "TIDBitmap"; break;
- case T_InlineCodeBlock: return "InlineCodeBlock"; break;
- case T_FdwRoutine: return "FdwRoutine"; break;
- case T_IndexAmRoutine: return "IndexAmRoutine"; break;
- case T_TableAmRoutine: return "TableAmRoutine"; break;
- case T_TsmRoutine: return "TsmRoutine"; break;
- case T_ForeignKeyCacheInfo: return "ForeignKeyCacheInfo"; break;
- case T_CallContext: return "CallContext"; break;
- case T_SupportRequestSimplify: return "SupportRequestSimplify"; break;
- case T_SupportRequestSelectivity: return "SupportRequestSelectivity"; break;
- case T_SupportRequestCost: return "SupportRequestCost"; break;
- case T_SupportRequestRows: return "SupportRequestRows"; break;
- case T_SupportRequestIndexCondition: return "SupportRequestIndexCondition"; break;
+ case T_Invalid:
+ return "Invalid";
+ break;
+ case T_IndexInfo:
+ return "IndexInfo";
+ break;
+ case T_ExprContext:
+ return "ExprContext";
+ break;
+ case T_ProjectionInfo:
+ return "ProjectionInfo";
+ break;
+ case T_JunkFilter:
+ return "JunkFilter";
+ break;
+ case T_OnConflictSetState:
+ return "OnConflictSetState";
+ break;
+ case T_ResultRelInfo:
+ return "ResultRelInfo";
+ break;
+ case T_EState:
+ return "EState";
+ break;
+ case T_TupleTableSlot:
+ return "TupleTableSlot";
+ break;
+ case T_Plan:
+ return "Plan";
+ break;
+ case T_Result:
+ return "Result";
+ break;
+ case T_ProjectSet:
+ return "ProjectSet";
+ break;
+ case T_ModifyTable:
+ return "ModifyTable";
+ break;
+ case T_Append:
+ return "Append";
+ break;
+ case T_MergeAppend:
+ return "MergeAppend";
+ break;
+ case T_RecursiveUnion:
+ return "RecursiveUnion";
+ break;
+ case T_BitmapAnd:
+ return "BitmapAnd";
+ break;
+ case T_BitmapOr:
+ return "BitmapOr";
+ break;
+ case T_Scan:
+ return "Scan";
+ break;
+ case T_SeqScan:
+ return "SeqScan";
+ break;
+ case T_SampleScan:
+ return "SampleScan";
+ break;
+ case T_IndexScan:
+ return "IndexScan";
+ break;
+ case T_IndexOnlyScan:
+ return "IndexOnlyScan";
+ break;
+ case T_BitmapIndexScan:
+ return "BitmapIndexScan";
+ break;
+ case T_BitmapHeapScan:
+ return "BitmapHeapScan";
+ break;
+ case T_TidScan:
+ return "TidScan";
+ break;
+ case T_TidRangeScan:
+ return "TidRangeScan";
+ break;
+ case T_SubqueryScan:
+ return "SubqueryScan";
+ break;
+ case T_FunctionScan:
+ return "FunctionScan";
+ break;
+ case T_ValuesScan:
+ return "ValuesScan";
+ break;
+ case T_TableFuncScan:
+ return "TableFuncScan";
+ break;
+ case T_CteScan:
+ return "CteScan";
+ break;
+ case T_NamedTuplestoreScan:
+ return "NamedTuplestoreScan";
+ break;
+ case T_WorkTableScan:
+ return "WorkTableScan";
+ break;
+ case T_ForeignScan:
+ return "ForeignScan";
+ break;
+ case T_CustomScan:
+ return "CustomScan";
+ break;
+ case T_Join:
+ return "Join";
+ break;
+ case T_NestLoop:
+ return "NestLoop";
+ break;
+ case T_MergeJoin:
+ return "MergeJoin";
+ break;
+ case T_HashJoin:
+ return "HashJoin";
+ break;
+ case T_Material:
+ return "Material";
+ break;
+ case T_Memoize:
+ return "Memoize";
+ break;
+ case T_Sort:
+ return "Sort";
+ break;
+ case T_IncrementalSort:
+ return "IncrementalSort";
+ break;
+ case T_Group:
+ return "Group";
+ break;
+ case T_Agg:
+ return "Agg";
+ break;
+ case T_WindowAgg:
+ return "WindowAgg";
+ break;
+ case T_Unique:
+ return "Unique";
+ break;
+ case T_Gather:
+ return "Gather";
+ break;
+ case T_GatherMerge:
+ return "GatherMerge";
+ break;
+ case T_Hash:
+ return "Hash";
+ break;
+ case T_SetOp:
+ return "SetOp";
+ break;
+ case T_LockRows:
+ return "LockRows";
+ break;
+ case T_Limit:
+ return "Limit";
+ break;
+ case T_NestLoopParam:
+ return "NestLoopParam";
+ break;
+ case T_PlanRowMark:
+ return "PlanRowMark";
+ break;
+ case T_PartitionPruneInfo:
+ return "PartitionPruneInfo";
+ break;
+ case T_PartitionedRelPruneInfo:
+ return "PartitionedRelPruneInfo";
+ break;
+ case T_PartitionPruneStepOp:
+ return "PartitionPruneStepOp";
+ break;
+ case T_PartitionPruneStepCombine:
+ return "PartitionPruneStepCombine";
+ break;
+ case T_PlanInvalItem:
+ return "PlanInvalItem";
+ break;
+ case T_PlanState:
+ return "PlanState";
+ break;
+ case T_ResultState:
+ return "ResultState";
+ break;
+ case T_ProjectSetState:
+ return "ProjectSetState";
+ break;
+ case T_ModifyTableState:
+ return "ModifyTableState";
+ break;
+ case T_AppendState:
+ return "AppendState";
+ break;
+ case T_MergeAppendState:
+ return "MergeAppendState";
+ break;
+ case T_RecursiveUnionState:
+ return "RecursiveUnionState";
+ break;
+ case T_BitmapAndState:
+ return "BitmapAndState";
+ break;
+ case T_BitmapOrState:
+ return "BitmapOrState";
+ break;
+ case T_ScanState:
+ return "ScanState";
+ break;
+ case T_SeqScanState:
+ return "SeqScanState";
+ break;
+ case T_SampleScanState:
+ return "SampleScanState";
+ break;
+ case T_IndexScanState:
+ return "IndexScanState";
+ break;
+ case T_IndexOnlyScanState:
+ return "IndexOnlyScanState";
+ break;
+ case T_BitmapIndexScanState:
+ return "BitmapIndexScanState";
+ break;
+ case T_BitmapHeapScanState:
+ return "BitmapHeapScanState";
+ break;
+ case T_TidScanState:
+ return "TidScanState";
+ break;
+ case T_TidRangeScanState:
+ return "TidRangeScanState";
+ break;
+ case T_SubqueryScanState:
+ return "SubqueryScanState";
+ break;
+ case T_FunctionScanState:
+ return "FunctionScanState";
+ break;
+ case T_TableFuncScanState:
+ return "TableFuncScanState";
+ break;
+ case T_ValuesScanState:
+ return "ValuesScanState";
+ break;
+ case T_CteScanState:
+ return "CteScanState";
+ break;
+ case T_NamedTuplestoreScanState:
+ return "NamedTuplestoreScanState";
+ break;
+ case T_WorkTableScanState:
+ return "WorkTableScanState";
+ break;
+ case T_ForeignScanState:
+ return "ForeignScanState";
+ break;
+ case T_CustomScanState:
+ return "CustomScanState";
+ break;
+ case T_JoinState:
+ return "JoinState";
+ break;
+ case T_NestLoopState:
+ return "NestLoopState";
+ break;
+ case T_MergeJoinState:
+ return "MergeJoinState";
+ break;
+ case T_HashJoinState:
+ return "HashJoinState";
+ break;
+ case T_MaterialState:
+ return "MaterialState";
+ break;
+ case T_MemoizeState:
+ return "MemoizeState";
+ break;
+ case T_SortState:
+ return "SortState";
+ break;
+ case T_IncrementalSortState:
+ return "IncrementalSortState";
+ break;
+ case T_GroupState:
+ return "GroupState";
+ break;
+ case T_AggState:
+ return "AggState";
+ break;
+ case T_WindowAggState:
+ return "WindowAggState";
+ break;
+ case T_UniqueState:
+ return "UniqueState";
+ break;
+ case T_GatherState:
+ return "GatherState";
+ break;
+ case T_GatherMergeState:
+ return "GatherMergeState";
+ break;
+ case T_HashState:
+ return "HashState";
+ break;
+ case T_SetOpState:
+ return "SetOpState";
+ break;
+ case T_LockRowsState:
+ return "LockRowsState";
+ break;
+ case T_LimitState:
+ return "LimitState";
+ break;
+ case T_Alias:
+ return "Alias";
+ break;
+ case T_RangeVar:
+ return "RangeVar";
+ break;
+ case T_TableFunc:
+ return "TableFunc";
+ break;
+ case T_Var:
+ return "Var";
+ break;
+ case T_Const:
+ return "Const";
+ break;
+ case T_Param:
+ return "Param";
+ break;
+ case T_Aggref:
+ return "Aggref";
+ break;
+ case T_GroupingFunc:
+ return "GroupingFunc";
+ break;
+ case T_WindowFunc:
+ return "WindowFunc";
+ break;
+ case T_SubscriptingRef:
+ return "SubscriptingRef";
+ break;
+ case T_FuncExpr:
+ return "FuncExpr";
+ break;
+ case T_NamedArgExpr:
+ return "NamedArgExpr";
+ break;
+ case T_OpExpr:
+ return "OpExpr";
+ break;
+ case T_DistinctExpr:
+ return "DistinctExpr";
+ break;
+ case T_NullIfExpr:
+ return "NullIfExpr";
+ break;
+ case T_ScalarArrayOpExpr:
+ return "ScalarArrayOpExpr";
+ break;
+ case T_BoolExpr:
+ return "BoolExpr";
+ break;
+ case T_SubLink:
+ return "SubLink";
+ break;
+ case T_SubPlan:
+ return "SubPlan";
+ break;
+ case T_AlternativeSubPlan:
+ return "AlternativeSubPlan";
+ break;
+ case T_FieldSelect:
+ return "FieldSelect";
+ break;
+ case T_FieldStore:
+ return "FieldStore";
+ break;
+ case T_RelabelType:
+ return "RelabelType";
+ break;
+ case T_CoerceViaIO:
+ return "CoerceViaIO";
+ break;
+ case T_ArrayCoerceExpr:
+ return "ArrayCoerceExpr";
+ break;
+ case T_ConvertRowtypeExpr:
+ return "ConvertRowtypeExpr";
+ break;
+ case T_CollateExpr:
+ return "CollateExpr";
+ break;
+ case T_CaseExpr:
+ return "CaseExpr";
+ break;
+ case T_CaseWhen:
+ return "CaseWhen";
+ break;
+ case T_CaseTestExpr:
+ return "CaseTestExpr";
+ break;
+ case T_ArrayExpr:
+ return "ArrayExpr";
+ break;
+ case T_RowExpr:
+ return "RowExpr";
+ break;
+ case T_RowCompareExpr:
+ return "RowCompareExpr";
+ break;
+ case T_CoalesceExpr:
+ return "CoalesceExpr";
+ break;
+ case T_MinMaxExpr:
+ return "MinMaxExpr";
+ break;
+ case T_SQLValueFunction:
+ return "SQLValueFunction";
+ break;
+ case T_XmlExpr:
+ return "XmlExpr";
+ break;
+ case T_NullTest:
+ return "NullTest";
+ break;
+ case T_BooleanTest:
+ return "BooleanTest";
+ break;
+ case T_CoerceToDomain:
+ return "CoerceToDomain";
+ break;
+ case T_CoerceToDomainValue:
+ return "CoerceToDomainValue";
+ break;
+ case T_SetToDefault:
+ return "SetToDefault";
+ break;
+ case T_CurrentOfExpr:
+ return "CurrentOfExpr";
+ break;
+ case T_NextValueExpr:
+ return "NextValueExpr";
+ break;
+ case T_InferenceElem:
+ return "InferenceElem";
+ break;
+ case T_TargetEntry:
+ return "TargetEntry";
+ break;
+ case T_RangeTblRef:
+ return "RangeTblRef";
+ break;
+ case T_JoinExpr:
+ return "JoinExpr";
+ break;
+ case T_FromExpr:
+ return "FromExpr";
+ break;
+ case T_OnConflictExpr:
+ return "OnConflictExpr";
+ break;
+ case T_IntoClause:
+ return "IntoClause";
+ break;
+ case T_ExprState:
+ return "ExprState";
+ break;
+ case T_WindowFuncExprState:
+ return "WindowFuncExprState";
+ break;
+ case T_SetExprState:
+ return "SetExprState";
+ break;
+ case T_SubPlanState:
+ return "SubPlanState";
+ break;
+ case T_DomainConstraintState:
+ return "DomainConstraintState";
+ break;
+ case T_PlannerInfo:
+ return "PlannerInfo";
+ break;
+ case T_PlannerGlobal:
+ return "PlannerGlobal";
+ break;
+ case T_RelOptInfo:
+ return "RelOptInfo";
+ break;
+ case T_IndexOptInfo:
+ return "IndexOptInfo";
+ break;
+ case T_ForeignKeyOptInfo:
+ return "ForeignKeyOptInfo";
+ break;
+ case T_ParamPathInfo:
+ return "ParamPathInfo";
+ break;
+ case T_Path:
+ return "Path";
+ break;
+ case T_IndexPath:
+ return "IndexPath";
+ break;
+ case T_BitmapHeapPath:
+ return "BitmapHeapPath";
+ break;
+ case T_BitmapAndPath:
+ return "BitmapAndPath";
+ break;
+ case T_BitmapOrPath:
+ return "BitmapOrPath";
+ break;
+ case T_TidPath:
+ return "TidPath";
+ break;
+ case T_TidRangePath:
+ return "TidRangePath";
+ break;
+ case T_SubqueryScanPath:
+ return "SubqueryScanPath";
+ break;
+ case T_ForeignPath:
+ return "ForeignPath";
+ break;
+ case T_CustomPath:
+ return "CustomPath";
+ break;
+ case T_NestPath:
+ return "NestPath";
+ break;
+ case T_MergePath:
+ return "MergePath";
+ break;
+ case T_HashPath:
+ return "HashPath";
+ break;
+ case T_AppendPath:
+ return "AppendPath";
+ break;
+ case T_MergeAppendPath:
+ return "MergeAppendPath";
+ break;
+ case T_GroupResultPath:
+ return "GroupResultPath";
+ break;
+ case T_MaterialPath:
+ return "MaterialPath";
+ break;
+ case T_MemoizePath:
+ return "MemoizePath";
+ break;
+ case T_UniquePath:
+ return "UniquePath";
+ break;
+ case T_GatherPath:
+ return "GatherPath";
+ break;
+ case T_GatherMergePath:
+ return "GatherMergePath";
+ break;
+ case T_ProjectionPath:
+ return "ProjectionPath";
+ break;
+ case T_ProjectSetPath:
+ return "ProjectSetPath";
+ break;
+ case T_SortPath:
+ return "SortPath";
+ break;
+ case T_IncrementalSortPath:
+ return "IncrementalSortPath";
+ break;
+ case T_GroupPath:
+ return "GroupPath";
+ break;
+ case T_UpperUniquePath:
+ return "UpperUniquePath";
+ break;
+ case T_AggPath:
+ return "AggPath";
+ break;
+ case T_GroupingSetsPath:
+ return "GroupingSetsPath";
+ break;
+ case T_MinMaxAggPath:
+ return "MinMaxAggPath";
+ break;
+ case T_WindowAggPath:
+ return "WindowAggPath";
+ break;
+ case T_SetOpPath:
+ return "SetOpPath";
+ break;
+ case T_RecursiveUnionPath:
+ return "RecursiveUnionPath";
+ break;
+ case T_LockRowsPath:
+ return "LockRowsPath";
+ break;
+ case T_ModifyTablePath:
+ return "ModifyTablePath";
+ break;
+ case T_LimitPath:
+ return "LimitPath";
+ break;
+ case T_EquivalenceClass:
+ return "EquivalenceClass";
+ break;
+ case T_EquivalenceMember:
+ return "EquivalenceMember";
+ break;
+ case T_PathKey:
+ return "PathKey";
+ break;
+ case T_PathTarget:
+ return "PathTarget";
+ break;
+ case T_RestrictInfo:
+ return "RestrictInfo";
+ break;
+ case T_IndexClause:
+ return "IndexClause";
+ break;
+ case T_PlaceHolderVar:
+ return "PlaceHolderVar";
+ break;
+ case T_SpecialJoinInfo:
+ return "SpecialJoinInfo";
+ break;
+ case T_AppendRelInfo:
+ return "AppendRelInfo";
+ break;
+ case T_RowIdentityVarInfo:
+ return "RowIdentityVarInfo";
+ break;
+ case T_PlaceHolderInfo:
+ return "PlaceHolderInfo";
+ break;
+ case T_MinMaxAggInfo:
+ return "MinMaxAggInfo";
+ break;
+ case T_PlannerParamItem:
+ return "PlannerParamItem";
+ break;
+ case T_RollupData:
+ return "RollupData";
+ break;
+ case T_GroupingSetData:
+ return "GroupingSetData";
+ break;
+ case T_StatisticExtInfo:
+ return "StatisticExtInfo";
+ break;
+ case T_AllocSetContext:
+ return "AllocSetContext";
+ break;
+ case T_SlabContext:
+ return "SlabContext";
+ break;
+ case T_GenerationContext:
+ return "GenerationContext";
+ break;
+ case T_Integer:
+ return "Integer";
+ break;
+ case T_Float:
+ return "Float";
+ break;
+ case T_Boolean:
+ return "Boolean";
+ break;
+ case T_String:
+ return "String";
+ break;
+ case T_BitString:
+ return "BitString";
+ break;
+ case T_List:
+ return "List";
+ break;
+ case T_IntList:
+ return "IntList";
+ break;
+ case T_OidList:
+ return "OidList";
+ break;
+ case T_ExtensibleNode:
+ return "ExtensibleNode";
+ break;
+ case T_RawStmt:
+ return "RawStmt";
+ break;
+ case T_Query:
+ return "Query";
+ break;
+ case T_PlannedStmt:
+ return "PlannedStmt";
+ break;
+ case T_InsertStmt:
+ return "InsertStmt";
+ break;
+ case T_DeleteStmt:
+ return "DeleteStmt";
+ break;
+ case T_UpdateStmt:
+ return "UpdateStmt";
+ break;
+ case T_SelectStmt:
+ return "SelectStmt";
+ break;
+ case T_ReturnStmt:
+ return "ReturnStmt";
+ break;
+ case T_PLAssignStmt:
+ return "PLAssignStmt";
+ break;
+ case T_AlterTableStmt:
+ return "AlterTableStmt";
+ break;
+ case T_AlterTableCmd:
+ return "AlterTableCmd";
+ break;
+ case T_AlterDomainStmt:
+ return "AlterDomainStmt";
+ break;
+ case T_SetOperationStmt:
+ return "SetOperationStmt";
+ break;
+ case T_GrantStmt:
+ return "GrantStmt";
+ break;
+ case T_GrantRoleStmt:
+ return "GrantRoleStmt";
+ break;
+ case T_AlterDefaultPrivilegesStmt:
+ return "AlterDefaultPrivilegesStmt";
+ break;
+ case T_ClosePortalStmt:
+ return "ClosePortalStmt";
+ break;
+ case T_ClusterStmt:
+ return "ClusterStmt";
+ break;
+ case T_CopyStmt:
+ return "CopyStmt";
+ break;
+ case T_CreateStmt:
+ return "CreateStmt";
+ break;
+ case T_DefineStmt:
+ return "DefineStmt";
+ break;
+ case T_DropStmt:
+ return "DropStmt";
+ break;
+ case T_TruncateStmt:
+ return "TruncateStmt";
+ break;
+ case T_CommentStmt:
+ return "CommentStmt";
+ break;
+ case T_FetchStmt:
+ return "FetchStmt";
+ break;
+ case T_IndexStmt:
+ return "IndexStmt";
+ break;
+ case T_CreateFunctionStmt:
+ return "CreateFunctionStmt";
+ break;
+ case T_AlterFunctionStmt:
+ return "AlterFunctionStmt";
+ break;
+ case T_DoStmt:
+ return "DoStmt";
+ break;
+ case T_RenameStmt:
+ return "RenameStmt";
+ break;
+ case T_RuleStmt:
+ return "RuleStmt";
+ break;
+ case T_NotifyStmt:
+ return "NotifyStmt";
+ break;
+ case T_ListenStmt:
+ return "ListenStmt";
+ break;
+ case T_UnlistenStmt:
+ return "UnlistenStmt";
+ break;
+ case T_TransactionStmt:
+ return "TransactionStmt";
+ break;
+ case T_ViewStmt:
+ return "ViewStmt";
+ break;
+ case T_LoadStmt:
+ return "LoadStmt";
+ break;
+ case T_CreateDomainStmt:
+ return "CreateDomainStmt";
+ break;
+ case T_CreatedbStmt:
+ return "CreatedbStmt";
+ break;
+ case T_DropdbStmt:
+ return "DropdbStmt";
+ break;
+ case T_VacuumStmt:
+ return "VacuumStmt";
+ break;
+ case T_ExplainStmt:
+ return "ExplainStmt";
+ break;
+ case T_CreateTableAsStmt:
+ return "CreateTableAsStmt";
+ break;
+ case T_CreateSeqStmt:
+ return "CreateSeqStmt";
+ break;
+ case T_AlterSeqStmt:
+ return "AlterSeqStmt";
+ break;
+ case T_VariableSetStmt:
+ return "VariableSetStmt";
+ break;
+ case T_VariableShowStmt:
+ return "VariableShowStmt";
+ break;
+ case T_DiscardStmt:
+ return "DiscardStmt";
+ break;
+ case T_CreateTrigStmt:
+ return "CreateTrigStmt";
+ break;
+ case T_CreatePLangStmt:
+ return "CreatePLangStmt";
+ break;
+ case T_CreateRoleStmt:
+ return "CreateRoleStmt";
+ break;
+ case T_AlterRoleStmt:
+ return "AlterRoleStmt";
+ break;
+ case T_DropRoleStmt:
+ return "DropRoleStmt";
+ break;
+ case T_LockStmt:
+ return "LockStmt";
+ break;
+ case T_ConstraintsSetStmt:
+ return "ConstraintsSetStmt";
+ break;
+ case T_ReindexStmt:
+ return "ReindexStmt";
+ break;
+ case T_CheckPointStmt:
+ return "CheckPointStmt";
+ break;
+ case T_CreateSchemaStmt:
+ return "CreateSchemaStmt";
+ break;
+ case T_AlterDatabaseStmt:
+ return "AlterDatabaseStmt";
+ break;
+ case T_AlterDatabaseRefreshCollStmt:
+ return "AlterDatabaseRefreshCollStmt";
+ break;
+ case T_AlterDatabaseSetStmt:
+ return "AlterDatabaseSetStmt";
+ break;
+ case T_AlterRoleSetStmt:
+ return "AlterRoleSetStmt";
+ break;
+ case T_CreateConversionStmt:
+ return "CreateConversionStmt";
+ break;
+ case T_CreateCastStmt:
+ return "CreateCastStmt";
+ break;
+ case T_CreateOpClassStmt:
+ return "CreateOpClassStmt";
+ break;
+ case T_CreateOpFamilyStmt:
+ return "CreateOpFamilyStmt";
+ break;
+ case T_AlterOpFamilyStmt:
+ return "AlterOpFamilyStmt";
+ break;
+ case T_PrepareStmt:
+ return "PrepareStmt";
+ break;
+ case T_ExecuteStmt:
+ return "ExecuteStmt";
+ break;
+ case T_DeallocateStmt:
+ return "DeallocateStmt";
+ break;
+ case T_DeclareCursorStmt:
+ return "DeclareCursorStmt";
+ break;
+ case T_CreateTableSpaceStmt:
+ return "CreateTableSpaceStmt";
+ break;
+ case T_DropTableSpaceStmt:
+ return "DropTableSpaceStmt";
+ break;
+ case T_AlterObjectDependsStmt:
+ return "AlterObjectDependsStmt";
+ break;
+ case T_AlterObjectSchemaStmt:
+ return "AlterObjectSchemaStmt";
+ break;
+ case T_AlterOwnerStmt:
+ return "AlterOwnerStmt";
+ break;
+ case T_AlterOperatorStmt:
+ return "AlterOperatorStmt";
+ break;
+ case T_AlterTypeStmt:
+ return "AlterTypeStmt";
+ break;
+ case T_DropOwnedStmt:
+ return "DropOwnedStmt";
+ break;
+ case T_ReassignOwnedStmt:
+ return "ReassignOwnedStmt";
+ break;
+ case T_CompositeTypeStmt:
+ return "CompositeTypeStmt";
+ break;
+ case T_CreateEnumStmt:
+ return "CreateEnumStmt";
+ break;
+ case T_CreateRangeStmt:
+ return "CreateRangeStmt";
+ break;
+ case T_AlterEnumStmt:
+ return "AlterEnumStmt";
+ break;
+ case T_AlterTSDictionaryStmt:
+ return "AlterTSDictionaryStmt";
+ break;
+ case T_AlterTSConfigurationStmt:
+ return "AlterTSConfigurationStmt";
+ break;
+ case T_CreateFdwStmt:
+ return "CreateFdwStmt";
+ break;
+ case T_AlterFdwStmt:
+ return "AlterFdwStmt";
+ break;
+ case T_CreateForeignServerStmt:
+ return "CreateForeignServerStmt";
+ break;
+ case T_AlterForeignServerStmt:
+ return "AlterForeignServerStmt";
+ break;
+ case T_CreateUserMappingStmt:
+ return "CreateUserMappingStmt";
+ break;
+ case T_AlterUserMappingStmt:
+ return "AlterUserMappingStmt";
+ break;
+ case T_DropUserMappingStmt:
+ return "DropUserMappingStmt";
+ break;
+ case T_AlterTableSpaceOptionsStmt:
+ return "AlterTableSpaceOptionsStmt";
+ break;
+ case T_AlterTableMoveAllStmt:
+ return "AlterTableMoveAllStmt";
+ break;
+ case T_SecLabelStmt:
+ return "SecLabelStmt";
+ break;
+ case T_CreateForeignTableStmt:
+ return "CreateForeignTableStmt";
+ break;
+ case T_ImportForeignSchemaStmt:
+ return "ImportForeignSchemaStmt";
+ break;
+ case T_CreateExtensionStmt:
+ return "CreateExtensionStmt";
+ break;
+ case T_AlterExtensionStmt:
+ return "AlterExtensionStmt";
+ break;
+ case T_AlterExtensionContentsStmt:
+ return "AlterExtensionContentsStmt";
+ break;
+ case T_CreateEventTrigStmt:
+ return "CreateEventTrigStmt";
+ break;
+ case T_AlterEventTrigStmt:
+ return "AlterEventTrigStmt";
+ break;
+ case T_RefreshMatViewStmt:
+ return "RefreshMatViewStmt";
+ break;
+ case T_ReplicaIdentityStmt:
+ return "ReplicaIdentityStmt";
+ break;
+ case T_AlterSystemStmt:
+ return "AlterSystemStmt";
+ break;
+ case T_CreatePolicyStmt:
+ return "CreatePolicyStmt";
+ break;
+ case T_AlterPolicyStmt:
+ return "AlterPolicyStmt";
+ break;
+ case T_CreateTransformStmt:
+ return "CreateTransformStmt";
+ break;
+ case T_CreateAmStmt:
+ return "CreateAmStmt";
+ break;
+ case T_CreatePublicationStmt:
+ return "CreatePublicationStmt";
+ break;
+ case T_AlterPublicationStmt:
+ return "AlterPublicationStmt";
+ break;
+ case T_CreateSubscriptionStmt:
+ return "CreateSubscriptionStmt";
+ break;
+ case T_AlterSubscriptionStmt:
+ return "AlterSubscriptionStmt";
+ break;
+ case T_DropSubscriptionStmt:
+ return "DropSubscriptionStmt";
+ break;
+ case T_CreateStatsStmt:
+ return "CreateStatsStmt";
+ break;
+ case T_AlterCollationStmt:
+ return "AlterCollationStmt";
+ break;
+ case T_CallStmt:
+ return "CallStmt";
+ break;
+ case T_AlterStatsStmt:
+ return "AlterStatsStmt";
+ break;
+ case T_A_Expr:
+ return "A_Expr";
+ break;
+ case T_ColumnRef:
+ return "ColumnRef";
+ break;
+ case T_ParamRef:
+ return "ParamRef";
+ break;
+ case T_A_Const:
+ return "A_Const";
+ break;
+ case T_FuncCall:
+ return "FuncCall";
+ break;
+ case T_A_Star:
+ return "A_Star";
+ break;
+ case T_A_Indices:
+ return "A_Indices";
+ break;
+ case T_A_Indirection:
+ return "A_Indirection";
+ break;
+ case T_A_ArrayExpr:
+ return "A_ArrayExpr";
+ break;
+ case T_ResTarget:
+ return "ResTarget";
+ break;
+ case T_MultiAssignRef:
+ return "MultiAssignRef";
+ break;
+ case T_TypeCast:
+ return "TypeCast";
+ break;
+ case T_CollateClause:
+ return "CollateClause";
+ break;
+ case T_SortBy:
+ return "SortBy";
+ break;
+ case T_WindowDef:
+ return "WindowDef";
+ break;
+ case T_RangeSubselect:
+ return "RangeSubselect";
+ break;
+ case T_RangeFunction:
+ return "RangeFunction";
+ break;
+ case T_RangeTableSample:
+ return "RangeTableSample";
+ break;
+ case T_RangeTableFunc:
+ return "RangeTableFunc";
+ break;
+ case T_RangeTableFuncCol:
+ return "RangeTableFuncCol";
+ break;
+ case T_TypeName:
+ return "TypeName";
+ break;
+ case T_ColumnDef:
+ return "ColumnDef";
+ break;
+ case T_IndexElem:
+ return "IndexElem";
+ break;
+ case T_StatsElem:
+ return "StatsElem";
+ break;
+ case T_Constraint:
+ return "Constraint";
+ break;
+ case T_DefElem:
+ return "DefElem";
+ break;
+ case T_RangeTblEntry:
+ return "RangeTblEntry";
+ break;
+ case T_RangeTblFunction:
+ return "RangeTblFunction";
+ break;
+ case T_TableSampleClause:
+ return "TableSampleClause";
+ break;
+ case T_WithCheckOption:
+ return "WithCheckOption";
+ break;
+ case T_SortGroupClause:
+ return "SortGroupClause";
+ break;
+ case T_GroupingSet:
+ return "GroupingSet";
+ break;
+ case T_WindowClause:
+ return "WindowClause";
+ break;
+ case T_ObjectWithArgs:
+ return "ObjectWithArgs";
+ break;
+ case T_AccessPriv:
+ return "AccessPriv";
+ break;
+ case T_CreateOpClassItem:
+ return "CreateOpClassItem";
+ break;
+ case T_TableLikeClause:
+ return "TableLikeClause";
+ break;
+ case T_FunctionParameter:
+ return "FunctionParameter";
+ break;
+ case T_LockingClause:
+ return "LockingClause";
+ break;
+ case T_RowMarkClause:
+ return "RowMarkClause";
+ break;
+ case T_XmlSerialize:
+ return "XmlSerialize";
+ break;
+ case T_WithClause:
+ return "WithClause";
+ break;
+ case T_InferClause:
+ return "InferClause";
+ break;
+ case T_OnConflictClause:
+ return "OnConflictClause";
+ break;
+ case T_CTESearchClause:
+ return "CTESearchClause";
+ break;
+ case T_CTECycleClause:
+ return "CTECycleClause";
+ break;
+ case T_CommonTableExpr:
+ return "CommonTableExpr";
+ break;
+ case T_RoleSpec:
+ return "RoleSpec";
+ break;
+ case T_TriggerTransition:
+ return "TriggerTransition";
+ break;
+ case T_PartitionElem:
+ return "PartitionElem";
+ break;
+ case T_PartitionSpec:
+ return "PartitionSpec";
+ break;
+ case T_PartitionBoundSpec:
+ return "PartitionBoundSpec";
+ break;
+ case T_PartitionRangeDatum:
+ return "PartitionRangeDatum";
+ break;
+ case T_PartitionCmd:
+ return "PartitionCmd";
+ break;
+ case T_VacuumRelation:
+ return "VacuumRelation";
+ break;
+ case T_PublicationObjSpec:
+ return "PublicationObjSpec";
+ break;
+ case T_PublicationTable:
+ return "PublicationTable";
+ break;
+ case T_IdentifySystemCmd:
+ return "IdentifySystemCmd";
+ break;
+ case T_BaseBackupCmd:
+ return "BaseBackupCmd";
+ break;
+ case T_CreateReplicationSlotCmd:
+ return "CreateReplicationSlotCmd";
+ break;
+ case T_DropReplicationSlotCmd:
+ return "DropReplicationSlotCmd";
+ break;
+ case T_ReadReplicationSlotCmd:
+ return "ReadReplicationSlotCmd";
+ break;
+ case T_StartReplicationCmd:
+ return "StartReplicationCmd";
+ break;
+ case T_TimeLineHistoryCmd:
+ return "TimeLineHistoryCmd";
+ break;
+ case T_TriggerData:
+ return "TriggerData";
+ break;
+ case T_EventTriggerData:
+ return "EventTriggerData";
+ break;
+ case T_ReturnSetInfo:
+ return "ReturnSetInfo";
+ break;
+ case T_WindowObjectData:
+ return "WindowObjectData";
+ break;
+ case T_TIDBitmap:
+ return "TIDBitmap";
+ break;
+ case T_InlineCodeBlock:
+ return "InlineCodeBlock";
+ break;
+ case T_FdwRoutine:
+ return "FdwRoutine";
+ break;
+ case T_IndexAmRoutine:
+ return "IndexAmRoutine";
+ break;
+ case T_TableAmRoutine:
+ return "TableAmRoutine";
+ break;
+ case T_TsmRoutine:
+ return "TsmRoutine";
+ break;
+ case T_ForeignKeyCacheInfo:
+ return "ForeignKeyCacheInfo";
+ break;
+ case T_CallContext:
+ return "CallContext";
+ break;
+ case T_SupportRequestSimplify:
+ return "SupportRequestSimplify";
+ break;
+ case T_SupportRequestSelectivity:
+ return "SupportRequestSelectivity";
+ break;
+ case T_SupportRequestCost:
+ return "SupportRequestCost";
+ break;
+ case T_SupportRequestRows:
+ return "SupportRequestRows";
+ break;
+ case T_SupportRequestIndexCondition:
+ return "SupportRequestIndexCondition";
+ break;
default:
break;
}
@@ -928,45 +1792,46 @@ accesstype_arg_to_string(ObjectAccessType access, void *arg)
{
case OAT_POST_CREATE:
{
- ObjectAccessPostCreate *pc_arg = (ObjectAccessPostCreate *)arg;
+ ObjectAccessPostCreate *pc_arg = (ObjectAccessPostCreate *) arg;
+
return pstrdup(pc_arg->is_internal ? "internal" : "explicit");
}
break;
case OAT_DROP:
{
- ObjectAccessDrop *drop_arg = (ObjectAccessDrop *)arg;
+ ObjectAccessDrop *drop_arg = (ObjectAccessDrop *) arg;
return psprintf("%s%s%s%s%s%s",
- ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
- ? "internal action," : ""),
- ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
- ? "concurrent drop," : ""),
- ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
- ? "suppress notices," : ""),
- ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
- ? "keep original object," : ""),
- ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
- ? "keep extensions," : ""),
- ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
- ? "normal concurrent drop," : ""));
+ ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL)
+ ? "internal action," : ""),
+ ((drop_arg->dropflags & PERFORM_DELETION_CONCURRENTLY)
+ ? "concurrent drop," : ""),
+ ((drop_arg->dropflags & PERFORM_DELETION_QUIETLY)
+ ? "suppress notices," : ""),
+ ((drop_arg->dropflags & PERFORM_DELETION_SKIP_ORIGINAL)
+ ? "keep original object," : ""),
+ ((drop_arg->dropflags & PERFORM_DELETION_SKIP_EXTENSIONS)
+ ? "keep extensions," : ""),
+ ((drop_arg->dropflags & PERFORM_DELETION_CONCURRENT_LOCK)
+ ? "normal concurrent drop," : ""));
}
break;
case OAT_POST_ALTER:
{
- ObjectAccessPostAlter *pa_arg = (ObjectAccessPostAlter*)arg;
+ ObjectAccessPostAlter *pa_arg = (ObjectAccessPostAlter *) arg;
return psprintf("%s %s auxiliary object",
- (pa_arg->is_internal ? "internal" : "explicit"),
- (OidIsValid(pa_arg->auxiliary_id) ? "with" : "without"));
+ (pa_arg->is_internal ? "internal" : "explicit"),
+ (OidIsValid(pa_arg->auxiliary_id) ? "with" : "without"));
}
break;
case OAT_NAMESPACE_SEARCH:
{
- ObjectAccessNamespaceSearch *ns_arg = (ObjectAccessNamespaceSearch *)arg;
+ ObjectAccessNamespaceSearch *ns_arg = (ObjectAccessNamespaceSearch *) arg;
return psprintf("%s, %s",
- (ns_arg->ereport_on_violation ? "report on violation" : "no report on violation"),
- (ns_arg->result ? "allowed" : "denied"));
+ (ns_arg->ereport_on_violation ? "report on violation" : "no report on violation"),
+ (ns_arg->result ? "allowed" : "denied"));
}
break;
case OAT_TRUNCATE:
diff --git a/src/test/modules/test_pg_dump/t/001_base.pl b/src/test/modules/test_pg_dump/t/001_base.pl
index d842f934a3a..f5da6bf46d6 100644
--- a/src/test/modules/test_pg_dump/t/001_base.pl
+++ b/src/test/modules/test_pg_dump/t/001_base.pl
@@ -8,7 +8,7 @@ use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
+my $tempdir = PostgreSQL::Test::Utils::tempdir;
###############################################################
# This structure is based off of the src/bin/pg_dump/t test
diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm
index 9a2ada0a103..f842be1a72b 100644
--- a/src/test/perl/PostgreSQL/Test/Cluster.pm
+++ b/src/test/perl/PostgreSQL/Test/Cluster.pm
@@ -135,8 +135,8 @@ INIT
$test_pghost = PostgreSQL::Test::Utils::tempdir_short;
$test_pghost =~ s!\\!/!g if $PostgreSQL::Test::Utils::windows_os;
}
- $ENV{PGHOST} = $test_pghost;
- $ENV{PGDATABASE} = 'postgres';
+ $ENV{PGHOST} = $test_pghost;
+ $ENV{PGDATABASE} = 'postgres';
# Tracking of last port value assigned to accelerate free port lookup.
$last_port_assigned = int(rand() * 16384) + 49152;
@@ -409,8 +409,10 @@ sub set_replication_conf
or croak "set_replication_conf only works with the default host";
open my $hba, '>>', "$pgdata/pg_hba.conf";
- print $hba "\n# Allow replication (set up by PostgreSQL::Test::Cluster.pm)\n";
- if ($PostgreSQL::Test::Utils::windows_os && !$PostgreSQL::Test::Utils::use_unix_sockets)
+ print $hba
+ "\n# Allow replication (set up by PostgreSQL::Test::Cluster.pm)\n";
+ if ($PostgreSQL::Test::Utils::windows_os
+ && !$PostgreSQL::Test::Utils::use_unix_sockets)
{
print $hba
"host replication all $test_localhost/32 sspi include_realm=1 map=regress\n";
@@ -459,10 +461,10 @@ sub init
mkdir $self->backup_dir;
mkdir $self->archive_dir;
- PostgreSQL::Test::Utils::system_or_bail('initdb', '-D', $pgdata, '-A', 'trust', '-N',
- @{ $params{extra} });
- PostgreSQL::Test::Utils::system_or_bail($ENV{PG_REGRESS}, '--config-auth', $pgdata,
- @{ $params{auth_extra} });
+ PostgreSQL::Test::Utils::system_or_bail('initdb', '-D', $pgdata, '-A',
+ 'trust', '-N', @{ $params{extra} });
+ PostgreSQL::Test::Utils::system_or_bail($ENV{PG_REGRESS},
+ '--config-auth', $pgdata, @{ $params{auth_extra} });
open my $conf, '>>', "$pgdata/postgresql.conf";
print $conf "\n# Added by PostgreSQL::Test::Cluster.pm\n";
@@ -575,7 +577,7 @@ sub adjust_conf
my $conffile = $self->data_dir . '/' . $filename;
my $contents = PostgreSQL::Test::Utils::slurp_file($conffile);
- my @lines = split(/\n/, $contents);
+ my @lines = split(/\n/, $contents);
my @result;
my $eq = $skip_equals ? '' : '= ';
foreach my $line (@lines)
@@ -809,8 +811,10 @@ sub start
# sub init) so that it does not get copied to standbys.
# -w is now the default but having it here does no harm and helps
# compatibility with older versions.
- $ret = PostgreSQL::Test::Utils::system_log('pg_ctl', '-w', '-D', $self->data_dir, '-l',
- $self->logfile, '-o', "--cluster-name=$name", 'start');
+ $ret = PostgreSQL::Test::Utils::system_log(
+ 'pg_ctl', '-w', '-D', $self->data_dir,
+ '-l', $self->logfile, '-o', "--cluster-name=$name",
+ 'start');
if ($ret != 0)
{
@@ -919,7 +923,8 @@ sub reload
local %ENV = $self->_get_env();
print "### Reloading node \"$name\"\n";
- PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, 'reload');
+ PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata,
+ 'reload');
return;
}
@@ -945,8 +950,8 @@ sub restart
# -w is now the default but having it here does no harm and helps
# compatibility with older versions.
- PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-w', '-D', $pgdata, '-l', $logfile,
- 'restart');
+ PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-w', '-D', $pgdata,
+ '-l', $logfile, 'restart');
$self->_update_pid(1);
return;
@@ -971,8 +976,8 @@ sub promote
local %ENV = $self->_get_env();
print "### Promoting node \"$name\"\n";
- PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
- 'promote');
+ PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, '-l',
+ $logfile, 'promote');
return;
}
@@ -995,8 +1000,8 @@ sub logrotate
local %ENV = $self->_get_env();
print "### Rotating log in node \"$name\"\n";
- PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
- 'logrotate');
+ PostgreSQL::Test::Utils::system_or_bail('pg_ctl', '-D', $pgdata, '-l',
+ $logfile, 'logrotate');
return;
}
@@ -1232,13 +1237,16 @@ sub new
my $testname = basename($0);
$testname =~ s/\.[^.]+$//;
my $node = {
- _port => $port,
- _host => $host,
- _basedir => "$PostgreSQL::Test::Utils::tmp_check/t_${testname}_${name}_data",
- _name => $name,
+ _port => $port,
+ _host => $host,
+ _basedir =>
+ "$PostgreSQL::Test::Utils::tmp_check/t_${testname}_${name}_data",
+ _name => $name,
_logfile_generation => 0,
- _logfile_base => "$PostgreSQL::Test::Utils::log_path/${testname}_${name}",
- _logfile => "$PostgreSQL::Test::Utils::log_path/${testname}_${name}.log"
+ _logfile_base =>
+ "$PostgreSQL::Test::Utils::log_path/${testname}_${name}",
+ _logfile =>
+ "$PostgreSQL::Test::Utils::log_path/${testname}_${name}.log"
};
if ($params{install_path})
@@ -1261,8 +1269,8 @@ sub new
# isn't fully compatible. Warn if the version is too old and thus we don't
# have a subclass of this class.
if (ref $ver && $ver < $min_compat)
- {
- my $maj = $ver->major(separator => '_');
+ {
+ my $maj = $ver->major(separator => '_');
my $subclass = $class . "::V_$maj";
if ($subclass->isa($class))
{
@@ -1270,9 +1278,10 @@ sub new
}
else
{
- carp "PostgreSQL::Test::Cluster isn't fully compatible with version $ver";
+ carp
+ "PostgreSQL::Test::Cluster isn't fully compatible with version $ver";
}
- }
+ }
# Add node to list of nodes
push(@all_nodes, $node);
@@ -1528,7 +1537,8 @@ END
next if defined $ENV{'PG_TEST_NOCLEAN'};
# clean basedir on clean test invocation
- $node->clean_node if $exit_code == 0 && PostgreSQL::Test::Utils::all_tests_passing();
+ $node->clean_node
+ if $exit_code == 0 && PostgreSQL::Test::Utils::all_tests_passing();
}
$? = $exit_code;
@@ -2178,7 +2188,8 @@ sub connect_ok
if (@log_like or @log_unlike)
{
- my $log_contents = PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
+ my $log_contents =
+ PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
while (my $regex = shift @log_like)
{
@@ -2248,7 +2259,8 @@ sub connect_fails
if (@log_like or @log_unlike)
{
- my $log_contents = PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
+ my $log_contents =
+ PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
while (my $regex = shift @log_like)
{
@@ -2444,7 +2456,8 @@ sub issues_sql_like
my $result = PostgreSQL::Test::Utils::run_log($cmd);
ok($result, "@$cmd exit code 0");
- my $log = PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
+ my $log =
+ PostgreSQL::Test::Utils::slurp_file($self->logfile, $log_location);
like($log, $expected_sql, "$test_name: SQL found in server log");
return;
}
@@ -2550,7 +2563,8 @@ sub wait_for_catchup
unless exists($valid_modes{$mode});
# Allow passing of a PostgreSQL::Test::Cluster instance as shorthand
- if (blessed($standby_name) && $standby_name->isa("PostgreSQL::Test::Cluster"))
+ if (blessed($standby_name)
+ && $standby_name->isa("PostgreSQL::Test::Cluster"))
{
$standby_name = $standby_name->name;
}
@@ -2566,8 +2580,7 @@ sub wait_for_catchup
. $self->name . "\n";
# Before release 12 walreceiver just set the application name to
# "walreceiver"
- my $query =
- qq[SELECT '$target_lsn' <= ${mode}_lsn AND state = 'streaming'
+ my $query = qq[SELECT '$target_lsn' <= ${mode}_lsn AND state = 'streaming'
FROM pg_catalog.pg_stat_replication
WHERE application_name IN ('$standby_name', 'walreceiver')];
$self->poll_query_until('postgres', $query)
@@ -2641,9 +2654,10 @@ sub wait_for_log
while ($attempts < $max_attempts)
{
- my $log = PostgreSQL::Test::Utils::slurp_file($self->logfile, $offset);
+ my $log =
+ PostgreSQL::Test::Utils::slurp_file($self->logfile, $offset);
- return $offset+length($log) if ($log =~ m/$regexp/);
+ return $offset + length($log) if ($log =~ m/$regexp/);
# Wait 0.1 second before retrying.
usleep(100_000);
@@ -2858,7 +2872,8 @@ sub corrupt_page_checksum
##########################################################################
-package PostgreSQL::Test::Cluster::V_11; ## no critic (ProhibitMultiplePackages)
+package PostgreSQL::Test::Cluster::V_11
+ ; ## no critic (ProhibitMultiplePackages)
# parent.pm is not present in all perl versions before 5.10.1, so instead
# do directly what it would do for this:
@@ -2874,21 +2889,22 @@ sub _recovery_file { return "recovery.conf"; }
sub set_standby_mode
{
- my $self = shift;
- $self->append_conf("recovery.conf", "standby_mode = on\n");
+ my $self = shift;
+ $self->append_conf("recovery.conf", "standby_mode = on\n");
}
sub init
{
- my ($self, %params) = @_;
- $self->SUPER::init(%params);
- $self->adjust_conf('postgresql.conf', 'max_wal_senders',
- $params{allows_streaming} ? 5 : 0);
+ my ($self, %params) = @_;
+ $self->SUPER::init(%params);
+ $self->adjust_conf('postgresql.conf', 'max_wal_senders',
+ $params{allows_streaming} ? 5 : 0);
}
##########################################################################
-package PostgreSQL::Test::Cluster::V_10; ## no critic (ProhibitMultiplePackages)
+package PostgreSQL::Test::Cluster::V_10
+ ; ## no critic (ProhibitMultiplePackages)
# use parent -norequire, qw(PostgreSQL::Test::Cluster::V_11);
push @PostgreSQL::Test::Cluster::V_10::ISA, 'PostgreSQL::Test::Cluster::V_11';
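The @ISA push above stands in for "use parent -norequire" on Perls whose parent.pm is too old. A minimal sketch of the same idiom, using hypothetical package names that are not part of this patch:

    use strict;
    use warnings;

    package My::Base;
    sub new   { return bless {}, shift }
    sub greet { return "base" }

    package My::Child;
    # Equivalent of "use parent -norequire, 'My::Base'" without parent.pm:
    push @My::Child::ISA, 'My::Base';
    sub greet { return "child, then " . shift->SUPER::greet() }

    package main;
    print My::Child->new->greet, "\n";    # prints "child, then base"
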
diff --git a/src/test/perl/PostgreSQL/Test/SimpleTee.pm b/src/test/perl/PostgreSQL/Test/SimpleTee.pm
index 7cb8591fed2..ec13714c331 100644
--- a/src/test/perl/PostgreSQL/Test/SimpleTee.pm
+++ b/src/test/perl/PostgreSQL/Test/SimpleTee.pm
@@ -27,13 +27,13 @@ BEGIN { $last_time = time; }
sub _time_str
{
- my $tm = time;
+ my $tm = time;
my $diff = $tm - $last_time;
$last_time = $tm;
my ($sec, $min, $hour) = localtime($tm);
my $msec = int(1000 * ($tm - int($tm)));
return sprintf("[%.2d:%.2d:%.2d.%.3d](%.3fs) ",
- $hour, $min, $sec, $msec, $diff);
+ $hour, $min, $sec, $msec, $diff);
}
sub TIEHANDLE
@@ -50,11 +50,11 @@ sub PRINT
# the original stdout, which is what PROVE sees. Additional decorations
# confuse it, so only put out the time string on files after the first.
my $skip = 1;
- my $ts = _time_str;
+ my $ts = _time_str;
for my $fh (@$self)
{
print $fh ($skip ? "" : $ts), @_ or $ok = 0;
- $fh->flush or $ok = 0;
+ $fh->flush or $ok = 0;
$skip = 0;
}
return $ok;
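For context, SimpleTee implements the tie() handle interface (TIEHANDLE/PRINT), so a rough usage sketch looks like the following; the handle and file names are illustrative assumptions, not taken from the patch:

    use PostgreSQL::Test::SimpleTee;

    # Duplicate everything printed to TEE onto STDOUT and a log file; the
    # timestamp prefix is added for every handle after the first one.
    open my $logfh, '>>', 'tmp_check/regress_log' or die $!;
    tie *TEE, 'PostgreSQL::Test::SimpleTee', \*STDOUT, $logfh;
    print TEE "starting test step\n";
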
diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm
index dca1b3b17c4..1ca2cc59170 100644
--- a/src/test/perl/PostgreSQL/Test/Utils.pm
+++ b/src/test/perl/PostgreSQL/Test/Utils.pm
@@ -142,14 +142,15 @@ BEGIN
# Must be set early
$windows_os = $Config{osname} eq 'MSWin32' || $Config{osname} eq 'msys';
# Check if this environment is MSYS2.
- $is_msys2 = $windows_os && -x '/usr/bin/uname' &&
- `uname -or` =~ /^[2-9].*Msys/;
+ $is_msys2 =
+ $windows_os
+ && -x '/usr/bin/uname'
+ && `uname -or` =~ /^[2-9].*Msys/;
if ($windows_os)
{
require Win32API::File;
- Win32API::File->import(
- qw(createFile OsFHandleOpen CloseHandle));
+ Win32API::File->import(qw(createFile OsFHandleOpen CloseHandle));
}
# Specifies whether to use Unix sockets for test setups. On
@@ -428,12 +429,16 @@ sub pump_until
last if $$stream =~ /$until/;
if ($timeout->is_expired)
{
- diag("pump_until: timeout expired when searching for \"$until\" with stream: \"$$stream\"");
+ diag(
+ "pump_until: timeout expired when searching for \"$until\" with stream: \"$$stream\""
+ );
return 0;
}
if (not $proc->pumpable())
{
- diag("pump_until: process terminated unexpectedly when searching for \"$until\" with stream: \"$$stream\"");
+ diag(
+ "pump_until: process terminated unexpectedly when searching for \"$until\" with stream: \"$$stream\""
+ );
return 0;
}
$proc->pump();
diff --git a/src/test/perl/PostgreSQL/Version.pm b/src/test/perl/PostgreSQL/Version.pm
index 30d328103b5..8f704911895 100644
--- a/src/test/perl/PostgreSQL/Version.pm
+++ b/src/test/perl/PostgreSQL/Version.pm
@@ -151,14 +151,14 @@ a dot unless the separator argument is given.
sub major
{
- my ($self, %params) = @_;
- my $result = $self->{num}->[0];
- if ($result + 0 < 10)
- {
- my $sep = $params{separator} || '.';
- $result .= "$sep$self->{num}->[1]";
- }
- return $result;
+ my ($self, %params) = @_;
+ my $result = $self->{num}->[0];
+ if ($result + 0 < 10)
+ {
+ my $sep = $params{separator} || '.';
+ $result .= "$sep$self->{num}->[1]";
+ }
+ return $result;
}
1;
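As a quick illustration of major() above (a usage sketch, not part of the patch): versions before 10 report a two-part major number, and the separator argument controls how it is joined.

    use PostgreSQL::Version;

    my $old = PostgreSQL::Version->new('9.6.24');
    my $new = PostgreSQL::Version->new('15.1');

    print $old->major(), "\n";                    # "9.6"
    print $old->major(separator => '_'), "\n";    # "9_6", the form used to build "::V_..." subclass names
    print $new->major(), "\n";                    # "15"
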
diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl
index 583ee87da82..86864098f9e 100644
--- a/src/test/recovery/t/001_stream_rep.pl
+++ b/src/test/recovery/t/001_stream_rep.pl
@@ -374,7 +374,8 @@ sub replay_check
);
my $primary_lsn = $node_primary->lsn('write');
$node_primary->wait_for_catchup($node_standby_1, 'replay', $primary_lsn);
- $node_standby_1->wait_for_catchup($node_standby_2, 'replay', $primary_lsn);
+ $node_standby_1->wait_for_catchup($node_standby_2, 'replay',
+ $primary_lsn);
$node_standby_1->safe_psql('postgres',
qq[SELECT 1 FROM replayed WHERE val = $newval])
diff --git a/src/test/recovery/t/002_archiving.pl b/src/test/recovery/t/002_archiving.pl
index 01c52d8e7f7..d69da4e5efd 100644
--- a/src/test/recovery/t/002_archiving.pl
+++ b/src/test/recovery/t/002_archiving.pl
@@ -125,7 +125,7 @@ my $log_location = -s $node_standby2->logfile;
$node_standby2->promote;
# Check the logs of the standby to see that the commands have failed.
-my $log_contents = slurp_file($node_standby2->logfile, $log_location);
+my $log_contents = slurp_file($node_standby2->logfile, $log_location);
my $node_standby2_data = $node_standby2->data_dir;
like(
diff --git a/src/test/recovery/t/006_logical_decoding.pl b/src/test/recovery/t/006_logical_decoding.pl
index 3ccced2ea24..0cd0467fbb8 100644
--- a/src/test/recovery/t/006_logical_decoding.pl
+++ b/src/test/recovery/t/006_logical_decoding.pl
@@ -206,62 +206,68 @@ my $stats_test_slot2 = 'logical_slot';
# Test that reset works for pg_stat_replication_slots
# Stats exist for stats test slot 1
-is($node_primary->safe_psql(
- 'postgres',
- qq(SELECT total_bytes > 0, stats_reset IS NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
-), qq(t|t), qq(Total bytes is > 0 and stats_reset is NULL for slot '$stats_test_slot1'.));
+is( $node_primary->safe_psql(
+ 'postgres',
+ qq(SELECT total_bytes > 0, stats_reset IS NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
+ ),
+ qq(t|t),
+ qq(Total bytes is > 0 and stats_reset is NULL for slot '$stats_test_slot1'.)
+);
# Do reset of stats for stats test slot 1
-$node_primary->safe_psql(
- 'postgres',
- qq(SELECT pg_stat_reset_replication_slot('$stats_test_slot1'))
-);
+$node_primary->safe_psql('postgres',
+ qq(SELECT pg_stat_reset_replication_slot('$stats_test_slot1')));
# Get reset value after reset
-my $reset1 = $node_primary->safe_psql(
- 'postgres',
+my $reset1 = $node_primary->safe_psql('postgres',
qq(SELECT stats_reset FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
);
# Do reset again
-$node_primary->safe_psql(
- 'postgres',
- qq(SELECT pg_stat_reset_replication_slot('$stats_test_slot1'))
-);
+$node_primary->safe_psql('postgres',
+ qq(SELECT pg_stat_reset_replication_slot('$stats_test_slot1')));
-is($node_primary->safe_psql(
- 'postgres',
- qq(SELECT stats_reset > '$reset1'::timestamptz, total_bytes = 0 FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
-), qq(t|t), qq(Check that reset timestamp is later after the second reset of stats for slot '$stats_test_slot1' and confirm total_bytes was set to 0.));
+is( $node_primary->safe_psql(
+ 'postgres',
+ qq(SELECT stats_reset > '$reset1'::timestamptz, total_bytes = 0 FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
+ ),
+ qq(t|t),
+ qq(Check that reset timestamp is later after the second reset of stats for slot '$stats_test_slot1' and confirm total_bytes was set to 0.)
+);
# Check that test slot 2 has NULL in reset timestamp
-is($node_primary->safe_psql(
- 'postgres',
- qq(SELECT stats_reset IS NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot2')
-), qq(t), qq(Stats_reset is NULL for slot '$stats_test_slot2' before reset.));
+is( $node_primary->safe_psql(
+ 'postgres',
+ qq(SELECT stats_reset IS NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot2')
+ ),
+ qq(t),
+ qq(Stats_reset is NULL for slot '$stats_test_slot2' before reset.));
# Get reset value again for test slot 1
-$reset1 = $node_primary->safe_psql(
- 'postgres',
+$reset1 = $node_primary->safe_psql('postgres',
qq(SELECT stats_reset FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
);
# Reset stats for all replication slots
-$node_primary->safe_psql(
- 'postgres',
- qq(SELECT pg_stat_reset_replication_slot(NULL))
-);
+$node_primary->safe_psql('postgres',
+ qq(SELECT pg_stat_reset_replication_slot(NULL)));
# Check that test slot 2 reset timestamp is no longer NULL after reset
-is($node_primary->safe_psql(
- 'postgres',
- qq(SELECT stats_reset IS NOT NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot2')
-), qq(t), qq(Stats_reset is not NULL for slot '$stats_test_slot2' after reset all.));
-
-is($node_primary->safe_psql(
- 'postgres',
- qq(SELECT stats_reset > '$reset1'::timestamptz FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
-), qq(t), qq(Check that reset timestamp is later after resetting stats for slot '$stats_test_slot1' again.));
+is( $node_primary->safe_psql(
+ 'postgres',
+ qq(SELECT stats_reset IS NOT NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot2')
+ ),
+ qq(t),
+ qq(Stats_reset is not NULL for slot '$stats_test_slot2' after reset all.)
+);
+
+is( $node_primary->safe_psql(
+ 'postgres',
+ qq(SELECT stats_reset > '$reset1'::timestamptz FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
+ ),
+ qq(t),
+ qq(Check that reset timestamp is later after resetting stats for slot '$stats_test_slot1' again.)
+);
# done with the node
$node_primary->stop;
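The qq(t|t) expectations above rely on safe_psql returning unaligned, tuples-only psql output, so the columns of a row come back joined by '|'. A tiny, self-contained sketch of that convention (illustrative only; the node name is made up):

    use PostgreSQL::Test::Cluster;
    use Test::More;

    my $node = PostgreSQL::Test::Cluster->new('fmt_check');
    $node->init;
    $node->start;

    # Two boolean columns of a single row arrive as the string "t|t";
    # multiple rows would be separated by newlines instead.
    my $out = $node->safe_psql('postgres', q(SELECT true, true));
    is($out, 't|t', 'columns of one row are pipe-separated');

    $node->stop;
    done_testing();
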
diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl
index 10da6cb0c1c..c22844d39c0 100644
--- a/src/test/recovery/t/013_crash_restart.pl
+++ b/src/test/recovery/t/013_crash_restart.pl
@@ -66,7 +66,8 @@ CREATE TABLE alive(status text);
INSERT INTO alive VALUES($$committed-before-sigquit$$);
SELECT pg_backend_pid();
];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
+ok( pump_until(
+ $killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
'acquired pid for SIGQUIT');
my $pid = $killme_stdout;
chomp($pid);
@@ -78,7 +79,9 @@ $killme_stdin .= q[
BEGIN;
INSERT INTO alive VALUES($$in-progress-before-sigquit$$) RETURNING status;
];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigquit/m),
+ok( pump_until(
+ $killme, $psql_timeout,
+ \$killme_stdout, qr/in-progress-before-sigquit/m),
'inserted in-progress-before-sigquit');
$killme_stdout = '';
$killme_stderr = '';
@@ -91,7 +94,8 @@ $monitor_stdin .= q[
SELECT $$psql-connected$$;
SELECT pg_sleep(3600);
];
-ok(pump_until($monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
+ok( pump_until(
+ $monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
'monitor connected');
$monitor_stdout = '';
$monitor_stderr = '';
@@ -145,7 +149,8 @@ $monitor->run();
$killme_stdin .= q[
SELECT pg_backend_pid();
];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
+ok( pump_until(
+ $killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
"acquired pid for SIGKILL");
$pid = $killme_stdout;
chomp($pid);
@@ -158,7 +163,9 @@ INSERT INTO alive VALUES($$committed-before-sigkill$$) RETURNING status;
BEGIN;
INSERT INTO alive VALUES($$in-progress-before-sigkill$$) RETURNING status;
];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m),
+ok( pump_until(
+ $killme, $psql_timeout,
+ \$killme_stdout, qr/in-progress-before-sigkill/m),
'inserted in-progress-before-sigkill');
$killme_stdout = '';
$killme_stderr = '';
@@ -170,7 +177,8 @@ $monitor_stdin .= q[
SELECT $$psql-connected$$;
SELECT pg_sleep(3600);
];
-ok(pump_until($monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
+ok( pump_until(
+ $monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
'monitor connected');
$monitor_stdout = '';
$monitor_stderr = '';
diff --git a/src/test/recovery/t/014_unlogged_reinit.pl b/src/test/recovery/t/014_unlogged_reinit.pl
index 0dca3f69fe3..72895104ed9 100644
--- a/src/test/recovery/t/014_unlogged_reinit.pl
+++ b/src/test/recovery/t/014_unlogged_reinit.pl
@@ -44,7 +44,8 @@ is($node->safe_psql('postgres', "SELECT nextval('seq_unlogged')"),
my $tablespaceDir = PostgreSQL::Test::Utils::tempdir;
-$node->safe_psql('postgres', "CREATE TABLESPACE ts1 LOCATION '$tablespaceDir'");
+$node->safe_psql('postgres',
+ "CREATE TABLESPACE ts1 LOCATION '$tablespaceDir'");
$node->safe_psql('postgres',
'CREATE UNLOGGED TABLE ts1_unlogged (id int) TABLESPACE ts1');
diff --git a/src/test/recovery/t/019_replslot_limit.pl b/src/test/recovery/t/019_replslot_limit.pl
index 5654f3b5455..6bbf55c3ee1 100644
--- a/src/test/recovery/t/019_replslot_limit.pl
+++ b/src/test/recovery/t/019_replslot_limit.pl
@@ -347,16 +347,18 @@ while (1)
my ($stdout, $stderr);
$senderpid = $node_primary3->safe_psql('postgres',
- "SELECT pid FROM pg_stat_activity WHERE backend_type = 'walsender'");
+ "SELECT pid FROM pg_stat_activity WHERE backend_type = 'walsender'");
last if $senderpid =~ qr/^[0-9]+$/;
diag "multiple walsenders active in iteration $i";
# show information about all active connections
- $node_primary3->psql('postgres',
- "\\a\\t\nSELECT * FROM pg_stat_activity",
- stdout => \$stdout, stderr => \$stderr);
+ $node_primary3->psql(
+ 'postgres',
+ "\\a\\t\nSELECT * FROM pg_stat_activity",
+ stdout => \$stdout,
+ stderr => \$stderr);
diag $stdout, $stderr;
	# unlikely that the problem would resolve after 15s, so give up at this point
diff --git a/src/test/recovery/t/022_crash_temp_files.pl b/src/test/recovery/t/022_crash_temp_files.pl
index 24fb141785d..53a55c7a8ac 100644
--- a/src/test/recovery/t/022_crash_temp_files.pl
+++ b/src/test/recovery/t/022_crash_temp_files.pl
@@ -53,7 +53,8 @@ my $killme = IPC::Run::start(
$killme_stdin .= q[
SELECT pg_backend_pid();
];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
+ok( pump_until(
+ $killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
'acquired pid for SIGKILL');
my $pid = $killme_stdout;
chomp($pid);
@@ -82,7 +83,8 @@ BEGIN;
INSERT INTO tab_crash (a) VALUES(1);
SELECT $$insert-tuple-to-lock-next-insert$$;
];
-pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-to-lock-next-insert/m);
+pump_until($killme2, $psql_timeout, \$killme_stdout2,
+ qr/insert-tuple-to-lock-next-insert/m);
$killme_stdout2 = '';
$killme_stderr2 = '';
@@ -95,7 +97,9 @@ BEGIN;
SELECT $$in-progress-before-sigkill$$;
INSERT INTO tab_crash (a) SELECT i FROM generate_series(1, 5000) s(i);
];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m),
+ok( pump_until(
+ $killme, $psql_timeout,
+ \$killme_stdout, qr/in-progress-before-sigkill/m),
'insert in-progress-before-sigkill');
$killme_stdout = '';
$killme_stderr = '';
@@ -117,7 +121,8 @@ END; $c$;
SELECT $$insert-tuple-lock-waiting$$;
];
-pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-lock-waiting/m);
+pump_until($killme2, $psql_timeout, \$killme_stdout2,
+ qr/insert-tuple-lock-waiting/m);
$killme_stdout2 = '';
$killme_stderr2 = '';
@@ -167,7 +172,8 @@ $killme->run();
$killme_stdin .= q[
SELECT pg_backend_pid();
];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
+ok( pump_until(
+ $killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
'acquired pid for SIGKILL');
$pid = $killme_stdout;
chomp($pid);
@@ -184,7 +190,8 @@ BEGIN;
INSERT INTO tab_crash (a) VALUES(1);
SELECT $$insert-tuple-to-lock-next-insert$$;
];
-pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-to-lock-next-insert/m);
+pump_until($killme2, $psql_timeout, \$killme_stdout2,
+ qr/insert-tuple-to-lock-next-insert/m);
$killme_stdout2 = '';
$killme_stderr2 = '';
@@ -197,7 +204,9 @@ BEGIN;
SELECT $$in-progress-before-sigkill$$;
INSERT INTO tab_crash (a) SELECT i FROM generate_series(1, 5000) s(i);
];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m),
+ok( pump_until(
+ $killme, $psql_timeout,
+ \$killme_stdout, qr/in-progress-before-sigkill/m),
'insert in-progress-before-sigkill');
$killme_stdout = '';
$killme_stderr = '';
@@ -219,7 +228,8 @@ END; $c$;
SELECT $$insert-tuple-lock-waiting$$;
];
-pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-lock-waiting/m);
+pump_until($killme2, $psql_timeout, \$killme_stdout2,
+ qr/insert-tuple-lock-waiting/m);
$killme_stdout2 = '';
$killme_stderr2 = '';
diff --git a/src/test/recovery/t/027_stream_regress.pl b/src/test/recovery/t/027_stream_regress.pl
index be9799c0a46..fdb4ea0bf50 100644
--- a/src/test/recovery/t/027_stream_regress.pl
+++ b/src/test/recovery/t/027_stream_regress.pl
@@ -19,7 +19,8 @@ $node_primary->init(allows_streaming => 1);
# Increase some settings that Cluster->new makes too low by default.
$node_primary->adjust_conf('postgresql.conf', 'max_connections', '25');
-$node_primary->append_conf('postgresql.conf', 'max_prepared_transactions = 10');
+$node_primary->append_conf('postgresql.conf',
+ 'max_prepared_transactions = 10');
# We'll stick with Cluster->new's small default shared_buffers, but since that
# makes synchronized seqscans more probable, it risks changing the results of
# some test queries. Disable synchronized seqscans to prevent that.
@@ -27,18 +28,19 @@ $node_primary->append_conf('postgresql.conf', 'synchronize_seqscans = off');
# WAL consistency checking is resource intensive so require opt-in with the
# PG_TEST_EXTRA environment variable.
-if ($ENV{PG_TEST_EXTRA} &&
- $ENV{PG_TEST_EXTRA} =~ m/\bwal_consistency_checking\b/) {
+if ( $ENV{PG_TEST_EXTRA}
+ && $ENV{PG_TEST_EXTRA} =~ m/\bwal_consistency_checking\b/)
+{
$node_primary->append_conf('postgresql.conf',
'wal_consistency_checking = all');
}
$node_primary->start;
is( $node_primary->psql(
- 'postgres',
- qq[SELECT pg_create_physical_replication_slot('standby_1');]),
- 0,
- 'physical slot created on primary');
+ 'postgres',
+ qq[SELECT pg_create_physical_replication_slot('standby_1');]),
+ 0,
+ 'physical slot created on primary');
my $backup_name = 'my_backup';
# Take backup
@@ -49,25 +51,29 @@ my $node_standby_1 = PostgreSQL::Test::Cluster->new('standby_1');
$node_standby_1->init_from_backup($node_primary, $backup_name,
has_streaming => 1);
$node_standby_1->append_conf('postgresql.conf',
- "primary_slot_name = standby_1");
+ "primary_slot_name = standby_1");
$node_standby_1->append_conf('postgresql.conf',
'max_standby_streaming_delay = 600s');
$node_standby_1->start;
-my $dlpath = dirname($ENV{REGRESS_SHLIB});
+my $dlpath = dirname($ENV{REGRESS_SHLIB});
my $outputdir = $PostgreSQL::Test::Utils::tmp_check;
# Run the regression tests against the primary.
my $extra_opts = $ENV{EXTRA_REGRESS_OPTS} || "";
-my $rc = system($ENV{PG_REGRESS} . " $extra_opts " .
- "--dlpath=\"$dlpath\" " .
- "--bindir= " .
- "--host=" . $node_primary->host . " " .
- "--port=" . $node_primary->port . " " .
- "--schedule=../regress/parallel_schedule " .
- "--max-concurrent-tests=20 " .
- "--inputdir=../regress " .
- "--outputdir=\"$outputdir\"");
+my $rc =
+ system($ENV{PG_REGRESS}
+ . " $extra_opts "
+ . "--dlpath=\"$dlpath\" "
+ . "--bindir= "
+ . "--host="
+ . $node_primary->host . " "
+ . "--port="
+ . $node_primary->port . " "
+ . "--schedule=../regress/parallel_schedule "
+ . "--max-concurrent-tests=20 "
+ . "--inputdir=../regress "
+ . "--outputdir=\"$outputdir\"");
if ($rc != 0)
{
# Dump out the regression diffs file, if there is one
@@ -92,12 +98,16 @@ $node_primary->wait_for_catchup($node_standby_1, 'replay',
# Perform a logical dump of primary and standby, and check that they match
command_ok(
- [ 'pg_dumpall', '-f', $outputdir . '/primary.dump', '--no-sync',
- '-p', $node_primary->port ],
+ [
+ 'pg_dumpall', '-f', $outputdir . '/primary.dump',
+ '--no-sync', '-p', $node_primary->port
+ ],
'dump primary server');
command_ok(
- [ 'pg_dumpall', '-f', $outputdir . '/standby.dump', '--no-sync',
- '-p', $node_standby_1->port ],
+ [
+ 'pg_dumpall', '-f', $outputdir . '/standby.dump',
+ '--no-sync', '-p', $node_standby_1->port
+ ],
'dump standby server');
command_ok(
[ 'diff', $outputdir . '/primary.dump', $outputdir . '/standby.dump' ],
diff --git a/src/test/recovery/t/029_stats_restart.pl b/src/test/recovery/t/029_stats_restart.pl
index 2fe8db88079..1bf7b568ccb 100644
--- a/src/test/recovery/t/029_stats_restart.pl
+++ b/src/test/recovery/t/029_stats_restart.pl
@@ -273,7 +273,8 @@ $sect = "post immediate restart";
my $wal_restart_immediate = wal_stats();
cmp_ok(
- $wal_reset_restart->{reset}, 'lt',
+ $wal_reset_restart->{reset},
+ 'lt',
$wal_restart_immediate->{reset},
"$sect: reset timestamp is new");
diff --git a/src/test/recovery/t/031_recovery_conflict.pl b/src/test/recovery/t/031_recovery_conflict.pl
index 8dcb3da0de9..545d523edff 100644
--- a/src/test/recovery/t/031_recovery_conflict.pl
+++ b/src/test/recovery/t/031_recovery_conflict.pl
@@ -229,8 +229,10 @@ $expected_conflicts++;
# Want to test recovery deadlock conflicts, not buffer pin conflicts. Without
# changing max_standby_streaming_delay it'd be timing dependent what we hit
# first
-$node_standby->adjust_conf('postgresql.conf', 'max_standby_streaming_delay',
- "${PostgreSQL::Test::Utils::timeout_default}s");
+$node_standby->adjust_conf(
+ 'postgresql.conf',
+ 'max_standby_streaming_delay',
+ "${PostgreSQL::Test::Utils::timeout_default}s");
$node_standby->restart();
reconnect_and_clear();
@@ -289,7 +291,8 @@ check_conflict_stat("deadlock");
# clean up for next tests
$node_primary->safe_psql($test_db, qq[ROLLBACK PREPARED 'lock';]);
-$node_standby->adjust_conf('postgresql.conf', 'max_standby_streaming_delay', '50ms');
+$node_standby->adjust_conf('postgresql.conf', 'max_standby_streaming_delay',
+ '50ms');
$node_standby->restart();
reconnect_and_clear();
diff --git a/src/test/recovery/t/032_relfilenode_reuse.pl b/src/test/recovery/t/032_relfilenode_reuse.pl
index ac9340b7dd1..ae7e32763fb 100644
--- a/src/test/recovery/t/032_relfilenode_reuse.pl
+++ b/src/test/recovery/t/032_relfilenode_reuse.pl
@@ -8,7 +8,8 @@ use File::Basename;
my $node_primary = PostgreSQL::Test::Cluster->new('primary');
$node_primary->init(allows_streaming => 1);
-$node_primary->append_conf('postgresql.conf', q[
+$node_primary->append_conf(
+ 'postgresql.conf', q[
allow_in_place_tablespaces = true
log_connections=on
# to avoid "repairing" corruption
@@ -61,28 +62,28 @@ $psql_standby{run} = IPC::Run::start(
# rows. Using a template database + preexisting rows makes it a bit easier to
# reproduce, because there's no cache invalidations generated.
-$node_primary->safe_psql('postgres', "CREATE DATABASE conflict_db_template OID = 50000;");
-$node_primary->safe_psql('conflict_db_template', q[
+$node_primary->safe_psql('postgres',
+ "CREATE DATABASE conflict_db_template OID = 50000;");
+$node_primary->safe_psql(
+ 'conflict_db_template', q[
CREATE TABLE large(id serial primary key, dataa text, datab text);
- INSERT INTO large(dataa, datab) SELECT g.i::text, 1 FROM generate_series(1, 4000) g(i);]);
-$node_primary->safe_psql('postgres', "CREATE DATABASE conflict_db TEMPLATE conflict_db_template OID = 50001;");
+ INSERT INTO large(dataa, datab) SELECT g.i::text, 1 FROM generate_series(1, 4000) g(i);]
+);
+$node_primary->safe_psql('postgres',
+ "CREATE DATABASE conflict_db TEMPLATE conflict_db_template OID = 50001;");
-$node_primary->safe_psql('postgres', q[
+$node_primary->safe_psql(
+ 'postgres', q[
CREATE EXTENSION pg_prewarm;
CREATE TABLE replace_sb(data text);
- INSERT INTO replace_sb(data) SELECT random()::text FROM generate_series(1, 15000);]);
+ INSERT INTO replace_sb(data) SELECT random()::text FROM generate_series(1, 15000);]
+);
$node_primary->wait_for_catchup($node_standby);
# Use longrunning transactions, so that AtEOXact_SMgr doesn't close files
-send_query_and_wait(
- \%psql_primary,
- q[BEGIN;],
- qr/BEGIN/m);
-send_query_and_wait(
- \%psql_standby,
- q[BEGIN;],
- qr/BEGIN/m);
+send_query_and_wait(\%psql_primary, q[BEGIN;], qr/BEGIN/m);
+send_query_and_wait(\%psql_standby, q[BEGIN;], qr/BEGIN/m);
# Cause lots of dirty rows in shared_buffers
$node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 1;");
@@ -94,10 +95,10 @@ cause_eviction(\%psql_primary, \%psql_standby);
# drop and recreate database
$node_primary->safe_psql('postgres', "DROP DATABASE conflict_db;");
-$node_primary->safe_psql('postgres', "CREATE DATABASE conflict_db TEMPLATE conflict_db_template OID = 50001;");
+$node_primary->safe_psql('postgres',
+ "CREATE DATABASE conflict_db TEMPLATE conflict_db_template OID = 50001;");
-verify($node_primary, $node_standby, 1,
- "initial contents as expected");
+verify($node_primary, $node_standby, 1, "initial contents as expected");
# Again cause lots of dirty rows in shared_buffers, but use a different update
# value so we can check everything is OK
@@ -109,17 +110,17 @@ $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 2;");
cause_eviction(\%psql_primary, \%psql_standby);
verify($node_primary, $node_standby, 2,
- "update to reused relfilenode (due to DB oid conflict) is not lost");
+ "update to reused relfilenode (due to DB oid conflict) is not lost");
$node_primary->safe_psql('conflict_db', "VACUUM FULL large;");
$node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 3;");
-verify($node_primary, $node_standby, 3,
- "restored contents as expected");
+verify($node_primary, $node_standby, 3, "restored contents as expected");
# Test for old filehandles after moving a database in / out of tablespace
-$node_primary->safe_psql('postgres', q[CREATE TABLESPACE test_tablespace LOCATION '']);
+$node_primary->safe_psql('postgres',
+ q[CREATE TABLESPACE test_tablespace LOCATION '']);
# cause dirty buffers
$node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 4;");
@@ -127,23 +128,25 @@ $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 4;");
cause_eviction(\%psql_primary, \%psql_standby);
# move database back / forth
-$node_primary->safe_psql('postgres', 'ALTER DATABASE conflict_db SET TABLESPACE test_tablespace');
-$node_primary->safe_psql('postgres', 'ALTER DATABASE conflict_db SET TABLESPACE pg_default');
+$node_primary->safe_psql('postgres',
+ 'ALTER DATABASE conflict_db SET TABLESPACE test_tablespace');
+$node_primary->safe_psql('postgres',
+ 'ALTER DATABASE conflict_db SET TABLESPACE pg_default');
# cause dirty buffers
$node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 5;");
cause_eviction(\%psql_primary, \%psql_standby);
-verify($node_primary, $node_standby, 5,
- "post move contents as expected");
+verify($node_primary, $node_standby, 5, "post move contents as expected");
-$node_primary->safe_psql('postgres', 'ALTER DATABASE conflict_db SET TABLESPACE test_tablespace');
+$node_primary->safe_psql('postgres',
+ 'ALTER DATABASE conflict_db SET TABLESPACE test_tablespace');
$node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 7;");
cause_eviction(\%psql_primary, \%psql_standby);
$node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 8;");
-$node_primary->safe_psql('postgres', 'DROP DATABASE conflict_db');
-$node_primary->safe_psql('postgres', 'DROP TABLESPACE test_tablespace');
+$node_primary->safe_psql('postgres', 'DROP DATABASE conflict_db');
+$node_primary->safe_psql('postgres', 'DROP TABLESPACE test_tablespace');
$node_primary->safe_psql('postgres', 'REINDEX TABLE pg_database');
@@ -160,25 +163,28 @@ $node_standby->stop();
# Make sure that there weren't crashes during shutdown
-command_like([ 'pg_controldata', $node_primary->data_dir ],
- qr/Database cluster state:\s+shut down\n/, 'primary shut down ok');
-command_like([ 'pg_controldata', $node_standby->data_dir ],
- qr/Database cluster state:\s+shut down in recovery\n/, 'standby shut down ok');
+command_like(
+ [ 'pg_controldata', $node_primary->data_dir ],
+ qr/Database cluster state:\s+shut down\n/,
+ 'primary shut down ok');
+command_like(
+ [ 'pg_controldata', $node_standby->data_dir ],
+ qr/Database cluster state:\s+shut down in recovery\n/,
+ 'standby shut down ok');
done_testing();
sub verify
{
my ($primary, $standby, $counter, $message) = @_;
- my $query = "SELECT datab, count(*) FROM large GROUP BY 1 ORDER BY 1 LIMIT 10";
+ my $query =
+ "SELECT datab, count(*) FROM large GROUP BY 1 ORDER BY 1 LIMIT 10";
is($primary->safe_psql('conflict_db', $query),
- "$counter|4000",
- "primary: $message");
+ "$counter|4000", "primary: $message");
$primary->wait_for_catchup($standby);
is($standby->safe_psql('conflict_db', $query),
- "$counter|4000",
- "standby: $message");
+ "$counter|4000", "standby: $message");
}
sub cause_eviction
diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c
index ade4b51fb8d..ba3532a51e8 100644
--- a/src/test/regress/regress.c
+++ b/src/test/regress/regress.c
@@ -1226,8 +1226,8 @@ PG_FUNCTION_INFO_V1(get_columns_length);
Datum
get_columns_length(PG_FUNCTION_ARGS)
{
- ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
- Oid *type_oids;
+ ArrayType *ta = PG_GETARG_ARRAYTYPE_P(0);
+ Oid *type_oids;
int ntypes;
int column_offset = 0;
@@ -1241,7 +1241,7 @@ get_columns_length(PG_FUNCTION_ARGS)
ntypes = ArrayGetNItems(ARR_NDIM(ta), ARR_DIMS(ta));
for (int i = 0; i < ntypes; i++)
{
- Oid typeoid = type_oids[i];
+ Oid typeoid = type_oids[i];
int16 typlen;
bool typbyval;
char typalign;
diff --git a/src/test/ssl/t/001_ssltests.pl b/src/test/ssl/t/001_ssltests.pl
index 58d2bc336f5..c0b4a5739ce 100644
--- a/src/test/ssl/t/001_ssltests.pl
+++ b/src/test/ssl/t/001_ssltests.pl
@@ -19,10 +19,12 @@ if ($ENV{with_ssl} ne 'openssl')
}
my $ssl_server = SSL::Server->new();
+
sub sslkey
{
return $ssl_server->sslkey(@_);
}
+
sub switch_server_cert
{
$ssl_server->switch_server_cert(@_);
@@ -56,28 +58,30 @@ my $result = $node->safe_psql('postgres', "SHOW ssl_library");
is($result, $ssl_server->ssl_library(), 'ssl_library parameter');
$ssl_server->configure_test_server_for_ssl($node, $SERVERHOSTADDR,
- $SERVERHOSTCIDR, 'trust');
+ $SERVERHOSTCIDR, 'trust');
note "testing password-protected keys";
-switch_server_cert($node,
- certfile => 'server-cn-only',
- cafile => 'root+client_ca',
- keyfile => 'server-password',
+switch_server_cert(
+ $node,
+ certfile => 'server-cn-only',
+ cafile => 'root+client_ca',
+ keyfile => 'server-password',
passphrase_cmd => 'echo wrongpassword',
- restart => 'no' );
+ restart => 'no');
command_fails(
[ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ],
'restart fails with password-protected key file with wrong password');
$node->_update_pid(0);
-switch_server_cert($node,
- certfile => 'server-cn-only',
- cafile => 'root+client_ca',
- keyfile => 'server-password',
+switch_server_cert(
+ $node,
+ certfile => 'server-cn-only',
+ cafile => 'root+client_ca',
+ keyfile => 'server-password',
passphrase_cmd => 'echo secret1',
- restart => 'no');
+ restart => 'no');
command_ok(
[ 'pg_ctl', '-D', $node->data_dir, '-l', $node->logfile, 'restart' ],
@@ -115,7 +119,8 @@ switch_server_cert($node, certfile => 'server-cn-only');
# Set of default settings for SSL parameters in connection string. This
# makes the tests protected against any defaults the environment may have
# in ~/.postgresql/.
-my $default_ssl_connstr = "sslkey=invalid sslcert=invalid sslrootcert=invalid sslcrl=invalid sslcrldir=invalid";
+my $default_ssl_connstr =
+ "sslkey=invalid sslcert=invalid sslrootcert=invalid sslcrl=invalid sslcrldir=invalid";
$common_connstr =
"$default_ssl_connstr user=ssltestuser dbname=trustdb hostaddr=$SERVERHOSTADDR host=common-name.pg-ssltest.test";
@@ -416,9 +421,11 @@ switch_server_cert($node, certfile => 'server-ip-cn-and-dns-alt-names');
$node->connect_ok("$common_connstr host=192.0.2.1",
"certificate with both an IP CN and DNS SANs matches CN");
-$node->connect_ok("$common_connstr host=dns1.alt-name.pg-ssltest.test",
+$node->connect_ok(
+ "$common_connstr host=dns1.alt-name.pg-ssltest.test",
"certificate with both an IP CN and DNS SANs matches SAN 1");
-$node->connect_ok("$common_connstr host=dns2.alt-name.pg-ssltest.test",
+$node->connect_ok(
+ "$common_connstr host=dns2.alt-name.pg-ssltest.test",
"certificate with both an IP CN and DNS SANs matches SAN 2");
# Finally, test a server certificate that has no CN or SANs. Of course, that's
@@ -506,42 +513,50 @@ $node->connect_fails(
# correct client cert in unencrypted PEM
$node->connect_ok(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client.key'),
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client.key'),
"certificate authorization succeeds with correct client cert in PEM format"
);
# correct client cert in unencrypted DER
$node->connect_ok(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-der.key'),
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client-der.key'),
"certificate authorization succeeds with correct client cert in DER format"
);
# correct client cert in encrypted PEM
$node->connect_ok(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-encrypted-pem.key') . " sslpassword='dUmmyP^#+'",
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client-encrypted-pem.key')
+ . " sslpassword='dUmmyP^#+'",
"certificate authorization succeeds with correct client cert in encrypted PEM format"
);
# correct client cert in encrypted DER
$node->connect_ok(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-encrypted-der.key') . " sslpassword='dUmmyP^#+'",
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client-encrypted-der.key')
+ . " sslpassword='dUmmyP^#+'",
"certificate authorization succeeds with correct client cert in encrypted DER format"
);
# correct client cert in encrypted PEM with wrong password
$node->connect_fails(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-encrypted-pem.key') . " sslpassword='wrong'",
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client-encrypted-pem.key')
+ . " sslpassword='wrong'",
"certificate authorization fails with correct client cert and wrong password in encrypted PEM format",
expected_stderr =>
- qr!private key file \".*client-encrypted-pem\.key\": bad decrypt!,
-);
+ qr!private key file \".*client-encrypted-pem\.key\": bad decrypt!,);
# correct client cert using whole DN
my $dn_connstr = "$common_connstr dbname=certdb_dn";
$node->connect_ok(
- "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt " . sslkey('client-dn.key'),
+ "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt "
+ . sslkey('client-dn.key'),
"certificate authorization succeeds with DN mapping",
log_like => [
qr/connection authenticated: identity="CN=ssltestuser-dn,OU=Testing,OU=Engineering,O=PGDG" method=cert/
@@ -551,14 +566,16 @@ $node->connect_ok(
$dn_connstr = "$common_connstr dbname=certdb_dn_re";
$node->connect_ok(
- "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt " . sslkey('client-dn.key'),
+ "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt "
+ . sslkey('client-dn.key'),
"certificate authorization succeeds with DN regex mapping");
# same thing but using explicit CN
$dn_connstr = "$common_connstr dbname=certdb_cn";
$node->connect_ok(
- "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt " . sslkey('client-dn.key'),
+ "$dn_connstr user=ssltestuser sslcert=ssl/client-dn.crt "
+ . sslkey('client-dn.key'),
"certificate authorization succeeds with CN mapping",
# the full DN should still be used as the authenticated identity
log_like => [
@@ -576,7 +593,9 @@ TODO:
# correct client cert in encrypted PEM with empty password
$node->connect_fails(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-encrypted-pem.key') . " sslpassword=''",
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client-encrypted-pem.key')
+ . " sslpassword=''",
"certificate authorization fails with correct client cert and empty password in encrypted PEM format",
expected_stderr =>
qr!private key file \".*client-encrypted-pem\.key\": processing error!
@@ -584,7 +603,8 @@ TODO:
# correct client cert in encrypted PEM with no password
$node->connect_fails(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client-encrypted-pem.key'),
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client-encrypted-pem.key'),
"certificate authorization fails with correct client cert and no password in encrypted PEM format",
expected_stderr =>
qr!private key file \".*client-encrypted-pem\.key\": processing error!
@@ -630,7 +650,8 @@ command_like(
'-P',
'null=_null_',
'-d',
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client.key'),
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client.key'),
'-c',
"SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()"
],
@@ -644,7 +665,8 @@ SKIP:
skip "Permissions check not enforced on Windows", 2 if ($windows_os);
$node->connect_fails(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client_wrongperms.key'),
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client_wrongperms.key'),
"certificate authorization fails because of file permissions",
expected_stderr =>
qr!private key file \".*client_wrongperms\.key\" has group or world access!
@@ -653,7 +675,8 @@ SKIP:
# client cert belonging to another user
$node->connect_fails(
- "$common_connstr user=anotheruser sslcert=ssl/client.crt " . sslkey('client.key'),
+ "$common_connstr user=anotheruser sslcert=ssl/client.crt "
+ . sslkey('client.key'),
"certificate authorization fails with client cert belonging to another user",
expected_stderr =>
qr/certificate authentication failed for user "anotheruser"/,
@@ -663,7 +686,8 @@ $node->connect_fails(
# revoked client cert
$node->connect_fails(
- "$common_connstr user=ssltestuser sslcert=ssl/client-revoked.crt " . sslkey('client-revoked.key'),
+ "$common_connstr user=ssltestuser sslcert=ssl/client-revoked.crt "
+ . sslkey('client-revoked.key'),
"certificate authorization fails with revoked client cert",
expected_stderr => qr/SSL error: sslv3 alert certificate revoked/,
# revoked certificates should not authenticate the user
@@ -676,13 +700,15 @@ $common_connstr =
"$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=verifydb hostaddr=$SERVERHOSTADDR host=localhost";
$node->connect_ok(
- "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . sslkey('client.key'),
+ "$common_connstr user=ssltestuser sslcert=ssl/client.crt "
+ . sslkey('client.key'),
"auth_option clientcert=verify-full succeeds with matching username and Common Name",
# verify-full does not provide authentication
log_unlike => [qr/connection authenticated:/],);
$node->connect_fails(
- "$common_connstr user=anotheruser sslcert=ssl/client.crt " . sslkey('client.key'),
+ "$common_connstr user=anotheruser sslcert=ssl/client.crt "
+ . sslkey('client.key'),
"auth_option clientcert=verify-full fails with mismatching username and Common Name",
expected_stderr =>
qr/FATAL: .* "trust" authentication failed for user "anotheruser"/,
@@ -692,7 +718,8 @@ $node->connect_fails(
# Check that connecting with auth-option verify-ca in pg_hba :
# works, when username doesn't match Common Name
$node->connect_ok(
- "$common_connstr user=yetanotheruser sslcert=ssl/client.crt " . sslkey('client.key'),
+ "$common_connstr user=yetanotheruser sslcert=ssl/client.crt "
+ . sslkey('client.key'),
"auth_option clientcert=verify-ca succeeds with mismatching username and Common Name",
# verify-full does not provide authentication
log_unlike => [qr/connection authenticated:/],);
@@ -700,7 +727,9 @@ $node->connect_ok(
# intermediate client_ca.crt is provided by client, and isn't in server's ssl_ca_file
switch_server_cert($node, certfile => 'server-cn-only', cafile => 'root_ca');
$common_connstr =
- "$default_ssl_connstr user=ssltestuser dbname=certdb " . sslkey('client.key') . " sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR host=localhost";
+ "$default_ssl_connstr user=ssltestuser dbname=certdb "
+ . sslkey('client.key')
+ . " sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR host=localhost";
$node->connect_ok(
"$common_connstr sslmode=require sslcert=ssl/client+client_ca.crt",
@@ -711,11 +740,15 @@ $node->connect_fails(
expected_stderr => qr/SSL error: tlsv1 alert unknown ca/);
# test server-side CRL directory
-switch_server_cert($node, certfile => 'server-cn-only', crldir => 'root+client-crldir');
+switch_server_cert(
+ $node,
+ certfile => 'server-cn-only',
+ crldir => 'root+client-crldir');
# revoked client cert
$node->connect_fails(
- "$common_connstr user=ssltestuser sslcert=ssl/client-revoked.crt " . sslkey('client-revoked.key'),
+ "$common_connstr user=ssltestuser sslcert=ssl/client-revoked.crt "
+ . sslkey('client-revoked.key'),
"certificate authorization fails with revoked client cert with server-side CRL directory",
expected_stderr => qr/SSL error: sslv3 alert certificate revoked/);
diff --git a/src/test/ssl/t/002_scram.pl b/src/test/ssl/t/002_scram.pl
index 4354901f539..588f47a39b9 100644
--- a/src/test/ssl/t/002_scram.pl
+++ b/src/test/ssl/t/002_scram.pl
@@ -22,10 +22,12 @@ if ($ENV{with_ssl} ne 'openssl')
}
my $ssl_server = SSL::Server->new();
+
sub sslkey
{
return $ssl_server->sslkey(@_);
}
+
sub switch_server_cert
{
$ssl_server->switch_server_cert(@_);
@@ -57,8 +59,11 @@ $ENV{PGPORT} = $node->port;
$node->start;
# Configure server for SSL connections, with password handling.
-$ssl_server->configure_test_server_for_ssl($node, $SERVERHOSTADDR, $SERVERHOSTCIDR,
- "scram-sha-256", 'password' => "pass", 'password_enc' => "scram-sha-256");
+$ssl_server->configure_test_server_for_ssl(
+ $node, $SERVERHOSTADDR, $SERVERHOSTCIDR,
+ "scram-sha-256",
+ 'password' => "pass",
+ 'password_enc' => "scram-sha-256");
switch_server_cert($node, certfile => 'server-cn-only');
$ENV{PGPASSWORD} = "pass";
$common_connstr =
@@ -104,7 +109,7 @@ $node->connect_fails(
# because channel binding is not performed. Note that ssl/client.key may
# be used in a different test, so the name of this temporary client key
# is chosen here to be unique.
-my $cert_tempdir = PostgreSQL::Test::Utils::tempdir();
+my $cert_tempdir = PostgreSQL::Test::Utils::tempdir();
my $client_tmp_key = "$cert_tempdir/client_scram.key";
copy("ssl/client.key", "$cert_tempdir/client_scram.key")
or die
diff --git a/src/test/ssl/t/003_sslinfo.pl b/src/test/ssl/t/003_sslinfo.pl
index 96a5db86721..87fb18a31e0 100644
--- a/src/test/ssl/t/003_sslinfo.pl
+++ b/src/test/ssl/t/003_sslinfo.pl
@@ -21,10 +21,12 @@ if ($ENV{with_ssl} ne 'openssl')
#### Some configuration
my $ssl_server = SSL::Server->new();
+
sub sslkey
{
return $ssl_server->sslkey(@_);
}
+
sub switch_server_cert
{
$ssl_server->switch_server_cert(@_);
@@ -52,8 +54,8 @@ $ENV{PGHOST} = $node->host;
$ENV{PGPORT} = $node->port;
$node->start;
-$ssl_server->configure_test_server_for_ssl($node, $SERVERHOSTADDR, $SERVERHOSTCIDR,
- 'trust', extensions => [ qw(sslinfo) ]);
+$ssl_server->configure_test_server_for_ssl($node, $SERVERHOSTADDR,
+ $SERVERHOSTCIDR, 'trust', extensions => [qw(sslinfo)]);
# We aren't using any CRL's in this suite so we can keep using server-revoked
# as server certificate for simple client.crt connection much like how the
@@ -63,11 +65,13 @@ switch_server_cert($node, certfile => 'server-revoked');
# Set of default settings for SSL parameters in connection string. This
# makes the tests protected against any defaults the environment may have
# in ~/.postgresql/.
-my $default_ssl_connstr = "sslkey=invalid sslcert=invalid sslrootcert=invalid sslcrl=invalid sslcrldir=invalid";
+my $default_ssl_connstr =
+ "sslkey=invalid sslcert=invalid sslrootcert=invalid sslcrl=invalid sslcrldir=invalid";
$common_connstr =
- "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=certdb hostaddr=$SERVERHOSTADDR host=localhost " .
- "user=ssltestuser sslcert=ssl/client_ext.crt " . sslkey('client_ext.key');
+ "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require dbname=certdb hostaddr=$SERVERHOSTADDR host=localhost "
+ . "user=ssltestuser sslcert=ssl/client_ext.crt "
+ . sslkey('client_ext.key');
# Make sure we can connect even though previous test suites have established this
$node->connect_ok(
@@ -77,62 +81,85 @@ $node->connect_ok(
my $result;
-$result = $node->safe_psql("certdb", "SELECT ssl_is_used();",
- connstr => $common_connstr);
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT ssl_is_used();",
+ connstr => $common_connstr);
is($result, 't', "ssl_is_used() for TLS connection");
-$result = $node->safe_psql("certdb", "SELECT ssl_version();",
- connstr => $common_connstr . " ssl_min_protocol_version=TLSv1.2 " .
- "ssl_max_protocol_version=TLSv1.2");
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT ssl_version();",
+ connstr => $common_connstr
+ . " ssl_min_protocol_version=TLSv1.2 "
+ . "ssl_max_protocol_version=TLSv1.2");
is($result, 'TLSv1.2', "ssl_version() correctly returning TLS protocol");
-$result = $node->safe_psql("certdb",
- "SELECT ssl_cipher() = cipher FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
- connstr => $common_connstr);
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT ssl_cipher() = cipher FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
+ connstr => $common_connstr);
is($result, 't', "ssl_cipher() compared with pg_stat_ssl");
-$result = $node->safe_psql("certdb", "SELECT ssl_client_cert_present();",
- connstr => $common_connstr);
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT ssl_client_cert_present();",
+ connstr => $common_connstr);
is($result, 't', "ssl_client_cert_present() for connection with cert");
-$result = $node->safe_psql("trustdb", "SELECT ssl_client_cert_present();",
- connstr => "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require " .
- "dbname=trustdb hostaddr=$SERVERHOSTADDR user=ssltestuser host=localhost");
+$result = $node->safe_psql(
+ "trustdb",
+ "SELECT ssl_client_cert_present();",
+ connstr =>
+ "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require "
+ . "dbname=trustdb hostaddr=$SERVERHOSTADDR user=ssltestuser host=localhost"
+);
is($result, 'f', "ssl_client_cert_present() for connection without cert");
-$result = $node->safe_psql("certdb",
- "SELECT ssl_client_serial() = client_serial FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
- connstr => $common_connstr);
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT ssl_client_serial() = client_serial FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
+ connstr => $common_connstr);
is($result, 't', "ssl_client_serial() compared with pg_stat_ssl");
# Must not use safe_psql since we expect an error here
-$result = $node->psql("certdb", "SELECT ssl_client_dn_field('invalid');",
- connstr => $common_connstr);
+$result = $node->psql(
+ "certdb",
+ "SELECT ssl_client_dn_field('invalid');",
+ connstr => $common_connstr);
is($result, '3', "ssl_client_dn_field() for an invalid field");
-$result = $node->safe_psql("trustdb", "SELECT ssl_client_dn_field('commonName');",
- connstr => "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require " .
- "dbname=trustdb hostaddr=$SERVERHOSTADDR user=ssltestuser host=localhost");
+$result = $node->safe_psql(
+ "trustdb",
+ "SELECT ssl_client_dn_field('commonName');",
+ connstr =>
+ "$default_ssl_connstr sslrootcert=ssl/root+server_ca.crt sslmode=require "
+ . "dbname=trustdb hostaddr=$SERVERHOSTADDR user=ssltestuser host=localhost"
+);
is($result, '', "ssl_client_dn_field() for connection without cert");
-$result = $node->safe_psql("certdb",
- "SELECT '/CN=' || ssl_client_dn_field('commonName') = client_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
- connstr => $common_connstr);
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT '/CN=' || ssl_client_dn_field('commonName') = client_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
+ connstr => $common_connstr);
is($result, 't', "ssl_client_dn_field() for commonName");
-$result = $node->safe_psql("certdb",
- "SELECT ssl_issuer_dn() = issuer_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
- connstr => $common_connstr);
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT ssl_issuer_dn() = issuer_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
+ connstr => $common_connstr);
is($result, 't', "ssl_issuer_dn() for connection with cert");
-$result = $node->safe_psql("certdb",
- "SELECT '/CN=' || ssl_issuer_field('commonName') = issuer_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
- connstr => $common_connstr);
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT '/CN=' || ssl_issuer_field('commonName') = issuer_dn FROM pg_stat_ssl WHERE pid = pg_backend_pid();",
+ connstr => $common_connstr);
is($result, 't', "ssl_issuer_field() for commonName");
-$result = $node->safe_psql("certdb",
- "SELECT value, critical FROM ssl_extension_info() WHERE name = 'basicConstraints';",
- connstr => $common_connstr);
+$result = $node->safe_psql(
+ "certdb",
+ "SELECT value, critical FROM ssl_extension_info() WHERE name = 'basicConstraints';",
+ connstr => $common_connstr);
is($result, 'CA:FALSE|t', 'extract extension from cert');
done_testing();
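Behind the "Must not use safe_psql since we expect an error here" comment above: safe_psql() dies as soon as psql reports a failure, so a query whose error is the expected outcome goes through psql() instead, which hands the failure back as an exit code. A minimal sketch, assuming the $node and $common_connstr configured in this file:

    # In scalar context psql() returns the exit code; 3 means the query itself
    # failed, which is exactly what the test wants to see here.
    my $rc = $node->psql(
        "certdb",
        "SELECT ssl_client_dn_field('invalid');",
        connstr => $common_connstr);
    is($rc, 3, 'expected error surfaces as psql exit code 3');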
diff --git a/src/test/ssl/t/SSL/Backend/OpenSSL.pm b/src/test/ssl/t/SSL/Backend/OpenSSL.pm
index d6d99fa636a..aed6005b432 100644
--- a/src/test/ssl/t/SSL/Backend/OpenSSL.pm
+++ b/src/test/ssl/t/SSL/Backend/OpenSSL.pm
@@ -84,7 +84,7 @@ sub init
# the tests. To get the full path for inclusion in connection strings, the
# %key hash can be interrogated.
my $cert_tempdir = PostgreSQL::Test::Utils::tempdir();
- my @keys = (
+ my @keys = (
"client.key", "client-revoked.key",
"client-der.key", "client-encrypted-pem.key",
"client-encrypted-der.key", "client-dn.key",
@@ -108,8 +108,10 @@ sub init
or die
"couldn't copy ssl/client_key to $cert_tempdir/client_wrongperms.key for permission change: $!";
chmod 0644, "$cert_tempdir/client_wrongperms.key"
- or die "failed to change permissions on $cert_tempdir/client_wrongperms.key: $!";
- $self->{key}->{'client_wrongperms.key'} = "$cert_tempdir/client_wrongperms.key";
+ or die
+ "failed to change permissions on $cert_tempdir/client_wrongperms.key: $!";
+ $self->{key}->{'client_wrongperms.key'} =
+ "$cert_tempdir/client_wrongperms.key";
$self->{key}->{'client_wrongperms.key'} =~ s!\\!/!g
if $PostgreSQL::Test::Utils::windows_os;
}
@@ -171,9 +173,10 @@ sub set_server_cert
{
my ($self, $params) = @_;
- $params->{cafile} = 'root+client_ca' unless defined $params->{cafile};
+ $params->{cafile} = 'root+client_ca' unless defined $params->{cafile};
$params->{crlfile} = 'root+client.crl' unless defined $params->{crlfile};
- $params->{keyfile} = $params->{certfile} unless defined $params->{keyfile};
+ $params->{keyfile} = $params->{certfile}
+ unless defined $params->{keyfile};
my $sslconf =
"ssl_ca_file='$params->{cafile}.crt'\n"
diff --git a/src/test/ssl/t/SSL/Server.pm b/src/test/ssl/t/SSL/Server.pm
index de460c2d96f..62f54dcbf16 100644
--- a/src/test/ssl/t/SSL/Server.pm
+++ b/src/test/ssl/t/SSL/Server.pm
@@ -94,7 +94,7 @@ sub new
bless $self, $class;
if ($flavor =~ /\Aopenssl\z/i)
{
- $self->{flavor} = 'openssl';
+ $self->{flavor} = 'openssl';
$self->{backend} = SSL::Backend::OpenSSL->new();
}
else
@@ -115,7 +115,7 @@ string.
sub sslkey
{
- my $self = shift;
+ my $self = shift;
my $keyfile = shift;
my $backend = $self->{backend};
@@ -140,12 +140,14 @@ C<listen_addresses> and B<cidr> for configuring C<pg_hba.conf>.
sub configure_test_server_for_ssl
{
- my $self=shift;
+ my $self = shift;
my ($node, $serverhost, $servercidr, $authmethod, %params) = @_;
my $backend = $self->{backend};
- my $pgdata = $node->data_dir;
+ my $pgdata = $node->data_dir;
- my @databases = ( 'trustdb', 'certdb', 'certdb_dn', 'certdb_dn_re', 'certdb_cn', 'verifydb' );
+ my @databases = (
+ 'trustdb', 'certdb', 'certdb_dn', 'certdb_dn_re',
+ 'certdb_cn', 'verifydb');
# Create test users and databases
$node->psql('postgres', "CREATE USER ssltestuser");
@@ -162,7 +164,7 @@ sub configure_test_server_for_ssl
if (defined($params{password}))
{
die "Password encryption must be specified when password is set"
- unless defined($params{password_enc});
+ unless defined($params{password_enc});
$node->psql('postgres',
"SET password_encryption='$params{password_enc}'; ALTER USER ssltestuser PASSWORD '$params{password}';"
@@ -179,7 +181,7 @@ sub configure_test_server_for_ssl
# Create any extensions requested in the setup
if (defined($params{extensions}))
{
- foreach my $extension (@{$params{extensions}})
+ foreach my $extension (@{ $params{extensions} })
{
foreach my $db (@databases)
{
@@ -227,7 +229,7 @@ Get the name of the currently used SSL backend.
sub ssl_library
{
- my $self = shift;
+ my $self = shift;
my $backend = $self->{backend};
return $backend->get_library();
@@ -282,16 +284,17 @@ returning.
sub switch_server_cert
{
- my $self = shift;
- my $node = shift;
+ my $self = shift;
+ my $node = shift;
my $backend = $self->{backend};
- my %params = @_;
- my $pgdata = $node->data_dir;
+ my %params = @_;
+ my $pgdata = $node->data_dir;
open my $sslconf, '>', "$pgdata/sslconfig.conf";
print $sslconf "ssl=on\n";
print $sslconf $backend->set_server_cert(\%params);
- print $sslconf "ssl_passphrase_command='" . $params{passphrase_cmd} . "'\n"
+ print $sslconf "ssl_passphrase_command='"
+ . $params{passphrase_cmd} . "'\n"
if defined $params{passphrase_cmd};
close $sslconf;
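For orientation, the typical call sequence a test script drives this module with looks roughly like the sketch below; the passphrase command is an illustrative value, not something taken from these hunks:

    # new() picks the backend from the build's SSL flavor;
    # configure_test_server_for_ssl() creates the test roles/databases and
    # base HBA rules, and switch_server_cert() writes sslconfig.conf with the
    # chosen certificate and has the server pick it up.
    my $ssl_server = SSL::Server->new();
    $ssl_server->configure_test_server_for_ssl($node, $SERVERHOSTADDR,
        $SERVERHOSTCIDR, 'trust');
    $ssl_server->switch_server_cert(
        $node,
        certfile       => 'server-cn-only',
        passphrase_cmd => 'echo secret1234');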
diff --git a/src/test/subscription/t/001_rep_changes.pl b/src/test/subscription/t/001_rep_changes.pl
index d35a133f154..f53b3b7db0c 100644
--- a/src/test/subscription/t/001_rep_changes.pl
+++ b/src/test/subscription/t/001_rep_changes.pl
@@ -427,7 +427,9 @@ $node_subscriber->safe_psql('postgres',
);
$node_publisher->poll_query_until('postgres',
"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = 'tap_sub' AND state = 'streaming';"
-) or die "Timed out while waiting for apply to restart after changing CONNECTION";
+ )
+ or die
+ "Timed out while waiting for apply to restart after changing CONNECTION";
$oldpid = $node_publisher->safe_psql('postgres',
"SELECT pid FROM pg_stat_replication WHERE application_name = 'tap_sub' AND state = 'streaming';"
@@ -437,7 +439,9 @@ $node_subscriber->safe_psql('postgres',
);
$node_publisher->poll_query_until('postgres',
"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = 'tap_sub' AND state = 'streaming';"
-) or die "Timed out while waiting for apply to restart after changing PUBLICATION";
+ )
+ or die
+ "Timed out while waiting for apply to restart after changing PUBLICATION";
$node_publisher->safe_psql('postgres',
"INSERT INTO tab_ins SELECT generate_series(1001,1100)");
@@ -489,16 +493,14 @@ $node_publisher->safe_psql('postgres', "INSERT INTO tab_notrep VALUES (11)");
$node_publisher->wait_for_catchup('tap_sub');
$logfile = slurp_file($node_publisher->logfile, $log_location);
-ok( $logfile =~
- qr/skipped replication of an empty transaction with XID/,
+ok($logfile =~ qr/skipped replication of an empty transaction with XID/,
'empty transaction is skipped');
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT count(*) FROM tab_notrep");
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_notrep");
is($result, qq(0), 'check non-replicated table is empty on subscriber');
-$node_publisher->append_conf('postgresql.conf',
- "log_min_messages = warning");
+$node_publisher->append_conf('postgresql.conf', "log_min_messages = warning");
$node_publisher->reload;
# note that data are different on provider and subscriber
@@ -519,7 +521,9 @@ $node_subscriber->safe_psql('postgres',
"ALTER SUBSCRIPTION tap_sub RENAME TO tap_sub_renamed");
$node_publisher->poll_query_until('postgres',
"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = 'tap_sub_renamed' AND state = 'streaming';"
-) or die "Timed out while waiting for apply to restart after renaming SUBSCRIPTION";
+ )
+ or die
+ "Timed out while waiting for apply to restart after renaming SUBSCRIPTION";
# check all the cleanup
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_renamed");
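All of the poll_query_until() calls re-indented above rely on the same low-precedence "or" idiom; in isolation, and assuming a $node_publisher plus an $oldpid captured beforehand, it reads as:

    # poll_query_until() reruns the query until it returns true or a timeout
    # expires; 'or' binds loosely, so the die fires only if polling gives up.
    $node_publisher->poll_query_until('postgres',
        "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = 'tap_sub' AND state = 'streaming';"
      )
      or die "Timed out while waiting for the apply worker to restart";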
diff --git a/src/test/subscription/t/007_ddl.pl b/src/test/subscription/t/007_ddl.pl
index 39c32eda44d..cdd6b119ffb 100644
--- a/src/test/subscription/t/007_ddl.pl
+++ b/src/test/subscription/t/007_ddl.pl
@@ -62,21 +62,21 @@ $node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
# Specifying non-existent publication along with add publication.
-($ret, $stdout, $stderr) = $node_subscriber->psql(
- 'postgres',
+($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
"ALTER SUBSCRIPTION mysub1 ADD PUBLICATION non_existent_pub1, non_existent_pub2"
);
ok( $stderr =~
m/WARNING: publications "non_existent_pub1", "non_existent_pub2" do not exist in the publisher/,
- "Alter subscription add publication throws warning for non-existent publications");
+ "Alter subscription add publication throws warning for non-existent publications"
+);
# Specifying non-existent publication along with set publication.
($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
- "ALTER SUBSCRIPTION mysub1 SET PUBLICATION non_existent_pub"
-);
+ "ALTER SUBSCRIPTION mysub1 SET PUBLICATION non_existent_pub");
ok( $stderr =~
m/WARNING: publication "non_existent_pub" does not exist in the publisher/,
- "Alter subscription set publication throws warning for non-existent publication");
+ "Alter subscription set publication throws warning for non-existent publication"
+);
$node_subscriber->stop;
$node_publisher->stop;
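The reflowed psql() calls above use list context so the server's WARNING can be inspected; condensed, with the subscription and publication names from this test:

    # In list context psql() returns (exit code, stdout, stderr); warnings
    # emitted by the server arrive on stderr and can be matched directly.
    my ($ret, $stdout, $stderr) = $node_subscriber->psql('postgres',
        "ALTER SUBSCRIPTION mysub1 SET PUBLICATION non_existent_pub");
    ok( $stderr =~
          m/publication "non_existent_pub" does not exist in the publisher/,
        'warning is reported for a missing publication');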
diff --git a/src/test/subscription/t/013_partition.pl b/src/test/subscription/t/013_partition.pl
index 66e63e755ef..e7f4a94f197 100644
--- a/src/test/subscription/t/013_partition.pl
+++ b/src/test/subscription/t/013_partition.pl
@@ -413,7 +413,8 @@ $node_publisher->safe_psql('postgres',
$node_publisher->safe_psql('postgres',
"CREATE TABLE tab4 (a int PRIMARY KEY) PARTITION BY LIST (a)");
$node_publisher->safe_psql('postgres',
- "CREATE TABLE tab4_1 PARTITION OF tab4 FOR VALUES IN (0, 1) PARTITION BY LIST (a)");
+ "CREATE TABLE tab4_1 PARTITION OF tab4 FOR VALUES IN (0, 1) PARTITION BY LIST (a)"
+);
$node_publisher->safe_psql('postgres',
"CREATE TABLE tab4_1_1 PARTITION OF tab4_1 FOR VALUES IN (0, 1)");
@@ -479,11 +480,9 @@ $node_subscriber2->safe_psql('postgres',
# Note: We create two separate tables, not a partitioned one, so that we can
# easily identify through which relation the changes were replicated.
$node_subscriber2->safe_psql('postgres',
- "CREATE TABLE tab4 (a int PRIMARY KEY)"
-);
+ "CREATE TABLE tab4 (a int PRIMARY KEY)");
$node_subscriber2->safe_psql('postgres',
- "CREATE TABLE tab4_1 (a int PRIMARY KEY)"
-);
+ "CREATE TABLE tab4_1 (a int PRIMARY KEY)");
# Publication that sub2 points to now publishes via root, so must update
# subscription target relations. We set the list of publications so that
# the FOR ALL TABLES publication is second (the list order matters).
@@ -497,9 +496,8 @@ $node_subscriber2->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
# check that data is synced correctly
-$result = $node_subscriber1->safe_psql('postgres',
- "SELECT c, a FROM tab2");
-is( $result, qq(sub1_tab2|1), 'initial data synced for pub_viaroot');
+$result = $node_subscriber1->safe_psql('postgres', "SELECT c, a FROM tab2");
+is($result, qq(sub1_tab2|1), 'initial data synced for pub_viaroot');
# insert
$node_publisher->safe_psql('postgres', "INSERT INTO tab1 VALUES (1), (0)");
@@ -512,8 +510,7 @@ $node_publisher->safe_psql('postgres',
# Insert a row into the leaf partition, should be replicated through the
# partition root (thanks to the FOR ALL TABLES partition).
-$node_publisher->safe_psql('postgres',
- "INSERT INTO tab4 VALUES (0)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab4 VALUES (0)");
$node_publisher->wait_for_catchup('sub_viaroot');
$node_publisher->wait_for_catchup('sub2');
@@ -555,13 +552,13 @@ sub2_tab3|5), 'inserts into tab3 replicated');
# tab4 change should be replicated through the root partition, which
# maps to the tab4 relation on subscriber.
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab4 ORDER BY 1");
-is( $result, qq(0), 'inserts into tab4 replicated');
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4 ORDER BY 1");
+is($result, qq(0), 'inserts into tab4 replicated');
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab4_1 ORDER BY 1");
-is( $result, qq(), 'inserts into tab4_1 replicated');
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4_1 ORDER BY 1");
+is($result, qq(), 'inserts into tab4_1 replicated');
# now switch the order of publications in the list, try again, the result
@@ -576,21 +573,20 @@ $node_subscriber2->poll_query_until('postgres', $synced_query)
# Insert a change into the leaf partition, should be replicated through
# the partition root (thanks to the FOR ALL TABLES partition).
-$node_publisher->safe_psql('postgres',
- "INSERT INTO tab4 VALUES (1)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab4 VALUES (1)");
$node_publisher->wait_for_catchup('sub2');
# tab4 change should be replicated through the root partition, which
# maps to the tab4 relation on subscriber.
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab4 ORDER BY 1");
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4 ORDER BY 1");
is( $result, qq(0
1), 'inserts into tab4 replicated');
-$result = $node_subscriber2->safe_psql('postgres',
- "SELECT a FROM tab4_1 ORDER BY 1");
-is( $result, qq(), 'inserts into tab4_1 replicated');
+$result =
+ $node_subscriber2->safe_psql('postgres', "SELECT a FROM tab4_1 ORDER BY 1");
+is($result, qq(), 'inserts into tab4_1 replicated');
# update (replicated as update)
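The pub_viaroot behaviour referenced in these hunks ("replicated through the root partition") comes from the publish_via_partition_root publication option, which is not visible in the reformatted lines; the statement below is therefore an illustrative sketch with a hypothetical publication name, not a quote from the test:

    # With publish_via_partition_root = true, changes made to leaf partitions
    # are published under the root table's identity, so the subscriber only
    # needs a plain table named like the root.
    $node_publisher->safe_psql('postgres',
        "CREATE PUBLICATION pub_root_example FOR TABLE tab4 WITH (publish_via_partition_root = true)"
    );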
diff --git a/src/test/subscription/t/021_twophase.pl b/src/test/subscription/t/021_twophase.pl
index aacc0fcf462..c3e9857f7ce 100644
--- a/src/test/subscription/t/021_twophase.pl
+++ b/src/test/subscription/t/021_twophase.pl
@@ -29,7 +29,8 @@ $node_subscriber->start;
# Create some pre-existing content on publisher
$node_publisher->safe_psql('postgres',
"CREATE TABLE tab_full (a int PRIMARY KEY)");
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full SELECT generate_series(1,10);
PREPARE TRANSACTION 'some_initial_data';
@@ -45,7 +46,8 @@ $node_publisher->safe_psql('postgres',
"CREATE PUBLICATION tap_pub FOR TABLE tab_full");
my $appname = 'tap_sub';
-$node_subscriber->safe_psql('postgres', "
+$node_subscriber->safe_psql(
+ 'postgres', "
CREATE SUBSCRIPTION tap_sub
CONNECTION '$publisher_connstr application_name=$appname'
PUBLICATION tap_pub
@@ -56,13 +58,13 @@ $node_publisher->wait_for_catchup($appname);
# Also wait for initial table sync to finish
my $synced_query =
- "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
+ "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
$node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
# Also wait for two-phase to be enabled
my $twophase_query =
- "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
+ "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
$node_subscriber->poll_query_until('postgres', $twophase_query)
or die "Timed out while waiting for subscriber to enable twophase";
@@ -71,7 +73,8 @@ $node_subscriber->poll_query_until('postgres', $twophase_query)
# then COMMIT PREPARED
###############################
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (11);
PREPARE TRANSACTION 'test_prepared_tab_full';");
@@ -79,19 +82,23 @@ $node_publisher->safe_psql('postgres', "
$node_publisher->wait_for_catchup($appname);
# check that transaction is in prepared state on subscriber
-my $result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+my $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber');
# check that 2PC gets committed on subscriber
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab_full';");
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab_full';");
$node_publisher->wait_for_catchup($appname);
# check that transaction is committed on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 11;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 11;");
is($result, qq(1), 'Row inserted via 2PC has committed on subscriber');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is committed on subscriber');
###############################
@@ -99,7 +106,8 @@ is($result, qq(0), 'transaction is committed on subscriber');
# then ROLLBACK PREPARED
###############################
-$node_publisher->safe_psql('postgres',"
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (12);
PREPARE TRANSACTION 'test_prepared_tab_full';");
@@ -107,19 +115,23 @@ $node_publisher->safe_psql('postgres',"
$node_publisher->wait_for_catchup($appname);
# check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber');
# check that 2PC gets aborted on subscriber
-$node_publisher->safe_psql('postgres', "ROLLBACK PREPARED 'test_prepared_tab_full';");
+$node_publisher->safe_psql('postgres',
+ "ROLLBACK PREPARED 'test_prepared_tab_full';");
$node_publisher->wait_for_catchup($appname);
# check that transaction is aborted on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 12;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 12;");
is($result, qq(0), 'Row inserted via 2PC is not present on subscriber');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is aborted on subscriber');
###############################
@@ -127,7 +139,8 @@ is($result, qq(0), 'transaction is aborted on subscriber');
# (publisher and subscriber crash)
###############################
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (12);
INSERT INTO tab_full VALUES (13);
@@ -140,11 +153,13 @@ $node_publisher->start;
$node_subscriber->start;
# rollback post the restart
-$node_publisher->safe_psql('postgres', "ROLLBACK PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "ROLLBACK PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check inserts are rolled back
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a IN (12,13);");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a IN (12,13);");
is($result, qq(0), 'Rows rolled back are not on the subscriber');
###############################
@@ -152,7 +167,8 @@ is($result, qq(0), 'Rows rolled back are not on the subscriber');
# (publisher and subscriber crash)
###############################
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (12);
INSERT INTO tab_full VALUES (13);
@@ -165,11 +181,13 @@ $node_publisher->start;
$node_subscriber->start;
# commit post the restart
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check inserts are visible
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a IN (12,13);");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a IN (12,13);");
is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
###############################
@@ -177,7 +195,8 @@ is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
# (subscriber only crash)
###############################
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (14);
INSERT INTO tab_full VALUES (15);
@@ -187,11 +206,13 @@ $node_subscriber->stop('immediate');
$node_subscriber->start;
# commit post the restart
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check inserts are visible
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a IN (14,15);");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a IN (14,15);");
is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
###############################
@@ -199,7 +220,8 @@ is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
# (publisher only crash)
###############################
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (16);
INSERT INTO tab_full VALUES (17);
@@ -209,11 +231,13 @@ $node_publisher->stop('immediate');
$node_publisher->start;
# commit post the restart
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check inserts are visible
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_full where a IN (16,17);");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a IN (16,17);");
is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
###############################
@@ -221,7 +245,8 @@ is($result, qq(2), 'Rows inserted via 2PC are visible on the subscriber');
###############################
# check that 2PC gets replicated to subscriber
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (21);
SAVEPOINT sp_inner;
@@ -232,7 +257,8 @@ $node_publisher->safe_psql('postgres', "
$node_publisher->wait_for_catchup($appname);
# check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber');
# COMMIT
@@ -241,11 +267,13 @@ $node_publisher->safe_psql('postgres', "COMMIT PREPARED 'outer';");
$node_publisher->wait_for_catchup($appname);
# check the transaction state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is ended on subscriber');
# check inserts are visible. 22 should be rolled back. 21 should be committed.
-$result = $node_subscriber->safe_psql('postgres', "SELECT a FROM tab_full where a IN (21,22);");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT a FROM tab_full where a IN (21,22);");
is($result, qq(21), 'Rows committed are on the subscriber');
###############################
@@ -253,14 +281,16 @@ is($result, qq(21), 'Rows committed are on the subscriber');
###############################
# check that 2PC gets replicated to subscriber
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (51);
PREPARE TRANSACTION '';");
$node_publisher->wait_for_catchup($appname);
# check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber');
# ROLLBACK
@@ -269,7 +299,8 @@ $node_publisher->safe_psql('postgres', "ROLLBACK PREPARED '';");
# check that 2PC gets aborted on subscriber
$node_publisher->wait_for_catchup($appname);
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is aborted on subscriber');
###############################
@@ -277,11 +308,15 @@ is($result, qq(0), 'transaction is aborted on subscriber');
###############################
#create some test tables for copy tests
-$node_publisher->safe_psql('postgres', "CREATE TABLE tab_copy (a int PRIMARY KEY)");
-$node_publisher->safe_psql('postgres', "INSERT INTO tab_copy SELECT generate_series(1,5);");
-$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_copy (a int PRIMARY KEY)");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE tab_copy (a int PRIMARY KEY)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO tab_copy SELECT generate_series(1,5);");
+$node_subscriber->safe_psql('postgres',
+ "CREATE TABLE tab_copy (a int PRIMARY KEY)");
$node_subscriber->safe_psql('postgres', "INSERT INTO tab_copy VALUES (88);");
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
is($result, qq(1), 'initial data in subscriber table');
# Setup logical replication
@@ -289,7 +324,8 @@ $node_publisher->safe_psql('postgres',
"CREATE PUBLICATION tap_pub_copy FOR TABLE tab_copy;");
my $appname_copy = 'appname_copy';
-$node_subscriber->safe_psql('postgres', "
+$node_subscriber->safe_psql(
+ 'postgres', "
CREATE SUBSCRIPTION tap_sub_copy
CONNECTION '$publisher_connstr application_name=$appname_copy'
PUBLICATION tap_pub_copy
@@ -307,11 +343,13 @@ $node_subscriber->poll_query_until('postgres', $twophase_query)
or die "Timed out while waiting for subscriber to enable twophase";
# Check that the initial table data was NOT replicated (because we said copy_data=false)
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
is($result, qq(1), 'initial data in subscriber table');
# Now do a prepare on publisher and check that it IS replicated
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_copy VALUES (99);
PREPARE TRANSACTION 'mygid';");
@@ -322,18 +360,21 @@ $node_publisher->wait_for_catchup($appname);
# Check that the transaction has been prepared on the subscriber, there will be 2
# prepared transactions for the 2 subscriptions.
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(2), 'transaction is prepared on subscriber');
# Now commit the insert and verify that it IS replicated
$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'mygid';");
-$result = $node_publisher->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
+$result =
+ $node_publisher->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
is($result, qq(6), 'publisher inserted data');
$node_publisher->wait_for_catchup($appname_copy);
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
is($result, qq(2), 'replicated data in subscriber table');
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_copy;");
@@ -345,16 +386,21 @@ $node_publisher->safe_psql('postgres', "DROP PUBLICATION tap_pub_copy;");
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription");
is($result, qq(0), 'check subscription was dropped on subscriber');
-$result = $node_publisher->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+$result = $node_publisher->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_slots");
is($result, qq(0), 'check replication slot was dropped on publisher');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
-is($result, qq(0), 'check subscription relation status was dropped on subscriber');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+ 'check subscription relation status was dropped on subscriber');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_replication_origin");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_origin");
is($result, qq(0), 'check replication origin was dropped on subscriber');
$node_subscriber->stop('fast');
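Two prerequisites of this test sit outside the reformatted hunks and are worth keeping in mind; the sketch below shows their general shape (the exact WITH clause used in the script may differ):

    # PREPARE TRANSACTION is rejected unless max_prepared_transactions is
    # nonzero, so both clusters get it raised before they are started.
    $node_publisher->append_conf('postgresql.conf',
        "max_prepared_transactions = 10");
    $node_subscriber->append_conf('postgresql.conf',
        "max_prepared_transactions = 10");

    # The subscription opts in to two-phase decoding; copy_data = false matches
    # the comment above about the initial table data not being copied.
    $node_subscriber->safe_psql(
        'postgres', "
        CREATE SUBSCRIPTION tap_sub_copy
        CONNECTION '$publisher_connstr application_name=$appname_copy'
        PUBLICATION tap_pub_copy
        WITH (two_phase = on, copy_data = false);");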
diff --git a/src/test/subscription/t/022_twophase_cascade.pl b/src/test/subscription/t/022_twophase_cascade.pl
index 900c25d5ce2..7a797f37bad 100644
--- a/src/test/subscription/t/022_twophase_cascade.pl
+++ b/src/test/subscription/t/022_twophase_cascade.pl
@@ -20,7 +20,8 @@ use Test::More;
# node_A
my $node_A = PostgreSQL::Test::Cluster->new('node_A');
$node_A->init(allows_streaming => 'logical');
-$node_A->append_conf('postgresql.conf', qq(
+$node_A->append_conf(
+ 'postgresql.conf', qq(
max_prepared_transactions = 10
logical_decoding_work_mem = 64kB
));
@@ -28,7 +29,8 @@ $node_A->start;
# node_B
my $node_B = PostgreSQL::Test::Cluster->new('node_B');
$node_B->init(allows_streaming => 'logical');
-$node_B->append_conf('postgresql.conf', qq(
+$node_B->append_conf(
+ 'postgresql.conf', qq(
max_prepared_transactions = 10
logical_decoding_work_mem = 64kB
));
@@ -36,23 +38,22 @@ $node_B->start;
# node_C
my $node_C = PostgreSQL::Test::Cluster->new('node_C');
$node_C->init(allows_streaming => 'logical');
-$node_C->append_conf('postgresql.conf', qq(
+$node_C->append_conf(
+ 'postgresql.conf', qq(
max_prepared_transactions = 10
logical_decoding_work_mem = 64kB
));
$node_C->start;
# Create some pre-existing content on node_A
-$node_A->safe_psql('postgres',
- "CREATE TABLE tab_full (a int PRIMARY KEY)");
-$node_A->safe_psql('postgres', "
+$node_A->safe_psql('postgres', "CREATE TABLE tab_full (a int PRIMARY KEY)");
+$node_A->safe_psql(
+ 'postgres', "
INSERT INTO tab_full SELECT generate_series(1,10);");
# Create the same tables on node_B and node_C
-$node_B->safe_psql('postgres',
- "CREATE TABLE tab_full (a int PRIMARY KEY)");
-$node_C->safe_psql('postgres',
- "CREATE TABLE tab_full (a int PRIMARY KEY)");
+$node_B->safe_psql('postgres', "CREATE TABLE tab_full (a int PRIMARY KEY)");
+$node_C->safe_psql('postgres', "CREATE TABLE tab_full (a int PRIMARY KEY)");
# Create some pre-existing content on node_A (for streaming tests)
$node_A->safe_psql('postgres',
@@ -63,9 +64,11 @@ $node_A->safe_psql('postgres',
# Create the same tables on node_B and node_C
# columns a and b are compatible with same table name on node_A
$node_B->safe_psql('postgres',
- "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)");
+ "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)"
+);
$node_C->safe_psql('postgres',
- "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)");
+ "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)"
+);
# Setup logical replication
@@ -78,7 +81,8 @@ my $node_A_connstr = $node_A->connstr . ' dbname=postgres';
$node_A->safe_psql('postgres',
"CREATE PUBLICATION tap_pub_A FOR TABLE tab_full, test_tab");
my $appname_B = 'tap_sub_B';
-$node_B->safe_psql('postgres', "
+$node_B->safe_psql(
+ 'postgres', "
CREATE SUBSCRIPTION tap_sub_B
CONNECTION '$node_A_connstr application_name=$appname_B'
PUBLICATION tap_pub_A
@@ -89,7 +93,8 @@ my $node_B_connstr = $node_B->connstr . ' dbname=postgres';
$node_B->safe_psql('postgres',
"CREATE PUBLICATION tap_pub_B FOR TABLE tab_full, test_tab");
my $appname_C = 'tap_sub_C';
-$node_C->safe_psql('postgres', "
+$node_C->safe_psql(
+ 'postgres', "
CREATE SUBSCRIPTION tap_sub_C
CONNECTION '$node_B_connstr application_name=$appname_C'
PUBLICATION tap_pub_B
@@ -100,13 +105,14 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# Also wait for two-phase to be enabled
-my $twophase_query = "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
+my $twophase_query =
+ "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
$node_B->poll_query_until('postgres', $twophase_query)
- or die "Timed out while waiting for subscriber to enable twophase";
+ or die "Timed out while waiting for subscriber to enable twophase";
$node_C->poll_query_until('postgres', $twophase_query)
- or die "Timed out while waiting for subscriber to enable twophase";
+ or die "Timed out while waiting for subscriber to enable twophase";
-is(1,1, "Cascade setup is complete");
+is(1, 1, "Cascade setup is complete");
my $result;
@@ -116,7 +122,8 @@ my $result;
###############################
# 2PC PREPARE
-$node_A->safe_psql('postgres', "
+$node_A->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (11);
PREPARE TRANSACTION 'test_prepared_tab_full';");
@@ -125,9 +132,11 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check the transaction state is prepared on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber C');
# 2PC COMMIT
@@ -137,15 +146,19 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check that transaction was committed on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 11;");
+$result = $node_B->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 11;");
is($result, qq(1), 'Row inserted via 2PC has committed on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 11;");
+$result = $node_C->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 11;");
is($result, qq(1), 'Row inserted via 2PC has committed on subscriber C');
# check the transaction state is ended on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is committed on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is committed on subscriber C');
###############################
@@ -154,7 +167,8 @@ is($result, qq(0), 'transaction is committed on subscriber C');
###############################
# 2PC PREPARE
-$node_A->safe_psql('postgres', "
+$node_A->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (12);
PREPARE TRANSACTION 'test_prepared_tab_full';");
@@ -163,9 +177,11 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check the transaction state is prepared on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber C');
# 2PC ROLLBACK
@@ -175,15 +191,19 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check that transaction is aborted on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 12;");
+$result = $node_B->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 12;");
is($result, qq(0), 'Row inserted via 2PC is not present on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM tab_full where a = 12;");
+$result = $node_C->safe_psql('postgres',
+ "SELECT count(*) FROM tab_full where a = 12;");
is($result, qq(0), 'Row inserted via 2PC is not present on subscriber C');
# check the transaction state is ended on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is ended on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is ended on subscriber C');
###############################
@@ -191,7 +211,8 @@ is($result, qq(0), 'transaction is ended on subscriber C');
###############################
# 2PC PREPARE with a nested ROLLBACK TO SAVEPOINT
-$node_A->safe_psql('postgres', "
+$node_A->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO tab_full VALUES (21);
SAVEPOINT sp_inner;
@@ -204,9 +225,11 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check the transaction state prepared on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber C');
# 2PC COMMIT
@@ -216,46 +239,56 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check the transaction state is ended on subscriber
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is ended on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is ended on subscriber C');
# check inserts are visible at subscriber(s).
# 22 should be rolled back.
# 21 should be committed.
-$result = $node_B->safe_psql('postgres', "SELECT a FROM tab_full where a IN (21,22);");
+$result = $node_B->safe_psql('postgres',
+ "SELECT a FROM tab_full where a IN (21,22);");
is($result, qq(21), 'Rows committed are present on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT a FROM tab_full where a IN (21,22);");
+$result = $node_C->safe_psql('postgres',
+ "SELECT a FROM tab_full where a IN (21,22);");
is($result, qq(21), 'Rows committed are present on subscriber C');
# ---------------------
# 2PC + STREAMING TESTS
# ---------------------
-my $oldpid_B = $node_A->safe_psql('postgres', "
+my $oldpid_B = $node_A->safe_psql(
+ 'postgres', "
SELECT pid FROM pg_stat_replication
WHERE application_name = '$appname_B' AND state = 'streaming';");
-my $oldpid_C = $node_B->safe_psql('postgres', "
+my $oldpid_C = $node_B->safe_psql(
+ 'postgres', "
SELECT pid FROM pg_stat_replication
WHERE application_name = '$appname_C' AND state = 'streaming';");
# Setup logical replication (streaming = on)
-$node_B->safe_psql('postgres', "
+$node_B->safe_psql(
+ 'postgres', "
ALTER SUBSCRIPTION tap_sub_B
SET (streaming = on);");
-$node_C->safe_psql('postgres', "
+$node_C->safe_psql(
+ 'postgres', "
ALTER SUBSCRIPTION tap_sub_C
SET (streaming = on)");
# Wait for subscribers to finish initialization
-$node_A->poll_query_until('postgres', "
+$node_A->poll_query_until(
+ 'postgres', "
SELECT pid != $oldpid_B FROM pg_stat_replication
WHERE application_name = '$appname_B' AND state = 'streaming';"
) or die "Timed out while waiting for apply to restart";
-$node_B->poll_query_until('postgres', "
+$node_B->poll_query_until(
+ 'postgres', "
SELECT pid != $oldpid_C FROM pg_stat_replication
WHERE application_name = '$appname_C' AND state = 'streaming';"
) or die "Timed out while waiting for apply to restart";
@@ -270,7 +303,8 @@ $node_B->poll_query_until('postgres', "
# Insert, update and delete enough rows to exceed the 64kB limit.
# Then 2PC PREPARE
-$node_A->safe_psql('postgres', q{
+$node_A->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -281,9 +315,11 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check the transaction state is prepared on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber C');
# 2PC COMMIT
@@ -293,15 +329,23 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check that transaction was committed on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(3334|3334|3334), 'Rows inserted by 2PC have committed on subscriber B, and extra columns have local defaults');
-$result = $node_C->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(3334|3334|3334), 'Rows inserted by 2PC have committed on subscriber C, and extra columns have local defaults');
+$result = $node_B->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(3334|3334|3334),
+ 'Rows inserted by 2PC have committed on subscriber B, and extra columns have local defaults'
+);
+$result = $node_C->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(3334|3334|3334),
+ 'Rows inserted by 2PC have committed on subscriber C, and extra columns have local defaults'
+);
# check the transaction state is ended on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is committed on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is committed on subscriber C');
###############################
@@ -320,7 +364,8 @@ is($result, qq(0), 'transaction is committed on subscriber C');
$node_A->safe_psql('postgres', "DELETE FROM test_tab WHERE a > 2;");
# 2PC PREPARE with a nested ROLLBACK TO SAVEPOINT
-$node_A->safe_psql('postgres', "
+$node_A->safe_psql(
+ 'postgres', "
BEGIN;
INSERT INTO test_tab VALUES (9999, 'foobar');
SAVEPOINT sp_inner;
@@ -335,9 +380,11 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check the transaction state prepared on subscriber(s)
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber C');
# 2PC COMMIT
@@ -347,19 +394,23 @@ $node_A->wait_for_catchup($appname_B);
$node_B->wait_for_catchup($appname_C);
# check the transaction state is ended on subscriber
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is ended on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is ended on subscriber C');
# check inserts are visible at subscriber(s).
# All the streamed data (prior to the SAVEPOINT) should be rolled back.
# (9999, 'foobar') should be committed.
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM test_tab where b = 'foobar';");
+$result = $node_B->safe_psql('postgres',
+ "SELECT count(*) FROM test_tab where b = 'foobar';");
is($result, qq(1), 'Rows committed are present on subscriber B');
$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM test_tab;");
is($result, qq(3), 'Rows committed are present on subscriber B');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM test_tab where b = 'foobar';");
+$result = $node_C->safe_psql('postgres',
+ "SELECT count(*) FROM test_tab where b = 'foobar';");
is($result, qq(1), 'Rows committed are present on subscriber C');
$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM test_tab;");
is($result, qq(3), 'Rows committed are present on subscriber C');
@@ -370,24 +421,36 @@ is($result, qq(3), 'Rows committed are present on subscriber C');
# cleanup the node_B => node_C pub/sub
$node_C->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_C");
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
is($result, qq(0), 'check subscription was dropped on subscriber node C');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
-is($result, qq(0), 'check subscription relation status was dropped on subscriber node C');
-$result = $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_replication_origin");
-is($result, qq(0), 'check replication origin was dropped on subscriber node C');
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+$result =
+ $node_C->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+ 'check subscription relation status was dropped on subscriber node C');
+$result = $node_C->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_origin");
+is($result, qq(0),
+ 'check replication origin was dropped on subscriber node C');
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
is($result, qq(0), 'check replication slot was dropped on publisher node B');
# cleanup the node_A => node_B pub/sub
$node_B->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_B");
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
is($result, qq(0), 'check subscription was dropped on subscriber node B');
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
-is($result, qq(0), 'check subscription relation status was dropped on subscriber node B');
-$result = $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_replication_origin");
-is($result, qq(0), 'check replication origin was dropped on subscriber node B');
-$result = $node_A->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+$result =
+ $node_B->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+ 'check subscription relation status was dropped on subscriber node B');
+$result = $node_B->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_origin");
+is($result, qq(0),
+ 'check replication origin was dropped on subscriber node B');
+$result =
+ $node_A->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
is($result, qq(0), 'check replication slot was dropped on publisher node A');
# shutdown
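The 64kB logical_decoding_work_mem set in the append_conf hunks near the top of this file is what turns the "2PC + STREAMING" cases into streamed transactions: once the decoded changes of an open transaction exceed that budget, the publisher starts sending them before PREPARE is reached. A sketch of the kind of oversized transaction involved, using the test_tab table from this file and a hypothetical GID:

    # ~5000 rows of md5() text easily exceed 64kB of decoded changes, so with
    # streaming = on the subscription receives this transaction incrementally.
    $node_A->safe_psql(
        'postgres', q{
        BEGIN;
        INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
        PREPARE TRANSACTION 'example_streamed_gid';});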
diff --git a/src/test/subscription/t/023_twophase_stream.pl b/src/test/subscription/t/023_twophase_stream.pl
index 93ce3ef132d..d8475d25a49 100644
--- a/src/test/subscription/t/023_twophase_stream.pl
+++ b/src/test/subscription/t/023_twophase_stream.pl
@@ -15,7 +15,8 @@ use Test::More;
# Initialize publisher node
my $node_publisher = PostgreSQL::Test::Cluster->new('publisher');
$node_publisher->init(allows_streaming => 'logical');
-$node_publisher->append_conf('postgresql.conf', qq(
+$node_publisher->append_conf(
+ 'postgresql.conf', qq(
max_prepared_transactions = 10
logical_decoding_work_mem = 64kB
));
@@ -24,25 +25,31 @@ $node_publisher->start;
# Create subscriber node
my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
$node_subscriber->init(allows_streaming => 'logical');
-$node_subscriber->append_conf('postgresql.conf', qq(
+$node_subscriber->append_conf(
+ 'postgresql.conf', qq(
max_prepared_transactions = 10
));
$node_subscriber->start;
# Create some pre-existing content on publisher
-$node_publisher->safe_psql('postgres', "CREATE TABLE test_tab (a int primary key, b varchar)");
-$node_publisher->safe_psql('postgres', "INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
+$node_publisher->safe_psql('postgres',
+ "CREATE TABLE test_tab (a int primary key, b varchar)");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab VALUES (1, 'foo'), (2, 'bar')");
# Setup structure on subscriber (columns a and b are compatible with same table name on publisher)
$node_subscriber->safe_psql('postgres',
- "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)");
+ "CREATE TABLE test_tab (a int primary key, b text, c timestamptz DEFAULT now(), d bigint DEFAULT 999)"
+);
# Setup logical replication (streaming = on)
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
-$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION tap_pub FOR TABLE test_tab");
my $appname = 'tap_sub';
-$node_subscriber->safe_psql('postgres', "
+$node_subscriber->safe_psql(
+ 'postgres', "
CREATE SUBSCRIPTION tap_sub
CONNECTION '$publisher_connstr application_name=$appname'
PUBLICATION tap_pub
@@ -53,20 +60,21 @@ $node_publisher->wait_for_catchup($appname);
# Also wait for initial table sync to finish
my $synced_query =
- "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
+ "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
$node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
# Also wait for two-phase to be enabled
my $twophase_query =
- "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
+ "SELECT count(1) = 0 FROM pg_subscription WHERE subtwophasestate NOT IN ('e');";
$node_subscriber->poll_query_until('postgres', $twophase_query)
or die "Timed out while waiting for subscriber to enable twophase";
###############################
# Check initial data was copied to subscriber
###############################
-my $result = $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+my $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
is($result, qq(2|2|2), 'check initial data was copied to subscriber');
###############################
@@ -79,7 +87,8 @@ is($result, qq(2|2|2), 'check initial data was copied to subscriber');
# check that 2PC gets replicated to subscriber
# Insert, update and delete enough rows to exceed the 64kB limit.
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -89,18 +98,24 @@ $node_publisher->safe_psql('postgres', q{
$node_publisher->wait_for_catchup($appname);
# check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber');
# 2PC transaction gets committed
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check that transaction is committed on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(3334|3334|3334), 'Rows inserted by 2PC have committed on subscriber, and extra columns contain local defaults');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(3334|3334|3334),
+ 'Rows inserted by 2PC have committed on subscriber, and extra columns contain local defaults'
+);
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is committed on subscriber');
###############################
@@ -113,10 +128,11 @@ is($result, qq(0), 'transaction is committed on subscriber');
###############################
# First, delete the data except for 2 rows (will be replicated)
-$node_publisher->safe_psql('postgres', "DELETE FROM test_tab WHERE a > 2;");
+$node_publisher->safe_psql('postgres', "DELETE FROM test_tab WHERE a > 2;");
# Then insert, update and delete enough rows to exceed the 64kB limit.
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -126,19 +142,24 @@ $node_publisher->safe_psql('postgres', q{
$node_publisher->wait_for_catchup($appname);
# check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber');
# 2PC transaction gets aborted
-$node_publisher->safe_psql('postgres', "ROLLBACK PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "ROLLBACK PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check that transaction is aborted on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(2|2|2), 'Rows inserted by 2PC are rolled back, leaving only the original 2 rows');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(2|2|2),
+ 'Rows inserted by 2PC are rolled back, leaving only the original 2 rows');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is aborted on subscriber');
###############################
@@ -151,7 +172,8 @@ is($result, qq(0), 'transaction is aborted on subscriber');
# Note: both publisher and subscriber do crash/restart.
###############################
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -165,12 +187,16 @@ $node_publisher->start;
$node_subscriber->start;
# commit after the restart
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check inserts are visible
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(3334|3334|3334), 'Rows inserted by 2PC have committed on subscriber, and extra columns contain local defaults');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+is($result, qq(3334|3334|3334),
+ 'Rows inserted by 2PC have committed on subscriber, and extra columns contain local defaults'
+);
###############################
# Do INSERT after the PREPARE but before ROLLBACK PREPARED.
@@ -187,7 +213,8 @@ is($result, qq(3334|3334|3334), 'Rows inserted by 2PC have committed on subscrib
$node_publisher->safe_psql('postgres', "DELETE FROM test_tab WHERE a > 2;");
# Then insert, update and delete enough rows to exceed the 64kB limit.
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -197,24 +224,29 @@ $node_publisher->safe_psql('postgres', q{
$node_publisher->wait_for_catchup($appname);
# check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber');
# Insert a different record (now we are outside of the 2PC transaction)
# Note: the 2PC transaction still holds row locks so make sure this insert is for a separate primary key
-$node_publisher->safe_psql('postgres', "INSERT INTO test_tab VALUES (99999, 'foobar')");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab VALUES (99999, 'foobar')");
# 2PC transaction gets aborted
-$node_publisher->safe_psql('postgres', "ROLLBACK PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "ROLLBACK PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check that transaction is aborted on subscriber,
# but the extra INSERT outside of the 2PC still was replicated
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*), count(c), count(d = 999) FROM test_tab");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), count(c), count(d = 999) FROM test_tab");
is($result, qq(3|3|3), 'check the outside insert was copied to subscriber');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is aborted on subscriber');
###############################
@@ -232,7 +264,8 @@ is($result, qq(0), 'transaction is aborted on subscriber');
$node_publisher->safe_psql('postgres', "DELETE FROM test_tab WHERE a > 2;");
# Then insert, update and delete enough rows to exceed the 64kB limit.
-$node_publisher->safe_psql('postgres', q{
+$node_publisher->safe_psql(
+ 'postgres', q{
BEGIN;
INSERT INTO test_tab SELECT i, md5(i::text) FROM generate_series(3, 5000) s(i);
UPDATE test_tab SET b = md5(b) WHERE mod(a,2) = 0;
@@ -242,24 +275,30 @@ $node_publisher->safe_psql('postgres', q{
$node_publisher->wait_for_catchup($appname);
# check that transaction is in prepared state on subscriber
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(1), 'transaction is prepared on subscriber');
# Insert a different record (now we are outside of the 2PC transaction)
# Note: the 2PC transaction still holds row locks so make sure this insert is for a separate primary key
-$node_publisher->safe_psql('postgres', "INSERT INTO test_tab VALUES (99999, 'foobar')");
+$node_publisher->safe_psql('postgres',
+ "INSERT INTO test_tab VALUES (99999, 'foobar')");
# 2PC transaction gets committed
-$node_publisher->safe_psql('postgres', "COMMIT PREPARED 'test_prepared_tab';");
+$node_publisher->safe_psql('postgres',
+ "COMMIT PREPARED 'test_prepared_tab';");
$node_publisher->wait_for_catchup($appname);
# check that transaction is committed on subscriber
$result = $node_subscriber->safe_psql('postgres',
"SELECT count(*), count(c), count(d = 999) FROM test_tab");
-is($result, qq(3335|3335|3335), 'Rows inserted by 2PC (as well as outside insert) have committed on subscriber, and extra columns contain local defaults');
+is($result, qq(3335|3335|3335),
+ 'Rows inserted by 2PC (as well as outside insert) have committed on subscriber, and extra columns contain local defaults'
+);
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_prepared_xacts;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
is($result, qq(0), 'transaction is committed on subscriber');
###############################
@@ -268,16 +307,21 @@ is($result, qq(0), 'transaction is committed on subscriber');
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription");
is($result, qq(0), 'check subscription was dropped on subscriber');
-$result = $node_publisher->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+$result = $node_publisher->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_slots");
is($result, qq(0), 'check replication slot was dropped on publisher');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
-is($result, qq(0), 'check subscription relation status was dropped on subscriber');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+ 'check subscription relation status was dropped on subscriber');
-$result = $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_replication_origin");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_origin");
is($result, qq(0), 'check replication origin was dropped on subscriber');
$node_subscriber->stop('fast');
diff --git a/src/test/subscription/t/024_add_drop_pub.pl b/src/test/subscription/t/024_add_drop_pub.pl
index 561ddde4216..246f8c92372 100644
--- a/src/test/subscription/t/024_add_drop_pub.pl
+++ b/src/test/subscription/t/024_add_drop_pub.pl
@@ -30,8 +30,7 @@ $node_subscriber->safe_psql('postgres', "CREATE TABLE tab_1 (a int)");
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
$node_publisher->safe_psql('postgres',
"CREATE PUBLICATION tap_pub_1 FOR TABLE tab_1");
-$node_publisher->safe_psql('postgres',
- "CREATE PUBLICATION tap_pub_2");
+$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub_2");
$node_subscriber->safe_psql('postgres',
"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr' PUBLICATION tap_pub_1, tap_pub_2"
diff --git a/src/test/subscription/t/025_rep_changes_for_schema.pl b/src/test/subscription/t/025_rep_changes_for_schema.pl
index 2a6ba5403da..5ce275cf725 100644
--- a/src/test/subscription/t/025_rep_changes_for_schema.pl
+++ b/src/test/subscription/t/025_rep_changes_for_schema.pl
@@ -27,11 +27,14 @@ $node_publisher->safe_psql('postgres',
$node_publisher->safe_psql('postgres',
"CREATE TABLE sch1.tab2 AS SELECT generate_series(1,10) AS a");
$node_publisher->safe_psql('postgres',
- "CREATE TABLE sch1.tab1_parent (a int PRIMARY KEY, b text) PARTITION BY LIST (a)");
+ "CREATE TABLE sch1.tab1_parent (a int PRIMARY KEY, b text) PARTITION BY LIST (a)"
+);
$node_publisher->safe_psql('postgres',
- "CREATE TABLE public.tab1_child1 PARTITION OF sch1.tab1_parent FOR VALUES IN (1, 2, 3)");
+ "CREATE TABLE public.tab1_child1 PARTITION OF sch1.tab1_parent FOR VALUES IN (1, 2, 3)"
+);
$node_publisher->safe_psql('postgres',
- "CREATE TABLE public.tab1_child2 PARTITION OF sch1.tab1_parent FOR VALUES IN (4, 5, 6)");
+ "CREATE TABLE public.tab1_child2 PARTITION OF sch1.tab1_parent FOR VALUES IN (4, 5, 6)"
+);
$node_publisher->safe_psql('postgres',
"INSERT INTO sch1.tab1_parent values (1),(4)");
@@ -41,11 +44,14 @@ $node_subscriber->safe_psql('postgres', "CREATE SCHEMA sch1");
$node_subscriber->safe_psql('postgres', "CREATE TABLE sch1.tab1 (a int)");
$node_subscriber->safe_psql('postgres', "CREATE TABLE sch1.tab2 (a int)");
$node_subscriber->safe_psql('postgres',
- "CREATE TABLE sch1.tab1_parent (a int PRIMARY KEY, b text) PARTITION BY LIST (a)");
+ "CREATE TABLE sch1.tab1_parent (a int PRIMARY KEY, b text) PARTITION BY LIST (a)"
+);
$node_subscriber->safe_psql('postgres',
- "CREATE TABLE public.tab1_child1 PARTITION OF sch1.tab1_parent FOR VALUES IN (1, 2, 3)");
+ "CREATE TABLE public.tab1_child1 PARTITION OF sch1.tab1_parent FOR VALUES IN (1, 2, 3)"
+);
$node_subscriber->safe_psql('postgres',
- "CREATE TABLE public.tab1_child2 PARTITION OF sch1.tab1_parent FOR VALUES IN (4, 5, 6)");
+ "CREATE TABLE public.tab1_child2 PARTITION OF sch1.tab1_parent FOR VALUES IN (4, 5, 6)"
+);
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
@@ -75,7 +81,7 @@ is($result, qq(10|1|10), 'check rows on subscriber catchup');
$result = $node_subscriber->safe_psql('postgres',
"SELECT * FROM sch1.tab1_parent order by 1");
-is($result, qq(1|
+is( $result, qq(1|
4|), 'check rows on subscriber catchup');
# Insert some data into few tables and verify that inserted data is replicated
@@ -93,7 +99,7 @@ is($result, qq(20|1|20), 'check replicated inserts on subscriber');
$result = $node_subscriber->safe_psql('postgres',
"SELECT * FROM sch1.tab1_parent order by 1");
-is($result, qq(1|
+is( $result, qq(1|
2|
4|
5|), 'check replicated inserts on subscriber');
@@ -189,7 +195,8 @@ is($result, qq(3),
# Drop schema from publication, verify that the inserts are not published after
# dropping the schema from publication. Here the 2nd insert should not be
# published.
-$node_publisher->safe_psql('postgres', "
+$node_publisher->safe_psql(
+ 'postgres', "
INSERT INTO sch1.tab1 VALUES(21);
ALTER PUBLICATION tap_pub_schema DROP ALL TABLES IN SCHEMA sch1;
INSERT INTO sch1.tab1 values(22);"
diff --git a/src/test/subscription/t/027_nosuperuser.pl b/src/test/subscription/t/027_nosuperuser.pl
index 4815e6ccffe..350bc40efcb 100644
--- a/src/test/subscription/t/027_nosuperuser.pl
+++ b/src/test/subscription/t/027_nosuperuser.pl
@@ -12,8 +12,9 @@ $offset = 0;
sub publish_insert
{
- my ($tbl, $new_i) = @_;
- $node_publisher->safe_psql('postgres', qq(
+ my ($tbl, $new_i) = @_;
+ $node_publisher->safe_psql(
+ 'postgres', qq(
SET SESSION AUTHORIZATION regress_alice;
INSERT INTO $tbl (i) VALUES ($new_i);
));
@@ -21,8 +22,9 @@ sub publish_insert
sub publish_update
{
- my ($tbl, $old_i, $new_i) = @_;
- $node_publisher->safe_psql('postgres', qq(
+ my ($tbl, $old_i, $new_i) = @_;
+ $node_publisher->safe_psql(
+ 'postgres', qq(
SET SESSION AUTHORIZATION regress_alice;
UPDATE $tbl SET i = $new_i WHERE i = $old_i;
));
@@ -30,8 +32,9 @@ sub publish_update
sub publish_delete
{
- my ($tbl, $old_i) = @_;
- $node_publisher->safe_psql('postgres', qq(
+ my ($tbl, $old_i) = @_;
+ $node_publisher->safe_psql(
+ 'postgres', qq(
SET SESSION AUTHORIZATION regress_alice;
DELETE FROM $tbl WHERE i = $old_i;
));
@@ -39,47 +42,53 @@ sub publish_delete
sub expect_replication
{
- my ($tbl, $cnt, $min, $max, $testname) = @_;
- $node_publisher->wait_for_catchup('admin_sub');
- $result = $node_subscriber->safe_psql('postgres', qq(
+ my ($tbl, $cnt, $min, $max, $testname) = @_;
+ $node_publisher->wait_for_catchup('admin_sub');
+ $result = $node_subscriber->safe_psql(
+ 'postgres', qq(
SELECT COUNT(i), MIN(i), MAX(i) FROM $tbl));
- is ($result, "$cnt|$min|$max", $testname);
+ is($result, "$cnt|$min|$max", $testname);
}
sub expect_failure
{
- my ($tbl, $cnt, $min, $max, $re, $testname) = @_;
- $offset = $node_subscriber->wait_for_log($re, $offset);
- $result = $node_subscriber->safe_psql('postgres', qq(
+ my ($tbl, $cnt, $min, $max, $re, $testname) = @_;
+ $offset = $node_subscriber->wait_for_log($re, $offset);
+ $result = $node_subscriber->safe_psql(
+ 'postgres', qq(
SELECT COUNT(i), MIN(i), MAX(i) FROM $tbl));
- is ($result, "$cnt|$min|$max", $testname);
+ is($result, "$cnt|$min|$max", $testname);
}
sub revoke_superuser
{
- my ($role) = @_;
- $node_subscriber->safe_psql('postgres', qq(
+ my ($role) = @_;
+ $node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER ROLE $role NOSUPERUSER));
}
sub grant_superuser
{
- my ($role) = @_;
- $node_subscriber->safe_psql('postgres', qq(
+ my ($role) = @_;
+ $node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER ROLE $role SUPERUSER));
}
sub revoke_bypassrls
{
- my ($role) = @_;
- $node_subscriber->safe_psql('postgres', qq(
+ my ($role) = @_;
+ $node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER ROLE $role NOBYPASSRLS));
}
sub grant_bypassrls
{
- my ($role) = @_;
- $node_subscriber->safe_psql('postgres', qq(
+ my ($role) = @_;
+ $node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER ROLE $role BYPASSRLS));
}
@@ -88,7 +97,7 @@ sub grant_bypassrls
# "regress_admin". For partitioned tables, layout the partitions differently
# on the publisher than on the subscriber.
#
-$node_publisher = PostgreSQL::Test::Cluster->new('publisher');
+$node_publisher = PostgreSQL::Test::Cluster->new('publisher');
$node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
$node_publisher->init(allows_streaming => 'logical');
$node_subscriber->init;
@@ -96,17 +105,18 @@ $node_publisher->start;
$node_subscriber->start;
$publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
my %remainder_a = (
- publisher => 0,
- subscriber => 1);
+ publisher => 0,
+ subscriber => 1);
my %remainder_b = (
- publisher => 1,
- subscriber => 0);
+ publisher => 1,
+ subscriber => 0);
for my $node ($node_publisher, $node_subscriber)
{
- my $remainder_a = $remainder_a{$node->name};
- my $remainder_b = $remainder_b{$node->name};
- $node->safe_psql('postgres', qq(
+ my $remainder_a = $remainder_a{ $node->name };
+ my $remainder_b = $remainder_b{ $node->name };
+ $node->safe_psql(
+ 'postgres', qq(
CREATE ROLE regress_admin SUPERUSER LOGIN;
CREATE ROLE regress_alice NOSUPERUSER LOGIN;
GRANT CREATE ON DATABASE postgres TO regress_alice;
@@ -129,14 +139,16 @@ for my $node ($node_publisher, $node_subscriber)
ALTER TABLE alice.hashpart_b REPLICA IDENTITY FULL;
));
}
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
SET SESSION AUTHORIZATION regress_alice;
CREATE PUBLICATION alice
FOR TABLE alice.unpartitioned, alice.hashpart
WITH (publish_via_partition_root = true);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
SET SESSION AUTHORIZATION regress_admin;
CREATE SUBSCRIPTION admin_sub CONNECTION '$publisher_connstr' PUBLICATION alice;
));
@@ -156,9 +168,8 @@ publish_insert("alice.unpartitioned", 3);
publish_insert("alice.unpartitioned", 5);
publish_update("alice.unpartitioned", 1 => 7);
publish_delete("alice.unpartitioned", 3);
-expect_replication(
- "alice.unpartitioned", 2, 5, 7,
- "superuser admin replicates into unpartitioned");
+expect_replication("alice.unpartitioned", 2, 5, 7,
+ "superuser admin replicates into unpartitioned");
# Revoke and restore superuser privilege for "regress_admin",
# verifying that replication fails while superuser privilege is
@@ -166,12 +177,13 @@ expect_replication(
#
revoke_superuser("regress_admin");
publish_update("alice.unpartitioned", 5 => 9);
-expect_failure("alice.unpartitioned", 2, 5, 7,
- qr/ERROR: permission denied for table unpartitioned/msi,
- "non-superuser admin fails to replicate update");
+expect_failure(
+ "alice.unpartitioned", 2, 5, 7,
+ qr/ERROR: permission denied for table unpartitioned/msi,
+ "non-superuser admin fails to replicate update");
grant_superuser("regress_admin");
expect_replication("alice.unpartitioned", 2, 7, 9,
- "admin with restored superuser privilege replicates update");
+ "admin with restored superuser privilege replicates update");
# Grant INSERT, UPDATE, DELETE privileges on the target tables to
# "regress_admin" so that superuser privileges are not necessary for
@@ -180,7 +192,8 @@ expect_replication("alice.unpartitioned", 2, 7, 9,
# Note that UPDATE and DELETE also require SELECT privileges, which
# will be granted in a subsequent test.
#
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER ROLE regress_admin NOSUPERUSER;
SET SESSION AUTHORIZATION regress_alice;
GRANT INSERT,UPDATE,DELETE ON
@@ -192,16 +205,23 @@ REVOKE SELECT ON alice.unpartitioned FROM regress_admin;
publish_insert("alice.unpartitioned", 11);
expect_replication("alice.unpartitioned", 3, 7, 11,
- "nosuperuser admin with INSERT privileges can replicate into unpartitioned");
+ "nosuperuser admin with INSERT privileges can replicate into unpartitioned"
+);
publish_update("alice.unpartitioned", 7 => 13);
-expect_failure("alice.unpartitioned", 3, 7, 11,
- qr/ERROR: permission denied for table unpartitioned/msi,
- "non-superuser admin without SELECT privileges fails to replicate update");
+expect_failure(
+ "alice.unpartitioned",
+ 3,
+ 7,
+ 11,
+ qr/ERROR: permission denied for table unpartitioned/msi,
+ "non-superuser admin without SELECT privileges fails to replicate update"
+);
# Now grant SELECT
#
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
SET SESSION AUTHORIZATION regress_alice;
GRANT SELECT ON
alice.unpartitioned,
@@ -211,7 +231,8 @@ GRANT SELECT ON
publish_delete("alice.unpartitioned", 9);
expect_replication("alice.unpartitioned", 2, 11, 13,
- "nosuperuser admin with all table privileges can replicate into unpartitioned");
+ "nosuperuser admin with all table privileges can replicate into unpartitioned"
+);
# Test partitioning
#
@@ -221,50 +242,68 @@ publish_insert("alice.hashpart", 103);
publish_update("alice.hashpart", 102 => 120);
publish_delete("alice.hashpart", 101);
expect_replication("alice.hashpart", 2, 103, 120,
- "nosuperuser admin with all table privileges can replicate into hashpart");
+ "nosuperuser admin with all table privileges can replicate into hashpart"
+);
# Enable RLS on the target table and check that "regress_admin" can
# only replicate into it when superuser or bypassrls.
#
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
SET SESSION AUTHORIZATION regress_alice;
ALTER TABLE alice.unpartitioned ENABLE ROW LEVEL SECURITY;
));
revoke_superuser("regress_admin");
publish_insert("alice.unpartitioned", 15);
-expect_failure("alice.unpartitioned", 2, 11, 13,
- qr/ERROR: "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
- "non-superuser admin fails to replicate insert into rls enabled table");
+expect_failure(
+ "alice.unpartitioned",
+ 2,
+ 11,
+ 13,
+ qr/ERROR: "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
+ "non-superuser admin fails to replicate insert into rls enabled table");
grant_superuser("regress_admin");
expect_replication("alice.unpartitioned", 3, 11, 15,
- "admin with restored superuser privilege replicates insert into rls enabled unpartitioned");
+ "admin with restored superuser privilege replicates insert into rls enabled unpartitioned"
+);
revoke_superuser("regress_admin");
publish_update("alice.unpartitioned", 11 => 17);
-expect_failure("alice.unpartitioned", 3, 11, 15,
- qr/ERROR: "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
- "non-superuser admin fails to replicate update into rls enabled unpartitioned");
+expect_failure(
+ "alice.unpartitioned",
+ 3,
+ 11,
+ 15,
+ qr/ERROR: "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
+ "non-superuser admin fails to replicate update into rls enabled unpartitioned"
+);
grant_bypassrls("regress_admin");
expect_replication("alice.unpartitioned", 3, 13, 17,
- "admin with bypassrls replicates update into rls enabled unpartitioned");
+ "admin with bypassrls replicates update into rls enabled unpartitioned");
revoke_bypassrls("regress_admin");
publish_delete("alice.unpartitioned", 13);
-expect_failure("alice.unpartitioned", 3, 13, 17,
- qr/ERROR: "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
- "non-superuser admin without bypassrls fails to replicate delete into rls enabled unpartitioned");
+expect_failure(
+ "alice.unpartitioned",
+ 3,
+ 13,
+ 17,
+ qr/ERROR: "regress_admin" cannot replicate into relation with row-level security enabled: "unpartitioned\w*"/msi,
+ "non-superuser admin without bypassrls fails to replicate delete into rls enabled unpartitioned"
+);
grant_bypassrls("regress_admin");
expect_replication("alice.unpartitioned", 2, 15, 17,
- "admin with bypassrls replicates delete into rls enabled unpartitioned");
+ "admin with bypassrls replicates delete into rls enabled unpartitioned");
grant_superuser("regress_admin");
# Alter the subscription owner to "regress_alice". She has neither superuser
# nor bypassrls, but as the table owner should be able to replicate.
#
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION admin_sub DISABLE;
ALTER ROLE regress_alice SUPERUSER;
ALTER SUBSCRIPTION admin_sub OWNER TO regress_alice;
@@ -275,8 +314,8 @@ ALTER SUBSCRIPTION admin_sub ENABLE;
publish_insert("alice.unpartitioned", 23);
publish_update("alice.unpartitioned", 15 => 25);
publish_delete("alice.unpartitioned", 17);
-expect_replication(
- "alice.unpartitioned", 2, 23, 25,
- "nosuperuser nobypassrls table owner can replicate delete into unpartitioned despite rls");
+expect_replication("alice.unpartitioned", 2, 23, 25,
+ "nosuperuser nobypassrls table owner can replicate delete into unpartitioned despite rls"
+);
done_testing();
diff --git a/src/test/subscription/t/028_row_filter.pl b/src/test/subscription/t/028_row_filter.pl
index 82c4eb6ef62..0dc0a6d10f5 100644
--- a/src/test/subscription/t/028_row_filter.pl
+++ b/src/test/subscription/t/028_row_filter.pl
@@ -291,8 +291,7 @@ $node_subscriber->safe_psql('postgres',
$node_subscriber->safe_psql('postgres',
"CREATE TABLE tab_rowfilter_viaroot_part (a int)");
$node_subscriber->safe_psql('postgres',
- "CREATE TABLE tab_rowfilter_viaroot_part_1 (a int)"
-);
+ "CREATE TABLE tab_rowfilter_viaroot_part_1 (a int)");
# setup logical replication
$node_publisher->safe_psql('postgres',
@@ -720,18 +719,14 @@ is($result, qq(t|1), 'check replicated rows to tab_rowfilter_toast');
$result =
$node_subscriber->safe_psql('postgres',
"SELECT a FROM tab_rowfilter_viaroot_part");
-is( $result, qq(16),
- 'check replicated rows to tab_rowfilter_viaroot_part'
-);
+is($result, qq(16), 'check replicated rows to tab_rowfilter_viaroot_part');
# Check there is no data in tab_rowfilter_viaroot_part_1 because rows are
# replicated via the top most parent table tab_rowfilter_viaroot_part
$result =
$node_subscriber->safe_psql('postgres',
"SELECT a FROM tab_rowfilter_viaroot_part_1");
-is( $result, qq(),
- 'check replicated rows to tab_rowfilter_viaroot_part_1'
-);
+is($result, qq(), 'check replicated rows to tab_rowfilter_viaroot_part_1');
# Testcase end: FOR TABLE with row filter publications
# ======================================================
diff --git a/src/test/subscription/t/031_column_list.pl b/src/test/subscription/t/031_column_list.pl
index bdcf3e4a248..19812e11f31 100644
--- a/src/test/subscription/t/031_column_list.pl
+++ b/src/test/subscription/t/031_column_list.pl
@@ -26,51 +26,60 @@ sub wait_for_subscription_sync
my ($node) = @_;
# Also wait for initial table sync to finish
- my $synced_query = "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
+ my $synced_query =
+ "SELECT count(1) = 0 FROM pg_subscription_rel WHERE srsubstate NOT IN ('r', 's');";
$node->poll_query_until('postgres', $synced_query)
- or die "Timed out while waiting for subscriber to synchronize data";
+ or die "Timed out while waiting for subscriber to synchronize data";
}
# setup tables on both nodes
# tab1: simple 1:1 replication
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab1 (a int PRIMARY KEY, "B" int, c int)
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab1 (a int PRIMARY KEY, "B" int, c int)
));
# tab2: replication from a regular table to a table with fewer columns
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab2 (a int PRIMARY KEY, b varchar, c int);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab2 (a int PRIMARY KEY, b varchar)
));
# tab3: simple 1:1 replication with weird column names
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab3 ("a'" int PRIMARY KEY, "B" varchar, "c'" int)
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab3 ("a'" int PRIMARY KEY, "c'" int)
));
# test_part: partitioned tables, with partitioning (including multi-level
# partitioning, and fewer columns on the subscriber)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part (a int PRIMARY KEY, b text, c timestamptz) PARTITION BY LIST (a);
CREATE TABLE test_part_1_1 PARTITION OF test_part FOR VALUES IN (1,2,3,4,5,6);
CREATE TABLE test_part_2_1 PARTITION OF test_part FOR VALUES IN (7,8,9,10,11,12) PARTITION BY LIST (a);
CREATE TABLE test_part_2_2 PARTITION OF test_part_2_1 FOR VALUES IN (7,8,9,10);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part (a int PRIMARY KEY, b text) PARTITION BY LIST (a);
CREATE TABLE test_part_1_1 PARTITION OF test_part FOR VALUES IN (1,2,3,4,5,6);
CREATE TABLE test_part_2_1 PARTITION OF test_part FOR VALUES IN (7,8,9,10,11,12) PARTITION BY LIST (a);
@@ -78,12 +87,14 @@ $node_subscriber->safe_psql('postgres', qq(
));
# tab4: table with user-defined enum types
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TYPE test_typ AS ENUM ('blue', 'red');
CREATE TABLE tab4 (a INT PRIMARY KEY, b test_typ, c int, d text);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TYPE test_typ AS ENUM ('blue', 'red');
CREATE TABLE tab4 (a INT PRIMARY KEY, b test_typ, d text);
));
@@ -91,7 +102,8 @@ $node_subscriber->safe_psql('postgres', qq(
# TEST: create publication and subscription for some of the tables with
# column lists
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE PUBLICATION pub1
FOR TABLE tab1 (a, "B"), tab3 ("a'", "c'"), test_part (a, b), tab4 (a, b, d)
WITH (publish_via_partition_root = 'true');
@@ -99,36 +111,41 @@ $node_publisher->safe_psql('postgres', qq(
# check that we got the right prattrs values for the publication in the
# pg_publication_rel catalog (order by relname, to get stable ordering)
-my $result = $node_publisher->safe_psql('postgres', qq(
+my $result = $node_publisher->safe_psql(
+ 'postgres', qq(
SELECT relname, prattrs
FROM pg_publication_rel pb JOIN pg_class pc ON(pb.prrelid = pc.oid)
ORDER BY relname
));
-is($result, qq(tab1|1 2
+is( $result, qq(tab1|1 2
tab3|1 3
tab4|1 2 4
test_part|1 2), 'publication relation updated');
# TEST: insert data into the tables, create subscription and see if sync
# replicates the right columns
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab1 VALUES (1, 2, 3);
INSERT INTO tab1 VALUES (4, 5, 6);
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab3 VALUES (1, 2, 3);
INSERT INTO tab3 VALUES (4, 5, 6);
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab4 VALUES (1, 'red', 3, 'oh my');
INSERT INTO tab4 VALUES (2, 'blue', 4, 'hello');
));
# replication of partitioned table
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_part VALUES (1, 'abc', '2021-07-04 12:00:00');
INSERT INTO test_part VALUES (2, 'bcd', '2021-07-03 11:12:13');
INSERT INTO test_part VALUES (7, 'abc', '2021-07-04 12:00:00');
@@ -137,34 +154,35 @@ $node_publisher->safe_psql('postgres', qq(
# create subscription for the publication, wait for sync to complete,
# then check the sync results
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub1
));
wait_for_subscription_sync($node_subscriber);
# tab1: only (a,b) is replicated
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT * FROM tab1 ORDER BY a");
-is($result, qq(1|2|
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab1 ORDER BY a");
+is( $result, qq(1|2|
4|5|), 'insert on column tab1.c is not replicated');
# tab3: only (a,c) is replicated
$result = $node_subscriber->safe_psql('postgres',
qq(SELECT * FROM tab3 ORDER BY "a'"));
-is($result, qq(1|3
+is( $result, qq(1|3
4|6), 'insert on column tab3.b is not replicated');
# tab4: only (a,b,d) is replicated
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT * FROM tab4 ORDER BY a");
-is($result, qq(1|red|oh my
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab4 ORDER BY a");
+is( $result, qq(1|red|oh my
2|blue|hello), 'insert on column tab4.c is not replicated');
# test_part: (a,b) is replicated
$result = $node_subscriber->safe_psql('postgres',
"SELECT * FROM test_part ORDER BY a");
-is($result, qq(1|abc
+is( $result, qq(1|abc
2|bcd
7|abc
8|bcd), 'insert on column test_part.c columns is not replicated');
@@ -173,23 +191,27 @@ is($result, qq(1|abc
# TEST: now insert more data into the tables, and wait until we replicate
# them (not by tablesync, but regular decoding and replication)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab1 VALUES (2, 3, 4);
INSERT INTO tab1 VALUES (5, 6, 7);
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab3 VALUES (2, 3, 4);
INSERT INTO tab3 VALUES (5, 6, 7);
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab4 VALUES (3, 'red', 5, 'foo');
INSERT INTO tab4 VALUES (4, 'blue', 6, 'bar');
));
# replication of partitioned table
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_part VALUES (3, 'xxx', '2022-02-01 10:00:00');
INSERT INTO test_part VALUES (4, 'yyy', '2022-03-02 15:12:13');
INSERT INTO test_part VALUES (9, 'zzz', '2022-04-03 21:00:00');
@@ -200,9 +222,9 @@ $node_publisher->safe_psql('postgres', qq(
$node_publisher->wait_for_catchup('sub1');
# tab1: only (a,b) is replicated
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT * FROM tab1 ORDER BY a");
-is($result, qq(1|2|
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab1 ORDER BY a");
+is( $result, qq(1|2|
2|3|
4|5|
5|6|), 'insert on column tab1.c is not replicated');
@@ -210,15 +232,15 @@ is($result, qq(1|2|
# tab3: only (a,c) is replicated
$result = $node_subscriber->safe_psql('postgres',
qq(SELECT * FROM tab3 ORDER BY "a'"));
-is($result, qq(1|3
+is( $result, qq(1|3
2|4
4|6
5|7), 'insert on column tab3.b is not replicated');
# tab4: only (a,b,d) is replicated
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT * FROM tab4 ORDER BY a");
-is($result, qq(1|red|oh my
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab4 ORDER BY a");
+is( $result, qq(1|red|oh my
2|blue|hello
3|red|foo
4|blue|bar), 'insert on column tab4.c is not replicated');
@@ -226,7 +248,7 @@ is($result, qq(1|red|oh my
# test_part: (a,b) is replicated
$result = $node_subscriber->safe_psql('postgres',
"SELECT * FROM test_part ORDER BY a");
-is($result, qq(1|abc
+is( $result, qq(1|abc
2|bcd
3|xxx
4|yyy
@@ -257,36 +279,38 @@ $node_publisher->safe_psql('postgres',
# tab4
$node_publisher->safe_psql('postgres',
- qq(UPDATE tab4 SET b = 'blue', c = c * 2, d = d || ' updated' where a = 1));
+ qq(UPDATE tab4 SET b = 'blue', c = c * 2, d = d || ' updated' where a = 1)
+);
# tab4
$node_publisher->safe_psql('postgres',
- qq(UPDATE tab4 SET b = 'red', c = c * 2, d = d || ' updated' where a = 2));
+ qq(UPDATE tab4 SET b = 'red', c = c * 2, d = d || ' updated' where a = 2)
+);
# wait for the replication to catch up, and check the UPDATE results got
# replicated correctly, with the right column list
$node_publisher->wait_for_catchup('sub1');
-$result = $node_subscriber->safe_psql('postgres',
- qq(SELECT * FROM tab1 ORDER BY a));
-is($result,
-qq(1|4|
+$result =
+ $node_subscriber->safe_psql('postgres', qq(SELECT * FROM tab1 ORDER BY a));
+is( $result,
+ qq(1|4|
2|3|
4|5|
5|6|), 'only update on column tab1.b is replicated');
$result = $node_subscriber->safe_psql('postgres',
qq(SELECT * FROM tab3 ORDER BY "a'"));
-is($result,
-qq(1|6
+is( $result,
+ qq(1|6
2|4
4|6
5|7), 'only update on column tab3.c is replicated');
-$result = $node_subscriber->safe_psql('postgres',
- qq(SELECT * FROM tab4 ORDER BY a));
+$result =
+ $node_subscriber->safe_psql('postgres', qq(SELECT * FROM tab4 ORDER BY a));
-is($result, qq(1|blue|oh my updated
+is( $result, qq(1|blue|oh my updated
2|red|hello updated
3|red|foo
4|blue|bar), 'update on column tab4.c is not replicated');
@@ -295,7 +319,8 @@ is($result, qq(1|blue|oh my updated
# TEST: add table with a column list, insert data, replicate
# insert some data before adding it to the publication
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab2 VALUES (1, 'abc', 3);
));
@@ -309,34 +334,37 @@ $node_subscriber->safe_psql('postgres',
# the results of the replication
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab2 VALUES (2, 'def', 6);
));
$node_publisher->wait_for_catchup('sub1');
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT * FROM tab2 ORDER BY a");
-is($result, qq(1|abc
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab2 ORDER BY a");
+is( $result, qq(1|abc
2|def), 'insert on column tab2.c is not replicated');
# do a couple updates, check the correct stuff gets replicated
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
UPDATE tab2 SET c = 5 where a = 1;
UPDATE tab2 SET b = 'xyz' where a = 2;
));
$node_publisher->wait_for_catchup('sub1');
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT * FROM tab2 ORDER BY a");
-is($result, qq(1|abc
+$result =
+ $node_subscriber->safe_psql('postgres', "SELECT * FROM tab2 ORDER BY a");
+is( $result, qq(1|abc
2|xyz), 'update on column tab2.c is not replicated');
# TEST: add a table to two publications with different column lists, and
# create a single subscription replicating both publications
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab5 (a int PRIMARY KEY, b int, c int, d int);
CREATE PUBLICATION pub2 FOR TABLE tab5 (a, b);
CREATE PUBLICATION pub3 FOR TABLE tab5 (a, d);
@@ -346,11 +374,13 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO tab5 VALUES (2, 22, 222, 2222);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab5 (a int PRIMARY KEY, b int, d int);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub2, pub3
));
@@ -360,52 +390,57 @@ $node_publisher->wait_for_catchup('sub1');
# insert data and make sure all the columns (union of the column lists)
# get fully replicated
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab5 VALUES (3, 33, 333, 3333);
INSERT INTO tab5 VALUES (4, 44, 444, 4444);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab5 ORDER BY a"),
- qq(1|11|1111
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab5 ORDER BY a"),
+ qq(1|11|1111
2|22|2222
3|33|3333
4|44|4444),
- 'overlapping publications with overlapping column lists');
+ 'overlapping publications with overlapping column lists');
# and finally, remove the column list for one of the publications, which
# means replicating all columns (removing the column list), but first add
# the missing column to the table on subscriber
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
ALTER PUBLICATION pub3 SET TABLE tab5;
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
ALTER TABLE tab5 ADD COLUMN c INT;
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab5 VALUES (5, 55, 555, 5555);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab5 ORDER BY a"),
- qq(1|11|1111|
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab5 ORDER BY a"),
+ qq(1|11|1111|
2|22|2222|
3|33|3333|
4|44|4444|
5|55|5555|555),
- 'overlapping publications with overlapping column lists');
+ 'overlapping publications with overlapping column lists');
# TEST: create a table with a column list, then change the replica
# identity by replacing a primary key (but use a different column in
# the column list)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab6 (a int PRIMARY KEY, b int, c int, d int);
CREATE PUBLICATION pub4 FOR TABLE tab6 (a, b);
@@ -413,31 +448,35 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO tab6 VALUES (1, 22, 333, 4444);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab6 (a int PRIMARY KEY, b int, c int, d int);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub4
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab6 VALUES (2, 33, 444, 5555);
UPDATE tab6 SET b = b * 2, c = c * 3, d = d * 4;
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab6 ORDER BY a"),
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab6 ORDER BY a"),
qq(1|44||
2|66||), 'replication with the original primary key');
# now redefine the constraint - move the primary key to a different column
# (which is still covered by the column list, though)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
ALTER TABLE tab6 DROP CONSTRAINT tab6_pkey;
ALTER TABLE tab6 ADD PRIMARY KEY (b);
));
@@ -445,35 +484,39 @@ $node_publisher->safe_psql('postgres', qq(
# we need to do the same thing on the subscriber
# XXX What would happen if this happens before the publisher ALTER? Or
# interleaved, somehow? But that seems unrelated to column lists.
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER TABLE tab6 DROP CONSTRAINT tab6_pkey;
ALTER TABLE tab6 ADD PRIMARY KEY (b);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab6 VALUES (3, 55, 666, 8888);
UPDATE tab6 SET b = b * 2, c = c * 3, d = d * 4;
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab6 ORDER BY a"),
- qq(1|88||
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab6 ORDER BY a"),
+ qq(1|88||
2|132||
3|110||),
- 'replication with the modified primary key');
+ 'replication with the modified primary key');
# TEST: create a table with a column list, then change the replica
# identity by replacing a primary key with a key on multiple columns
# (all of them covered by the column list)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab7 (a int PRIMARY KEY, b int, c int, d int);
CREATE PUBLICATION pub5 FOR TABLE tab7 (a, b);
@@ -481,52 +524,58 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO tab7 VALUES (1, 22, 333, 4444);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE tab7 (a int PRIMARY KEY, b int, c int, d int);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub5
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab7 VALUES (2, 33, 444, 5555);
UPDATE tab7 SET b = b * 2, c = c * 3, d = d * 4;
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab7 ORDER BY a"),
- qq(1|44||
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab7 ORDER BY a"),
+ qq(1|44||
2|66||), 'replication with the original primary key');
# now redefine the constraint - move the primary key to a different column
# (which is not covered by the column list)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
ALTER TABLE tab7 DROP CONSTRAINT tab7_pkey;
ALTER TABLE tab7 ADD PRIMARY KEY (a, b);
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO tab7 VALUES (3, 55, 666, 7777);
UPDATE tab7 SET b = b * 2, c = c * 3, d = d * 4;
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab7 ORDER BY a"),
- qq(1|88||
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab7 ORDER BY a"),
+ qq(1|88||
2|132||
3|110||),
- 'replication with the modified primary key');
+ 'replication with the modified primary key');
# now switch the primary key again to other columns not covered by the
# column list, but also generate writes between the drop and creation
# of the new constraint
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
ALTER TABLE tab7 DROP CONSTRAINT tab7_pkey;
INSERT INTO tab7 VALUES (4, 77, 888, 9999);
-- update/delete is not allowed for tables without RI
@@ -535,16 +584,17 @@ $node_publisher->safe_psql('postgres', qq(
DELETE FROM tab7 WHERE a = 1;
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab7 ORDER BY a"),
- qq(2|264||
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM tab7 ORDER BY a"),
+ qq(2|264||
3|220||
4|154||),
- 'replication with the modified primary key');
+ 'replication with the modified primary key');
# TEST: partitioned tables (with publish_via_partition_root = false)
@@ -555,7 +605,8 @@ is($node_subscriber->safe_psql('postgres',"SELECT * FROM tab7 ORDER BY a"),
# First, let's create a partitioned table with two partitions, each with
# a different RI, but a column list not covering all of those RIs.
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part_a (a int, b int, c int) PARTITION BY LIST (a);
CREATE TABLE test_part_a_1 PARTITION OF test_part_a FOR VALUES IN (1,2,3,4,5);
@@ -572,7 +623,8 @@ $node_publisher->safe_psql('postgres', qq(
));
# do the same thing on the subscriber (with the opposite column order)
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part_a (b int, a int) PARTITION BY LIST (a);
CREATE TABLE test_part_a_1 PARTITION OF test_part_a FOR VALUES IN (1,2,3,4,5);
@@ -586,38 +638,43 @@ $node_subscriber->safe_psql('postgres', qq(
# create a publication replicating just the column "a", which is not enough
# for the second partition
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE PUBLICATION pub6 FOR TABLE test_part_a (b, a) WITH (publish_via_partition_root = true);
ALTER PUBLICATION pub6 ADD TABLE test_part_a_1 (a);
ALTER PUBLICATION pub6 ADD TABLE test_part_a_2 (b);
));
# add the publication to our subscription, wait for sync to complete
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub6
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_part_a VALUES (2, 5);
INSERT INTO test_part_a VALUES (7, 6);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT a, b FROM test_part_a ORDER BY a, b"),
- qq(1|3
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT a, b FROM test_part_a ORDER BY a, b"),
+ qq(1|3
2|5
6|4
7|6),
- 'partitions with different replica identities not replicated correctly');
+ 'partitions with different replica identities not replicated correctly');
# This time start with a column list covering RI for all partitions, but
# then update the column list to not cover column "b" (needed by the
# second partition)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part_b (a int, b int) PARTITION BY LIST (a);
CREATE TABLE test_part_b_1 PARTITION OF test_part_b FOR VALUES IN (1,2,3,4,5);
@@ -634,7 +691,8 @@ $node_publisher->safe_psql('postgres', qq(
));
# do the same thing on the subscriber
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part_b (a int, b int) PARTITION BY LIST (a);
CREATE TABLE test_part_b_1 PARTITION OF test_part_b FOR VALUES IN (1,2,3,4,5);
@@ -648,37 +706,42 @@ $node_subscriber->safe_psql('postgres', qq(
# create a publication replicating both columns, which is sufficient for
# both partitions
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE PUBLICATION pub7 FOR TABLE test_part_b (a, b) WITH (publish_via_partition_root = true);
));
# add the publication to our subscription, wait for sync to complete
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub7
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_part_b VALUES (2, 3);
INSERT INTO test_part_b VALUES (7, 4);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_part_b ORDER BY a, b"),
- qq(1|1
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_part_b ORDER BY a, b"),
+ qq(1|1
2|3
6|2
7|4),
- 'partitions with different replica identities not replicated correctly');
+ 'partitions with different replica identities not replicated correctly');
# TEST: This time start with a column list covering RI for all partitions,
# but then update RI for one of the partitions to not be covered by the
# column list anymore.
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part_c (a int, b int, c int) PARTITION BY LIST (a);
CREATE TABLE test_part_c_1 PARTITION OF test_part_c FOR VALUES IN (1,3);
@@ -695,7 +758,8 @@ $node_publisher->safe_psql('postgres', qq(
));
# do the same thing on the subscriber
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part_c (a int, b int, c int) PARTITION BY LIST (a);
CREATE TABLE test_part_c_1 PARTITION OF test_part_c FOR VALUES IN (1,3);
@@ -710,39 +774,44 @@ $node_subscriber->safe_psql('postgres', qq(
# create a publication replicating data through partition root, with a column
# list on the root, and then add the partitions one by one with separate
# column lists (but those are not applied)
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE PUBLICATION pub8 FOR TABLE test_part_c WITH (publish_via_partition_root = false);
ALTER PUBLICATION pub8 ADD TABLE test_part_c_1 (a,c);
ALTER PUBLICATION pub8 ADD TABLE test_part_c_2 (a,b);
));
# add the publication to our subscription, wait for sync to complete
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
DROP SUBSCRIPTION sub1;
CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub8;
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_part_c VALUES (3, 7, 8);
INSERT INTO test_part_c VALUES (4, 9, 10);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_part_c ORDER BY a, b"),
- qq(1||5
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_part_c ORDER BY a, b"),
+ qq(1||5
2|4|
3||8
4|9|),
- 'partitions with different replica identities not replicated correctly');
+ 'partitions with different replica identities not replicated correctly');
# create a publication not replicating data through partition root, without
# a column list on the root, and then add the partitions one by one with
# separate column lists
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
DROP PUBLICATION pub8;
CREATE PUBLICATION pub8 FOR TABLE test_part_c WITH (publish_via_partition_root = false);
ALTER PUBLICATION pub8 ADD TABLE test_part_c_1 (a);
@@ -750,14 +819,16 @@ $node_publisher->safe_psql('postgres', qq(
));
# add the publication to our subscription, wait for sync to complete
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
TRUNCATE test_part_c;
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
TRUNCATE test_part_c;
INSERT INTO test_part_c VALUES (1, 3, 5);
INSERT INTO test_part_c VALUES (2, 4, 6);
@@ -765,16 +836,18 @@ $node_publisher->safe_psql('postgres', qq(
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_part_c ORDER BY a, b"),
- qq(1||
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_part_c ORDER BY a, b"),
+ qq(1||
2|4|),
- 'partitions with different replica identities not replicated correctly');
+ 'partitions with different replica identities not replicated correctly');
# TEST: Start with a single partition, with RI compatible with the column
# list, and then attach a partition with incompatible RI.
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part_d (a int, b int) PARTITION BY LIST (a);
CREATE TABLE test_part_d_1 PARTITION OF test_part_d FOR VALUES IN (1,3);
@@ -786,7 +859,8 @@ $node_publisher->safe_psql('postgres', qq(
# do the same thing on the subscriber (in fact, create both partitions right
# away, no need to delay that)
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_part_d (a int, b int) PARTITION BY LIST (a);
CREATE TABLE test_part_d_1 PARTITION OF test_part_d FOR VALUES IN (1,3);
@@ -800,33 +874,38 @@ $node_subscriber->safe_psql('postgres', qq(
# create a publication with column list (a), compatible with the replica
# identity of the single existing partition
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE PUBLICATION pub9 FOR TABLE test_part_d (a) WITH (publish_via_partition_root = true);
));
# add the publication to our subscription, wait for sync to complete
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub9
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_part_d VALUES (3, 4);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_part_d ORDER BY a, b"),
- qq(1|
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_part_d ORDER BY a, b"),
+ qq(1|
3|),
- 'partitions with different replica identities not replicated correctly');
+ 'partitions with different replica identities not replicated correctly');
# TEST: With a table included in multiple publications, we should use a
# union of the column lists. So with column lists (a,b) and (a,c) we
# should replicate (a,b,c).
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_mix_1 (a int PRIMARY KEY, b int, c int);
CREATE PUBLICATION pub_mix_1 FOR TABLE test_mix_1 (a, b);
CREATE PUBLICATION pub_mix_2 FOR TABLE test_mix_1 (a, c);
@@ -835,23 +914,26 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO test_mix_1 VALUES (1, 2, 3);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_mix_1 (a int PRIMARY KEY, b int, c int);
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub_mix_1, pub_mix_2;
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_mix_1 VALUES (4, 5, 6);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_mix_1 ORDER BY a"),
- qq(1|2|3
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_mix_1 ORDER BY a"),
+ qq(1|2|3
4|5|6),
- 'a mix of publications should use a union of column list');
+ 'a mix of publications should use a union of column list');
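# Not part of the original test, just an illustrative sketch: on servers
# where pg_publication_tables exposes an "attnames" column (added together
# with column-list support), the per-publication lists for test_mix_1 could
# be inspected like this.  Each publication stores its own list; the union
# (a, b, c) is only computed when changes are sent to the subscription.
my $mix1_lists = $node_publisher->safe_psql(
	'postgres', qq(
	SELECT pubname, attnames FROM pg_publication_tables
	WHERE tablename = 'test_mix_1' ORDER BY pubname;
));
note "column lists stored for test_mix_1:\n$mix1_lists";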
# TEST: With a table included in multiple publications, we should use a
@@ -859,12 +941,14 @@ is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_mix_1 ORDER BY a")
# TABLES, we should replicate all columns.
# drop unnecessary tables, so as not to interfere with the FOR ALL TABLES
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
DROP TABLE tab1, tab2, tab3, tab4, tab5, tab6, tab7, test_mix_1,
test_part, test_part_a, test_part_b, test_part_c, test_part_d;
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_mix_2 (a int PRIMARY KEY, b int, c int);
CREATE PUBLICATION pub_mix_3 FOR TABLE test_mix_2 (a, b);
CREATE PUBLICATION pub_mix_4 FOR ALL TABLES;
@@ -873,7 +957,8 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO test_mix_2 VALUES (1, 2, 3);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_mix_2 (a int PRIMARY KEY, b int, c int);
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub_mix_3, pub_mix_4;
ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
@@ -881,28 +966,31 @@ $node_subscriber->safe_psql('postgres', qq(
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_mix_2 VALUES (4, 5, 6);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_mix_2"),
- qq(1|2|3
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM test_mix_2"),
+ qq(1|2|3
4|5|6),
- 'a mix of publications should use a union of column list');
+ 'a mix of publications should use a union of column list');
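# Not part of the original test: a minimal sketch (assuming the prattrs
# column of pg_publication_rel, present on servers with column-list
# support) of why FOR ALL TABLES wins here.  pub_mix_3 stores a column
# list for test_mix_2, while the FOR ALL TABLES publication has no
# per-relation entry at all, so the union ends up covering every column.
my $stored = $node_publisher->safe_psql(
	'postgres', qq(
	SELECT p.pubname, p.puballtables, r.prattrs
	FROM pg_publication p
	LEFT JOIN pg_publication_rel r ON r.prpubid = p.oid
	WHERE p.pubname IN ('pub_mix_3', 'pub_mix_4')
	ORDER BY p.pubname;
));
note "publication catalog state:\n$stored";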
# TEST: With a table included in multiple publications, we should use a
# union of the column lists. If any of the publications is FOR ALL
# TABLES IN SCHEMA, we should replicate all columns.
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
DROP SUBSCRIPTION sub1;
CREATE TABLE test_mix_3 (a int PRIMARY KEY, b int, c int);
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
DROP TABLE test_mix_2;
CREATE TABLE test_mix_3 (a int PRIMARY KEY, b int, c int);
CREATE PUBLICATION pub_mix_5 FOR TABLE test_mix_3 (a, b);
@@ -912,22 +1000,24 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO test_mix_3 VALUES (1, 2, 3);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub_mix_5, pub_mix_6;
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_mix_3 VALUES (4, 5, 6);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_mix_3"),
- qq(1|2|3
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM test_mix_3"),
+ qq(1|2|3
4|5|6),
- 'a mix of publications should use a union of column list');
+ 'a mix of publications should use a union of column list');
# TEST: Check handling of publish_via_partition_root - if a partition is
@@ -935,7 +1025,8 @@ is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_mix_3"),
# defined for the whole table (not the partitions) - both during the initial
# sync and when replicating changes. This is what we do for row filters.
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
DROP SUBSCRIPTION sub1;
CREATE TABLE test_root (a int PRIMARY KEY, b int, c int) PARTITION BY RANGE (a);
@@ -943,7 +1034,8 @@ $node_subscriber->safe_psql('postgres', qq(
CREATE TABLE test_root_2 PARTITION OF test_root FOR VALUES FROM (10) TO (20);
));
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
CREATE TABLE test_root (a int PRIMARY KEY, b int, c int) PARTITION BY RANGE (a);
CREATE TABLE test_root_1 PARTITION OF test_root FOR VALUES FROM (1) TO (10);
CREATE TABLE test_root_2 PARTITION OF test_root FOR VALUES FROM (10) TO (20);
@@ -955,25 +1047,28 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO test_root VALUES (10, 20, 30);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE SUBSCRIPTION sub1 CONNECTION '$publisher_connstr' PUBLICATION pub_root_true;
));
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO test_root VALUES (2, 3, 4);
INSERT INTO test_root VALUES (11, 21, 31);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_root ORDER BY a, b, c"),
- qq(1||
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM test_root ORDER BY a, b, c"),
+ qq(1||
2||
10||
11||),
- 'publication via partition root applies column list');
+ 'publication via partition root applies column list');
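# Not part of the original test: a minimal sketch of how the column list
# applied above could be inspected.  With publish_via_partition_root = true
# the leaf partitions are published via test_root, so pg_publication_tables
# (again assuming its attnames column) is expected to report the root table
# with the root's column list, which is what both the initial sync and the
# later changes used.
my $root_cols = $node_publisher->safe_psql(
	'postgres', qq(
	SELECT tablename, attnames FROM pg_publication_tables
	WHERE pubname = 'pub_root_true';
));
note "columns published via the partition root:\n$root_cols";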
# TEST: Multiple publications which publish schema of parent table and
@@ -982,7 +1077,8 @@ is($node_subscriber->safe_psql('postgres',"SELECT * FROM test_root ORDER BY a, b
# also directly (with a column list). The expected outcome is that there is
# no column list.
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
DROP PUBLICATION pub1, pub2, pub3, pub4, pub5, pub6, pub7, pub8;
CREATE SCHEMA s1;
@@ -996,7 +1092,8 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO s1.t VALUES (1, 2, 3);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
CREATE SCHEMA s1;
CREATE TABLE s1.t (a int, b int, c int) PARTITION BY RANGE (a);
CREATE TABLE t_1 PARTITION OF s1.t FOR VALUES FROM (1) TO (10);
@@ -1006,21 +1103,23 @@ $node_subscriber->safe_psql('postgres', qq(
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO s1.t VALUES (4, 5, 6);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM s1.t ORDER BY a"),
- qq(1|2|3
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM s1.t ORDER BY a"),
+ qq(1|2|3
4|5|6),
- 'two publications, publishing the same relation');
+ 'two publications, publishing the same relation');
# Now resync the subscription, but with publications in the opposite order.
# The result should be the same.
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
TRUNCATE s1.t;
ALTER SUBSCRIPTION sub1 SET PUBLICATION pub2, pub1;
@@ -1028,22 +1127,24 @@ $node_subscriber->safe_psql('postgres', qq(
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO s1.t VALUES (7, 8, 9);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM s1.t ORDER BY a"),
- qq(7|8|9),
- 'two publications, publishing the same relation');
+is( $node_subscriber->safe_psql('postgres', "SELECT * FROM s1.t ORDER BY a"),
+ qq(7|8|9),
+ 'two publications, publishing the same relation');
# TEST: One publication, containing both the parent and child relations.
# The expected outcome is list "a", because that's the column list defined
# for the top-most ancestor added to the publication.
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
DROP SCHEMA s1 CASCADE;
CREATE TABLE t (a int, b int, c int) PARTITION BY RANGE (a);
CREATE TABLE t_1 PARTITION OF t FOR VALUES FROM (1) TO (10)
@@ -1057,7 +1158,8 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO t VALUES (1, 2, 3);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
DROP SCHEMA s1 CASCADE;
CREATE TABLE t (a int, b int, c int) PARTITION BY RANGE (a);
CREATE TABLE t_1 PARTITION OF t FOR VALUES FROM (1) TO (10)
@@ -1069,16 +1171,18 @@ $node_subscriber->safe_psql('postgres', qq(
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO t VALUES (4, 5, 6);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM t ORDER BY a, b, c"),
- qq(1||
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM t ORDER BY a, b, c"),
+ qq(1||
4||),
- 'publication containing both parent and child relation');
+ 'publication containing both parent and child relation');
# TEST: One publication, containing both the parent and child relations.
@@ -1087,7 +1191,8 @@ is($node_subscriber->safe_psql('postgres',"SELECT * FROM t ORDER BY a, b, c"),
# Note: The difference from the preceding test is that in this case both
# relations have a column list defined.
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
DROP TABLE t;
CREATE TABLE t (a int, b int, c int) PARTITION BY RANGE (a);
CREATE TABLE t_1 PARTITION OF t FOR VALUES FROM (1) TO (10)
@@ -1101,7 +1206,8 @@ $node_publisher->safe_psql('postgres', qq(
INSERT INTO t VALUES (1, 2, 3);
));
-$node_subscriber->safe_psql('postgres', qq(
+$node_subscriber->safe_psql(
+ 'postgres', qq(
DROP TABLE t;
CREATE TABLE t (a int, b int, c int) PARTITION BY RANGE (a);
CREATE TABLE t_1 PARTITION OF t FOR VALUES FROM (1) TO (10)
@@ -1113,16 +1219,18 @@ $node_subscriber->safe_psql('postgres', qq(
wait_for_subscription_sync($node_subscriber);
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
INSERT INTO t VALUES (4, 5, 6);
));
$node_publisher->wait_for_catchup('sub1');
-is($node_subscriber->safe_psql('postgres',"SELECT * FROM t ORDER BY a, b, c"),
- qq(1||
+is( $node_subscriber->safe_psql(
+ 'postgres', "SELECT * FROM t ORDER BY a, b, c"),
+ qq(1||
4||),
- 'publication containing both parent and child relation');
+ 'publication containing both parent and child relation');
$node_subscriber->stop('fast');