Diffstat (limited to 'src/test')
25 files changed, 563 insertions, 563 deletions
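The diff below mechanically renames "master" to "primary" throughout the TAP tests. As orientation only, here is a minimal, hypothetical sketch of the primary/standby setup pattern these tests use after the rename, written against the PostgresNode calls that appear in the diff (get_new_node, init, backup, init_from_backup, wait_for_catchup, safe_psql); the table name, backup label, and test count are illustrative assumptions and are not part of the commit:

    use strict;
    use warnings;
    use PostgresNode;
    use TestLib;
    use Test::More tests => 1;

    # Set up a primary that allows streaming replication.
    my $node_primary = get_new_node('primary');
    $node_primary->init(allows_streaming => 1);
    $node_primary->start;

    # Take a base backup and start a streaming standby from it.
    my $backup_name = 'my_backup';
    $node_primary->backup($backup_name);
    my $node_standby = get_new_node('standby');
    $node_standby->init_from_backup($node_primary, $backup_name,
        has_streaming => 1);
    $node_standby->start;

    # Write on the primary, wait for the standby to replay it, then check.
    $node_primary->safe_psql('postgres',
        "CREATE TABLE tab_demo AS SELECT generate_series(1,10) AS a");
    $node_primary->wait_for_catchup($node_standby, 'replay',
        $node_primary->lsn('insert'));
    is( $node_standby->safe_psql('postgres', "SELECT count(*) FROM tab_demo"),
        '10', 'standby replayed rows written on the primary');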
diff --git a/src/test/authentication/t/001_password.pl b/src/test/authentication/t/001_password.pl index 82536eb60fb..1305de0051a 100644 --- a/src/test/authentication/t/001_password.pl +++ b/src/test/authentication/t/001_password.pl @@ -51,8 +51,8 @@ sub test_role return; } -# Initialize master node -my $node = get_new_node('master'); +# Initialize primary node +my $node = get_new_node('primary'); $node->init; $node->start; diff --git a/src/test/authentication/t/002_saslprep.pl b/src/test/authentication/t/002_saslprep.pl index 32d4e43fc7d..0aaab090ec5 100644 --- a/src/test/authentication/t/002_saslprep.pl +++ b/src/test/authentication/t/002_saslprep.pl @@ -49,9 +49,9 @@ sub test_login return; } -# Initialize master node. Force UTF-8 encoding, so that we can use non-ASCII +# Initialize primary node. Force UTF-8 encoding, so that we can use non-ASCII # characters in the passwords below. -my $node = get_new_node('master'); +my $node = get_new_node('primary'); $node->init(extra => [ '--locale=C', '--encoding=UTF8' ]); $node->start; diff --git a/src/test/modules/commit_ts/t/002_standby.pl b/src/test/modules/commit_ts/t/002_standby.pl index f376b595962..872efb2e8ea 100644 --- a/src/test/modules/commit_ts/t/002_standby.pl +++ b/src/test/modules/commit_ts/t/002_standby.pl @@ -8,45 +8,45 @@ use Test::More tests => 4; use PostgresNode; my $bkplabel = 'backup'; -my $master = get_new_node('master'); -$master->init(allows_streaming => 1); +my $primary = get_new_node('primary'); +$primary->init(allows_streaming => 1); -$master->append_conf( +$primary->append_conf( 'postgresql.conf', qq{ track_commit_timestamp = on max_wal_senders = 5 }); -$master->start; -$master->backup($bkplabel); +$primary->start; +$primary->backup($bkplabel); my $standby = get_new_node('standby'); -$standby->init_from_backup($master, $bkplabel, has_streaming => 1); +$standby->init_from_backup($primary, $bkplabel, has_streaming => 1); $standby->start; for my $i (1 .. 
10) { - $master->safe_psql('postgres', "create table t$i()"); + $primary->safe_psql('postgres', "create table t$i()"); } -my $master_ts = $master->safe_psql('postgres', +my $primary_ts = $primary->safe_psql('postgres', qq{SELECT ts.* FROM pg_class, pg_xact_commit_timestamp(xmin) AS ts WHERE relname = 't10'} ); -my $master_lsn = - $master->safe_psql('postgres', 'select pg_current_wal_lsn()'); +my $primary_lsn = + $primary->safe_psql('postgres', 'select pg_current_wal_lsn()'); $standby->poll_query_until('postgres', - qq{SELECT '$master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()}) + qq{SELECT '$primary_lsn'::pg_lsn <= pg_last_wal_replay_lsn()}) or die "standby never caught up"; my $standby_ts = $standby->safe_psql('postgres', qq{select ts.* from pg_class, pg_xact_commit_timestamp(xmin) ts where relname = 't10'} ); -is($master_ts, $standby_ts, "standby gives same value as master"); +is($primary_ts, $standby_ts, "standby gives same value as primary"); -$master->append_conf('postgresql.conf', 'track_commit_timestamp = off'); -$master->restart; -$master->safe_psql('postgres', 'checkpoint'); -$master_lsn = $master->safe_psql('postgres', 'select pg_current_wal_lsn()'); +$primary->append_conf('postgresql.conf', 'track_commit_timestamp = off'); +$primary->restart; +$primary->safe_psql('postgres', 'checkpoint'); +$primary_lsn = $primary->safe_psql('postgres', 'select pg_current_wal_lsn()'); $standby->poll_query_until('postgres', - qq{SELECT '$master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()}) + qq{SELECT '$primary_lsn'::pg_lsn <= pg_last_wal_replay_lsn()}) or die "standby never caught up"; $standby->safe_psql('postgres', 'checkpoint'); @@ -54,10 +54,10 @@ $standby->safe_psql('postgres', 'checkpoint'); my ($ret, $standby_ts_stdout, $standby_ts_stderr) = $standby->psql('postgres', 'select ts.* from pg_class, pg_xact_commit_timestamp(xmin) ts where relname = \'t10\'' ); -is($ret, 3, 'standby errors when master turned feature off'); +is($ret, 3, 'standby errors when primary turned feature off'); is($standby_ts_stdout, '', - "standby gives no value when master turned feature off"); + "standby gives no value when primary turned feature off"); like( $standby_ts_stderr, qr/could not get commit timestamp data/, - 'expected error when master turned feature off'); + 'expected error when primary turned feature off'); diff --git a/src/test/modules/commit_ts/t/003_standby_2.pl b/src/test/modules/commit_ts/t/003_standby_2.pl index 9165d500536..36ab829dfdd 100644 --- a/src/test/modules/commit_ts/t/003_standby_2.pl +++ b/src/test/modules/commit_ts/t/003_standby_2.pl @@ -1,4 +1,4 @@ -# Test master/standby scenario where the track_commit_timestamp GUC is +# Test primary/standby scenario where the track_commit_timestamp GUC is # repeatedly toggled on and off. use strict; use warnings; @@ -8,31 +8,31 @@ use Test::More tests => 4; use PostgresNode; my $bkplabel = 'backup'; -my $master = get_new_node('master'); -$master->init(allows_streaming => 1); -$master->append_conf( +my $primary = get_new_node('primary'); +$primary->init(allows_streaming => 1); +$primary->append_conf( 'postgresql.conf', qq{ track_commit_timestamp = on max_wal_senders = 5 }); -$master->start; -$master->backup($bkplabel); +$primary->start; +$primary->backup($bkplabel); my $standby = get_new_node('standby'); -$standby->init_from_backup($master, $bkplabel, has_streaming => 1); +$standby->init_from_backup($primary, $bkplabel, has_streaming => 1); $standby->start; for my $i (1 .. 
10) { - $master->safe_psql('postgres', "create table t$i()"); + $primary->safe_psql('postgres', "create table t$i()"); } -$master->append_conf('postgresql.conf', 'track_commit_timestamp = off'); -$master->restart; -$master->safe_psql('postgres', 'checkpoint'); -my $master_lsn = - $master->safe_psql('postgres', 'select pg_current_wal_lsn()'); +$primary->append_conf('postgresql.conf', 'track_commit_timestamp = off'); +$primary->restart; +$primary->safe_psql('postgres', 'checkpoint'); +my $primary_lsn = + $primary->safe_psql('postgres', 'select pg_current_wal_lsn()'); $standby->poll_query_until('postgres', - qq{SELECT '$master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()}) + qq{SELECT '$primary_lsn'::pg_lsn <= pg_last_wal_replay_lsn()}) or die "standby never caught up"; $standby->safe_psql('postgres', 'checkpoint'); @@ -49,10 +49,10 @@ like( qr/could not get commit timestamp data/, 'expected err msg after restart'); -$master->append_conf('postgresql.conf', 'track_commit_timestamp = on'); -$master->restart; -$master->append_conf('postgresql.conf', 'track_commit_timestamp = off'); -$master->restart; +$primary->append_conf('postgresql.conf', 'track_commit_timestamp = on'); +$primary->restart; +$primary->append_conf('postgresql.conf', 'track_commit_timestamp = off'); +$primary->restart; system_or_bail('pg_ctl', '-D', $standby->data_dir, 'promote'); diff --git a/src/test/modules/commit_ts/t/004_restart.pl b/src/test/modules/commit_ts/t/004_restart.pl index 39ca25a06bf..4e6ae776b97 100644 --- a/src/test/modules/commit_ts/t/004_restart.pl +++ b/src/test/modules/commit_ts/t/004_restart.pl @@ -5,15 +5,15 @@ use PostgresNode; use TestLib; use Test::More tests => 16; -my $node_master = get_new_node('master'); -$node_master->init(allows_streaming => 1); -$node_master->append_conf('postgresql.conf', 'track_commit_timestamp = on'); -$node_master->start; +my $node_primary = get_new_node('primary'); +$node_primary->init(allows_streaming => 1); +$node_primary->append_conf('postgresql.conf', 'track_commit_timestamp = on'); +$node_primary->start; my ($ret, $stdout, $stderr); ($ret, $stdout, $stderr) = - $node_master->psql('postgres', qq[SELECT pg_xact_commit_timestamp('0');]); + $node_primary->psql('postgres', qq[SELECT pg_xact_commit_timestamp('0');]); is($ret, 3, 'getting ts of InvalidTransactionId reports error'); like( $stderr, @@ -21,27 +21,27 @@ like( 'expected error from InvalidTransactionId'); ($ret, $stdout, $stderr) = - $node_master->psql('postgres', qq[SELECT pg_xact_commit_timestamp('1');]); + $node_primary->psql('postgres', qq[SELECT pg_xact_commit_timestamp('1');]); is($ret, 0, 'getting ts of BootstrapTransactionId succeeds'); is($stdout, '', 'timestamp of BootstrapTransactionId is null'); ($ret, $stdout, $stderr) = - $node_master->psql('postgres', qq[SELECT pg_xact_commit_timestamp('2');]); + $node_primary->psql('postgres', qq[SELECT pg_xact_commit_timestamp('2');]); is($ret, 0, 'getting ts of FrozenTransactionId succeeds'); is($stdout, '', 'timestamp of FrozenTransactionId is null'); # Since FirstNormalTransactionId will've occurred during initdb, long before we # enabled commit timestamps, it'll be null since we have no cts data for it but # cts are enabled. 
-is( $node_master->safe_psql( +is( $node_primary->safe_psql( 'postgres', qq[SELECT pg_xact_commit_timestamp('3');]), '', 'committs for FirstNormalTransactionId is null'); -$node_master->safe_psql('postgres', +$node_primary->safe_psql('postgres', qq[CREATE TABLE committs_test(x integer, y timestamp with time zone);]); -my $xid = $node_master->safe_psql( +my $xid = $node_primary->safe_psql( 'postgres', qq[ BEGIN; INSERT INTO committs_test(x, y) VALUES (1, current_timestamp); @@ -49,43 +49,43 @@ my $xid = $node_master->safe_psql( COMMIT; ]); -my $before_restart_ts = $node_master->safe_psql('postgres', +my $before_restart_ts = $node_primary->safe_psql('postgres', qq[SELECT pg_xact_commit_timestamp('$xid');]); ok($before_restart_ts ne '' && $before_restart_ts ne 'null', 'commit timestamp recorded'); -$node_master->stop('immediate'); -$node_master->start; +$node_primary->stop('immediate'); +$node_primary->start; -my $after_crash_ts = $node_master->safe_psql('postgres', +my $after_crash_ts = $node_primary->safe_psql('postgres', qq[SELECT pg_xact_commit_timestamp('$xid');]); is($after_crash_ts, $before_restart_ts, 'timestamps before and after crash are equal'); -$node_master->stop('fast'); -$node_master->start; +$node_primary->stop('fast'); +$node_primary->start; -my $after_restart_ts = $node_master->safe_psql('postgres', +my $after_restart_ts = $node_primary->safe_psql('postgres', qq[SELECT pg_xact_commit_timestamp('$xid');]); is($after_restart_ts, $before_restart_ts, 'timestamps before and after restart are equal'); # Now disable commit timestamps -$node_master->append_conf('postgresql.conf', 'track_commit_timestamp = off'); -$node_master->stop('fast'); +$node_primary->append_conf('postgresql.conf', 'track_commit_timestamp = off'); +$node_primary->stop('fast'); # Start the server, which generates a XLOG_PARAMETER_CHANGE record where # the parameter change is registered. -$node_master->start; +$node_primary->start; # Now restart again the server so as no XLOG_PARAMETER_CHANGE record are # replayed with the follow-up immediate shutdown. -$node_master->restart; +$node_primary->restart; # Move commit timestamps across page boundaries. Things should still # be able to work across restarts with those transactions committed while # track_commit_timestamp is disabled. 
-$node_master->safe_psql( +$node_primary->safe_psql( 'postgres', qq(CREATE PROCEDURE consume_xid(cnt int) AS \$\$ @@ -100,9 +100,9 @@ DECLARE \$\$ LANGUAGE plpgsql; )); -$node_master->safe_psql('postgres', 'CALL consume_xid(2000)'); +$node_primary->safe_psql('postgres', 'CALL consume_xid(2000)'); -($ret, $stdout, $stderr) = $node_master->psql('postgres', +($ret, $stdout, $stderr) = $node_primary->psql('postgres', qq[SELECT pg_xact_commit_timestamp('$xid');]); is($ret, 3, 'no commit timestamp from enable tx when cts disabled'); like( @@ -111,7 +111,7 @@ like( 'expected error from enabled tx when committs disabled'); # Do a tx while cts disabled -my $xid_disabled = $node_master->safe_psql( +my $xid_disabled = $node_primary->safe_psql( 'postgres', qq[ BEGIN; INSERT INTO committs_test(x, y) VALUES (2, current_timestamp); @@ -120,7 +120,7 @@ my $xid_disabled = $node_master->safe_psql( ]); # Should be inaccessible -($ret, $stdout, $stderr) = $node_master->psql('postgres', +($ret, $stdout, $stderr) = $node_primary->psql('postgres', qq[SELECT pg_xact_commit_timestamp('$xid_disabled');]); is($ret, 3, 'no commit timestamp when disabled'); like( @@ -129,21 +129,21 @@ like( 'expected error from disabled tx when committs disabled'); # Re-enable, restart and ensure we can still get the old timestamps -$node_master->append_conf('postgresql.conf', 'track_commit_timestamp = on'); +$node_primary->append_conf('postgresql.conf', 'track_commit_timestamp = on'); # An immediate shutdown is used here. At next startup recovery will # replay transactions which committed when track_commit_timestamp was # disabled, and the facility should be able to work properly. -$node_master->stop('immediate'); -$node_master->start; +$node_primary->stop('immediate'); +$node_primary->start; -my $after_enable_ts = $node_master->safe_psql('postgres', +my $after_enable_ts = $node_primary->safe_psql('postgres', qq[SELECT pg_xact_commit_timestamp('$xid');]); is($after_enable_ts, '', 'timestamp of enabled tx null after re-enable'); -my $after_enable_disabled_ts = $node_master->safe_psql('postgres', +my $after_enable_disabled_ts = $node_primary->safe_psql('postgres', qq[SELECT pg_xact_commit_timestamp('$xid_disabled');]); is($after_enable_disabled_ts, '', 'timestamp of disabled tx null after re-enable'); -$node_master->stop; +$node_primary->stop; diff --git a/src/test/modules/test_misc/t/001_constraint_validation.pl b/src/test/modules/test_misc/t/001_constraint_validation.pl index f762bc21c19..22497f22b01 100644 --- a/src/test/modules/test_misc/t/001_constraint_validation.pl +++ b/src/test/modules/test_misc/t/001_constraint_validation.pl @@ -7,7 +7,7 @@ use TestLib; use Test::More tests => 42; # Initialize a test cluster -my $node = get_new_node('master'); +my $node = get_new_node('primary'); $node->init(); # Turn message level up to DEBUG1 so that we get the messages we want to see $node->append_conf('postgresql.conf', 'client_min_messages = DEBUG1'); diff --git a/src/test/perl/PostgresNode.pm b/src/test/perl/PostgresNode.pm index 1407359aef6..b216bbbe4bb 100644 --- a/src/test/perl/PostgresNode.pm +++ b/src/test/perl/PostgresNode.pm @@ -1822,11 +1822,11 @@ sub run_log Look up WAL locations on the server: - * insert location (master only, error on replica) - * write location (master only, error on replica) - * flush location (master only, error on replica) - * receive location (always undef on master) - * replay location (always undef on master) + * insert location (primary only, error on replica) + * write location (primary only, error 
on replica) + * flush location (primary only, error on replica) + * receive location (always undef on primary) + * replay location (always undef on primary) mode must be specified. @@ -1876,7 +1876,7 @@ poll_query_until timeout. Requires that the 'postgres' db exists and is accessible. -target_lsn may be any arbitrary lsn, but is typically $master_node->lsn('insert'). +target_lsn may be any arbitrary lsn, but is typically $primary_node->lsn('insert'). If omitted, pg_current_wal_lsn() is used. This is not a test. It die()s on failure. @@ -1935,7 +1935,7 @@ This is not a test. It die()s on failure. If the slot is not active, will time out after poll_query_until's timeout. -target_lsn may be any arbitrary lsn, but is typically $master_node->lsn('insert'). +target_lsn may be any arbitrary lsn, but is typically $primary_node->lsn('insert'). Note that for logical slots, restart_lsn is held down by the oldest in-progress tx. diff --git a/src/test/perl/README b/src/test/perl/README index c61c3f5e942..fd9394957f7 100644 --- a/src/test/perl/README +++ b/src/test/perl/README @@ -48,7 +48,7 @@ Each test script should begin with: then it will generally need to set up one or more nodes, run commands against them and evaluate the results. For example: - my $node = PostgresNode->get_new_node('master'); + my $node = PostgresNode->get_new_node('primary'); $node->init; $node->start; diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl index 778f11b28b4..9e31a53de77 100644 --- a/src/test/recovery/t/001_stream_rep.pl +++ b/src/test/recovery/t/001_stream_rep.pl @@ -5,22 +5,22 @@ use PostgresNode; use TestLib; use Test::More tests => 36; -# Initialize master node -my $node_master = get_new_node('master'); +# Initialize primary node +my $node_primary = get_new_node('primary'); # A specific role is created to perform some tests related to replication, # and it needs proper authentication configuration. -$node_master->init( +$node_primary->init( allows_streaming => 1, auth_extra => [ '--create-role', 'repl_role' ]); -$node_master->start; +$node_primary->start; my $backup_name = 'my_backup'; # Take backup -$node_master->backup($backup_name); +$node_primary->backup($backup_name); -# Create streaming standby linking to master +# Create streaming standby linking to primary my $node_standby_1 = get_new_node('standby_1'); -$node_standby_1->init_from_backup($node_master, $backup_name, +$node_standby_1->init_from_backup($node_primary, $backup_name, has_streaming => 1); $node_standby_1->start; @@ -28,10 +28,10 @@ $node_standby_1->start; # pg_basebackup works on a standby). $node_standby_1->backup($backup_name); -# Take a second backup of the standby while the master is offline. -$node_master->stop; +# Take a second backup of the standby while the primary is offline. 
+$node_primary->stop; $node_standby_1->backup('my_backup_2'); -$node_master->start; +$node_primary->start; # Create second standby node linking to standby 1 my $node_standby_2 = get_new_node('standby_2'); @@ -39,13 +39,13 @@ $node_standby_2->init_from_backup($node_standby_1, $backup_name, has_streaming => 1); $node_standby_2->start; -# Create some content on master and check its presence in standby 1 -$node_master->safe_psql('postgres', +# Create some content on primary and check its presence in standby 1 +$node_primary->safe_psql('postgres', "CREATE TABLE tab_int AS SELECT generate_series(1,1002) AS a"); # Wait for standbys to catch up -$node_master->wait_for_catchup($node_standby_1, 'replay', - $node_master->lsn('insert')); +$node_primary->wait_for_catchup($node_standby_1, 'replay', + $node_primary->lsn('insert')); $node_standby_1->wait_for_catchup($node_standby_2, 'replay', $node_standby_1->lsn('replay')); @@ -105,57 +105,57 @@ sub test_target_session_attrs return; } -# Connect to master in "read-write" mode with master,standby1 list. -test_target_session_attrs($node_master, $node_standby_1, $node_master, +# Connect to primary in "read-write" mode with primary,standby1 list. +test_target_session_attrs($node_primary, $node_standby_1, $node_primary, "read-write", 0); -# Connect to master in "read-write" mode with standby1,master list. -test_target_session_attrs($node_standby_1, $node_master, $node_master, +# Connect to primary in "read-write" mode with standby1,primary list. +test_target_session_attrs($node_standby_1, $node_primary, $node_primary, "read-write", 0); -# Connect to master in "any" mode with master,standby1 list. -test_target_session_attrs($node_master, $node_standby_1, $node_master, "any", +# Connect to primary in "any" mode with primary,standby1 list. +test_target_session_attrs($node_primary, $node_standby_1, $node_primary, "any", 0); -# Connect to standby1 in "any" mode with standby1,master list. -test_target_session_attrs($node_standby_1, $node_master, $node_standby_1, +# Connect to standby1 in "any" mode with standby1,primary list. +test_target_session_attrs($node_standby_1, $node_primary, $node_standby_1, "any", 0); # Test for SHOW commands using a WAL sender connection with a replication # role. 
note "testing SHOW commands for replication connection"; -$node_master->psql( +$node_primary->psql( 'postgres', " CREATE ROLE repl_role REPLICATION LOGIN; GRANT pg_read_all_settings TO repl_role;"); -my $master_host = $node_master->host; -my $master_port = $node_master->port; -my $connstr_common = "host=$master_host port=$master_port user=repl_role"; +my $primary_host = $node_primary->host; +my $primary_port = $node_primary->port; +my $connstr_common = "host=$primary_host port=$primary_port user=repl_role"; my $connstr_rep = "$connstr_common replication=1"; my $connstr_db = "$connstr_common replication=database dbname=postgres"; # Test SHOW ALL -my ($ret, $stdout, $stderr) = $node_master->psql( +my ($ret, $stdout, $stderr) = $node_primary->psql( 'postgres', 'SHOW ALL;', on_error_die => 1, extra_params => [ '-d', $connstr_rep ]); ok($ret == 0, "SHOW ALL with replication role and physical replication"); -($ret, $stdout, $stderr) = $node_master->psql( +($ret, $stdout, $stderr) = $node_primary->psql( 'postgres', 'SHOW ALL;', on_error_die => 1, extra_params => [ '-d', $connstr_db ]); ok($ret == 0, "SHOW ALL with replication role and logical replication"); # Test SHOW with a user-settable parameter -($ret, $stdout, $stderr) = $node_master->psql( +($ret, $stdout, $stderr) = $node_primary->psql( 'postgres', 'SHOW work_mem;', on_error_die => 1, extra_params => [ '-d', $connstr_rep ]); ok( $ret == 0, "SHOW with user-settable parameter, replication role and physical replication" ); -($ret, $stdout, $stderr) = $node_master->psql( +($ret, $stdout, $stderr) = $node_primary->psql( 'postgres', 'SHOW work_mem;', on_error_die => 1, extra_params => [ '-d', $connstr_db ]); @@ -164,14 +164,14 @@ ok( $ret == 0, ); # Test SHOW with a superuser-settable parameter -($ret, $stdout, $stderr) = $node_master->psql( +($ret, $stdout, $stderr) = $node_primary->psql( 'postgres', 'SHOW primary_conninfo;', on_error_die => 1, extra_params => [ '-d', $connstr_rep ]); ok( $ret == 0, "SHOW with superuser-settable parameter, replication role and physical replication" ); -($ret, $stdout, $stderr) = $node_master->psql( +($ret, $stdout, $stderr) = $node_primary->psql( 'postgres', 'SHOW primary_conninfo;', on_error_die => 1, extra_params => [ '-d', $connstr_db ]); @@ -186,13 +186,13 @@ note "switching to physical replication slot"; # standbys. Since we're going to be testing things that affect the slot state, # also increase the standby feedback interval to ensure timely updates. my ($slotname_1, $slotname_2) = ('standby_1', 'standby_2'); -$node_master->append_conf('postgresql.conf', "max_replication_slots = 4"); -$node_master->restart; -is( $node_master->psql( +$node_primary->append_conf('postgresql.conf', "max_replication_slots = 4"); +$node_primary->restart; +is( $node_primary->psql( 'postgres', qq[SELECT pg_create_physical_replication_slot('$slotname_1');]), 0, - 'physical slot created on master'); + 'physical slot created on primary'); $node_standby_1->append_conf('postgresql.conf', "primary_slot_name = $slotname_1"); $node_standby_1->append_conf('postgresql.conf', @@ -231,7 +231,7 @@ sub get_slot_xmins # There's no hot standby feedback and there are no logical slots on either peer # so xmin and catalog_xmin should be null on both slots. 
-my ($xmin, $catalog_xmin) = get_slot_xmins($node_master, $slotname_1, +my ($xmin, $catalog_xmin) = get_slot_xmins($node_primary, $slotname_1, "xmin IS NULL AND catalog_xmin IS NULL"); is($xmin, '', 'xmin of non-cascaded slot null with no hs_feedback'); is($catalog_xmin, '', @@ -244,20 +244,20 @@ is($catalog_xmin, '', 'catalog xmin of cascaded slot null with no hs_feedback'); # Replication still works? -$node_master->safe_psql('postgres', 'CREATE TABLE replayed(val integer);'); +$node_primary->safe_psql('postgres', 'CREATE TABLE replayed(val integer);'); sub replay_check { - my $newval = $node_master->safe_psql('postgres', + my $newval = $node_primary->safe_psql('postgres', 'INSERT INTO replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val' ); - $node_master->wait_for_catchup($node_standby_1, 'replay', - $node_master->lsn('insert')); + $node_primary->wait_for_catchup($node_standby_1, 'replay', + $node_primary->lsn('insert')); $node_standby_1->wait_for_catchup($node_standby_2, 'replay', $node_standby_1->lsn('replay')); $node_standby_1->safe_psql('postgres', qq[SELECT 1 FROM replayed WHERE val = $newval]) - or die "standby_1 didn't replay master value $newval"; + or die "standby_1 didn't replay primary value $newval"; $node_standby_2->safe_psql('postgres', qq[SELECT 1 FROM replayed WHERE val = $newval]) or die "standby_2 didn't replay standby_1 value $newval"; @@ -278,7 +278,7 @@ $node_standby_2->safe_psql('postgres', $node_standby_2->reload; replay_check(); -($xmin, $catalog_xmin) = get_slot_xmins($node_master, $slotname_1, +($xmin, $catalog_xmin) = get_slot_xmins($node_primary, $slotname_1, "xmin IS NOT NULL AND catalog_xmin IS NULL"); isnt($xmin, '', 'xmin of non-cascaded slot non-null with hs feedback'); is($catalog_xmin, '', @@ -291,7 +291,7 @@ is($catalog_xmin1, '', 'catalog xmin of cascaded slot still null with hs_feedback'); note "doing some work to advance xmin"; -$node_master->safe_psql( +$node_primary->safe_psql( 'postgres', q{ do $$ begin @@ -306,12 +306,12 @@ begin end$$; }); -$node_master->safe_psql('postgres', 'VACUUM;'); -$node_master->safe_psql('postgres', 'CHECKPOINT;'); +$node_primary->safe_psql('postgres', 'VACUUM;'); +$node_primary->safe_psql('postgres', 'CHECKPOINT;'); my ($xmin2, $catalog_xmin2) = - get_slot_xmins($node_master, $slotname_1, "xmin <> '$xmin'"); -note "master slot's new xmin $xmin2, old xmin $xmin"; + get_slot_xmins($node_primary, $slotname_1, "xmin <> '$xmin'"); +note "primary slot's new xmin $xmin2, old xmin $xmin"; isnt($xmin2, $xmin, 'xmin of non-cascaded slot with hs feedback has changed'); is($catalog_xmin2, '', 'catalog xmin of non-cascaded slot still null with hs_feedback unchanged' @@ -335,7 +335,7 @@ $node_standby_2->safe_psql('postgres', $node_standby_2->reload; replay_check(); -($xmin, $catalog_xmin) = get_slot_xmins($node_master, $slotname_1, +($xmin, $catalog_xmin) = get_slot_xmins($node_primary, $slotname_1, "xmin IS NULL AND catalog_xmin IS NULL"); is($xmin, '', 'xmin of non-cascaded slot null with hs feedback reset'); is($catalog_xmin, '', @@ -349,55 +349,55 @@ is($catalog_xmin, '', note "check change primary_conninfo without restart"; $node_standby_2->append_conf('postgresql.conf', "primary_slot_name = ''"); -$node_standby_2->enable_streaming($node_master); +$node_standby_2->enable_streaming($node_primary); $node_standby_2->reload; # be sure do not streaming from cascade $node_standby_1->stop; -my $newval = $node_master->safe_psql('postgres', +my $newval = $node_primary->safe_psql('postgres', 'INSERT INTO 
replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val' ); -$node_master->wait_for_catchup($node_standby_2, 'replay', - $node_master->lsn('insert')); +$node_primary->wait_for_catchup($node_standby_2, 'replay', + $node_primary->lsn('insert')); my $is_replayed = $node_standby_2->safe_psql('postgres', qq[SELECT 1 FROM replayed WHERE val = $newval]); -is($is_replayed, qq(1), "standby_2 didn't replay master value $newval"); +is($is_replayed, qq(1), "standby_2 didn't replay primary value $newval"); # Drop any existing slots on the primary, for the follow-up tests. -$node_master->safe_psql('postgres', +$node_primary->safe_psql('postgres', "SELECT pg_drop_replication_slot(slot_name) FROM pg_replication_slots;"); # Test physical slot advancing and its durability. Create a new slot on # the primary, not used by any of the standbys. This reserves WAL at creation. my $phys_slot = 'phys_slot'; -$node_master->safe_psql('postgres', +$node_primary->safe_psql('postgres', "SELECT pg_create_physical_replication_slot('$phys_slot', true);"); # Generate some WAL, and switch to a new segment, used to check that # the previous segment is correctly getting recycled as the slot advancing # would recompute the minimum LSN calculated across all slots. -my $segment_removed = $node_master->safe_psql('postgres', +my $segment_removed = $node_primary->safe_psql('postgres', 'SELECT pg_walfile_name(pg_current_wal_lsn())'); chomp($segment_removed); -$node_master->psql( +$node_primary->psql( 'postgres', " CREATE TABLE tab_phys_slot (a int); INSERT INTO tab_phys_slot VALUES (generate_series(1,10)); SELECT pg_switch_wal();"); my $current_lsn = - $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();"); + $node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn();"); chomp($current_lsn); -my $psql_rc = $node_master->psql('postgres', +my $psql_rc = $node_primary->psql('postgres', "SELECT pg_replication_slot_advance('$phys_slot', '$current_lsn'::pg_lsn);" ); is($psql_rc, '0', 'slot advancing with physical slot'); -my $phys_restart_lsn_pre = $node_master->safe_psql('postgres', +my $phys_restart_lsn_pre = $node_primary->safe_psql('postgres', "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';" ); chomp($phys_restart_lsn_pre); # Slot advance should persist across clean restarts. -$node_master->restart; -my $phys_restart_lsn_post = $node_master->safe_psql('postgres', +$node_primary->restart; +my $phys_restart_lsn_post = $node_primary->safe_psql('postgres', "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$phys_slot';" ); chomp($phys_restart_lsn_post); @@ -406,6 +406,6 @@ ok( ($phys_restart_lsn_pre cmp $phys_restart_lsn_post) == 0, # Check if the previous segment gets correctly recycled after the # server stopped cleanly, causing a shutdown checkpoint to be generated. 
-my $master_data = $node_master->data_dir; -ok(!-f "$master_data/pg_wal/$segment_removed", +my $primary_data = $node_primary->data_dir; +ok(!-f "$primary_data/pg_wal/$segment_removed", "WAL segment $segment_removed recycled after physical slot advancing"); diff --git a/src/test/recovery/t/002_archiving.pl b/src/test/recovery/t/002_archiving.pl index 683c33b5100..cf8988f62a7 100644 --- a/src/test/recovery/t/002_archiving.pl +++ b/src/test/recovery/t/002_archiving.pl @@ -6,38 +6,38 @@ use TestLib; use Test::More tests => 3; use File::Copy; -# Initialize master node, doing archives -my $node_master = get_new_node('master'); -$node_master->init( +# Initialize primary node, doing archives +my $node_primary = get_new_node('primary'); +$node_primary->init( has_archiving => 1, allows_streaming => 1); my $backup_name = 'my_backup'; # Start it -$node_master->start; +$node_primary->start; # Take backup for standby -$node_master->backup($backup_name); +$node_primary->backup($backup_name); # Initialize standby node from backup, fetching WAL from archives my $node_standby = get_new_node('standby'); -$node_standby->init_from_backup($node_master, $backup_name, +$node_standby->init_from_backup($node_primary, $backup_name, has_restoring => 1); $node_standby->append_conf('postgresql.conf', "wal_retrieve_retry_interval = '100ms'"); $node_standby->start; -# Create some content on master -$node_master->safe_psql('postgres', +# Create some content on primary +$node_primary->safe_psql('postgres', "CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a"); my $current_lsn = - $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();"); + $node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn();"); -# Force archiving of WAL file to make it present on master -$node_master->safe_psql('postgres', "SELECT pg_switch_wal()"); +# Force archiving of WAL file to make it present on primary +$node_primary->safe_psql('postgres', "SELECT pg_switch_wal()"); # Add some more content, it should not be present on standby -$node_master->safe_psql('postgres', +$node_primary->safe_psql('postgres', "INSERT INTO tab_int VALUES (generate_series(1001,2000))"); # Wait until necessary replay has been done on standby @@ -60,7 +60,7 @@ is($result, qq(1000), 'check content from archives'); $node_standby->promote; my $node_standby2 = get_new_node('standby2'); -$node_standby2->init_from_backup($node_master, $backup_name, +$node_standby2->init_from_backup($node_primary, $backup_name, has_restoring => 1); $node_standby2->start; diff --git a/src/test/recovery/t/003_recovery_targets.pl b/src/test/recovery/t/003_recovery_targets.pl index 8d114eb7ad5..cc701c5539e 100644 --- a/src/test/recovery/t/003_recovery_targets.pl +++ b/src/test/recovery/t/003_recovery_targets.pl @@ -13,13 +13,13 @@ sub test_recovery_standby { my $test_name = shift; my $node_name = shift; - my $node_master = shift; + my $node_primary = shift; my $recovery_params = shift; my $num_rows = shift; my $until_lsn = shift; my $node_standby = get_new_node($node_name); - $node_standby->init_from_backup($node_master, 'my_backup', + $node_standby->init_from_backup($node_primary, 'my_backup', has_restoring => 1); foreach my $param_item (@$recovery_params) @@ -35,7 +35,7 @@ sub test_recovery_standby $node_standby->poll_query_until('postgres', $caughtup_query) or die "Timed out while waiting for standby to catch up"; - # Create some content on master and check its presence in standby + # Create some content on primary and check its presence in standby my $result = 
$node_standby->safe_psql('postgres', "SELECT count(*) FROM tab_int"); is($result, qq($num_rows), "check standby content for $test_name"); @@ -46,74 +46,74 @@ sub test_recovery_standby return; } -# Initialize master node -my $node_master = get_new_node('master'); -$node_master->init(has_archiving => 1, allows_streaming => 1); +# Initialize primary node +my $node_primary = get_new_node('primary'); +$node_primary->init(has_archiving => 1, allows_streaming => 1); # Start it -$node_master->start; +$node_primary->start; # Create data before taking the backup, aimed at testing # recovery_target = 'immediate' -$node_master->safe_psql('postgres', +$node_primary->safe_psql('postgres', "CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a"); my $lsn1 = - $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();"); + $node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn();"); # Take backup from which all operations will be run -$node_master->backup('my_backup'); +$node_primary->backup('my_backup'); # Insert some data with used as a replay reference, with a recovery # target TXID. -$node_master->safe_psql('postgres', +$node_primary->safe_psql('postgres', "INSERT INTO tab_int VALUES (generate_series(1001,2000))"); -my $ret = $node_master->safe_psql('postgres', +my $ret = $node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn(), pg_current_xact_id();"); my ($lsn2, $recovery_txid) = split /\|/, $ret; # More data, with recovery target timestamp -$node_master->safe_psql('postgres', +$node_primary->safe_psql('postgres', "INSERT INTO tab_int VALUES (generate_series(2001,3000))"); my $lsn3 = - $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();"); -my $recovery_time = $node_master->safe_psql('postgres', "SELECT now()"); + $node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn();"); +my $recovery_time = $node_primary->safe_psql('postgres', "SELECT now()"); # Even more data, this time with a recovery target name -$node_master->safe_psql('postgres', +$node_primary->safe_psql('postgres', "INSERT INTO tab_int VALUES (generate_series(3001,4000))"); my $recovery_name = "my_target"; my $lsn4 = - $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();"); -$node_master->safe_psql('postgres', + $node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn();"); +$node_primary->safe_psql('postgres', "SELECT pg_create_restore_point('$recovery_name');"); # And now for a recovery target LSN -$node_master->safe_psql('postgres', +$node_primary->safe_psql('postgres', "INSERT INTO tab_int VALUES (generate_series(4001,5000))"); my $lsn5 = my $recovery_lsn = - $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()"); + $node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn()"); -$node_master->safe_psql('postgres', +$node_primary->safe_psql('postgres', "INSERT INTO tab_int VALUES (generate_series(5001,6000))"); # Force archiving of WAL file -$node_master->safe_psql('postgres', "SELECT pg_switch_wal()"); +$node_primary->safe_psql('postgres', "SELECT pg_switch_wal()"); # Test recovery targets my @recovery_params = ("recovery_target = 'immediate'"); test_recovery_standby('immediate target', - 'standby_1', $node_master, \@recovery_params, "1000", $lsn1); + 'standby_1', $node_primary, \@recovery_params, "1000", $lsn1); @recovery_params = ("recovery_target_xid = '$recovery_txid'"); -test_recovery_standby('XID', 'standby_2', $node_master, \@recovery_params, +test_recovery_standby('XID', 'standby_2', $node_primary, \@recovery_params, "2000", $lsn2); 
@recovery_params = ("recovery_target_time = '$recovery_time'"); -test_recovery_standby('time', 'standby_3', $node_master, \@recovery_params, +test_recovery_standby('time', 'standby_3', $node_primary, \@recovery_params, "3000", $lsn3); @recovery_params = ("recovery_target_name = '$recovery_name'"); -test_recovery_standby('name', 'standby_4', $node_master, \@recovery_params, +test_recovery_standby('name', 'standby_4', $node_primary, \@recovery_params, "4000", $lsn4); @recovery_params = ("recovery_target_lsn = '$recovery_lsn'"); -test_recovery_standby('LSN', 'standby_5', $node_master, \@recovery_params, +test_recovery_standby('LSN', 'standby_5', $node_primary, \@recovery_params, "5000", $lsn5); # Multiple targets @@ -127,10 +127,10 @@ test_recovery_standby('LSN', 'standby_5', $node_master, \@recovery_params, "recovery_target_name = ''", "recovery_target_time = '$recovery_time'"); test_recovery_standby('multiple overriding settings', - 'standby_6', $node_master, \@recovery_params, "3000", $lsn3); + 'standby_6', $node_primary, \@recovery_params, "3000", $lsn3); my $node_standby = get_new_node('standby_7'); -$node_standby->init_from_backup($node_master, 'my_backup', +$node_standby->init_from_backup($node_primary, 'my_backup', has_restoring => 1); $node_standby->append_conf( 'postgresql.conf', "recovery_target_name = '$recovery_name' @@ -151,7 +151,7 @@ ok($logfile =~ qr/multiple recovery targets specified/, $node_standby = get_new_node('standby_8'); $node_standby->init_from_backup( - $node_master, 'my_backup', + $node_primary, 'my_backup', has_restoring => 1, standby => 0); $node_standby->append_conf('postgresql.conf', diff --git a/src/test/recovery/t/004_timeline_switch.pl b/src/test/recovery/t/004_timeline_switch.pl index 7e952d36676..1ecdb0eba0d 100644 --- a/src/test/recovery/t/004_timeline_switch.pl +++ b/src/test/recovery/t/004_timeline_switch.pl @@ -10,35 +10,35 @@ use Test::More tests => 2; $ENV{PGDATABASE} = 'postgres'; -# Initialize master node -my $node_master = get_new_node('master'); -$node_master->init(allows_streaming => 1); -$node_master->start; +# Initialize primary node +my $node_primary = get_new_node('primary'); +$node_primary->init(allows_streaming => 1); +$node_primary->start; # Take backup my $backup_name = 'my_backup'; -$node_master->backup($backup_name); +$node_primary->backup($backup_name); # Create two standbys linking to it my $node_standby_1 = get_new_node('standby_1'); -$node_standby_1->init_from_backup($node_master, $backup_name, +$node_standby_1->init_from_backup($node_primary, $backup_name, has_streaming => 1); $node_standby_1->start; my $node_standby_2 = get_new_node('standby_2'); -$node_standby_2->init_from_backup($node_master, $backup_name, +$node_standby_2->init_from_backup($node_primary, $backup_name, has_streaming => 1); $node_standby_2->start; -# Create some content on master -$node_master->safe_psql('postgres', +# Create some content on primary +$node_primary->safe_psql('postgres', "CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a"); # Wait until standby has replayed enough data on standby 1 -$node_master->wait_for_catchup($node_standby_1, 'replay', - $node_master->lsn('write')); +$node_primary->wait_for_catchup($node_standby_1, 'replay', + $node_primary->lsn('write')); -# Stop and remove master -$node_master->teardown_node; +# Stop and remove primary +$node_primary->teardown_node; # promote standby 1 using "pg_promote", switching it to a new timeline my $psql_out = ''; diff --git a/src/test/recovery/t/005_replay_delay.pl 
b/src/test/recovery/t/005_replay_delay.pl index 6c85c928c10..459772f6c44 100644 --- a/src/test/recovery/t/005_replay_delay.pl +++ b/src/test/recovery/t/005_replay_delay.pl @@ -6,23 +6,23 @@ use PostgresNode; use TestLib; use Test::More tests => 1; -# Initialize master node -my $node_master = get_new_node('master'); -$node_master->init(allows_streaming => 1); -$node_master->start; +# Initialize primary node +my $node_primary = get_new_node('primary'); +$node_primary->init(allows_streaming => 1); +$node_primary->start; # And some content -$node_master->safe_psql('postgres', +$node_primary->safe_psql('postgres', "CREATE TABLE tab_int AS SELECT generate_series(1, 10) AS a"); # Take backup my $backup_name = 'my_backup'; -$node_master->backup($backup_name); +$node_primary->backup($backup_name); # Create streaming standby from backup my $node_standby = get_new_node('standby'); my $delay = 3; -$node_standby->init_from_backup($node_master, $backup_name, +$node_standby->init_from_backup($node_primary, $backup_name, has_streaming => 1); $node_standby->append_conf( 'postgresql.conf', qq( @@ -30,19 +30,19 @@ recovery_min_apply_delay = '${delay}s' )); $node_standby->start; -# Make new content on master and check its presence in standby depending +# Make new content on primary and check its presence in standby depending # on the delay applied above. Before doing the insertion, get the # current timestamp that will be used as a comparison base. Even on slow # machines, this allows to have a predictable behavior when comparing the -# delay between data insertion moment on master and replay time on standby. -my $master_insert_time = time(); -$node_master->safe_psql('postgres', +# delay between data insertion moment on primary and replay time on standby. +my $primary_insert_time = time(); +$node_primary->safe_psql('postgres', "INSERT INTO tab_int VALUES (generate_series(11, 20))"); # Now wait for replay to complete on standby. We're done waiting when the -# standby has replayed up to the previously saved master LSN. +# standby has replayed up to the previously saved primary LSN. my $until_lsn = - $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()"); + $node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn()"); $node_standby->poll_query_until('postgres', "SELECT (pg_last_wal_replay_lsn() - '$until_lsn'::pg_lsn) >= 0") @@ -50,5 +50,5 @@ $node_standby->poll_query_until('postgres', # This test is successful if and only if the LSN has been applied with at least # the configured apply delay. 
-ok(time() - $master_insert_time >= $delay, +ok(time() - $primary_insert_time >= $delay, "standby applies WAL only after replication delay"); diff --git a/src/test/recovery/t/006_logical_decoding.pl b/src/test/recovery/t/006_logical_decoding.pl index 78229a7b92b..8cdfae1e1e2 100644 --- a/src/test/recovery/t/006_logical_decoding.pl +++ b/src/test/recovery/t/006_logical_decoding.pl @@ -10,25 +10,25 @@ use TestLib; use Test::More tests => 14; use Config; -# Initialize master node -my $node_master = get_new_node('master'); -$node_master->init(allows_streaming => 1); -$node_master->append_conf( +# Initialize primary node +my $node_primary = get_new_node('primary'); +$node_primary->init(allows_streaming => 1); +$node_primary->append_conf( 'postgresql.conf', qq( wal_level = logical )); -$node_master->start; -my $backup_name = 'master_backup'; +$node_primary->start; +my $backup_name = 'primary_backup'; -$node_master->safe_psql('postgres', +$node_primary->safe_psql('postgres', qq[CREATE TABLE decoding_test(x integer, y text);]); -$node_master->safe_psql('postgres', +$node_primary->safe_psql('postgres', qq[SELECT pg_create_logical_replication_slot('test_slot', 'test_decoding');] ); # Cover walsender error shutdown code -my ($result, $stdout, $stderr) = $node_master->psql( +my ($result, $stdout, $stderr) = $node_primary->psql( 'template1', qq[START_REPLICATION SLOT test_slot LOGICAL 0/0], replication => 'database'); @@ -38,19 +38,19 @@ ok( $stderr =~ # Check case of walsender not using a database connection. Logical # decoding should not be allowed. -($result, $stdout, $stderr) = $node_master->psql( +($result, $stdout, $stderr) = $node_primary->psql( 'template1', qq[START_REPLICATION SLOT s1 LOGICAL 0/1], replication => 'true'); ok($stderr =~ /ERROR: logical decoding requires a database connection/, "Logical decoding fails on non-database connection"); -$node_master->safe_psql('postgres', +$node_primary->safe_psql('postgres', qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,10) s;] ); # Basic decoding works -$result = $node_master->safe_psql('postgres', +$result = $node_primary->safe_psql('postgres', qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]); is(scalar(my @foobar = split /^/m, $result), 12, 'Decoding produced 12 rows inc BEGIN/COMMIT'); @@ -58,17 +58,17 @@ is(scalar(my @foobar = split /^/m, $result), # If we immediately crash the server we might lose the progress we just made # and replay the same changes again. But a clean shutdown should never repeat # the same changes when we use the SQL decoding interface. -$node_master->restart('fast'); +$node_primary->restart('fast'); # There are no new writes, so the result should be empty. -$result = $node_master->safe_psql('postgres', +$result = $node_primary->safe_psql('postgres', qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]); chomp($result); is($result, '', 'Decoding after fast restart repeats no rows'); # Insert some rows and verify that we get the same results from pg_recvlogical # and the SQL interface. 
-$node_master->safe_psql('postgres', +$node_primary->safe_psql('postgres', qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,4) s;] ); @@ -79,22 +79,22 @@ table public.decoding_test: INSERT: x[integer]:3 y[text]:'3' table public.decoding_test: INSERT: x[integer]:4 y[text]:'4' COMMIT}; -my $stdout_sql = $node_master->safe_psql('postgres', +my $stdout_sql = $node_primary->safe_psql('postgres', qq[SELECT data FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');] ); is($stdout_sql, $expected, 'got expected output from SQL decoding session'); -my $endpos = $node_master->safe_psql('postgres', +my $endpos = $node_primary->safe_psql('postgres', "SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;" ); print "waiting to replay $endpos\n"; # Insert some rows after $endpos, which we won't read. -$node_master->safe_psql('postgres', +$node_primary->safe_psql('postgres', qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(5,50) s;] ); -my $stdout_recv = $node_master->pg_recvlogical_upto( +my $stdout_recv = $node_primary->pg_recvlogical_upto( 'postgres', 'test_slot', $endpos, 180, 'include-xids' => '0', 'skip-empty-xacts' => '1'); @@ -102,27 +102,27 @@ chomp($stdout_recv); is($stdout_recv, $expected, 'got same expected output from pg_recvlogical decoding session'); -$node_master->poll_query_until('postgres', +$node_primary->poll_query_until('postgres', "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'test_slot' AND active_pid IS NULL)" ) or die "slot never became inactive"; -$stdout_recv = $node_master->pg_recvlogical_upto( +$stdout_recv = $node_primary->pg_recvlogical_upto( 'postgres', 'test_slot', $endpos, 180, 'include-xids' => '0', 'skip-empty-xacts' => '1'); chomp($stdout_recv); is($stdout_recv, '', 'pg_recvlogical acknowledged changes'); -$node_master->safe_psql('postgres', 'CREATE DATABASE otherdb'); +$node_primary->safe_psql('postgres', 'CREATE DATABASE otherdb'); -is( $node_master->psql( +is( $node_primary->psql( 'otherdb', "SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;" ), 3, 'replaying logical slot from another database fails'); -$node_master->safe_psql('otherdb', +$node_primary->safe_psql('otherdb', qq[SELECT pg_create_logical_replication_slot('otherdb_slot', 'test_decoding');] ); @@ -135,51 +135,51 @@ SKIP: my $pg_recvlogical = IPC::Run::start( [ - 'pg_recvlogical', '-d', $node_master->connstr('otherdb'), + 'pg_recvlogical', '-d', $node_primary->connstr('otherdb'), '-S', 'otherdb_slot', '-f', '-', '--start' ]); - $node_master->poll_query_until('otherdb', + $node_primary->poll_query_until('otherdb', "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NOT NULL)" ) or die "slot never became active"; - is($node_master->psql('postgres', 'DROP DATABASE otherdb'), + is($node_primary->psql('postgres', 'DROP DATABASE otherdb'), 3, 'dropping a DB with active logical slots fails'); $pg_recvlogical->kill_kill; - is($node_master->slot('otherdb_slot')->{'slot_name'}, + is($node_primary->slot('otherdb_slot')->{'slot_name'}, undef, 'logical slot still exists'); } -$node_master->poll_query_until('otherdb', +$node_primary->poll_query_until('otherdb', "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NULL)" ) or die "slot never became inactive"; -is($node_master->psql('postgres', 'DROP DATABASE 
otherdb'), +is($node_primary->psql('postgres', 'DROP DATABASE otherdb'), 0, 'dropping a DB with inactive logical slots succeeds'); -is($node_master->slot('otherdb_slot')->{'slot_name'}, +is($node_primary->slot('otherdb_slot')->{'slot_name'}, undef, 'logical slot was actually dropped with DB'); # Test logical slot advancing and its durability. my $logical_slot = 'logical_slot'; -$node_master->safe_psql('postgres', +$node_primary->safe_psql('postgres', "SELECT pg_create_logical_replication_slot('$logical_slot', 'test_decoding', false);" ); -$node_master->psql( +$node_primary->psql( 'postgres', " CREATE TABLE tab_logical_slot (a int); INSERT INTO tab_logical_slot VALUES (generate_series(1,10));"); my $current_lsn = - $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();"); + $node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn();"); chomp($current_lsn); -my $psql_rc = $node_master->psql('postgres', +my $psql_rc = $node_primary->psql('postgres', "SELECT pg_replication_slot_advance('$logical_slot', '$current_lsn'::pg_lsn);" ); is($psql_rc, '0', 'slot advancing with logical slot'); -my $logical_restart_lsn_pre = $node_master->safe_psql('postgres', +my $logical_restart_lsn_pre = $node_primary->safe_psql('postgres', "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$logical_slot';" ); chomp($logical_restart_lsn_pre); # Slot advance should persist across clean restarts. -$node_master->restart; -my $logical_restart_lsn_post = $node_master->safe_psql('postgres', +$node_primary->restart; +my $logical_restart_lsn_post = $node_primary->safe_psql('postgres', "SELECT restart_lsn from pg_replication_slots WHERE slot_name = '$logical_slot';" ); chomp($logical_restart_lsn_post); @@ -187,4 +187,4 @@ ok(($logical_restart_lsn_pre cmp $logical_restart_lsn_post) == 0, "logical slot advance persists across restarts"); # done with the node -$node_master->stop; +$node_primary->stop; diff --git a/src/test/recovery/t/007_sync_rep.pl b/src/test/recovery/t/007_sync_rep.pl index 05803bed4e3..e3c6738d3ab 100644 --- a/src/test/recovery/t/007_sync_rep.pl +++ b/src/test/recovery/t/007_sync_rep.pl @@ -32,53 +32,53 @@ sub test_sync_state # until the standby is confirmed as registered. sub start_standby_and_wait { - my ($master, $standby) = @_; - my $master_name = $master->name; + my ($primary, $standby) = @_; + my $primary_name = $primary->name; my $standby_name = $standby->name; my $query = "SELECT count(1) = 1 FROM pg_stat_replication WHERE application_name = '$standby_name'"; $standby->start; - print("### Waiting for standby \"$standby_name\" on \"$master_name\"\n"); - $master->poll_query_until('postgres', $query); + print("### Waiting for standby \"$standby_name\" on \"$primary_name\"\n"); + $primary->poll_query_until('postgres', $query); return; } -# Initialize master node -my $node_master = get_new_node('master'); -$node_master->init(allows_streaming => 1); -$node_master->start; -my $backup_name = 'master_backup'; +# Initialize primary node +my $node_primary = get_new_node('primary'); +$node_primary->init(allows_streaming => 1); +$node_primary->start; +my $backup_name = 'primary_backup'; # Take backup -$node_master->backup($backup_name); +$node_primary->backup($backup_name); # Create all the standbys. Their status on the primary is checked to ensure # the ordering of each one of them in the WAL sender array of the primary. 
-# Create standby1 linking to master +# Create standby1 linking to primary my $node_standby_1 = get_new_node('standby1'); -$node_standby_1->init_from_backup($node_master, $backup_name, +$node_standby_1->init_from_backup($node_primary, $backup_name, has_streaming => 1); -start_standby_and_wait($node_master, $node_standby_1); +start_standby_and_wait($node_primary, $node_standby_1); -# Create standby2 linking to master +# Create standby2 linking to primary my $node_standby_2 = get_new_node('standby2'); -$node_standby_2->init_from_backup($node_master, $backup_name, +$node_standby_2->init_from_backup($node_primary, $backup_name, has_streaming => 1); -start_standby_and_wait($node_master, $node_standby_2); +start_standby_and_wait($node_primary, $node_standby_2); -# Create standby3 linking to master +# Create standby3 linking to primary my $node_standby_3 = get_new_node('standby3'); -$node_standby_3->init_from_backup($node_master, $backup_name, +$node_standby_3->init_from_backup($node_primary, $backup_name, has_streaming => 1); -start_standby_and_wait($node_master, $node_standby_3); +start_standby_and_wait($node_primary, $node_standby_3); # Check that sync_state is determined correctly when # synchronous_standby_names is specified in old syntax. test_sync_state( - $node_master, qq(standby1|1|sync + $node_primary, qq(standby1|1|sync standby2|2|potential standby3|0|async), 'old syntax of synchronous_standby_names', @@ -90,7 +90,7 @@ standby3|0|async), # it's stored in the head of WalSnd array which manages # all the standbys though they have the same priority. test_sync_state( - $node_master, qq(standby1|1|sync + $node_primary, qq(standby1|1|sync standby2|1|potential standby3|1|potential), 'asterisk in synchronous_standby_names', @@ -105,23 +105,23 @@ $node_standby_3->stop; # Make sure that each standby reports back to the primary in the wanted # order. -start_standby_and_wait($node_master, $node_standby_2); -start_standby_and_wait($node_master, $node_standby_3); +start_standby_and_wait($node_primary, $node_standby_2); +start_standby_and_wait($node_primary, $node_standby_3); # Specify 2 as the number of sync standbys. # Check that two standbys are in 'sync' state. test_sync_state( - $node_master, qq(standby2|2|sync + $node_primary, qq(standby2|2|sync standby3|3|sync), '2 synchronous standbys', '2(standby1,standby2,standby3)'); # Start standby1 -start_standby_and_wait($node_master, $node_standby_1); +start_standby_and_wait($node_primary, $node_standby_1); -# Create standby4 linking to master +# Create standby4 linking to primary my $node_standby_4 = get_new_node('standby4'); -$node_standby_4->init_from_backup($node_master, $backup_name, +$node_standby_4->init_from_backup($node_primary, $backup_name, has_streaming => 1); $node_standby_4->start; @@ -130,7 +130,7 @@ $node_standby_4->start; # standby3 appearing later represents potential, and standby4 is # in 'async' state because it's not in the list. test_sync_state( - $node_master, qq(standby1|1|sync + $node_primary, qq(standby1|1|sync standby2|2|sync standby3|3|potential standby4|0|async), @@ -140,7 +140,7 @@ standby4|0|async), # when num_sync exceeds the number of names of potential sync standbys # specified in synchronous_standby_names. test_sync_state( - $node_master, qq(standby1|0|async + $node_primary, qq(standby1|0|async standby2|4|sync standby3|3|sync standby4|1|sync), @@ -154,7 +154,7 @@ standby4|1|sync), # second standby listed first in the WAL sender array, which is # standby2 in this case. 
test_sync_state( - $node_master, qq(standby1|1|sync + $node_primary, qq(standby1|1|sync standby2|2|sync standby3|2|potential standby4|2|potential), @@ -164,7 +164,7 @@ standby4|2|potential), # Check that the setting of '2(*)' chooses standby2 and standby3 that are stored # earlier in WalSnd array as sync standbys. test_sync_state( - $node_master, qq(standby1|1|potential + $node_primary, qq(standby1|1|potential standby2|1|sync standby3|1|sync standby4|1|potential), @@ -177,7 +177,7 @@ $node_standby_3->stop; # Check that the state of standby1 stored earlier in WalSnd array than # standby4 is transited from potential to sync. test_sync_state( - $node_master, qq(standby1|1|sync + $node_primary, qq(standby1|1|sync standby2|1|sync standby4|1|potential), 'potential standby found earlier in array is promoted to sync'); @@ -185,7 +185,7 @@ standby4|1|potential), # Check that standby1 and standby2 are chosen as sync standbys # based on their priorities. test_sync_state( - $node_master, qq(standby1|1|sync + $node_primary, qq(standby1|1|sync standby2|2|sync standby4|0|async), 'priority-based sync replication specified by FIRST keyword', @@ -194,7 +194,7 @@ standby4|0|async), # Check that all the listed standbys are considered as candidates # for sync standbys in a quorum-based sync replication. test_sync_state( - $node_master, qq(standby1|1|quorum + $node_primary, qq(standby1|1|quorum standby2|1|quorum standby4|0|async), '2 quorum and 1 async', @@ -206,7 +206,7 @@ $node_standby_3->start; # Check that the setting of 'ANY 2(*)' chooses all standbys as # candidates for quorum sync standbys. test_sync_state( - $node_master, qq(standby1|1|quorum + $node_primary, qq(standby1|1|quorum standby2|1|quorum standby3|1|quorum standby4|1|quorum), diff --git a/src/test/recovery/t/008_fsm_truncation.pl b/src/test/recovery/t/008_fsm_truncation.pl index ddab464a973..37967c11744 100644 --- a/src/test/recovery/t/008_fsm_truncation.pl +++ b/src/test/recovery/t/008_fsm_truncation.pl @@ -9,10 +9,10 @@ use PostgresNode; use TestLib; use Test::More tests => 1; -my $node_master = get_new_node('master'); -$node_master->init(allows_streaming => 1); +my $node_primary = get_new_node('primary'); +$node_primary->init(allows_streaming => 1); -$node_master->append_conf( +$node_primary->append_conf( 'postgresql.conf', qq{ fsync = on wal_log_hints = on @@ -20,17 +20,17 @@ max_prepared_transactions = 5 autovacuum = off }); -# Create a master node and its standby, initializing both with some data +# Create a primary node and its standby, initializing both with some data # at the same time. 
-$node_master->start;
+$node_primary->start;
-$node_master->backup('master_backup');
+$node_primary->backup('primary_backup');
my $node_standby = get_new_node('standby');
-$node_standby->init_from_backup($node_master, 'master_backup',
+$node_standby->init_from_backup($node_primary, 'primary_backup',
has_streaming => 1);
$node_standby->start;
-$node_master->psql(
+$node_primary->psql(
'postgres', qq{
create table testtab (a int, b char(100));
insert into testtab select generate_series(1,1000), 'foo';
@@ -39,7 +39,7 @@ delete from testtab where ctid > '(8,0)';
});
# Take a lock on the table to prevent following vacuum from truncating it
-$node_master->psql(
+$node_primary->psql(
'postgres', qq{
begin;
lock table testtab in row share mode;
@@ -47,14 +47,14 @@ prepare transaction 'p1';
});
# Vacuum, update FSM without truncation
-$node_master->psql('postgres', 'vacuum verbose testtab');
+$node_primary->psql('postgres', 'vacuum verbose testtab');
# Force a checkpoint
-$node_master->psql('postgres', 'checkpoint');
+$node_primary->psql('postgres', 'checkpoint');
# Now do some more insert/deletes, another vacuum to ensure full-page writes
# are done
-$node_master->psql(
+$node_primary->psql(
'postgres', qq{
insert into testtab select generate_series(1,1000), 'foo';
delete from testtab where ctid > '(8,0)';
@@ -65,15 +65,15 @@ vacuum verbose testtab;
$node_standby->psql('postgres', 'checkpoint');
# Release the lock, vacuum again which should lead to truncation
-$node_master->psql(
+$node_primary->psql(
'postgres', qq{
rollback prepared 'p1';
vacuum verbose testtab;
});
-$node_master->psql('postgres', 'checkpoint');
+$node_primary->psql('postgres', 'checkpoint');
my $until_lsn =
- $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
+ $node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
# Wait long enough for standby to receive and apply all WAL
my $caughtup_query =
diff --git a/src/test/recovery/t/009_twophase.pl b/src/test/recovery/t/009_twophase.pl
index 1b748ad857b..9da3464bc1d 100644
--- a/src/test/recovery/t/009_twophase.pl
+++ b/src/test/recovery/t/009_twophase.pl
@@ -23,7 +23,7 @@ sub configure_and_reload
return;
}
-# Set up two nodes, which will alternately be master and replication standby.
+# Set up two nodes, which will alternately be primary and replication standby.
# Setup london node
my $node_london = get_new_node("london");
@@ -46,13 +46,13 @@ $node_paris->start;
configure_and_reload($node_london, "synchronous_standby_names = 'paris'");
configure_and_reload($node_paris, "synchronous_standby_names = 'london'");
-# Set up nonce names for current master and standby nodes
-note "Initially, london is master and paris is standby";
-my ($cur_master, $cur_standby) = ($node_london, $node_paris);
-my $cur_master_name = $cur_master->name;
+# Set up nonce names for current primary and standby nodes
+note "Initially, london is primary and paris is standby";
+my ($cur_primary, $cur_standby) = ($node_london, $node_paris);
+my $cur_primary_name = $cur_primary->name;
# Create table we'll use in the test transactions
-$cur_master->psql('postgres', "CREATE TABLE t_009_tbl (id int, msg text)");
+$cur_primary->psql('postgres', "CREATE TABLE t_009_tbl (id int, msg text)");
###############################################################################
# Check that we can commit and abort transaction after soft restart.
@@ -61,25 +61,25 @@ $cur_master->psql('postgres', "CREATE TABLE t_009_tbl (id int, msg text)");
# files.
###############################################################################
-$cur_master->psql(
+$cur_primary->psql(
'postgres', "
BEGIN;
- INSERT INTO t_009_tbl VALUES (1, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (1, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
- INSERT INTO t_009_tbl VALUES (2, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (2, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_1';
BEGIN;
- INSERT INTO t_009_tbl VALUES (3, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (3, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
- INSERT INTO t_009_tbl VALUES (4, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (4, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_2';");
-$cur_master->stop;
-$cur_master->start;
+$cur_primary->stop;
+$cur_primary->start;
-$psql_rc = $cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
+$psql_rc = $cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
is($psql_rc, '0', 'Commit prepared transaction after restart');
-$psql_rc = $cur_master->psql('postgres', "ROLLBACK PREPARED 'xact_009_2'");
+$psql_rc = $cur_primary->psql('postgres', "ROLLBACK PREPARED 'xact_009_2'");
is($psql_rc, '0', 'Rollback prepared transaction after restart');
###############################################################################
@@ -88,50 +88,50 @@ is($psql_rc, '0', 'Rollback prepared transaction after restart');
# transaction using dedicated WAL records.
###############################################################################
-$cur_master->psql(
+$cur_primary->psql(
'postgres', "
CHECKPOINT;
BEGIN;
- INSERT INTO t_009_tbl VALUES (5, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (5, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
- INSERT INTO t_009_tbl VALUES (6, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (6, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_3';
BEGIN;
- INSERT INTO t_009_tbl VALUES (7, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (7, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
- INSERT INTO t_009_tbl VALUES (8, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (8, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_4';");
-$cur_master->teardown_node;
-$cur_master->start;
+$cur_primary->teardown_node;
+$cur_primary->start;
-$psql_rc = $cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_3'");
+$psql_rc = $cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_3'");
is($psql_rc, '0', 'Commit prepared transaction after teardown');
-$psql_rc = $cur_master->psql('postgres', "ROLLBACK PREPARED 'xact_009_4'");
+$psql_rc = $cur_primary->psql('postgres', "ROLLBACK PREPARED 'xact_009_4'");
is($psql_rc, '0', 'Rollback prepared transaction after teardown');
###############################################################################
# Check that WAL replay can handle several transactions with same GID name.
###############################################################################
-$cur_master->psql(
+$cur_primary->psql(
'postgres', "
CHECKPOINT;
BEGIN;
- INSERT INTO t_009_tbl VALUES (9, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (9, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
- INSERT INTO t_009_tbl VALUES (10, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (10, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_5';
COMMIT PREPARED 'xact_009_5';
BEGIN;
- INSERT INTO t_009_tbl VALUES (11, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (11, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
- INSERT INTO t_009_tbl VALUES (12, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (12, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_5';");
-$cur_master->teardown_node;
-$cur_master->start;
+$cur_primary->teardown_node;
+$cur_primary->start;
-$psql_rc = $cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_5'");
+$psql_rc = $cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_5'");
is($psql_rc, '0', 'Replay several transactions with same GID');
###############################################################################
@@ -139,39 +139,39 @@ is($psql_rc, '0', 'Replay several transactions with same GID');
# while replaying transaction commits.
###############################################################################
-$cur_master->psql(
+$cur_primary->psql(
'postgres', "
BEGIN;
- INSERT INTO t_009_tbl VALUES (13, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (13, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
- INSERT INTO t_009_tbl VALUES (14, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (14, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_6';
COMMIT PREPARED 'xact_009_6';");
-$cur_master->teardown_node;
-$cur_master->start;
-$psql_rc = $cur_master->psql(
+$cur_primary->teardown_node;
+$cur_primary->start;
+$psql_rc = $cur_primary->psql(
'postgres', "
BEGIN;
- INSERT INTO t_009_tbl VALUES (15, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (15, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
- INSERT INTO t_009_tbl VALUES (16, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (16, 'issued to ${cur_primary_name}');
-- This prepare can fail due to conflicting GID or locks conflicts if
-- replay did not fully cleanup its state on previous commit.
PREPARE TRANSACTION 'xact_009_7';");
is($psql_rc, '0', "Cleanup of shared memory state for 2PC commit");
-$cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_7'");
+$cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_7'");
###############################################################################
# Check that WAL replay will cleanup its shared memory state on running standby.
###############################################################################
-$cur_master->psql(
+$cur_primary->psql(
'postgres', "
BEGIN;
- INSERT INTO t_009_tbl VALUES (17, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (17, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
- INSERT INTO t_009_tbl VALUES (18, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (18, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_8';
COMMIT PREPARED 'xact_009_8';");
$cur_standby->psql(
@@ -186,15 +186,15 @@ is($psql_out, '0',
# prepare and commit to use on-disk twophase files.
###############################################################################
-$cur_master->psql(
+$cur_primary->psql(
'postgres', "
BEGIN;
- INSERT INTO t_009_tbl VALUES (19, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (19, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
- INSERT INTO t_009_tbl VALUES (20, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (20, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_9';");
$cur_standby->psql('postgres', "CHECKPOINT");
-$cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_9'");
+$cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_9'");
$cur_standby->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
@@ -206,114 +206,114 @@ is($psql_out, '0',
# Check that prepared transactions can be committed on promoted standby.
###############################################################################
-$cur_master->psql(
+$cur_primary->psql(
'postgres', "
BEGIN;
- INSERT INTO t_009_tbl VALUES (21, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (21, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
- INSERT INTO t_009_tbl VALUES (22, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (22, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_10';");
-$cur_master->teardown_node;
+$cur_primary->teardown_node;
$cur_standby->promote;
# change roles
-note "Now paris is master and london is standby";
-($cur_master, $cur_standby) = ($node_paris, $node_london);
-$cur_master_name = $cur_master->name;
+note "Now paris is primary and london is standby";
+($cur_primary, $cur_standby) = ($node_paris, $node_london);
+$cur_primary_name = $cur_primary->name;
# because london is not running at this point, we can't use syncrep commit
# on this command
-$psql_rc = $cur_master->psql('postgres',
+$psql_rc = $cur_primary->psql('postgres',
"SET synchronous_commit = off; COMMIT PREPARED 'xact_009_10'");
is($psql_rc, '0', "Restore of prepared transaction on promoted standby");
-# restart old master as new standby
-$cur_standby->enable_streaming($cur_master);
+# restart old primary as new standby
+$cur_standby->enable_streaming($cur_primary);
$cur_standby->start;
###############################################################################
# Check that prepared transactions are replayed after soft restart of standby
-# while master is down. Since standby knows that master is down it uses a
+# while primary is down. Since standby knows that primary is down it uses a
# different code path on startup to ensure that the status of transactions is
# consistent.
###############################################################################
-$cur_master->psql(
+$cur_primary->psql(
'postgres', "
BEGIN;
- INSERT INTO t_009_tbl VALUES (23, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (23, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
- INSERT INTO t_009_tbl VALUES (24, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (24, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_11';");
-$cur_master->stop;
+$cur_primary->stop;
$cur_standby->restart;
$cur_standby->promote;
# change roles
-note "Now london is master and paris is standby";
-($cur_master, $cur_standby) = ($node_london, $node_paris);
-$cur_master_name = $cur_master->name;
+note "Now london is primary and paris is standby";
+($cur_primary, $cur_standby) = ($node_london, $node_paris);
+$cur_primary_name = $cur_primary->name;
-$cur_master->psql(
+$cur_primary->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out);
is($psql_out, '1',
- "Restore prepared transactions from files with master down");
+ "Restore prepared transactions from files with primary down");
-# restart old master as new standby
-$cur_standby->enable_streaming($cur_master);
+# restart old primary as new standby
+$cur_standby->enable_streaming($cur_primary);
$cur_standby->start;
-$cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_11'");
+$cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_11'");
###############################################################################
# Check that prepared transactions are correctly replayed after standby hard
-# restart while master is down.
+# restart while primary is down.
###############################################################################
-$cur_master->psql(
+$cur_primary->psql(
'postgres', "
BEGIN;
- INSERT INTO t_009_tbl VALUES (25, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (25, 'issued to ${cur_primary_name}');
SAVEPOINT s1;
- INSERT INTO t_009_tbl VALUES (26, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl VALUES (26, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_12';
");
-$cur_master->stop;
+$cur_primary->stop;
$cur_standby->teardown_node;
$cur_standby->start;
$cur_standby->promote;
# change roles
-note "Now paris is master and london is standby";
-($cur_master, $cur_standby) = ($node_paris, $node_london);
-$cur_master_name = $cur_master->name;
+note "Now paris is primary and london is standby";
+($cur_primary, $cur_standby) = ($node_paris, $node_london);
+$cur_primary_name = $cur_primary->name;
-$cur_master->psql(
+$cur_primary->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out);
is($psql_out, '1',
- "Restore prepared transactions from records with master down");
+ "Restore prepared transactions from records with primary down");
-# restart old master as new standby
-$cur_standby->enable_streaming($cur_master);
+# restart old primary as new standby
+$cur_standby->enable_streaming($cur_primary);
$cur_standby->start;
-$cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_12'");
+$cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_12'");
###############################################################################
# Check for a lock conflict between prepared transaction with DDL inside and
# replay of XLOG_STANDBY_LOCK wal record.
###############################################################################
-$cur_master->psql(
+$cur_primary->psql(
'postgres', "
BEGIN;
CREATE TABLE t_009_tbl2 (id int, msg text);
SAVEPOINT s1;
- INSERT INTO t_009_tbl2 VALUES (27, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl2 VALUES (27, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_13';
-- checkpoint will issue XLOG_STANDBY_LOCK that can conflict with lock
-- held by 'create table' statement
@@ -321,10 +321,10 @@ $cur_master->psql(
COMMIT PREPARED 'xact_009_13';");
# Ensure that last transaction is replayed on standby.
-my $cur_master_lsn =
- $cur_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()");
+my $cur_primary_lsn =
+ $cur_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn()");
my $caughtup_query =
- "SELECT '$cur_master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()";
+ "SELECT '$cur_primary_lsn'::pg_lsn <= pg_last_wal_replay_lsn()";
$cur_standby->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for standby to catch up";
@@ -336,69 +336,69 @@ is($psql_out, '1', "Replay prepared transaction with DDL");
###############################################################################
# Check recovery of prepared transaction with DDL inside after a hard restart
-# of the master.
+# of the primary.
###############################################################################
-$cur_master->psql(
+$cur_primary->psql(
'postgres', "
BEGIN;
CREATE TABLE t_009_tbl3 (id int, msg text);
SAVEPOINT s1;
- INSERT INTO t_009_tbl3 VALUES (28, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl3 VALUES (28, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_14';
BEGIN;
CREATE TABLE t_009_tbl4 (id int, msg text);
SAVEPOINT s1;
- INSERT INTO t_009_tbl4 VALUES (29, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl4 VALUES (29, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_15';");
-$cur_master->teardown_node;
-$cur_master->start;
+$cur_primary->teardown_node;
+$cur_primary->start;
-$psql_rc = $cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_14'");
+$psql_rc = $cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_14'");
is($psql_rc, '0', 'Commit prepared transaction after teardown');
-$psql_rc = $cur_master->psql('postgres', "ROLLBACK PREPARED 'xact_009_15'");
+$psql_rc = $cur_primary->psql('postgres', "ROLLBACK PREPARED 'xact_009_15'");
is($psql_rc, '0', 'Rollback prepared transaction after teardown');
###############################################################################
# Check recovery of prepared transaction with DDL inside after a soft restart
-# of the master.
+# of the primary.
###############################################################################
-$cur_master->psql(
+$cur_primary->psql(
'postgres', "
BEGIN;
CREATE TABLE t_009_tbl5 (id int, msg text);
SAVEPOINT s1;
- INSERT INTO t_009_tbl5 VALUES (30, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl5 VALUES (30, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_16';
BEGIN;
CREATE TABLE t_009_tbl6 (id int, msg text);
SAVEPOINT s1;
- INSERT INTO t_009_tbl6 VALUES (31, 'issued to ${cur_master_name}');
+ INSERT INTO t_009_tbl6 VALUES (31, 'issued to ${cur_primary_name}');
PREPARE TRANSACTION 'xact_009_17';");
-$cur_master->stop;
-$cur_master->start;
+$cur_primary->stop;
+$cur_primary->start;
-$psql_rc = $cur_master->psql('postgres', "COMMIT PREPARED 'xact_009_16'");
+$psql_rc = $cur_primary->psql('postgres', "COMMIT PREPARED 'xact_009_16'");
is($psql_rc, '0', 'Commit prepared transaction after restart');
-$psql_rc = $cur_master->psql('postgres', "ROLLBACK PREPARED 'xact_009_17'");
+$psql_rc = $cur_primary->psql('postgres', "ROLLBACK PREPARED 'xact_009_17'");
is($psql_rc, '0', 'Rollback prepared transaction after restart');
###############################################################################
# Verify expected data appears on both servers.
###############################################################################
-$cur_master->psql(
+$cur_primary->psql(
'postgres',
"SELECT count(*) FROM pg_prepared_xacts",
stdout => \$psql_out);
-is($psql_out, '0', "No uncommitted prepared transactions on master");
+is($psql_out, '0', "No uncommitted prepared transactions on primary");
-$cur_master->psql(
+$cur_primary->psql(
'postgres',
"SELECT * FROM t_009_tbl ORDER BY id",
stdout => \$psql_out);
@@ -424,15 +424,15 @@ is( $psql_out, qq{1|issued to london
24|issued to paris
25|issued to london
26|issued to london},
- "Check expected t_009_tbl data on master");
+ "Check expected t_009_tbl data on primary");
-$cur_master->psql(
+$cur_primary->psql(
'postgres',
"SELECT * FROM t_009_tbl2",
stdout => \$psql_out);
is( $psql_out, qq{27|issued to paris},
- "Check expected t_009_tbl2 data on master");
+ "Check expected t_009_tbl2 data on primary");
$cur_standby->psql(
'postgres',
diff --git a/src/test/recovery/t/010_logical_decoding_timelines.pl b/src/test/recovery/t/010_logical_decoding_timelines.pl
index 11f5595e2bf..09aaefa9f03 100644
--- a/src/test/recovery/t/010_logical_decoding_timelines.pl
+++ b/src/test/recovery/t/010_logical_decoding_timelines.pl
@@ -30,10 +30,10 @@ use Scalar::Util qw(blessed);
my ($stdout, $stderr, $ret);
-# Initialize master node
-my $node_master = get_new_node('master');
-$node_master->init(allows_streaming => 1, has_archiving => 1);
-$node_master->append_conf(
+# Initialize primary node
+my $node_primary = get_new_node('primary');
+$node_primary->init(allows_streaming => 1, has_archiving => 1);
+$node_primary->append_conf(
'postgresql.conf', q[
wal_level = 'logical'
max_replication_slots = 3
@@ -42,38 +42,38 @@ log_min_messages = 'debug2'
hot_standby_feedback = on
wal_receiver_status_interval = 1
]);
-$node_master->dump_info;
-$node_master->start;
+$node_primary->dump_info;
+$node_primary->start;
note "testing logical timeline following with a filesystem-level copy";
-$node_master->safe_psql('postgres',
+$node_primary->safe_psql('postgres',
"SELECT pg_create_logical_replication_slot('before_basebackup', 'test_decoding');"
);
-$node_master->safe_psql('postgres', "CREATE TABLE decoding(blah text);");
-$node_master->safe_psql('postgres',
+$node_primary->safe_psql('postgres', "CREATE TABLE decoding(blah text);");
+$node_primary->safe_psql('postgres',
"INSERT INTO decoding(blah) VALUES ('beforebb');");
# We also want to verify that DROP DATABASE on a standby with a logical
# slot works. This isn't strictly related to timeline following, but
# the only way to get a logical slot on a standby right now is to use
# the same physical copy trick, so:
-$node_master->safe_psql('postgres', 'CREATE DATABASE dropme;');
-$node_master->safe_psql('dropme',
+$node_primary->safe_psql('postgres', 'CREATE DATABASE dropme;');
+$node_primary->safe_psql('dropme',
"SELECT pg_create_logical_replication_slot('dropme_slot', 'test_decoding');"
);
-$node_master->safe_psql('postgres', 'CHECKPOINT;');
+$node_primary->safe_psql('postgres', 'CHECKPOINT;');
my $backup_name = 'b1';
-$node_master->backup_fs_hot($backup_name);
+$node_primary->backup_fs_hot($backup_name);
-$node_master->safe_psql('postgres',
+$node_primary->safe_psql('postgres',
q[SELECT pg_create_physical_replication_slot('phys_slot');]);
my $node_replica = get_new_node('replica');
$node_replica->init_from_backup(
- $node_master, $backup_name,
+ $node_primary, $backup_name,
has_streaming => 1,
has_restoring => 1);
$node_replica->append_conf('postgresql.conf',
@@ -81,26 +81,26 @@ $node_replica->append_conf('postgresql.conf',
$node_replica->start;
-# If we drop 'dropme' on the master, the standby should drop the
+# If we drop 'dropme' on the primary, the standby should drop the
# db and associated slot.
-is($node_master->psql('postgres', 'DROP DATABASE dropme'),
- 0, 'dropped DB with logical slot OK on master');
-$node_master->wait_for_catchup($node_replica, 'replay',
- $node_master->lsn('insert'));
+is($node_primary->psql('postgres', 'DROP DATABASE dropme'),
+ 0, 'dropped DB with logical slot OK on primary');
+$node_primary->wait_for_catchup($node_replica, 'replay',
+ $node_primary->lsn('insert'));
is( $node_replica->safe_psql(
'postgres', q[SELECT 1 FROM pg_database WHERE datname = 'dropme']),
'', 'dropped DB dropme on standby');
-is($node_master->slot('dropme_slot')->{'slot_name'},
+is($node_primary->slot('dropme_slot')->{'slot_name'},
undef, 'logical slot was actually dropped on standby');
# Back to testing failover...
-$node_master->safe_psql('postgres',
+$node_primary->safe_psql('postgres',
"SELECT pg_create_logical_replication_slot('after_basebackup', 'test_decoding');"
);
-$node_master->safe_psql('postgres',
+$node_primary->safe_psql('postgres',
"INSERT INTO decoding(blah) VALUES ('afterbb');");
-$node_master->safe_psql('postgres', 'CHECKPOINT;');
+$node_primary->safe_psql('postgres', 'CHECKPOINT;');
# Verify that only the before base_backup slot is on the replica
$stdout = $node_replica->safe_psql('postgres',
@@ -109,20 +109,20 @@ is($stdout, 'before_basebackup',
'Expected to find only slot before_basebackup on replica');
# Examine the physical slot the replica uses to stream changes
-# from the master to make sure its hot_standby_feedback
+# from the primary to make sure its hot_standby_feedback
# has locked in a catalog_xmin on the physical slot, and that
# any xmin is < the catalog_xmin
-$node_master->poll_query_until(
+$node_primary->poll_query_until(
'postgres', q[
SELECT catalog_xmin IS NOT NULL
FROM pg_replication_slots
WHERE slot_name = 'phys_slot'
])
or die "slot's catalog_xmin never became set";
-my $phys_slot = $node_master->slot('phys_slot');
-isnt($phys_slot->{'xmin'}, '', 'xmin assigned on physical slot of master');
+my $phys_slot = $node_primary->slot('phys_slot');
+isnt($phys_slot->{'xmin'}, '', 'xmin assigned on physical slot of primary');
isnt($phys_slot->{'catalog_xmin'},
- '', 'catalog_xmin assigned on physical slot of master');
+ '', 'catalog_xmin assigned on physical slot of primary');
# Ignore wrap-around here, we're on a new cluster:
cmp_ok(
@@ -130,11 +130,11 @@ cmp_ok(
$phys_slot->{'catalog_xmin'},
'xmin on physical slot must not be lower than catalog_xmin');
-$node_master->safe_psql('postgres', 'CHECKPOINT');
-$node_master->wait_for_catchup($node_replica, 'write');
+$node_primary->safe_psql('postgres', 'CHECKPOINT');
+$node_primary->wait_for_catchup($node_replica, 'write');
# Boom, crash
-$node_master->stop('immediate');
+$node_primary->stop('immediate');
$node_replica->promote;
diff --git a/src/test/recovery/t/011_crash_recovery.pl b/src/test/recovery/t/011_crash_recovery.pl
index ca6e92b50df..5fe917978c6 100644
--- a/src/test/recovery/t/011_crash_recovery.pl
+++ b/src/test/recovery/t/011_crash_recovery.pl
@@ -18,7 +18,7 @@ else
plan tests => 3;
}
-my $node = get_new_node('master');
+my $node = get_new_node('primary');
$node->init(allows_streaming => 1);
$node->start;
diff --git a/src/test/recovery/t/012_subtransactions.pl b/src/test/recovery/t/012_subtransactions.pl
index 292cd40fe2d..6b9e29ae3c7 100644
--- a/src/test/recovery/t/012_subtransactions.pl
+++ b/src/test/recovery/t/012_subtransactions.pl
@@ -6,30 +6,30 @@ use PostgresNode;
use TestLib;
use Test::More tests => 12;
-# Setup master node
-my $node_master = get_new_node("master");
-$node_master->init(allows_streaming => 1);
-$node_master->append_conf(
+# Setup primary node
+my $node_primary = get_new_node("primary");
+$node_primary->init(allows_streaming => 1);
+$node_primary->append_conf(
'postgresql.conf', qq(
max_prepared_transactions = 10
log_checkpoints = true
));
-$node_master->start;
-$node_master->backup('master_backup');
-$node_master->psql('postgres', "CREATE TABLE t_012_tbl (id int)");
+$node_primary->start;
+$node_primary->backup('primary_backup');
+$node_primary->psql('postgres', "CREATE TABLE t_012_tbl (id int)");
# Setup standby node
my $node_standby = get_new_node('standby');
-$node_standby->init_from_backup($node_master, 'master_backup',
+$node_standby->init_from_backup($node_primary, 'primary_backup',
has_streaming => 1);
$node_standby->start;
# Switch to synchronous replication
-$node_master->append_conf(
+$node_primary->append_conf(
'postgresql.conf', qq(
synchronous_standby_names = '*'
));
-$node_master->psql('postgres', "SELECT pg_reload_conf()");
+$node_primary->psql('postgres', "SELECT pg_reload_conf()");
my $psql_out = '';
my $psql_rc = '';
@@ -39,7 +39,7 @@ my $psql_rc = '';
# so that it won't conflict with savepoint xids.
###############################################################################
-$node_master->psql(
+$node_primary->psql(
'postgres', "
BEGIN;
DELETE FROM t_012_tbl;
@@ -57,9 +57,9 @@ $node_master->psql(
PREPARE TRANSACTION 'xact_012_1';
CHECKPOINT;");
-$node_master->stop;
-$node_master->start;
-$node_master->psql(
+$node_primary->stop;
+$node_primary->start;
+$node_primary->psql(
'postgres', "
-- here we can get xid of previous savepoint if nextXid
-- wasn't properly advanced
@@ -68,7 +68,7 @@ $node_master->psql(
ROLLBACK;
COMMIT PREPARED 'xact_012_1';");
-$node_master->psql(
+$node_primary->psql(
'postgres',
"SELECT count(*) FROM t_012_tbl",
stdout => \$psql_out);
@@ -79,10 +79,10 @@ is($psql_out, '6', "Check nextXid handling for prepared subtransactions");
# PGPROC_MAX_CACHED_SUBXIDS subtransactions and also show data properly
# on promotion
###############################################################################
-$node_master->psql('postgres', "DELETE FROM t_012_tbl");
+$node_primary->psql('postgres', "DELETE FROM t_012_tbl");
# Function borrowed from src/test/regress/sql/hs_primary_extremes.sql
-$node_master->psql(
+$node_primary->psql(
'postgres', "
CREATE OR REPLACE FUNCTION hs_subxids (n integer)
RETURNS void
@@ -95,19 +95,19 @@ $node_master->psql(
RETURN;
EXCEPTION WHEN raise_exception THEN NULL; END;
\$\$;");
-$node_master->psql(
+$node_primary->psql(
'postgres', "
BEGIN;
SELECT hs_subxids(127);
COMMIT;");
-$node_master->wait_for_catchup($node_standby, 'replay',
- $node_master->lsn('insert'));
+$node_primary->wait_for_catchup($node_standby, 'replay',
+ $node_primary->lsn('insert'));
$node_standby->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '8128', "Visible");
-$node_master->stop;
+$node_primary->stop;
$node_standby->promote;
$node_standby->psql(
@@ -117,8 +117,8 @@ $node_standby->psql(
is($psql_out, '8128', "Visible");
# restore state
-($node_master, $node_standby) = ($node_standby, $node_master);
-$node_standby->enable_streaming($node_master);
+($node_primary, $node_standby) = ($node_standby, $node_primary);
+$node_standby->enable_streaming($node_primary);
$node_standby->start;
$node_standby->psql(
'postgres',
@@ -126,10 +126,10 @@ $node_standby->psql(
stdout => \$psql_out);
is($psql_out, '8128', "Visible");
-$node_master->psql('postgres', "DELETE FROM t_012_tbl");
+$node_primary->psql('postgres', "DELETE FROM t_012_tbl");
# Function borrowed from src/test/regress/sql/hs_primary_extremes.sql
-$node_master->psql(
+$node_primary->psql(
'postgres', "
CREATE OR REPLACE FUNCTION hs_subxids (n integer)
RETURNS void
@@ -142,19 +142,19 @@ $node_master->psql(
RETURN;
EXCEPTION WHEN raise_exception THEN NULL; END;
\$\$;");
-$node_master->psql(
+$node_primary->psql(
'postgres', "
BEGIN;
SELECT hs_subxids(127);
PREPARE TRANSACTION 'xact_012_1';");
-$node_master->wait_for_catchup($node_standby, 'replay',
- $node_master->lsn('insert'));
+$node_primary->wait_for_catchup($node_standby, 'replay',
+ $node_primary->lsn('insert'));
$node_standby->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
-$node_master->stop;
+$node_primary->stop;
$node_standby->promote;
$node_standby->psql(
@@ -164,34 +164,34 @@ $node_standby->psql(
is($psql_out, '-1', "Not visible");
# restore state
-($node_master, $node_standby) = ($node_standby, $node_master);
-$node_standby->enable_streaming($node_master);
+($node_primary, $node_standby) = ($node_standby, $node_primary);
+$node_standby->enable_streaming($node_primary);
$node_standby->start;
-$psql_rc = $node_master->psql('postgres', "COMMIT PREPARED 'xact_012_1'");
+$psql_rc = $node_primary->psql('postgres', "COMMIT PREPARED 'xact_012_1'");
is($psql_rc, '0',
"Restore of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted standby"
);
-$node_master->psql(
+$node_primary->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '8128', "Visible");
-$node_master->psql('postgres', "DELETE FROM t_012_tbl");
-$node_master->psql(
+$node_primary->psql('postgres', "DELETE FROM t_012_tbl");
+$node_primary->psql(
'postgres', "
BEGIN;
SELECT hs_subxids(201);
PREPARE TRANSACTION 'xact_012_1';");
-$node_master->wait_for_catchup($node_standby, 'replay',
- $node_master->lsn('insert'));
+$node_primary->wait_for_catchup($node_standby, 'replay',
+ $node_primary->lsn('insert'));
$node_standby->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
-$node_master->stop;
+$node_primary->stop;
$node_standby->promote;
$node_standby->psql(
@@ -201,15 +201,15 @@ $node_standby->psql(
is($psql_out, '-1', "Not visible");
# restore state
-($node_master, $node_standby) = ($node_standby, $node_master);
-$node_standby->enable_streaming($node_master);
+($node_primary, $node_standby) = ($node_standby, $node_primary);
+$node_standby->enable_streaming($node_primary);
$node_standby->start;
-$psql_rc = $node_master->psql('postgres', "ROLLBACK PREPARED 'xact_012_1'");
+$psql_rc = $node_primary->psql('postgres', "ROLLBACK PREPARED 'xact_012_1'");
is($psql_rc, '0',
"Rollback of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted standby"
);
-$node_master->psql(
+$node_primary->psql(
'postgres',
"SELECT coalesce(sum(id),-1) FROM t_012_tbl",
stdout => \$psql_out);
diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl
index 2c477978e7d..95d7bb62425 100644
--- a/src/test/recovery/t/013_crash_restart.pl
+++ b/src/test/recovery/t/013_crash_restart.pl
@@ -25,7 +25,7 @@ plan tests => 18;
# is really wrong.
my $psql_timeout = IPC::Run::timer(60);
-my $node = get_new_node('master');
+my $node = get_new_node('primary');
$node->init(allows_streaming => 1);
$node->start();
diff --git a/src/test/recovery/t/019_replslot_limit.pl b/src/test/recovery/t/019_replslot_limit.pl
index af656c6902f..1fced12fca5 100644
--- a/src/test/recovery/t/019_replslot_limit.pl
+++ b/src/test/recovery/t/019_replslot_limit.pl
@@ -13,21 +13,21 @@ use Time::HiRes qw(usleep);
$ENV{PGDATABASE} = 'postgres';
-# Initialize master node, setting wal-segsize to 1MB
-my $node_master = get_new_node('master');
-$node_master->init(allows_streaming => 1, extra => ['--wal-segsize=1']);
-$node_master->append_conf(
+# Initialize primary node, setting wal-segsize to 1MB
+my $node_primary = get_new_node('primary');
+$node_primary->init(allows_streaming => 1, extra => ['--wal-segsize=1']);
+$node_primary->append_conf(
'postgresql.conf', qq(
min_wal_size = 2MB
max_wal_size = 4MB
log_checkpoints = yes
));
-$node_master->start;
-$node_master->safe_psql('postgres',
+$node_primary->start;
+$node_primary->safe_psql('postgres',
"SELECT pg_create_physical_replication_slot('rep1')");
# The slot state and remain should be null before the first connection
-my $result = $node_master->safe_psql('postgres',
+my $result = $node_primary->safe_psql('postgres',
"SELECT restart_lsn IS NULL, wal_status is NULL, safe_wal_size is NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"
);
is($result, "t|t|t", 'check the state of non-reserved slot is "unknown"');
@@ -35,133 +35,133 @@ is($result, "t|t|t", 'check the state of non-reserved slot is "unknown"');
# Take backup
my $backup_name = 'my_backup';
-$node_master->backup($backup_name);
+$node_primary->backup($backup_name);
# Create a standby linking to it using the replication slot
my $node_standby = get_new_node('standby_1');
-$node_standby->init_from_backup($node_master, $backup_name,
+$node_standby->init_from_backup($node_primary, $backup_name,
has_streaming => 1);
$node_standby->append_conf('postgresql.conf', "primary_slot_name = 'rep1'");
$node_standby->start;
# Wait until standby has replayed enough data
-my $start_lsn = $node_master->lsn('write');
-$node_master->wait_for_catchup($node_standby, 'replay', $start_lsn);
+my $start_lsn = $node_primary->lsn('write');
+$node_primary->wait_for_catchup($node_standby, 'replay', $start_lsn);
# Stop standby
$node_standby->stop;
# Preparation done, the slot is the state "reserved" now
-$result = $node_master->safe_psql('postgres',
+$result = $node_primary->safe_psql('postgres',
"SELECT wal_status, safe_wal_size IS NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"
);
is($result, "reserved|t", 'check the catching-up state');
-# Advance WAL by five segments (= 5MB) on master
-advance_wal($node_master, 1);
-$node_master->safe_psql('postgres', "CHECKPOINT;");
+# Advance WAL by five segments (= 5MB) on primary
+advance_wal($node_primary, 1);
+$node_primary->safe_psql('postgres', "CHECKPOINT;");
# The slot is always "safe" when fitting max_wal_size
-$result = $node_master->safe_psql('postgres',
+$result = $node_primary->safe_psql('postgres',
"SELECT wal_status, safe_wal_size IS NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"
);
is($result, "reserved|t", 'check that it is safe if WAL fits in max_wal_size');
-advance_wal($node_master, 4);
-$node_master->safe_psql('postgres', "CHECKPOINT;");
+advance_wal($node_primary, 4);
+$node_primary->safe_psql('postgres', "CHECKPOINT;");
# The slot is always "safe" when max_slot_wal_keep_size is not set
-$result = $node_master->safe_psql('postgres',
+$result = $node_primary->safe_psql('postgres',
"SELECT wal_status, safe_wal_size IS NULL FROM pg_replication_slots WHERE slot_name = 'rep1'"
);
is($result, "reserved|t", 'check that slot is working');
-# The standby can reconnect to master
+# The standby can reconnect to primary
$node_standby->start;
-$start_lsn = $node_master->lsn('write');
-$node_master->wait_for_catchup($node_standby, 'replay', $start_lsn);
+$start_lsn = $node_primary->lsn('write');
+$node_primary->wait_for_catchup($node_standby, 'replay', $start_lsn);
$node_standby->stop;
-# Set max_slot_wal_keep_size on master
+# Set max_slot_wal_keep_size on primary
my $max_slot_wal_keep_size_mb = 6;
-$node_master->append_conf(
+$node_primary->append_conf(
'postgresql.conf', qq(
max_slot_wal_keep_size = ${max_slot_wal_keep_size_mb}MB
));
-$node_master->reload;
+$node_primary->reload;
# The slot is in safe state.
-$result = $node_master->safe_psql('postgres',
+$result = $node_primary->safe_psql('postgres',
"SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'");
is($result, "reserved", 'check that max_slot_wal_keep_size is working');
# Advance WAL again then checkpoint, reducing remain by 2 MB.
-advance_wal($node_master, 2);
-$node_master->safe_psql('postgres', "CHECKPOINT;");
+advance_wal($node_primary, 2);
+$node_primary->safe_psql('postgres', "CHECKPOINT;");
# The slot is still working
-$result = $node_master->safe_psql('postgres',
+$result = $node_primary->safe_psql('postgres',
"SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'");
is($result, "reserved",
'check that safe_wal_size gets close to the current LSN');
-# The standby can reconnect to master
+# The standby can reconnect to primary
$node_standby->start;
-$start_lsn = $node_master->lsn('write');
-$node_master->wait_for_catchup($node_standby, 'replay', $start_lsn);
+$start_lsn = $node_primary->lsn('write');
+$node_primary->wait_for_catchup($node_standby, 'replay', $start_lsn);
$node_standby->stop;
# wal_keep_segments overrides max_slot_wal_keep_size
-$result = $node_master->safe_psql('postgres',
+$result = $node_primary->safe_psql('postgres',
"ALTER SYSTEM SET wal_keep_segments to 8; SELECT pg_reload_conf();");
# Advance WAL again then checkpoint, reducing remain by 6 MB.
-advance_wal($node_master, 6);
-$result = $node_master->safe_psql('postgres',
+advance_wal($node_primary, 6);
+$result = $node_primary->safe_psql('postgres',
"SELECT wal_status as remain FROM pg_replication_slots WHERE slot_name = 'rep1'"
);
is($result, "extended",
'check that wal_keep_segments overrides max_slot_wal_keep_size');
# restore wal_keep_segments
-$result = $node_master->safe_psql('postgres',
+$result = $node_primary->safe_psql('postgres',
"ALTER SYSTEM SET wal_keep_segments to 0; SELECT pg_reload_conf();");
-# The standby can reconnect to master
+# The standby can reconnect to primary
$node_standby->start;
-$start_lsn = $node_master->lsn('write');
-$node_master->wait_for_catchup($node_standby, 'replay', $start_lsn);
+$start_lsn = $node_primary->lsn('write');
+$node_primary->wait_for_catchup($node_standby, 'replay', $start_lsn);
$node_standby->stop;
# Advance WAL again without checkpoint, reducing remain by 6 MB.
-advance_wal($node_master, 6);
+advance_wal($node_primary, 6);
# Slot gets into 'reserved' state
-$result = $node_master->safe_psql('postgres',
+$result = $node_primary->safe_psql('postgres',
"SELECT wal_status FROM pg_replication_slots WHERE slot_name = 'rep1'");
is($result, "extended", 'check that the slot state changes to "extended"');
# do checkpoint so that the next checkpoint runs too early
-$node_master->safe_psql('postgres', "CHECKPOINT;");
+$node_primary->safe_psql('postgres', "CHECKPOINT;");
# Advance WAL again without checkpoint; remain goes to 0.
-advance_wal($node_master, 1);
+advance_wal($node_primary, 1);
# Slot gets into 'unreserved' state and safe_wal_size is negative
-$result = $node_master->safe_psql('postgres',
+$result = $node_primary->safe_psql('postgres',
"SELECT wal_status, safe_wal_size <= 0 FROM pg_replication_slots WHERE slot_name = 'rep1'"
);
is($result, "unreserved|t",
'check that the slot state changes to "unreserved"');
-# The standby still can connect to master before a checkpoint
+# The standby still can connect to primary before a checkpoint
$node_standby->start;
-$start_lsn = $node_master->lsn('write');
-$node_master->wait_for_catchup($node_standby, 'replay', $start_lsn);
+$start_lsn = $node_primary->lsn('write');
+$node_primary->wait_for_catchup($node_standby, 'replay', $start_lsn);
$node_standby->stop;
@@ -171,25 +171,25 @@ ok( !find_in_log(
'check that required WAL segments are still available');
# Advance WAL again, the slot loses the oldest segment.
-my $logstart = get_log_size($node_master);
-advance_wal($node_master, 7);
-$node_master->safe_psql('postgres', "CHECKPOINT;");
+my $logstart = get_log_size($node_primary);
+advance_wal($node_primary, 7);
+$node_primary->safe_psql('postgres', "CHECKPOINT;");
# WARNING should be issued
ok( find_in_log(
- $node_master,
+ $node_primary,
"invalidating slot \"rep1\" because its restart_lsn [0-9A-F/]+ exceeds max_slot_wal_keep_size",
$logstart),
'check that the warning is logged');
# This slot should be broken
-$result = $node_master->safe_psql('postgres',
+$result = $node_primary->safe_psql('postgres',
"SELECT slot_name, active, restart_lsn IS NULL, wal_status, safe_wal_size FROM pg_replication_slots WHERE slot_name = 'rep1'"
);
is($result, "rep1|f|t|lost|",
'check that the slot became inactive and the state "lost" persists');
-# The standby no longer can connect to the master
+# The standby no longer can connect to the primary
$logstart = get_log_size($node_standby);
$node_standby->start;
@@ -208,39 +208,39 @@ for (my $i = 0; $i < 10000; $i++)
}
ok($failed, 'check that replication has been broken');
-$node_master->stop('immediate');
+$node_primary->stop('immediate');
$node_standby->stop('immediate');
-my $node_master2 = get_new_node('master2');
-$node_master2->init(allows_streaming => 1);
-$node_master2->append_conf(
+my $node_primary2 = get_new_node('primary2');
+$node_primary2->init(allows_streaming => 1);
+$node_primary2->append_conf(
'postgresql.conf', qq(
min_wal_size = 32MB
max_wal_size = 32MB
log_checkpoints = yes
));
-$node_master2->start;
-$node_master2->safe_psql('postgres',
+$node_primary2->start;
+$node_primary2->safe_psql('postgres',
"SELECT pg_create_physical_replication_slot('rep1')");
$backup_name = 'my_backup2';
-$node_master2->backup($backup_name);
+$node_primary2->backup($backup_name);
-$node_master2->stop;
-$node_master2->append_conf(
+$node_primary2->stop;
+$node_primary2->append_conf(
'postgresql.conf', qq(
max_slot_wal_keep_size = 0
));
-$node_master2->start;
+$node_primary2->start;
$node_standby = get_new_node('standby_2');
-$node_standby->init_from_backup($node_master2, $backup_name,
+$node_standby->init_from_backup($node_primary2, $backup_name,
has_streaming => 1);
$node_standby->append_conf('postgresql.conf', "primary_slot_name = 'rep1'");
$node_standby->start;
my @result = split(
'\n',
- $node_master2->safe_psql(
+ $node_primary2->safe_psql(
'postgres',
"CREATE TABLE tt();
DROP TABLE tt;
@@ -256,7 +256,7 @@ sub advance_wal
{
my ($node, $n) = @_;
- # Advance by $n segments (= (16 * $n) MB) on master
+ # Advance by $n segments (= (16 * $n) MB) on primary
for (my $i = 0; $i < $n; $i++)
{
$node->safe_psql('postgres',
diff --git a/src/test/recovery/t/020_archive_status.pl b/src/test/recovery/t/020_archive_status.pl
index c18b737785d..c726453417b 100644
--- a/src/test/recovery/t/020_archive_status.pl
+++ b/src/test/recovery/t/020_archive_status.pl
@@ -8,7 +8,7 @@ use TestLib;
use Test::More tests => 16;
use Config;
-my $primary = get_new_node('master');
+my $primary = get_new_node('primary');
$primary->init(
has_archiving => 1,
allows_streaming => 1);
diff --git a/src/test/ssl/t/001_ssltests.pl b/src/test/ssl/t/001_ssltests.pl
index c0680f39d6f..fd2727b5684 100644
--- a/src/test/ssl/t/001_ssltests.pl
+++ b/src/test/ssl/t/001_ssltests.pl
@@ -61,7 +61,7 @@ push @keys, 'client_wrongperms';
#### Set up the server.
note "setting up data directory";
-my $node = get_new_node('master');
+my $node = get_new_node('primary');
$node->init;
# PGHOST is enforced here to set up the node, subsequent connections
diff --git a/src/test/ssl/t/002_scram.pl b/src/test/ssl/t/002_scram.pl
index a1ab9119880..01231f8ba0f 100644
--- a/src/test/ssl/t/002_scram.pl
+++ b/src/test/ssl/t/002_scram.pl
@@ -35,7 +35,7 @@ my $common_connstr;
# Set up the server.
note "setting up data directory";
-my $node = get_new_node('master');
+my $node = get_new_node('primary');
$node->init;
# PGHOST is enforced here to set up the node, subsequent connections