Diffstat (limited to 'src/test')
-rw-r--r--  src/test/authentication/t/001_password.pl | 23
-rw-r--r--  src/test/authentication/t/003_peer.pl | 2
-rw-r--r--  src/test/authentication/t/005_sspi.pl | 2
-rw-r--r--  src/test/authentication/t/007_pre_auth.pl | 2
-rw-r--r--  src/test/kerberos/t/001_auth.pl | 2
-rw-r--r--  src/test/ldap/t/001_auth.pl | 2
-rw-r--r--  src/test/ldap/t/002_bindpasswd.pl | 2
-rw-r--r--  src/test/modules/injection_points/Makefile | 2
-rw-r--r--  src/test/modules/injection_points/expected/vacuum.out | 122
-rw-r--r--  src/test/modules/injection_points/meson.build | 3
-rw-r--r--  src/test/modules/injection_points/sql/vacuum.sql | 47
-rw-r--r--  src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl | 3
-rw-r--r--  src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl | 6
-rw-r--r--  src/test/modules/oauth_validator/meson.build | 2
-rw-r--r--  src/test/modules/oauth_validator/t/001_server.pl | 22
-rw-r--r--  src/test/modules/oauth_validator/t/002_client.pl | 2
-rwxr-xr-x  src/test/modules/oauth_validator/t/oauth_server.py | 31
-rw-r--r--  src/test/modules/test_aio/t/001_aio.pl | 46
-rw-r--r--  src/test/modules/test_aio/test_aio.c | 4
-rw-r--r--  src/test/modules/test_dsm_registry/expected/test_dsm_registry.out | 12
-rw-r--r--  src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql | 2
-rw-r--r--  src/test/modules/test_dsm_registry/test_dsm_registry--1.0.sql | 6
-rw-r--r--  src/test/modules/test_dsm_registry/test_dsm_registry.c | 105
-rw-r--r--  src/test/modules/test_shm_mq/worker.c | 2
-rw-r--r--  src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm | 1
-rw-r--r--  src/test/perl/PostgreSQL/Test/Cluster.pm | 10
-rw-r--r--  src/test/postmaster/t/002_connection_limits.pl | 6
-rw-r--r--  src/test/postmaster/t/003_start_stop.pl | 3
-rw-r--r--  src/test/recovery/meson.build | 2
-rw-r--r--  src/test/recovery/t/003_recovery_targets.pl | 50
-rw-r--r--  src/test/recovery/t/013_crash_restart.pl | 2
-rw-r--r--  src/test/recovery/t/022_crash_temp_files.pl | 2
-rw-r--r--  src/test/recovery/t/032_relfilenode_reuse.pl | 2
-rw-r--r--  src/test/recovery/t/037_invalid_database.pl | 2
-rw-r--r--  src/test/recovery/t/040_standby_failover_slots_sync.pl | 3
-rw-r--r--  src/test/recovery/t/047_checkpoint_physical_slot.pl | 132
-rw-r--r--  src/test/recovery/t/048_vacuum_horizon_floor.pl | 288
-rw-r--r--  src/test/regress/expected/alter_table.out | 7
-rw-r--r--  src/test/regress/expected/btree_index.out | 101
-rw-r--r--  src/test/regress/expected/constraints.out | 7
-rw-r--r--  src/test/regress/expected/copy.out | 25
-rw-r--r--  src/test/regress/expected/copy2.out | 6
-rw-r--r--  src/test/regress/expected/create_table_like.out | 30
-rw-r--r--  src/test/regress/expected/domain.out | 5
-rw-r--r--  src/test/regress/expected/foreign_key.out | 81
-rw-r--r--  src/test/regress/expected/generated_virtual.out | 89
-rw-r--r--  src/test/regress/expected/horology.out | 9
-rw-r--r--  src/test/regress/expected/inherit.out | 4
-rw-r--r--  src/test/regress/expected/join.out | 242
-rw-r--r--  src/test/regress/expected/limit.out | 20
-rw-r--r--  src/test/regress/expected/matview.out | 2
-rw-r--r--  src/test/regress/expected/memoize.out | 60
-rw-r--r--  src/test/regress/expected/merge.out | 70
-rw-r--r--  src/test/regress/expected/misc_functions.out | 18
-rw-r--r--  src/test/regress/expected/numeric.out | 16
-rw-r--r--  src/test/regress/expected/partition_prune.out | 47
-rw-r--r--  src/test/regress/expected/psql.out | 14
-rw-r--r--  src/test/regress/expected/psql_pipeline.out | 194
-rw-r--r--  src/test/regress/expected/publication.out | 17
-rw-r--r--  src/test/regress/expected/regproc.out | 174
-rw-r--r--  src/test/regress/expected/strings.out | 67
-rw-r--r--  src/test/regress/expected/sysviews.out | 19
-rw-r--r--  src/test/regress/expected/triggers.out | 49
-rw-r--r--  src/test/regress/expected/type_sanity.out | 1
-rw-r--r--  src/test/regress/expected/without_overlaps.out | 4
-rw-r--r--  src/test/regress/sql/alter_table.sql | 9
-rw-r--r--  src/test/regress/sql/btree_index.sql | 65
-rw-r--r--  src/test/regress/sql/constraints.sql | 6
-rw-r--r--  src/test/regress/sql/copy.sql | 30
-rw-r--r--  src/test/regress/sql/copy2.sql | 3
-rw-r--r--  src/test/regress/sql/create_table_like.sql | 16
-rw-r--r--  src/test/regress/sql/domain.sql | 3
-rw-r--r--  src/test/regress/sql/foreign_key.sql | 36
-rw-r--r--  src/test/regress/sql/generated_virtual.sql | 39
-rw-r--r--  src/test/regress/sql/horology.sql | 4
-rw-r--r--  src/test/regress/sql/join.sql | 75
-rw-r--r--  src/test/regress/sql/limit.sql | 5
-rw-r--r--  src/test/regress/sql/memoize.sql | 27
-rw-r--r--  src/test/regress/sql/merge.sql | 49
-rw-r--r--  src/test/regress/sql/misc_functions.sql | 6
-rw-r--r--  src/test/regress/sql/numeric.sql | 2
-rw-r--r--  src/test/regress/sql/partition_prune.sql | 6
-rw-r--r--  src/test/regress/sql/psql.sql | 12
-rw-r--r--  src/test/regress/sql/psql_pipeline.sql | 106
-rw-r--r--  src/test/regress/sql/publication.sql | 5
-rw-r--r--  src/test/regress/sql/regproc.sql | 38
-rw-r--r--  src/test/regress/sql/strings.sql | 20
-rw-r--r--  src/test/regress/sql/sysviews.sql | 18
-rw-r--r--  src/test/regress/sql/triggers.sql | 33
-rw-r--r--  src/test/regress/sql/type_sanity.sql | 1
-rw-r--r--  src/test/ssl/meson.build | 2
-rw-r--r--  src/test/ssl/t/SSL/Server.pm | 5
-rw-r--r--  src/test/subscription/t/007_ddl.pl | 35
-rw-r--r--  src/test/subscription/t/013_partition.pl | 3
-rw-r--r--  src/test/subscription/t/021_twophase.pl | 7
-rw-r--r--  src/test/subscription/t/024_add_drop_pub.pl | 14
-rw-r--r--  src/test/subscription/t/035_conflicts.pl | 3
97 files changed, 2384 insertions, 632 deletions
diff --git a/src/test/authentication/t/001_password.pl b/src/test/authentication/t/001_password.pl
index 37d96d95a1a..a16e9a563f3 100644
--- a/src/test/authentication/t/001_password.pl
+++ b/src/test/authentication/t/001_password.pl
@@ -79,39 +79,40 @@ $node->start;
# other tests are added to this file in the future
$node->safe_psql('postgres', "CREATE DATABASE test_log_connections");
-my $log_connections = $node->safe_psql('test_log_connections', q(SHOW log_connections;));
+my $log_connections =
+ $node->safe_psql('test_log_connections', q(SHOW log_connections;));
is($log_connections, 'on', qq(check log connections has expected value 'on'));
-$node->connect_ok('test_log_connections',
+$node->connect_ok(
+ 'test_log_connections',
qq(log_connections 'on' works as expected for backwards compatibility),
log_like => [
qr/connection received/,
qr/connection authenticated/,
qr/connection authorized: user=\S+ database=test_log_connections/,
],
- log_unlike => [
- qr/connection ready/,
- ],);
+ log_unlike => [ qr/connection ready/, ],);
-$node->safe_psql('test_log_connections',
+$node->safe_psql(
+ 'test_log_connections',
q[ALTER SYSTEM SET log_connections = receipt,authorization,setup_durations;
SELECT pg_reload_conf();]);
-$node->connect_ok('test_log_connections',
+$node->connect_ok(
+ 'test_log_connections',
q(log_connections with subset of specified options logs only those aspects),
log_like => [
qr/connection received/,
qr/connection authorized: user=\S+ database=test_log_connections/,
qr/connection ready/,
],
- log_unlike => [
- qr/connection authenticated/,
- ],);
+ log_unlike => [ qr/connection authenticated/, ],);
$node->safe_psql('test_log_connections',
qq(ALTER SYSTEM SET log_connections = 'all'; SELECT pg_reload_conf();));
-$node->connect_ok('test_log_connections',
+$node->connect_ok(
+ 'test_log_connections',
qq(log_connections 'all' logs all available connection aspects),
log_like => [
qr/connection received/,
diff --git a/src/test/authentication/t/003_peer.pl b/src/test/authentication/t/003_peer.pl
index 2879800eacf..f2320b62c87 100644
--- a/src/test/authentication/t/003_peer.pl
+++ b/src/test/authentication/t/003_peer.pl
@@ -71,7 +71,7 @@ sub test_role
my $node = PostgreSQL::Test::Cluster->new('node');
$node->init;
-$node->append_conf('postgresql.conf', "log_connections = on\n");
+$node->append_conf('postgresql.conf', "log_connections = authentication\n");
# Needed to allow connect_fails to inspect postmaster log:
$node->append_conf('postgresql.conf', "log_min_messages = debug2");
$node->start;
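
Throughout these test changes, the boolean "log_connections = on" is replaced by the new aspect-list form (receipt, authentication, authorization, setup_durations, or all), and 001_password.pl above asserts on the per-aspect log lines. A minimal sketch of testing one aspect in isolation, assuming PostgreSQL::Test::Cluster from a PostgreSQL source tree (the node name and patterns here are illustrative, not part of the patch):

    use strict;
    use warnings FATAL => 'all';
    use PostgreSQL::Test::Cluster;
    use Test::More;

    my $node = PostgreSQL::Test::Cluster->new('log_aspects');
    $node->init;
    # Log only the "connection authenticated" aspect.
    $node->append_conf('postgresql.conf', "log_connections = authentication\n");
    $node->start;

    $node->connect_ok(
        'dbname=postgres',
        'only the authentication aspect is logged',
        log_like => [ qr/connection authenticated/ ],
        log_unlike => [ qr/connection received/, qr/connection ready/ ]);

    $node->stop;
    done_testing();
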
diff --git a/src/test/authentication/t/005_sspi.pl b/src/test/authentication/t/005_sspi.pl
index b480b702590..cb3e169002f 100644
--- a/src/test/authentication/t/005_sspi.pl
+++ b/src/test/authentication/t/005_sspi.pl
@@ -18,7 +18,7 @@ if (!$windows_os || $use_unix_sockets)
# Initialize primary node
my $node = PostgreSQL::Test::Cluster->new('primary');
$node->init;
-$node->append_conf('postgresql.conf', "log_connections = on\n");
+$node->append_conf('postgresql.conf', "log_connections = authentication\n");
$node->start;
my $huge_pages_status =
diff --git a/src/test/authentication/t/007_pre_auth.pl b/src/test/authentication/t/007_pre_auth.pl
index 12e40dc722c..7b3765e6d25 100644
--- a/src/test/authentication/t/007_pre_auth.pl
+++ b/src/test/authentication/t/007_pre_auth.pl
@@ -20,7 +20,7 @@ my $node = PostgreSQL::Test::Cluster->new('primary');
$node->init;
$node->append_conf(
'postgresql.conf', q[
-log_connections = on
+log_connections = 'receipt,authentication'
]);
$node->start;
diff --git a/src/test/kerberos/t/001_auth.pl b/src/test/kerberos/t/001_auth.pl
index 2dc6bec9b89..b0be96f2beb 100644
--- a/src/test/kerberos/t/001_auth.pl
+++ b/src/test/kerberos/t/001_auth.pl
@@ -65,7 +65,7 @@ $node->append_conf(
'postgresql.conf', qq{
listen_addresses = '$hostaddr'
krb_server_keyfile = '$krb->{keytab}'
-log_connections = on
+log_connections = all
log_min_messages = debug2
lc_messages = 'C'
});
diff --git a/src/test/ldap/t/001_auth.pl b/src/test/ldap/t/001_auth.pl
index d1315ed5351..440c30b7ddd 100644
--- a/src/test/ldap/t/001_auth.pl
+++ b/src/test/ldap/t/001_auth.pl
@@ -47,7 +47,7 @@ note "setting up PostgreSQL instance";
my $node = PostgreSQL::Test::Cluster->new('node');
$node->init;
-$node->append_conf('postgresql.conf', "log_connections = on\n");
+$node->append_conf('postgresql.conf', "log_connections = all\n");
# Needed to allow connect_fails to inspect postmaster log:
$node->append_conf('postgresql.conf', "log_min_messages = debug2");
$node->start;
diff --git a/src/test/ldap/t/002_bindpasswd.pl b/src/test/ldap/t/002_bindpasswd.pl
index f8beba2b279..642bb2d9a77 100644
--- a/src/test/ldap/t/002_bindpasswd.pl
+++ b/src/test/ldap/t/002_bindpasswd.pl
@@ -43,7 +43,7 @@ note "setting up PostgreSQL instance";
my $node = PostgreSQL::Test::Cluster->new('node');
$node->init;
-$node->append_conf('postgresql.conf', "log_connections = on\n");
+$node->append_conf('postgresql.conf', "log_connections = all\n");
$node->start;
$node->safe_psql('postgres', 'CREATE USER test0;');
diff --git a/src/test/modules/injection_points/Makefile b/src/test/modules/injection_points/Makefile
index e680991f8d4..fc82cd67f6c 100644
--- a/src/test/modules/injection_points/Makefile
+++ b/src/test/modules/injection_points/Makefile
@@ -11,7 +11,7 @@ EXTENSION = injection_points
DATA = injection_points--1.0.sql
PGFILEDESC = "injection_points - facility for injection points"
-REGRESS = injection_points hashagg reindex_conc
+REGRESS = injection_points hashagg reindex_conc vacuum
REGRESS_OPTS = --dlpath=$(top_builddir)/src/test/regress
ISOLATION = basic inplace syscache-update-pruned
diff --git a/src/test/modules/injection_points/expected/vacuum.out b/src/test/modules/injection_points/expected/vacuum.out
new file mode 100644
index 00000000000..58df59fa927
--- /dev/null
+++ b/src/test/modules/injection_points/expected/vacuum.out
@@ -0,0 +1,122 @@
+-- Tests for VACUUM
+CREATE EXTENSION injection_points;
+SELECT injection_points_set_local();
+ injection_points_set_local
+----------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-index-cleanup-auto', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-index-cleanup-disabled', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-index-cleanup-enabled', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-truncate-auto', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-truncate-disabled', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-truncate-enabled', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+-- Check state of index_cleanup and truncate in VACUUM.
+CREATE TABLE vac_tab_on_toast_off(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=true, toast.vacuum_index_cleanup=false,
+ vacuum_truncate=true, toast.vacuum_truncate=false);
+CREATE TABLE vac_tab_off_toast_on(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=false, toast.vacuum_index_cleanup=true,
+ vacuum_truncate=false, toast.vacuum_truncate=true);
+-- Multiple relations should use their options in isolation.
+VACUUM vac_tab_on_toast_off, vac_tab_off_toast_on;
+NOTICE: notice triggered for injection point vacuum-index-cleanup-enabled
+NOTICE: notice triggered for injection point vacuum-truncate-enabled
+NOTICE: notice triggered for injection point vacuum-index-cleanup-disabled
+NOTICE: notice triggered for injection point vacuum-truncate-disabled
+NOTICE: notice triggered for injection point vacuum-index-cleanup-disabled
+NOTICE: notice triggered for injection point vacuum-truncate-disabled
+NOTICE: notice triggered for injection point vacuum-index-cleanup-enabled
+NOTICE: notice triggered for injection point vacuum-truncate-enabled
+-- Check "auto" case of index_cleanup and "truncate" controlled by
+-- its GUC.
+CREATE TABLE vac_tab_auto(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=auto, toast.vacuum_index_cleanup=auto);
+SET vacuum_truncate = false;
+VACUUM vac_tab_auto;
+NOTICE: notice triggered for injection point vacuum-index-cleanup-auto
+NOTICE: notice triggered for injection point vacuum-truncate-disabled
+NOTICE: notice triggered for injection point vacuum-index-cleanup-auto
+NOTICE: notice triggered for injection point vacuum-truncate-disabled
+SET vacuum_truncate = true;
+VACUUM vac_tab_auto;
+NOTICE: notice triggered for injection point vacuum-index-cleanup-auto
+NOTICE: notice triggered for injection point vacuum-truncate-enabled
+NOTICE: notice triggered for injection point vacuum-index-cleanup-auto
+NOTICE: notice triggered for injection point vacuum-truncate-enabled
+RESET vacuum_truncate;
+DROP TABLE vac_tab_auto;
+DROP TABLE vac_tab_on_toast_off;
+DROP TABLE vac_tab_off_toast_on;
+-- Cleanup
+SELECT injection_points_detach('vacuum-index-cleanup-auto');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_detach('vacuum-index-cleanup-disabled');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_detach('vacuum-index-cleanup-enabled');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_detach('vacuum-truncate-auto');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_detach('vacuum-truncate-disabled');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_detach('vacuum-truncate-enabled');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+DROP EXTENSION injection_points;
diff --git a/src/test/modules/injection_points/meson.build b/src/test/modules/injection_points/meson.build
index d61149712fd..20390d6b4bf 100644
--- a/src/test/modules/injection_points/meson.build
+++ b/src/test/modules/injection_points/meson.build
@@ -37,8 +37,9 @@ tests += {
'injection_points',
'hashagg',
'reindex_conc',
+ 'vacuum',
],
- 'regress_args': ['--dlpath', meson.build_root() / 'src/test/regress'],
+ 'regress_args': ['--dlpath', meson.project_build_root() / 'src/test/regress'],
# The injection points are cluster-wide, so disable installcheck
'runningcheck': false,
},
diff --git a/src/test/modules/injection_points/sql/vacuum.sql b/src/test/modules/injection_points/sql/vacuum.sql
new file mode 100644
index 00000000000..23760dd0f38
--- /dev/null
+++ b/src/test/modules/injection_points/sql/vacuum.sql
@@ -0,0 +1,47 @@
+-- Tests for VACUUM
+
+CREATE EXTENSION injection_points;
+
+SELECT injection_points_set_local();
+SELECT injection_points_attach('vacuum-index-cleanup-auto', 'notice');
+SELECT injection_points_attach('vacuum-index-cleanup-disabled', 'notice');
+SELECT injection_points_attach('vacuum-index-cleanup-enabled', 'notice');
+SELECT injection_points_attach('vacuum-truncate-auto', 'notice');
+SELECT injection_points_attach('vacuum-truncate-disabled', 'notice');
+SELECT injection_points_attach('vacuum-truncate-enabled', 'notice');
+
+-- Check state of index_cleanup and truncate in VACUUM.
+CREATE TABLE vac_tab_on_toast_off(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=true, toast.vacuum_index_cleanup=false,
+ vacuum_truncate=true, toast.vacuum_truncate=false);
+CREATE TABLE vac_tab_off_toast_on(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=false, toast.vacuum_index_cleanup=true,
+ vacuum_truncate=false, toast.vacuum_truncate=true);
+-- Multiple relations should use their options in isolation.
+VACUUM vac_tab_on_toast_off, vac_tab_off_toast_on;
+
+-- Check "auto" case of index_cleanup and "truncate" controlled by
+-- its GUC.
+CREATE TABLE vac_tab_auto(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=auto, toast.vacuum_index_cleanup=auto);
+SET vacuum_truncate = false;
+VACUUM vac_tab_auto;
+SET vacuum_truncate = true;
+VACUUM vac_tab_auto;
+RESET vacuum_truncate;
+
+DROP TABLE vac_tab_auto;
+DROP TABLE vac_tab_on_toast_off;
+DROP TABLE vac_tab_off_toast_on;
+
+-- Cleanup
+SELECT injection_points_detach('vacuum-index-cleanup-auto');
+SELECT injection_points_detach('vacuum-index-cleanup-disabled');
+SELECT injection_points_detach('vacuum-index-cleanup-enabled');
+SELECT injection_points_detach('vacuum-truncate-auto');
+SELECT injection_points_detach('vacuum-truncate-disabled');
+SELECT injection_points_detach('vacuum-truncate-enabled');
+DROP EXTENSION injection_points;
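
The SQL script above drives the injection points from a single session; the same cycle can also be checked from a TAP test, since the NOTICE raised by an attached point arrives on psql's stderr. A sketch under those assumptions ($node and the table name are illustrative; injection_points must be installed and the server built with injection points enabled):

    my ($ret, $out, $err) = $node->psql('postgres', q(
        CREATE EXTENSION injection_points;
        SELECT injection_points_set_local();
        SELECT injection_points_attach('vacuum-truncate-enabled', 'notice');
        CREATE TABLE vac_sketch (i int) WITH (vacuum_truncate = true);
        VACUUM vac_sketch;
    ));
    like($err, qr/injection point vacuum-truncate-enabled/,
        'vacuum-truncate-enabled fired during VACUUM');
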
diff --git a/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl b/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl
index 9b062e1c800..5dc1e442d29 100644
--- a/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl
+++ b/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl
@@ -42,7 +42,8 @@ note "setting up PostgreSQL instance";
my $node = PostgreSQL::Test::Cluster->new('node');
$node->init;
-$node->append_conf('postgresql.conf', "log_connections = on\n");
+$node->append_conf('postgresql.conf',
+ "log_connections = 'receipt,authentication,authorization'\n");
$node->append_conf('postgresql.conf',
"shared_preload_libraries = 'ldap_password_func'");
$node->start;
diff --git a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
index 61524bdbd8f..f9678853070 100644
--- a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
+++ b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
@@ -53,7 +53,8 @@ for my $testname (@tests)
$node->command_ok(
[
'libpq_pipeline', @extraargs,
- $testname, $node->connstr('postgres') . " max_protocol_version=latest"
+ $testname,
+ $node->connstr('postgres') . " max_protocol_version=latest"
],
"libpq_pipeline $testname");
@@ -76,7 +77,8 @@ for my $testname (@tests)
# test separately that it still works the old protocol version too.
$node->command_ok(
[
- 'libpq_pipeline', 'cancel', $node->connstr('postgres') . " max_protocol_version=3.0"
+ 'libpq_pipeline', 'cancel',
+ $node->connstr('postgres') . " max_protocol_version=3.0"
],
"libpq_pipeline cancel with protocol 3.0");
diff --git a/src/test/modules/oauth_validator/meson.build b/src/test/modules/oauth_validator/meson.build
index e190f9cf15a..a6f937fd7d7 100644
--- a/src/test/modules/oauth_validator/meson.build
+++ b/src/test/modules/oauth_validator/meson.build
@@ -77,7 +77,7 @@ tests += {
't/002_client.pl',
],
'env': {
- 'PYTHON': python.path(),
+ 'PYTHON': python.full_path(),
'with_libcurl': oauth_flow_supported ? 'yes' : 'no',
'with_python': 'yes',
},
diff --git a/src/test/modules/oauth_validator/t/001_server.pl b/src/test/modules/oauth_validator/t/001_server.pl
index 4f035417a40..41672ebd5c6 100644
--- a/src/test/modules/oauth_validator/t/001_server.pl
+++ b/src/test/modules/oauth_validator/t/001_server.pl
@@ -45,7 +45,7 @@ if ($ENV{with_python} ne 'yes')
my $node = PostgreSQL::Test::Cluster->new('primary');
$node->init;
-$node->append_conf('postgresql.conf', "log_connections = on\n");
+$node->append_conf('postgresql.conf', "log_connections = all\n");
$node->append_conf('postgresql.conf',
"oauth_validator_libraries = 'validator'\n");
# Needed to allow connect_fails to inspect postmaster log:
@@ -295,6 +295,26 @@ $node->connect_fails(
expected_stderr =>
qr/failed to obtain access token: response is too large/);
+my $nesting_limit = 16;
+$node->connect_ok(
+ connstr(
+ stage => 'device',
+ nested_array => $nesting_limit,
+ nested_object => $nesting_limit),
+ "nested arrays and objects, up to parse limit",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@);
+$node->connect_fails(
+ connstr(stage => 'device', nested_array => $nesting_limit + 1),
+ "bad discovery response: overly nested JSON array",
+ expected_stderr =>
+ qr/failed to parse device authorization: JSON is too deeply nested/);
+$node->connect_fails(
+ connstr(stage => 'device', nested_object => $nesting_limit + 1),
+ "bad discovery response: overly nested JSON object",
+ expected_stderr =>
+ qr/failed to parse device authorization: JSON is too deeply nested/);
+
$node->connect_fails(
connstr(stage => 'device', content_type => 'text/plain'),
"bad device authz response: wrong content type",
diff --git a/src/test/modules/oauth_validator/t/002_client.pl b/src/test/modules/oauth_validator/t/002_client.pl
index 21d4acc1926..aac0220d215 100644
--- a/src/test/modules/oauth_validator/t/002_client.pl
+++ b/src/test/modules/oauth_validator/t/002_client.pl
@@ -26,7 +26,7 @@ if (!$ENV{PG_TEST_EXTRA} || $ENV{PG_TEST_EXTRA} !~ /\boauth\b/)
my $node = PostgreSQL::Test::Cluster->new('primary');
$node->init;
-$node->append_conf('postgresql.conf', "log_connections = on\n");
+$node->append_conf('postgresql.conf', "log_connections = all\n");
$node->append_conf('postgresql.conf',
"oauth_validator_libraries = 'validator'\n");
$node->start;
diff --git a/src/test/modules/oauth_validator/t/oauth_server.py b/src/test/modules/oauth_validator/t/oauth_server.py
index 20b3a9506cb..0f8836aadf3 100755
--- a/src/test/modules/oauth_validator/t/oauth_server.py
+++ b/src/test/modules/oauth_validator/t/oauth_server.py
@@ -7,6 +7,7 @@
#
import base64
+import functools
import http.server
import json
import os
@@ -213,14 +214,32 @@ class OAuthHandler(http.server.BaseHTTPRequestHandler):
@property
def _response_padding(self):
"""
- If the huge_response test parameter is set to True, returns a dict
- containing a gigantic string value, which can then be folded into a JSON
- response.
+ Returns a dict with any additional entries that should be folded into a
+ JSON response, as determined by test parameters provided by the client:
+
+ - huge_response: if set to True, the dict will contain a gigantic string
+ value
+
+ - nested_array: if set to nonzero, the dict will contain a deeply nested
+ array so that the top-level object has the given depth
+
+ - nested_object: if set to nonzero, the dict will contain a deeply
+ nested JSON object so that the top-level object has the given depth
"""
- if not self._get_param("huge_response", False):
- return dict()
+ ret = dict()
+
+ if self._get_param("huge_response", False):
+ ret["_pad_"] = "x" * 1024 * 1024
+
+ depth = self._get_param("nested_array", 0)
+ if depth:
+ ret["_arr_"] = functools.reduce(lambda x, _: [x], range(depth))
+
+ depth = self._get_param("nested_object", 0)
+ if depth:
+ ret["_obj_"] = functools.reduce(lambda x, _: {"": x}, range(depth))
- return {"_pad_": "x" * 1024 * 1024}
+ return ret
@property
def _access_token(self):
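
The two functools.reduce calls build a JSON value whose top-level depth equals the requested nesting, which is what lets 001_server.pl probe the client's 16-level parse limit from both sides. The same construction in Perl, as a sketch (JSON::PP is in core; the key names mirror the Python helper):

    use JSON::PP qw(encode_json);

    my $depth = 16;
    my $arr = [];                            # depth 1
    $arr = [$arr] for 2 .. $depth;           # wrap until depth 16: [[[...]]]
    my $obj = {};                            # depth 1
    $obj = { '' => $obj } for 2 .. $depth;   # depth 16: {"":{"":...{}}}
    print encode_json({ _arr_ => $arr, _obj_ => $obj }), "\n";
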
diff --git a/src/test/modules/test_aio/t/001_aio.pl b/src/test/modules/test_aio/t/001_aio.pl
index 4527c70785d..82ffffc058f 100644
--- a/src/test/modules/test_aio/t/001_aio.pl
+++ b/src/test/modules/test_aio/t/001_aio.pl
@@ -1123,7 +1123,8 @@ COMMIT;
{
# Create a corruption and then read the block without waiting for
# completion.
- $psql_a->query(qq(
+ $psql_a->query(
+ qq(
SELECT modify_rel_block('tbl_zero', 1, corrupt_header=>true);
SELECT read_rel_block_ll('tbl_zero', 1, wait_complete=>false, zero_on_error=>true)
));
@@ -1133,7 +1134,8 @@ SELECT read_rel_block_ll('tbl_zero', 1, wait_complete=>false, zero_on_error=>tru
$psql_b,
"$persistency: test completing read by other session doesn't generate warning",
qq(SELECT count(*) > 0 FROM tbl_zero;),
- qr/^t$/, qr/^$/);
+ qr/^t$/,
+ qr/^$/);
}
# Clean up
@@ -1355,18 +1357,24 @@ SELECT modify_rel_block('tbl_cs_fail', 6, corrupt_checksum=>true);
));
$psql->query_safe($invalidate_sql);
- psql_like($io_method, $psql,
+ psql_like(
+ $io_method,
+ $psql,
"reading block w/ wrong checksum with ignore_checksum_failure=off fails",
- $count_sql, qr/^$/, qr/ERROR: invalid page in block/);
+ $count_sql,
+ qr/^$/,
+ qr/ERROR: invalid page in block/);
$psql->query_safe("SET ignore_checksum_failure=on");
$psql->query_safe($invalidate_sql);
- psql_like($io_method, $psql,
- "reading block w/ wrong checksum with ignore_checksum_failure=off succeeds",
- $count_sql,
- qr/^$expect$/,
- qr/WARNING: ignoring (checksum failure|\d checksum failures)/);
+ psql_like(
+ $io_method,
+ $psql,
+ "reading block w/ wrong checksum with ignore_checksum_failure=off succeeds",
+ $count_sql,
+ qr/^$expect$/,
+ qr/WARNING: ignoring (checksum failure|\d checksum failures)/);
# Verify that ignore_checksum_failure=off works in multi-block reads
@@ -1432,19 +1440,22 @@ SELECT read_rel_block_ll('tbl_cs_fail', 1, nblocks=>5, zero_on_error=>true);),
# file.
$node->wait_for_log(qr/LOG: ignoring checksum failure in block 2/,
- $log_location);
+ $log_location);
ok(1, "$io_method: found information about checksum failure in block 2");
- $node->wait_for_log(qr/LOG: invalid page in block 3 of relation base.*; zeroing out page/,
- $log_location);
+ $node->wait_for_log(
+ qr/LOG: invalid page in block 3 of relation base.*; zeroing out page/,
+ $log_location);
ok(1, "$io_method: found information about invalid page in block 3");
- $node->wait_for_log(qr/LOG: invalid page in block 4 of relation base.*; zeroing out page/,
- $log_location);
+ $node->wait_for_log(
+ qr/LOG: invalid page in block 4 of relation base.*; zeroing out page/,
+ $log_location);
ok(1, "$io_method: found information about checksum failure in block 4");
- $node->wait_for_log(qr/LOG: invalid page in block 5 of relation base.*; zeroing out page/,
- $log_location);
+ $node->wait_for_log(
+ qr/LOG: invalid page in block 5 of relation base.*; zeroing out page/,
+ $log_location);
ok(1, "$io_method: found information about checksum failure in block 5");
@@ -1462,8 +1473,7 @@ SELECT modify_rel_block('tbl_cs_fail', 3, corrupt_checksum=>true, corrupt_header
qq(
SELECT read_rel_block_ll('tbl_cs_fail', 3, nblocks=>1, zero_on_error=>false);),
qr/^$/,
- qr/^psql:<stdin>:\d+: ERROR: invalid page in block 3 of relation/
- );
+ qr/^psql:<stdin>:\d+: ERROR: invalid page in block 3 of relation/);
psql_like(
$io_method,
diff --git a/src/test/modules/test_aio/test_aio.c b/src/test/modules/test_aio/test_aio.c
index 5cdfb89210b..c55cf6c0aac 100644
--- a/src/test/modules/test_aio/test_aio.c
+++ b/src/test/modules/test_aio/test_aio.c
@@ -42,9 +42,9 @@ typedef struct InjIoErrorState
bool short_read_result_set;
int short_read_result;
-} InjIoErrorState;
+} InjIoErrorState;
-static InjIoErrorState * inj_io_error_state;
+static InjIoErrorState *inj_io_error_state;
/* Shared memory init callbacks */
static shmem_request_hook_type prev_shmem_request_hook = NULL;
diff --git a/src/test/modules/test_dsm_registry/expected/test_dsm_registry.out b/src/test/modules/test_dsm_registry/expected/test_dsm_registry.out
index 8ffbd343a05..8ded82e59d6 100644
--- a/src/test/modules/test_dsm_registry/expected/test_dsm_registry.out
+++ b/src/test/modules/test_dsm_registry/expected/test_dsm_registry.out
@@ -5,6 +5,12 @@ SELECT set_val_in_shmem(1236);
(1 row)
+SELECT set_val_in_hash('test', '1414');
+ set_val_in_hash
+-----------------
+
+(1 row)
+
\c
SELECT get_val_in_shmem();
get_val_in_shmem
@@ -12,3 +18,9 @@ SELECT get_val_in_shmem();
1236
(1 row)
+SELECT get_val_in_hash('test');
+ get_val_in_hash
+-----------------
+ 1414
+(1 row)
+
diff --git a/src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql b/src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql
index b3351be0a16..c2e25cddaae 100644
--- a/src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql
+++ b/src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql
@@ -1,4 +1,6 @@
CREATE EXTENSION test_dsm_registry;
SELECT set_val_in_shmem(1236);
+SELECT set_val_in_hash('test', '1414');
\c
SELECT get_val_in_shmem();
+SELECT get_val_in_hash('test');
diff --git a/src/test/modules/test_dsm_registry/test_dsm_registry--1.0.sql b/src/test/modules/test_dsm_registry/test_dsm_registry--1.0.sql
index 8c55b0919b1..5da45155be9 100644
--- a/src/test/modules/test_dsm_registry/test_dsm_registry--1.0.sql
+++ b/src/test/modules/test_dsm_registry/test_dsm_registry--1.0.sql
@@ -8,3 +8,9 @@ CREATE FUNCTION set_val_in_shmem(val INT) RETURNS VOID
CREATE FUNCTION get_val_in_shmem() RETURNS INT
AS 'MODULE_PATHNAME' LANGUAGE C;
+
+CREATE FUNCTION set_val_in_hash(key TEXT, val TEXT) RETURNS VOID
+ AS 'MODULE_PATHNAME' LANGUAGE C;
+
+CREATE FUNCTION get_val_in_hash(key TEXT) RETURNS TEXT
+ AS 'MODULE_PATHNAME' LANGUAGE C;
diff --git a/src/test/modules/test_dsm_registry/test_dsm_registry.c b/src/test/modules/test_dsm_registry/test_dsm_registry.c
index 462a80f8790..141c8ed1b34 100644
--- a/src/test/modules/test_dsm_registry/test_dsm_registry.c
+++ b/src/test/modules/test_dsm_registry/test_dsm_registry.c
@@ -15,6 +15,7 @@
#include "fmgr.h"
#include "storage/dsm_registry.h"
#include "storage/lwlock.h"
+#include "utils/builtins.h"
PG_MODULE_MAGIC;
@@ -24,15 +25,31 @@ typedef struct TestDSMRegistryStruct
LWLock lck;
} TestDSMRegistryStruct;
-static TestDSMRegistryStruct *tdr_state;
+typedef struct TestDSMRegistryHashEntry
+{
+ char key[64];
+ dsa_pointer val;
+} TestDSMRegistryHashEntry;
+
+static TestDSMRegistryStruct *tdr_dsm;
+static dsa_area *tdr_dsa;
+static dshash_table *tdr_hash;
+
+static const dshash_parameters dsh_params = {
+ offsetof(TestDSMRegistryHashEntry, val),
+ sizeof(TestDSMRegistryHashEntry),
+ dshash_strcmp,
+ dshash_strhash,
+ dshash_strcpy
+};
static void
-tdr_init_shmem(void *ptr)
+init_tdr_dsm(void *ptr)
{
- TestDSMRegistryStruct *state = (TestDSMRegistryStruct *) ptr;
+ TestDSMRegistryStruct *dsm = (TestDSMRegistryStruct *) ptr;
- LWLockInitialize(&state->lck, LWLockNewTrancheId());
- state->val = 0;
+ LWLockInitialize(&dsm->lck, LWLockNewTrancheId());
+ dsm->val = 0;
}
static void
@@ -40,11 +57,17 @@ tdr_attach_shmem(void)
{
bool found;
- tdr_state = GetNamedDSMSegment("test_dsm_registry",
- sizeof(TestDSMRegistryStruct),
- tdr_init_shmem,
- &found);
- LWLockRegisterTranche(tdr_state->lck.tranche, "test_dsm_registry");
+ tdr_dsm = GetNamedDSMSegment("test_dsm_registry_dsm",
+ sizeof(TestDSMRegistryStruct),
+ init_tdr_dsm,
+ &found);
+ LWLockRegisterTranche(tdr_dsm->lck.tranche, "test_dsm_registry");
+
+ if (tdr_dsa == NULL)
+ tdr_dsa = GetNamedDSA("test_dsm_registry_dsa", &found);
+
+ if (tdr_hash == NULL)
+ tdr_hash = GetNamedDSHash("test_dsm_registry_hash", &dsh_params, &found);
}
PG_FUNCTION_INFO_V1(set_val_in_shmem);
@@ -53,9 +76,9 @@ set_val_in_shmem(PG_FUNCTION_ARGS)
{
tdr_attach_shmem();
- LWLockAcquire(&tdr_state->lck, LW_EXCLUSIVE);
- tdr_state->val = PG_GETARG_UINT32(0);
- LWLockRelease(&tdr_state->lck);
+ LWLockAcquire(&tdr_dsm->lck, LW_EXCLUSIVE);
+ tdr_dsm->val = PG_GETARG_INT32(0);
+ LWLockRelease(&tdr_dsm->lck);
PG_RETURN_VOID();
}
@@ -68,9 +91,57 @@ get_val_in_shmem(PG_FUNCTION_ARGS)
tdr_attach_shmem();
- LWLockAcquire(&tdr_state->lck, LW_SHARED);
- ret = tdr_state->val;
- LWLockRelease(&tdr_state->lck);
+ LWLockAcquire(&tdr_dsm->lck, LW_SHARED);
+ ret = tdr_dsm->val;
+ LWLockRelease(&tdr_dsm->lck);
+
+ PG_RETURN_INT32(ret);
+}
+
+PG_FUNCTION_INFO_V1(set_val_in_hash);
+Datum
+set_val_in_hash(PG_FUNCTION_ARGS)
+{
+ TestDSMRegistryHashEntry *entry;
+ char *key = TextDatumGetCString(PG_GETARG_DATUM(0));
+ char *val = TextDatumGetCString(PG_GETARG_DATUM(1));
+ bool found;
+
+ if (strlen(key) >= offsetof(TestDSMRegistryHashEntry, val))
+ ereport(ERROR,
+ (errmsg("key too long")));
+
+ tdr_attach_shmem();
+
+ entry = dshash_find_or_insert(tdr_hash, key, &found);
+ if (found)
+ dsa_free(tdr_dsa, entry->val);
+
+ entry->val = dsa_allocate(tdr_dsa, strlen(val) + 1);
+ strcpy(dsa_get_address(tdr_dsa, entry->val), val);
+
+ dshash_release_lock(tdr_hash, entry);
+
+ PG_RETURN_VOID();
+}
+
+PG_FUNCTION_INFO_V1(get_val_in_hash);
+Datum
+get_val_in_hash(PG_FUNCTION_ARGS)
+{
+ TestDSMRegistryHashEntry *entry;
+ char *key = TextDatumGetCString(PG_GETARG_DATUM(0));
+ text *val = NULL;
+
+ tdr_attach_shmem();
+
+ entry = dshash_find(tdr_hash, key, false);
+ if (entry == NULL)
+ PG_RETURN_NULL();
+
+ val = cstring_to_text(dsa_get_address(tdr_dsa, entry->val));
+
+ dshash_release_lock(tdr_hash, entry);
- PG_RETURN_UINT32(ret);
+ PG_RETURN_TEXT_P(val);
}
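
A sketch of exercising the new dshash-backed functions from a TAP test, mirroring the SQL script above (each safe_psql call runs in a fresh backend, standing in for the \c reconnect; $node is assumed to be a running cluster):

    $node->safe_psql('postgres', q(CREATE EXTENSION test_dsm_registry));
    $node->safe_psql('postgres', q(SELECT set_val_in_hash('test', '1414')));
    is($node->safe_psql('postgres', q(SELECT get_val_in_hash('test'))),
        '1414', 'dshash entry is visible from a new backend');
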
diff --git a/src/test/modules/test_shm_mq/worker.c b/src/test/modules/test_shm_mq/worker.c
index 96cd304dbbc..c1d321b69a4 100644
--- a/src/test/modules/test_shm_mq/worker.c
+++ b/src/test/modules/test_shm_mq/worker.c
@@ -77,7 +77,7 @@ test_shm_mq_main(Datum main_arg)
* exit, which is fine. If there were a ResourceOwner, it would acquire
* ownership of the mapping, but we have no need for that.
*/
- seg = dsm_attach(DatumGetInt32(main_arg));
+ seg = dsm_attach(DatumGetUInt32(main_arg));
if (seg == NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
diff --git a/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
index 609275e2c26..1725fe2f948 100644
--- a/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
+++ b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
@@ -538,6 +538,7 @@ my @_unused_view_qualifiers = (
{ obj => 'VIEW public.limit_thousand_v_2', qual => 'onek' },
{ obj => 'VIEW public.limit_thousand_v_3', qual => 'onek' },
{ obj => 'VIEW public.limit_thousand_v_4', qual => 'onek' },
+ { obj => 'VIEW public.limit_thousand_v_5', qual => 'onek' },
# Since 14
{ obj => 'MATERIALIZED VIEW public.compressmv', qual => 'cmdata1' });
diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm
index 1c11750ac1d..301766d2ed9 100644
--- a/src/test/perl/PostgreSQL/Test/Cluster.pm
+++ b/src/test/perl/PostgreSQL/Test/Cluster.pm
@@ -684,7 +684,7 @@ sub init
print $conf "\n# Added by PostgreSQL::Test::Cluster.pm\n";
print $conf "fsync = off\n";
print $conf "restart_after_crash = off\n";
- print $conf "log_line_prefix = '%m [%p] %q%a '\n";
+ print $conf "log_line_prefix = '%m %b[%p] %q%a '\n";
print $conf "log_statement = all\n";
print $conf "log_replication_commands = on\n";
print $conf "wal_retrieve_retry_interval = '500ms'\n";
@@ -2199,6 +2199,14 @@ sub psql
$ret = $?;
};
my $exc_save = $@;
+
+ # we need a dummy $stderr from here on, if we didn't collect it
+ if (! defined $stderr)
+ {
+ my $errtxt = "<not collected>";
+ $stderr = \$errtxt;
+ }
+
if ($exc_save)
{
diff --git a/src/test/postmaster/t/002_connection_limits.pl b/src/test/postmaster/t/002_connection_limits.pl
index 325a00efd47..4a7fb16261f 100644
--- a/src/test/postmaster/t/002_connection_limits.pl
+++ b/src/test/postmaster/t/002_connection_limits.pl
@@ -20,7 +20,8 @@ $node->init(
$node->append_conf('postgresql.conf', "max_connections = 6");
$node->append_conf('postgresql.conf', "reserved_connections = 2");
$node->append_conf('postgresql.conf', "superuser_reserved_connections = 1");
-$node->append_conf('postgresql.conf', "log_connections = on");
+$node->append_conf('postgresql.conf',
+ "log_connections = 'receipt,authentication,authorization'");
$node->append_conf('postgresql.conf', "log_min_messages=debug2");
$node->start;
@@ -67,7 +68,8 @@ sub connect_fails_wait
my $log_location = -s $node->logfile;
$node->connect_fails($connstr, $test_name, %params);
- $node->wait_for_log(qr/DEBUG: (00000: )?client backend.*exited with exit code 1/,
+ $node->wait_for_log(
+ qr/DEBUG: (00000: )?client backend.*exited with exit code 1/,
$log_location);
ok(1, "$test_name: client backend process exited");
}
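
connect_fails_wait above uses the log-offset idiom: capture the log size before the action, then scan only output written after that point, so earlier occurrences of the pattern cannot satisfy the wait. The idiom in isolation (sketch; the failure being provoked is elided):

    my $offset = -s $node->logfile;    # end of the log before the action
    # ... provoke the failure here, e.g. a connection beyond the limit ...
    $node->wait_for_log(
        qr/DEBUG: (00000: )?client backend.*exited with exit code 1/,
        $offset);
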
diff --git a/src/test/postmaster/t/003_start_stop.pl b/src/test/postmaster/t/003_start_stop.pl
index 4dc394139d9..58e7ba6cc42 100644
--- a/src/test/postmaster/t/003_start_stop.pl
+++ b/src/test/postmaster/t/003_start_stop.pl
@@ -33,7 +33,8 @@ $node->append_conf('postgresql.conf', "max_connections = 5");
$node->append_conf('postgresql.conf', "max_wal_senders = 0");
$node->append_conf('postgresql.conf', "autovacuum_max_workers = 1");
$node->append_conf('postgresql.conf', "max_worker_processes = 1");
-$node->append_conf('postgresql.conf', "log_connections = on");
+$node->append_conf('postgresql.conf',
+ "log_connections = 'receipt,authentication,authorization'");
$node->append_conf('postgresql.conf', "log_min_messages = debug2");
$node->append_conf('postgresql.conf',
"authentication_timeout = '$authentication_timeout s'");
diff --git a/src/test/recovery/meson.build b/src/test/recovery/meson.build
index cb983766c67..6e78ff1a030 100644
--- a/src/test/recovery/meson.build
+++ b/src/test/recovery/meson.build
@@ -54,6 +54,8 @@ tests += {
't/043_no_contrecord_switch.pl',
't/044_invalidate_inactive_slots.pl',
't/045_archive_restartpoint.pl',
+ 't/047_checkpoint_physical_slot.pl',
+ 't/048_vacuum_horizon_floor.pl'
],
},
}
diff --git a/src/test/recovery/t/003_recovery_targets.pl b/src/test/recovery/t/003_recovery_targets.pl
index 0ae2e982727..f2109efa9b1 100644
--- a/src/test/recovery/t/003_recovery_targets.pl
+++ b/src/test/recovery/t/003_recovery_targets.pl
@@ -187,4 +187,54 @@ ok( $logfile =~
qr/FATAL: .* recovery ended before configured recovery target was reached/,
'recovery end before target reached is a fatal error');
+# Invalid timeline target
+$node_standby = PostgreSQL::Test::Cluster->new('standby_9');
+$node_standby->init_from_backup($node_primary, 'my_backup',
+ has_restoring => 1);
+$node_standby->append_conf('postgresql.conf',
+ "recovery_target_timeline = 'bogus'");
+
+$res = run_log(
+ [
+ 'pg_ctl',
+ '--pgdata' => $node_standby->data_dir,
+ '--log' => $node_standby->logfile,
+ 'start',
+ ]);
+ok(!$res, 'invalid timeline target (bogus value)');
+
+my $log_start = $node_standby->wait_for_log("is not a valid number");
+
+# Timeline target out of min range
+$node_standby->append_conf('postgresql.conf',
+ "recovery_target_timeline = '0'");
+
+$res = run_log(
+ [
+ 'pg_ctl',
+ '--pgdata' => $node_standby->data_dir,
+ '--log' => $node_standby->logfile,
+ 'start',
+ ]);
+ok(!$res, 'invalid timeline target (lower bound check)');
+
+$log_start =
+ $node_standby->wait_for_log("must be between 1 and 4294967295", $log_start);
+
+# Timeline target out of max range
+$node_standby->append_conf('postgresql.conf',
+ "recovery_target_timeline = '4294967296'");
+
+$res = run_log(
+ [
+ 'pg_ctl',
+ '--pgdata' => $node_standby->data_dir,
+ '--log' => $node_standby->logfile,
+ 'start',
+ ]);
+ok(!$res, 'invalid timeline target (upper bound check)');
+
+$log_start =
+ $node_standby->wait_for_log("must be between 1 and 4294967295", $log_start);
+
done_testing();
diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl
index 4e60806563f..debfa635c36 100644
--- a/src/test/recovery/t/013_crash_restart.pl
+++ b/src/test/recovery/t/013_crash_restart.pl
@@ -27,7 +27,7 @@ $node->start();
$node->safe_psql(
'postgres',
q[ALTER SYSTEM SET restart_after_crash = 1;
- ALTER SYSTEM SET log_connections = 1;
+ ALTER SYSTEM SET log_connections = receipt;
SELECT pg_reload_conf();]);
# Run psql, keeping session alive, so we have an alive backend to kill.
diff --git a/src/test/recovery/t/022_crash_temp_files.pl b/src/test/recovery/t/022_crash_temp_files.pl
index 50def031c96..0b68860bd3e 100644
--- a/src/test/recovery/t/022_crash_temp_files.pl
+++ b/src/test/recovery/t/022_crash_temp_files.pl
@@ -26,7 +26,7 @@ $node->start();
$node->safe_psql(
'postgres',
q[ALTER SYSTEM SET remove_temp_files_after_crash = on;
- ALTER SYSTEM SET log_connections = 1;
+ ALTER SYSTEM SET log_connections = receipt;
ALTER SYSTEM SET work_mem = '64kB';
ALTER SYSTEM SET restart_after_crash = on;
SELECT pg_reload_conf();]);
diff --git a/src/test/recovery/t/032_relfilenode_reuse.pl b/src/test/recovery/t/032_relfilenode_reuse.pl
index 492ef115ba4..0c44883cc34 100644
--- a/src/test/recovery/t/032_relfilenode_reuse.pl
+++ b/src/test/recovery/t/032_relfilenode_reuse.pl
@@ -14,7 +14,7 @@ $node_primary->init(allows_streaming => 1);
$node_primary->append_conf(
'postgresql.conf', q[
allow_in_place_tablespaces = true
-log_connections=on
+log_connections=receipt
# to avoid "repairing" corruption
full_page_writes=off
log_min_messages=debug2
diff --git a/src/test/recovery/t/037_invalid_database.pl b/src/test/recovery/t/037_invalid_database.pl
index bdf39397397..dc52c55c7af 100644
--- a/src/test/recovery/t/037_invalid_database.pl
+++ b/src/test/recovery/t/037_invalid_database.pl
@@ -15,7 +15,7 @@ $node->append_conf(
autovacuum = off
max_prepared_transactions=5
log_min_duration_statement=0
-log_connections=on
+log_connections=receipt
log_disconnections=on
));
diff --git a/src/test/recovery/t/040_standby_failover_slots_sync.pl b/src/test/recovery/t/040_standby_failover_slots_sync.pl
index 9c8b49e942d..2c61c51e914 100644
--- a/src/test/recovery/t/040_standby_failover_slots_sync.pl
+++ b/src/test/recovery/t/040_standby_failover_slots_sync.pl
@@ -941,8 +941,7 @@ is( $standby1->safe_psql(
'synced slot retained on the new primary');
# Commit the prepared transaction
-$standby1->safe_psql('postgres',
- "COMMIT PREPARED 'test_twophase_slotsync';");
+$standby1->safe_psql('postgres', "COMMIT PREPARED 'test_twophase_slotsync';");
$standby1->wait_for_catchup('regress_mysub1');
# Confirm that the prepared transaction is replicated to the subscriber
diff --git a/src/test/recovery/t/047_checkpoint_physical_slot.pl b/src/test/recovery/t/047_checkpoint_physical_slot.pl
new file mode 100644
index 00000000000..a1332b5d44c
--- /dev/null
+++ b/src/test/recovery/t/047_checkpoint_physical_slot.pl
@@ -0,0 +1,132 @@
+# Copyright (c) 2025, PostgreSQL Global Development Group
+#
+# This test verifies the case when the physical slot is advanced during
+# checkpoint. The test checks that the physical slot's restart_lsn still refers
+# to an existing WAL segment after an immediate restart.
+#
+use strict;
+use warnings FATAL => 'all';
+
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+
+use Test::More;
+
+if ($ENV{enable_injection_points} ne 'yes')
+{
+ plan skip_all => 'Injection points not supported by this build';
+}
+
+my ($node, $result);
+
+$node = PostgreSQL::Test::Cluster->new('mike');
+$node->init;
+$node->append_conf('postgresql.conf', "wal_level = 'replica'");
+$node->start;
+
+# Check if the extension injection_points is available, as it may be
+# possible that this script is run with installcheck, where the module
+# would not be installed by default.
+if (!$node->check_extension('injection_points'))
+{
+ plan skip_all => 'Extension injection_points not installed';
+}
+
+$node->safe_psql('postgres', q(CREATE EXTENSION injection_points));
+
+# Create a physical replication slot.
+$node->safe_psql('postgres',
+ q{select pg_create_physical_replication_slot('slot_physical', true)});
+
+# Advance slot to the current position, just to have everything "valid".
+$node->safe_psql('postgres',
+ q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Run checkpoint to flush current state to disk and set a baseline.
+$node->safe_psql('postgres', q{checkpoint});
+
+# Advance the WAL by about 20 segments (~320MB at the default segment size).
+$node->advance_wal(20);
+
+# Advance slot to the current position, just to have everything "valid".
+$node->safe_psql('postgres',
+ q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Run another checkpoint to set a new restore LSN.
+$node->safe_psql('postgres', q{checkpoint});
+
+# Advance the WAL by another ~20 segments.
+$node->advance_wal(20);
+
+my $restart_lsn_init = $node->safe_psql('postgres',
+ q{select restart_lsn from pg_replication_slots where slot_name = 'slot_physical'}
+);
+chomp($restart_lsn_init);
+note("restart lsn before checkpoint: $restart_lsn_init");
+
+# Run another checkpoint, this time in the background, and make it wait
+# on the injection point so that the checkpoint stops right before
+# removing old WAL segments.
+note('starting checkpoint');
+
+my $checkpoint = $node->background_psql('postgres');
+$checkpoint->query_safe(
+ q{select injection_points_attach('checkpoint-before-old-wal-removal','wait')}
+);
+$checkpoint->query_until(
+ qr/starting_checkpoint/,
+ q(\echo starting_checkpoint
+checkpoint;
+\q
+));
+
+# Wait until the checkpoint stops right before removing WAL segments.
+note('waiting for injection_point');
+$node->wait_for_event('checkpointer', 'checkpoint-before-old-wal-removal');
+note('injection_point is reached');
+
+# OK, we're in the right situation: time to advance the physical slot, which
+# recalculates the required LSN and then unblock the checkpoint, which
+# removes the WAL still needed by the physical slot.
+$node->safe_psql('postgres',
+ q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Continue the checkpoint.
+$node->safe_psql('postgres',
+ q{select injection_points_wakeup('checkpoint-before-old-wal-removal')});
+
+my $restart_lsn_old = $node->safe_psql('postgres',
+ q{select restart_lsn from pg_replication_slots where slot_name = 'slot_physical'}
+);
+chomp($restart_lsn_old);
+note("restart lsn before stop: $restart_lsn_old");
+
+# Abruptly stop the server (1 second should be enough for the checkpoint
+# to finish; explicitly waiting for it would be better).
+$node->stop('immediate');
+
+$node->start;
+
+# Get the restart_lsn of the slot right after restarting.
+my $restart_lsn = $node->safe_psql('postgres',
+ q{select restart_lsn from pg_replication_slots where slot_name = 'slot_physical'}
+);
+chomp($restart_lsn);
+note("restart lsn: $restart_lsn");
+
+# Get the WAL segment name for the slot's restart_lsn.
+my $restart_lsn_segment = $node->safe_psql('postgres',
+ "SELECT pg_walfile_name('$restart_lsn'::pg_lsn)");
+chomp($restart_lsn_segment);
+
+# Check if the required wal segment exists.
+note("required by slot segment name: $restart_lsn_segment");
+my $datadir = $node->data_dir;
+ok( -f "$datadir/pg_wal/$restart_lsn_segment",
+ "WAL segment $restart_lsn_segment for physical slot's restart_lsn $restart_lsn exists"
+);
+
+done_testing();
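
The coordination idiom in this test generalizes: attach a 'wait' injection point, start the long-running command in a background session, block until the target process reports the point as its wait event, mutate state, then wake it. Distilled as a sketch (names taken from the test above):

    $node->safe_psql('postgres',
        q{SELECT injection_points_attach('checkpoint-before-old-wal-removal', 'wait')});
    my $bg = $node->background_psql('postgres');
    $bg->query_until(qr/started/, qq(\\echo started\ncheckpoint;\n\\q\n));
    $node->wait_for_event('checkpointer', 'checkpoint-before-old-wal-removal');
    # ... the checkpointer is now parked; advance the slot here ...
    $node->safe_psql('postgres',
        q{SELECT injection_points_wakeup('checkpoint-before-old-wal-removal')});
    $bg->quit;
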
diff --git a/src/test/recovery/t/048_vacuum_horizon_floor.pl b/src/test/recovery/t/048_vacuum_horizon_floor.pl
new file mode 100644
index 00000000000..e56fce59d58
--- /dev/null
+++ b/src/test/recovery/t/048_vacuum_horizon_floor.pl
@@ -0,0 +1,288 @@
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use Test::More;
+
+# Test that vacuum prunes away all dead tuples killed before OldestXmin
+#
+# This test creates a table on a primary, updates the table to generate dead
+# tuples for vacuum, and then, during the vacuum, uses the replica to force
+# GlobalVisState->maybe_needed on the primary to move backwards and precede
+# the value of OldestXmin set at the beginning of vacuuming the table.
+
+# Set up nodes
+my $node_primary = PostgreSQL::Test::Cluster->new('primary');
+$node_primary->init(allows_streaming => 'physical');
+
+# io_combine_limit is set to 1 to avoid pinning more than one buffer at a time
+# to ensure test determinism.
+$node_primary->append_conf(
+ 'postgresql.conf', qq[
+hot_standby_feedback = on
+autovacuum = off
+log_min_messages = INFO
+maintenance_work_mem = 64
+io_combine_limit = 1
+]);
+$node_primary->start;
+
+my $node_replica = PostgreSQL::Test::Cluster->new('standby');
+
+$node_primary->backup('my_backup');
+$node_replica->init_from_backup($node_primary, 'my_backup',
+ has_streaming => 1);
+
+$node_replica->start;
+
+my $test_db = "test_db";
+$node_primary->safe_psql('postgres', "CREATE DATABASE $test_db");
+
+# Save the original connection info for later use
+my $orig_conninfo = $node_primary->connstr();
+
+my $table1 = "vac_horizon_floor_table";
+
+# Long-running Primary Session A
+my $psql_primaryA =
+ $node_primary->background_psql($test_db, on_error_stop => 1);
+
+# Long-running Primary Session B
+my $psql_primaryB =
+ $node_primary->background_psql($test_db, on_error_stop => 1);
+
+# Our test relies on two rounds of index vacuuming for reasons elaborated
+# later. To trigger two rounds of index vacuuming, we must fill up the
+# TIDStore with dead items partway through a vacuum of the table. The number
+# of rows is just enough to ensure we exceed maintenance_work_mem on all
+# supported platforms, while keeping test runtime as short as we can.
+my $nrows = 2000;
+
+# Because vacuum's first pass, pruning, is where we use the GlobalVisState to
+# check tuple visibility, GlobalVisState->maybe_needed must move backwards
+# during pruning before checking the visibility for a tuple which would have
+# been considered HEAPTUPLE_DEAD prior to maybe_needed moving backwards but
+# HEAPTUPLE_RECENTLY_DEAD compared to the new, older value of maybe_needed.
+#
+# We must not only force the horizon on the primary to move backwards but also
+# force the vacuuming backend's GlobalVisState to be updated. GlobalVisState
+# is forced to update during index vacuuming.
+#
+# _bt_pendingfsm_finalize() calls GetOldestNonRemovableTransactionId() at the
+# end of a round of index vacuuming, updating the backend's GlobalVisState
+# and, in our case, moving maybe_needed backwards.
+#
+# Then vacuum's first (pruning) pass will continue and pruning will find our
+# later inserted and updated tuple HEAPTUPLE_RECENTLY_DEAD when compared to
+# maybe_needed but HEAPTUPLE_DEAD when compared to OldestXmin.
+#
+# Thus, we must force at least two rounds of index vacuuming to ensure that
+# some tuple visibility checks will happen after a round of index vacuuming.
+# To accomplish this, we set maintenance_work_mem to its minimum value and
+# insert and delete enough rows that we force at least one round of index
+# vacuuming before getting to a dead tuple which was killed after the standby
+# is disconnected.
+$node_primary->safe_psql(
+ $test_db, qq[
+ CREATE TABLE ${table1}(col1 int)
+ WITH (autovacuum_enabled=false, fillfactor=10);
+ INSERT INTO $table1 VALUES(7);
+ INSERT INTO $table1 SELECT generate_series(1, $nrows) % 3;
+ CREATE INDEX on ${table1}(col1);
+ DELETE FROM $table1 WHERE col1 = 0;
+ INSERT INTO $table1 VALUES(7);
+]);
+
+# We will later move the primary forward while the standby is disconnected.
+# For now, however, there is no reason not to wait for the standby to catch
+# up.
+my $primary_lsn = $node_primary->lsn('flush');
+$node_primary->wait_for_catchup($node_replica, 'replay', $primary_lsn);
+
+# Test that the WAL receiver is up and running.
+$node_replica->poll_query_until(
+ $test_db, qq[
+ SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 't');
+
+# Set primary_conninfo to something invalid on the replica and reload the
+# config. Once the config is reloaded, the startup process will force the WAL
+# receiver to restart and it will be unable to reconnect because of the
+# invalid connection information.
+$node_replica->safe_psql(
+ $test_db, qq[
+ ALTER SYSTEM SET primary_conninfo = '';
+ SELECT pg_reload_conf();
+ ]);
+
+# Wait until the WAL receiver has shut down and been unable to start up again.
+$node_replica->poll_query_until(
+ $test_db, qq[
+ SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 'f');
+
+# Now insert and update a tuple which will be visible to the vacuum on the
+# primary but which will have xmax newer than the oldest xmin on the standby
+# that was recently disconnected.
+my $res = $psql_primaryA->query_safe(
+ qq[
+ INSERT INTO $table1 VALUES (99);
+ UPDATE $table1 SET col1 = 100 WHERE col1 = 99;
+ SELECT 'after_update';
+ ]
+);
+
+# Make sure the UPDATE finished
+like($res, qr/^after_update$/m, "UPDATE occurred on primary session A");
+
+# Open a cursor on the primary whose pin will keep VACUUM from getting a
+# cleanup lock on the first page of the relation. We want VACUUM to be able to
+# start, calculate initial values for OldestXmin and GlobalVisState and then
+# be unable to proceed with pruning our dead tuples. This will allow us to
+# reconnect the standby and push the horizon back before we start actual
+# pruning and vacuuming.
+my $primary_cursor1 = "vac_horizon_floor_cursor1";
+
+# The first value inserted into the table was a 7, so FETCH FORWARD should
+# return a 7. That's how we know the cursor has a pin.
+# Disable index scans so the cursor pins heap pages and not index pages.
+$res = $psql_primaryB->query_safe(
+ qq[
+ BEGIN;
+ SET enable_bitmapscan = off;
+ SET enable_indexscan = off;
+ SET enable_indexonlyscan = off;
+ DECLARE $primary_cursor1 CURSOR FOR SELECT * FROM $table1 WHERE col1 = 7;
+ FETCH $primary_cursor1;
+ ]
+);
+
+is($res, 7, qq[Cursor query returned $res. Expected value 7.]);
+
+# Get the PID of the session which will run the VACUUM FREEZE so that we can
+# use it to filter pg_stat_activity later.
+my $vacuum_pid = $psql_primaryA->query_safe("SELECT pg_backend_pid();");
+
+# Now start a VACUUM FREEZE on the primary. It will call vacuum_get_cutoffs()
+# and establish values of OldestXmin and GlobalVisState which are newer than
+# all of our dead tuples. Then it will be unable to get a cleanup lock to
+# start pruning, so it will hang.
+#
+# We use VACUUM FREEZE because it will wait for a cleanup lock instead of
+# skipping the page pinned by the cursor. Note that works because the target
+# tuple's xmax precedes OldestXmin which ensures that lazy_scan_noprune() will
+# return false and we will wait for the cleanup lock.
+#
+# Disable any prefetching, parallelism, or other concurrent I/O by vacuum. The
+# pages of the heap must be processed in order by a single worker to ensure
+# test stability (PARALLEL 0 shouldn't be necessary but guards against the
+# possibility of parallel heap vacuuming).
+$psql_primaryA->{stdin} .= qq[
+ SET maintenance_io_concurrency = 0;
+ VACUUM (VERBOSE, FREEZE, PARALLEL 0) $table1;
+ \\echo VACUUM
+ ];
+
+# Make sure the VACUUM command makes it to the server.
+$psql_primaryA->{run}->pump_nb();
+
+# Make sure that the VACUUM has already called vacuum_get_cutoffs() and is
+# just waiting on the lock to start vacuuming. We don't want the standby to
+# re-establish a connection to the primary and push the horizon back until
+# we've saved initial values in GlobalVisState and calculated OldestXmin.
+$node_primary->poll_query_until(
+ $test_db,
+ qq[
+ SELECT count(*) >= 1 FROM pg_stat_activity
+ WHERE pid = $vacuum_pid
+ AND wait_event = 'BufferPin';
+ ],
+ 't');
+
+# Ensure the WAL receiver is still not active on the replica.
+$node_replica->poll_query_until(
+ $test_db, qq[
+ SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 'f');
+
+# Allow the WAL receiver connection to re-establish.
+$node_replica->safe_psql(
+ $test_db, qq[
+ ALTER SYSTEM SET primary_conninfo = '$orig_conninfo';
+ SELECT pg_reload_conf();
+ ]);
+
+# Ensure the new WAL receiver has connected.
+$node_replica->poll_query_until(
+ $test_db, qq[
+ SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 't');
+
+# Once the WAL sender is shown on the primary, the replica should have
+# connected with the primary and pushed the horizon backward. Primary Session
+# A won't see that until the VACUUM FREEZE proceeds and does its first round
+# of index vacuuming.
+$node_primary->poll_query_until(
+ $test_db, qq[
+ SELECT EXISTS (SELECT * FROM pg_stat_replication);], 't');
+
+# Move the cursor forward to the next 7. We inserted the 7 much later, so
+# advancing the cursor should allow vacuum to proceed with vacuuming most pages
+# of the relation. Because we set maintenance_work_mem sufficiently low, we
+# expect that a round of index vacuuming has happened and that the vacuum is
+# now waiting for the cursor to release its pin on the last page of the
+# relation.
+$res = $psql_primaryB->query_safe("FETCH $primary_cursor1");
+is($res, 7,
+ qq[Cursor query returned $res from second fetch. Expected value 7.]);
+
+# Prevent the test from incorrectly passing by confirming that we did indeed
+# do a pass of index vacuuming.
+$node_primary->poll_query_until(
+ $test_db, qq[
+ SELECT index_vacuum_count > 0
+ FROM pg_stat_progress_vacuum
+ WHERE datname='$test_db' AND relid::regclass = '$table1'::regclass;
+ ], 't');
+
+# Commit the transaction with the open cursor so that the VACUUM can finish.
+$psql_primaryB->query_until(
+ qr/^commit$/m,
+ qq[
+ COMMIT;
+ \\echo commit
+ ]
+);
+
+# VACUUM proceeds with pruning and does a visibility check on each tuple. In
+# older versions of Postgres, pruning found our final dead tuple
+# non-removable (HEAPTUPLE_RECENTLY_DEAD) since its xmax is after the new
+# value of maybe_needed. Then heap_prepare_freeze_tuple() would decide the
+# tuple xmax should be frozen because it precedes OldestXmin. Vacuum would
+# then error out in heap_pre_freeze_checks() with "cannot freeze committed
+# xmax". This was fixed by changing pruning to treat all
+# HEAPTUPLE_RECENTLY_DEAD tuples whose xmaxes precede OldestXmin as
+# HEAPTUPLE_DEAD and to remove them.
+
+# With the fix, VACUUM should finish successfully, incrementing the table
+# vacuum_count.
+$node_primary->poll_query_until(
+ $test_db,
+ qq[
+ SELECT vacuum_count > 0
+ FROM pg_stat_all_tables WHERE relname = '${table1}';
+ ]
+ , 't');
+
+$primary_lsn = $node_primary->lsn('flush');
+
+# Make sure something causes us to flush
+$node_primary->safe_psql($test_db, "INSERT INTO $table1 VALUES (1);");
+
+# Nothing on the replica should cause a recovery conflict, so this should
+# finish successfully.
+$node_primary->wait_for_catchup($node_replica, 'replay', $primary_lsn);
+
+## Shut down psqls
+$psql_primaryA->quit;
+$psql_primaryB->quit;
+
+$node_replica->stop();
+$node_primary->stop();
+
+done_testing();
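
The choreography above is easier to follow stripped of the TAP scaffolding. A minimal sketch of the two primary sessions, using a placeholder table name (the test additionally arranges for the standby to reconnect and push the horizon backward between the two steps):

  -- Session A: pin the last heap page with a cursor (placeholder table name).
  BEGIN;
  DECLARE c CURSOR FOR SELECT * FROM some_table WHERE col1 = 7;
  FETCH c;

  -- Session B: FREEZE waits for the cleanup lock instead of skipping the
  -- pinned page; on unpatched servers this could later fail with
  -- "cannot freeze committed xmax" once the horizon moved backward.
  SET maintenance_io_concurrency = 0;
  VACUUM (VERBOSE, FREEZE, PARALLEL 0) some_table;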
diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out
index 476266e3f4b..750efc042d8 100644
--- a/src/test/regress/expected/alter_table.out
+++ b/src/test/regress/expected/alter_table.out
@@ -4745,6 +4745,13 @@ alter table attbl alter column p1 set data type bigint;
alter table atref alter column c1 set data type bigint;
drop table attbl, atref;
/* End test case for bug #17409 */
+/* Test case for bug #18970 */
+create table attbl(a int);
+create table atref(b attbl check ((b).a is not null));
+alter table attbl alter column a type numeric; -- someday this should work
+ERROR: cannot alter table "attbl" because column "atref.b" uses its row type
+drop table attbl, atref;
+/* End test case for bug #18970 */
-- Test that ALTER TABLE rewrite preserves a clustered index
-- for normal indexes and indexes on constraints.
create table alttype_cluster (a int);
diff --git a/src/test/regress/expected/btree_index.out b/src/test/regress/expected/btree_index.out
index bfb1a286ea4..21dc9b5783a 100644
--- a/src/test/regress/expected/btree_index.out
+++ b/src/test/regress/expected/btree_index.out
@@ -195,54 +195,123 @@ ORDER BY proname DESC, proargtypes DESC, pronamespace DESC LIMIT 1;
(1 row)
--
--- Add coverage for RowCompare quals whose rhs row has a NULL that ends scan
+-- Forwards scan RowCompare qual whose row arg has a NULL that affects our
+-- initial positioning strategy
--
explain (costs off)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
- WHERE proname = 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ WHERE (proname, proargtypes) >= ('abs', NULL) AND proname <= 'abs'
ORDER BY proname, proargtypes, pronamespace;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------
Index Only Scan using pg_proc_proname_args_nsp_index on pg_proc
- Index Cond: ((ROW(proname, proargtypes) < ROW('abs'::name, NULL::oidvector)) AND (proname = 'abs'::name))
+ Index Cond: ((ROW(proname, proargtypes) >= ROW('abs'::name, NULL::oidvector)) AND (proname <= 'abs'::name))
(2 rows)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
- WHERE proname = 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ WHERE (proname, proargtypes) >= ('abs', NULL) AND proname <= 'abs'
ORDER BY proname, proargtypes, pronamespace;
proname | proargtypes | pronamespace
---------+-------------+--------------
(0 rows)
--
--- Add coverage for backwards scan RowCompare quals whose rhs row has a NULL
--- that ends scan
+-- Forwards scan RowCompare quals whose row arg has a NULL that ends scan
--
explain (costs off)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
- WHERE proname = 'abs' AND (proname, proargtypes) > ('abs', NULL)
+ WHERE proname >= 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ORDER BY proname, proargtypes, pronamespace;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------
+ Index Only Scan using pg_proc_proname_args_nsp_index on pg_proc
+ Index Cond: ((proname >= 'abs'::name) AND (ROW(proname, proargtypes) < ROW('abs'::name, NULL::oidvector)))
+(2 rows)
+
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname >= 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ORDER BY proname, proargtypes, pronamespace;
+ proname | proargtypes | pronamespace
+---------+-------------+--------------
+(0 rows)
+
+--
+-- Backwards scan RowCompare qual whose row arg has a NULL that affects our
+-- initial positioning strategy
+--
+explain (costs off)
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname >= 'abs' AND (proname, proargtypes) <= ('abs', NULL)
+ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------
+ Index Only Scan Backward using pg_proc_proname_args_nsp_index on pg_proc
+ Index Cond: ((proname >= 'abs'::name) AND (ROW(proname, proargtypes) <= ROW('abs'::name, NULL::oidvector)))
+(2 rows)
+
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname >= 'abs' AND (proname, proargtypes) <= ('abs', NULL)
+ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
+ proname | proargtypes | pronamespace
+---------+-------------+--------------
+(0 rows)
+
+--
+-- Backwards scan RowCompare qual whose row arg has a NULL that ends scan
+--
+explain (costs off)
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE (proname, proargtypes) > ('abs', NULL) AND proname <= 'abs'
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------
Index Only Scan Backward using pg_proc_proname_args_nsp_index on pg_proc
- Index Cond: ((ROW(proname, proargtypes) > ROW('abs'::name, NULL::oidvector)) AND (proname = 'abs'::name))
+ Index Cond: ((ROW(proname, proargtypes) > ROW('abs'::name, NULL::oidvector)) AND (proname <= 'abs'::name))
(2 rows)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
- WHERE proname = 'abs' AND (proname, proargtypes) > ('abs', NULL)
+ WHERE (proname, proargtypes) > ('abs', NULL) AND proname <= 'abs'
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
proname | proargtypes | pronamespace
---------+-------------+--------------
(0 rows)
+-- Makes B-Tree preprocessing deal with unmarking redundant keys that were
+-- initially marked required (test case relies on current row compare
+-- preprocessing limitations)
+explain (costs off)
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname = 'zzzzzz' AND (proname, proargtypes) > ('abs', NULL)
+ AND pronamespace IN (1, 2, 3) AND proargtypes IN ('26 23', '5077')
+ORDER BY proname, proargtypes, pronamespace;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Index Only Scan using pg_proc_proname_args_nsp_index on pg_proc
+ Index Cond: ((ROW(proname, proargtypes) > ROW('abs'::name, NULL::oidvector)) AND (proname = 'zzzzzz'::name) AND (proargtypes = ANY ('{"26 23",5077}'::oidvector[])) AND (pronamespace = ANY ('{1,2,3}'::oid[])))
+(2 rows)
+
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname = 'zzzzzz' AND (proname, proargtypes) > ('abs', NULL)
+ AND pronamespace IN (1, 2, 3) AND proargtypes IN ('26 23', '5077')
+ORDER BY proname, proargtypes, pronamespace;
+ proname | proargtypes | pronamespace
+---------+-------------+--------------
+(0 rows)
+
--
--- Add coverage for recheck of > key following array advancement on previous
--- (left sibling) page that used a high key whose attribute value corresponding
--- to the > key was -inf (due to being truncated when the high key was created).
+-- Performs a recheck of > key following array advancement on previous (left
+-- sibling) page that used a high key whose attribute value corresponding to
+-- the > key was -inf (due to being truncated when the high key was created).
--
-- XXX This relies on the assumption that tenk1_thous_tenthous has a truncated
-- high key "(183, -inf)" on the first page that we'll scan. The test will only
diff --git a/src/test/regress/expected/constraints.out b/src/test/regress/expected/constraints.out
index ad6aaab7385..ccea883cffd 100644
--- a/src/test/regress/expected/constraints.out
+++ b/src/test/regress/expected/constraints.out
@@ -748,6 +748,11 @@ ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key ENFORCED;
ERROR: cannot alter enforceability of constraint "unique_tbl_i_key" of relation "unique_tbl"
ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT ENFORCED;
ERROR: cannot alter enforceability of constraint "unique_tbl_i_key" of relation "unique_tbl"
+-- can't make an existing constraint NOT VALID
+ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT VALID;
+ERROR: constraints cannot be altered to be NOT VALID
+LINE 1: ...ABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT VALID;
+ ^
DROP TABLE unique_tbl;
--
-- EXCLUDE constraints
@@ -1659,6 +1664,8 @@ EXECUTE get_nnconstraint_info('{constr_parent3, constr_child3}');
constr_parent3 | constr_parent3_a_not_null | t | t | 0
(2 rows)
+COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_parent2 IS 'this constraint is invalid';
+COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_child2 IS 'this constraint is valid';
DEALLOCATE get_nnconstraint_info;
-- end NOT NULL NOT VALID
-- Comments
diff --git a/src/test/regress/expected/copy.out b/src/test/regress/expected/copy.out
index 8d5a06563c4..ac66eb55aee 100644
--- a/src/test/regress/expected/copy.out
+++ b/src/test/regress/expected/copy.out
@@ -81,6 +81,29 @@ copy copytest4 to stdout (header);
c1 colname with tab: \t
1 a
2 b
+-- test multi-line header line feature
+create temp table copytest5 (c1 int);
+copy copytest5 from stdin (format csv, header 2);
+copy copytest5 to stdout (header);
+c1
+1
+2
+truncate copytest5;
+copy copytest5 from stdin (format csv, header 4);
+select count(*) from copytest5;
+ count
+-------
+ 0
+(1 row)
+
+truncate copytest5;
+copy copytest5 from stdin (format csv, header 5);
+select count(*) from copytest5;
+ count
+-------
+ 0
+(1 row)
+
-- test copy from with a partitioned table
create table parted_copytest (
a int,
@@ -224,7 +247,7 @@ alter table header_copytest add column c text;
copy header_copytest to stdout with (header match);
ERROR: cannot use "match" with HEADER in COPY TO
copy header_copytest from stdin with (header wrong_choice);
-ERROR: header requires a Boolean value or "match"
+ERROR: header requires a Boolean value, a non-negative integer, or the string "match"
-- works
copy header_copytest from stdin with (header match);
copy header_copytest (c, a, b) from stdin with (header match);
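
A sketch of the new integer HEADER variant exercised by copytest5 above: on COPY FROM, HEADER n discards the first n input lines, while COPY TO still accepts only a Boolean HEADER, as the error messages show.

  CREATE TEMP TABLE t (c1 int);
  COPY t FROM stdin (FORMAT csv, HEADER 2);  -- skip two header lines
  first header line
  second header line
  1
  2
  \.
  COPY t TO stdout (HEADER 2);  -- ERROR: cannot use multi-line header in COPY TO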
diff --git a/src/test/regress/expected/copy2.out b/src/test/regress/expected/copy2.out
index 64ea33aeae8..caa3c44f0d0 100644
--- a/src/test/regress/expected/copy2.out
+++ b/src/test/regress/expected/copy2.out
@@ -132,6 +132,12 @@ COPY x from stdin with (reject_limit 1);
ERROR: COPY REJECT_LIMIT requires ON_ERROR to be set to IGNORE
COPY x from stdin with (on_error ignore, reject_limit 0);
ERROR: REJECT_LIMIT (0) must be greater than zero
+COPY x from stdin with (header -1);
+ERROR: a negative integer value cannot be specified for header
+COPY x from stdin with (header 2.5);
+ERROR: header requires a Boolean value, a non-negative integer, or the string "match"
+COPY x to stdout with (header 2);
+ERROR: cannot use multi-line header in COPY TO
-- too many columns in column list: should fail
COPY x (a, b, c, d, e, d, c) from stdin;
ERROR: column "d" specified more than once
diff --git a/src/test/regress/expected/create_table_like.out b/src/test/regress/expected/create_table_like.out
index bf34289e984..29a779c2e90 100644
--- a/src/test/regress/expected/create_table_like.out
+++ b/src/test/regress/expected/create_table_like.out
@@ -332,9 +332,10 @@ COMMENT ON CONSTRAINT ctlt1_a_check ON ctlt1 IS 't1_a_check';
COMMENT ON INDEX ctlt1_pkey IS 'index pkey';
COMMENT ON INDEX ctlt1_b_key IS 'index b_key';
ALTER TABLE ctlt1 ALTER COLUMN a SET STORAGE MAIN;
-CREATE TABLE ctlt2 (c text);
+CREATE TABLE ctlt2 (c text NOT NULL);
ALTER TABLE ctlt2 ALTER COLUMN c SET STORAGE EXTERNAL;
COMMENT ON COLUMN ctlt2.c IS 'C';
+COMMENT ON CONSTRAINT ctlt2_c_not_null ON ctlt2 IS 't2_c_not_null';
CREATE TABLE ctlt3 (a text CHECK (length(a) < 5), c text CHECK (length(c) < 7));
ALTER TABLE ctlt3 ALTER COLUMN c SET STORAGE EXTERNAL;
ALTER TABLE ctlt3 ALTER COLUMN a SET STORAGE MAIN;
@@ -351,9 +352,10 @@ CREATE TABLE ctlt12_storage (LIKE ctlt1 INCLUDING STORAGE, LIKE ctlt2 INCLUDING
--------+------+-----------+----------+---------+----------+--------------+-------------
a | text | | not null | | main | |
b | text | | | | extended | |
- c | text | | | | external | |
+ c | text | | not null | | external | |
Not-null constraints:
"ctlt1_a_not_null" NOT NULL "a"
+ "ctlt2_c_not_null" NOT NULL "c"
CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDING COMMENTS);
\d+ ctlt12_comments
@@ -362,9 +364,16 @@ CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDIN
--------+------+-----------+----------+---------+----------+--------------+-------------
a | text | | not null | | extended | | A
b | text | | | | extended | | B
- c | text | | | | extended | | C
+ c | text | | not null | | extended | | C
Not-null constraints:
"ctlt1_a_not_null" NOT NULL "a"
+ "ctlt2_c_not_null" NOT NULL "c"
+
+SELECT conname, description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt12_comments'::regclass;
+ conname | description
+------------------+---------------
+ ctlt2_c_not_null | t2_c_not_null
+(1 row)
CREATE TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (ctlt1);
NOTICE: merging column "a" with inherited definition
@@ -529,7 +538,9 @@ NOTICE: drop cascades to table inhe
-- LIKE must respect NO INHERIT property of constraints
CREATE TABLE noinh_con_copy (a int CHECK (a > 0) NO INHERIT, b int not null,
c int not null no inherit);
-CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS);
+COMMENT ON CONSTRAINT noinh_con_copy_b_not_null ON noinh_con_copy IS 'not null b';
+COMMENT ON CONSTRAINT noinh_con_copy_c_not_null ON noinh_con_copy IS 'not null c no inherit';
+CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS INCLUDING COMMENTS);
\d+ noinh_con_copy1
Table "public.noinh_con_copy1"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
@@ -543,6 +554,17 @@ Not-null constraints:
"noinh_con_copy_b_not_null" NOT NULL "b"
"noinh_con_copy_c_not_null" NOT NULL "c" NO INHERIT
+SELECT conname, description
+FROM pg_description, pg_constraint c
+WHERE classoid = 'pg_constraint'::regclass
+AND objoid = c.oid AND c.conrelid = 'noinh_con_copy1'::regclass
+ORDER BY conname COLLATE "C";
+ conname | description
+---------------------------+-----------------------
+ noinh_con_copy_b_not_null | not null b
+ noinh_con_copy_c_not_null | not null c no inherit
+(2 rows)
+
-- fail, as partitioned tables don't allow NO INHERIT constraints
CREATE TABLE noinh_con_copy1_parted (LIKE noinh_con_copy INCLUDING ALL)
PARTITION BY LIST (a);
diff --git a/src/test/regress/expected/domain.out b/src/test/regress/expected/domain.out
index ba6f05eeb7d..b5ea707df31 100644
--- a/src/test/regress/expected/domain.out
+++ b/src/test/regress/expected/domain.out
@@ -1019,6 +1019,11 @@ insert into domain_test values (1, 2);
-- should fail
alter table domain_test add column c str_domain;
ERROR: domain str_domain does not allow null values
+-- disallow duplicated not-null constraints
+create domain int_domain1 as int constraint nn1 not null constraint nn2 not null;
+ERROR: redundant NOT NULL constraint definition
+LINE 1: ...domain int_domain1 as int constraint nn1 not null constraint...
+ ^
create domain str_domain2 as text check (value <> 'foo') default 'foo';
-- should fail
alter table domain_test add column d str_domain2;
diff --git a/src/test/regress/expected/foreign_key.out b/src/test/regress/expected/foreign_key.out
index 4f3f280a439..f9bd252444f 100644
--- a/src/test/regress/expected/foreign_key.out
+++ b/src/test/regress/expected/foreign_key.out
@@ -1359,7 +1359,7 @@ LINE 1: ...e ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE INITIALLY ...
ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NO INHERIT;
ERROR: constraint "fktable_fk_fkey" of relation "fktable" is not a not-null constraint
ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT VALID;
-ERROR: FOREIGN KEY constraints cannot be marked NOT VALID
+ERROR: constraints cannot be altered to be NOT VALID
LINE 1: ...ER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT VALID;
^
ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey ENFORCED NOT ENFORCED;
@@ -1895,29 +1895,76 @@ WHERE conrelid::regclass::text like 'fk_partitioned_fk%' ORDER BY oid::regclass:
(5 rows)
DROP TABLE fk_partitioned_fk, fk_notpartitioned_pk;
--- NOT VALID foreign key on a non-partitioned table referencing a partitioned table
+-- NOT VALID and NOT ENFORCED foreign key on a non-partitioned table
+-- referencing a partitioned table
CREATE TABLE fk_partitioned_pk (a int, b int, PRIMARY KEY (a, b)) PARTITION BY RANGE (a, b);
CREATE TABLE fk_partitioned_pk_1 PARTITION OF fk_partitioned_pk FOR VALUES FROM (0,0) TO (1000,1000);
+CREATE TABLE fk_partitioned_pk_2 PARTITION OF fk_partitioned_pk FOR VALUES FROM (1000,1000) TO (2000,2000);
CREATE TABLE fk_notpartitioned_fk (b int, a int);
-ALTER TABLE fk_notpartitioned_fk ADD FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
--- Constraint will be invalid.
-SELECT conname, convalidated FROM pg_constraint
+INSERT INTO fk_partitioned_pk VALUES(100,100), (1000,1000);
+INSERT INTO fk_notpartitioned_fk VALUES(100,100), (1000,1000);
+ALTER TABLE fk_notpartitioned_fk ADD CONSTRAINT fk_notpartitioned_fk_a_b_fkey
+ FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
+ALTER TABLE fk_notpartitioned_fk ADD CONSTRAINT fk_notpartitioned_fk_a_b_fkey2
+ FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT ENFORCED;
+-- All constraints will be invalid, and _fkey2 constraints will not be enforced.
+SELECT conname, conenforced, convalidated FROM pg_constraint
WHERE conrelid = 'fk_notpartitioned_fk'::regclass ORDER BY oid::regclass::text;
- conname | convalidated
----------------------------------+--------------
- fk_notpartitioned_fk_a_b_fkey | f
- fk_notpartitioned_fk_a_b_fkey_1 | f
-(2 rows)
+ conname | conenforced | convalidated
+----------------------------------+-------------+--------------
+ fk_notpartitioned_fk_a_b_fkey | t | f
+ fk_notpartitioned_fk_a_b_fkey_1 | t | f
+ fk_notpartitioned_fk_a_b_fkey_2 | t | f
+ fk_notpartitioned_fk_a_b_fkey2 | f | f
+ fk_notpartitioned_fk_a_b_fkey2_1 | f | f
+ fk_notpartitioned_fk_a_b_fkey2_2 | f | f
+(6 rows)
ALTER TABLE fk_notpartitioned_fk VALIDATE CONSTRAINT fk_notpartitioned_fk_a_b_fkey;
--- All constraints are now valid.
-SELECT conname, convalidated FROM pg_constraint
+ALTER TABLE fk_notpartitioned_fk ALTER CONSTRAINT fk_notpartitioned_fk_a_b_fkey2 ENFORCED;
+-- All constraints are now valid and enforced.
+SELECT conname, conenforced, convalidated FROM pg_constraint
WHERE conrelid = 'fk_notpartitioned_fk'::regclass ORDER BY oid::regclass::text;
- conname | convalidated
----------------------------------+--------------
- fk_notpartitioned_fk_a_b_fkey | t
- fk_notpartitioned_fk_a_b_fkey_1 | t
-(2 rows)
+ conname | conenforced | convalidated
+----------------------------------+-------------+--------------
+ fk_notpartitioned_fk_a_b_fkey | t | t
+ fk_notpartitioned_fk_a_b_fkey_1 | t | t
+ fk_notpartitioned_fk_a_b_fkey_2 | t | t
+ fk_notpartitioned_fk_a_b_fkey2 | t | t
+ fk_notpartitioned_fk_a_b_fkey2_1 | t | t
+ fk_notpartitioned_fk_a_b_fkey2_2 | t | t
+(6 rows)
+
+-- test a self-referential FK
+ALTER TABLE fk_partitioned_pk ADD CONSTRAINT selffk FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
+CREATE TABLE fk_partitioned_pk_3 PARTITION OF fk_partitioned_pk FOR VALUES FROM (2000,2000) TO (3000,3000)
+ PARTITION BY RANGE (a);
+CREATE TABLE fk_partitioned_pk_3_1 PARTITION OF fk_partitioned_pk_3 FOR VALUES FROM (2000) TO (2100);
+SELECT conname, conenforced, convalidated FROM pg_constraint
+WHERE conrelid = 'fk_partitioned_pk'::regclass AND contype = 'f'
+ORDER BY oid::regclass::text;
+ conname | conenforced | convalidated
+------------+-------------+--------------
+ selffk | t | f
+ selffk_1 | t | f
+ selffk_2 | t | f
+ selffk_3 | t | f
+ selffk_3_1 | t | f
+(5 rows)
+
+ALTER TABLE fk_partitioned_pk_2 VALIDATE CONSTRAINT selffk;
+ALTER TABLE fk_partitioned_pk VALIDATE CONSTRAINT selffk;
+SELECT conname, conenforced, convalidated FROM pg_constraint
+WHERE conrelid = 'fk_partitioned_pk'::regclass AND contype = 'f'
+ORDER BY oid::regclass::text;
+ conname | conenforced | convalidated
+------------+-------------+--------------
+ selffk | t | t
+ selffk_1 | t | t
+ selffk_2 | t | t
+ selffk_3 | t | t
+ selffk_3_1 | t | t
+(5 rows)
DROP TABLE fk_notpartitioned_fk, fk_partitioned_pk;
-- Test some other exotic foreign key features: MATCH SIMPLE, ON UPDATE/DELETE
diff --git a/src/test/regress/expected/generated_virtual.out b/src/test/regress/expected/generated_virtual.out
index 6300e7c1d96..df704b5166f 100644
--- a/src/test/regress/expected/generated_virtual.out
+++ b/src/test/regress/expected/generated_virtual.out
@@ -553,15 +553,11 @@ CREATE TABLE gtest4 (
a int,
b double_int GENERATED ALWAYS AS ((a * 2, a * 3)) VIRTUAL
);
-INSERT INTO gtest4 VALUES (1), (6);
-SELECT * FROM gtest4;
- a | b
----+---------
- 1 | (2,3)
- 6 | (12,18)
-(2 rows)
-
-DROP TABLE gtest4;
+ERROR: virtual generated column "b" cannot have a user-defined type
+DETAIL: Virtual generated columns that make use of user-defined types are not yet supported.
+--INSERT INTO gtest4 VALUES (1), (6);
+--SELECT * FROM gtest4;
+--DROP TABLE gtest4;
DROP TYPE double_int;
-- using tableoid is allowed
CREATE TABLE gtest_tableoid (
@@ -604,9 +600,13 @@ INSERT INTO gtest11 VALUES (1, 10), (2, 20);
GRANT SELECT (a, c) ON gtest11 TO regress_user11;
CREATE FUNCTION gf1(a int) RETURNS int AS $$ SELECT a * 3 $$ IMMUTABLE LANGUAGE SQL;
REVOKE ALL ON FUNCTION gf1(int) FROM PUBLIC;
-CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL);
-INSERT INTO gtest12 VALUES (1, 10), (2, 20);
-GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11;
+CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL); -- fails, user-defined function
+ERROR: generation expression uses user-defined function
+LINE 1: ...nt PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VI...
+ ^
+DETAIL: Virtual generated columns that make use of user-defined functions are not yet supported.
+--INSERT INTO gtest12 VALUES (1, 10), (2, 20);
+--GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11;
SET ROLE regress_user11;
SELECT a, b FROM gtest11; -- not allowed
ERROR: permission denied for table gtest11
@@ -619,15 +619,12 @@ SELECT a, c FROM gtest11; -- allowed
SELECT gf1(10); -- not allowed
ERROR: permission denied for function gf1
-INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function)
-SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed
-ERROR: permission denied for function gf1
+--INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function)
+--SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed
RESET ROLE;
-DROP FUNCTION gf1(int); -- fail
-ERROR: cannot drop function gf1(integer) because other objects depend on it
-DETAIL: column c of table gtest12 depends on function gf1(integer)
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE gtest11, gtest12;
+--DROP FUNCTION gf1(int); -- fail
+DROP TABLE gtest11;
+--DROP TABLE gtest12;
DROP FUNCTION gf1(int);
DROP USER regress_user11;
-- check constraints
@@ -637,10 +634,10 @@ INSERT INTO gtest20 (a) VALUES (30); -- violates constraint
ERROR: new row for relation "gtest20" violates check constraint "gtest20_b_check"
DETAIL: Failing row contains (30, virtual).
ALTER TABLE gtest20 ALTER COLUMN b SET EXPRESSION AS (a * 100); -- violates constraint (currently not supported)
-ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables with check constraints
+ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables with check constraints
DETAIL: Column "b" of relation "gtest20" is a virtual generated column.
ALTER TABLE gtest20 ALTER COLUMN b SET EXPRESSION AS (a * 3); -- ok (currently not supported)
-ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables with check constraints
+ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables with check constraints
DETAIL: Column "b" of relation "gtest20" is a virtual generated column.
CREATE TABLE gtest20a (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) VIRTUAL);
INSERT INTO gtest20a (a) VALUES (10);
@@ -800,11 +797,23 @@ CREATE TABLE gtest24r (a int PRIMARY KEY, b gtestdomain1range GENERATED ALWAYS A
ERROR: virtual generated column "b" cannot have a domain type
--INSERT INTO gtest24r (a) VALUES (4); -- ok
--INSERT INTO gtest24r (a) VALUES (6); -- error
+CREATE TABLE gtest24at (a int PRIMARY KEY);
+ALTER TABLE gtest24at ADD COLUMN b gtestdomain1 GENERATED ALWAYS AS (a * 2) VIRTUAL; -- error
+ERROR: virtual generated column "b" cannot have a domain type
+CREATE TABLE gtest24ata (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) VIRTUAL);
+ALTER TABLE gtest24ata ALTER COLUMN b TYPE gtestdomain1; -- error
+ERROR: virtual generated column "b" cannot have a domain type
CREATE DOMAIN gtestdomainnn AS int CHECK (VALUE IS NOT NULL);
CREATE TABLE gtest24nn (a int, b gtestdomainnn GENERATED ALWAYS AS (a * 2) VIRTUAL);
ERROR: virtual generated column "b" cannot have a domain type
--INSERT INTO gtest24nn (a) VALUES (4); -- ok
--INSERT INTO gtest24nn (a) VALUES (NULL); -- error
+-- using user-defined type not yet supported
+CREATE TABLE gtest24xxx (a gtestdomain1, b gtestdomain1, c int GENERATED ALWAYS AS (greatest(a, b)) VIRTUAL); -- error
+ERROR: generation expression uses user-defined type
+LINE 1: ...main1, b gtestdomain1, c int GENERATED ALWAYS AS (greatest(a...
+ ^
+DETAIL: Virtual generated columns that make use of user-defined types are not yet supported.
-- typed tables (currently not supported)
CREATE TYPE gtest_type AS (f1 integer, f2 text, f3 bigint);
CREATE TABLE gtest28 OF gtest_type (f1 WITH OPTIONS GENERATED ALWAYS AS (f2 *2) VIRTUAL);
@@ -1470,7 +1479,8 @@ create table gtest32 (
a int primary key,
b int generated always as (a * 2),
c int generated always as (10 + 10),
- d int generated always as (coalesce(a, 100))
+ d int generated always as (coalesce(a, 100)),
+ e int
);
insert into gtest32 values (1), (2);
analyze gtest32;
@@ -1554,41 +1564,44 @@ select t2.* from gtest32 t1 left join gtest32 t2 on false;
QUERY PLAN
------------------------------------------------------
Nested Loop Left Join
- Output: a, (a * 2), (20), (COALESCE(a, 100))
+ Output: a, (a * 2), (20), (COALESCE(a, 100)), e
Join Filter: false
-> Seq Scan on generated_virtual_tests.gtest32 t1
- Output: t1.a, t1.b, t1.c, t1.d
+ Output: t1.a, t1.b, t1.c, t1.d, t1.e
-> Result
- Output: a, 20, COALESCE(a, 100)
+ Output: a, e, 20, COALESCE(a, 100)
One-Time Filter: false
(8 rows)
select t2.* from gtest32 t1 left join gtest32 t2 on false;
- a | b | c | d
----+---+---+---
- | | |
- | | |
+ a | b | c | d | e
+---+---+---+---+---
+ | | | |
+ | | | |
(2 rows)
explain (verbose, costs off)
-select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20;
+select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20;
QUERY PLAN
-----------------------------------------------------
HashAggregate
- Output: a, ((a * 2)), (20), (COALESCE(a, 100))
+ Output: a, ((a * 2)), (20), (COALESCE(a, 100)), e
Hash Key: t.a
Hash Key: (t.a * 2)
Hash Key: 20
Hash Key: COALESCE(t.a, 100)
+ Hash Key: t.e
Filter: ((20) = 20)
-> Seq Scan on generated_virtual_tests.gtest32 t
- Output: a, (a * 2), 20, COALESCE(a, 100)
-(9 rows)
+ Output: a, (a * 2), 20, COALESCE(a, 100), e
+(10 rows)
-select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20;
- a | b | c | d
----+---+----+---
- | | 20 |
+select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20;
+ a | b | c | d | e
+---+---+----+---+---
+ | | 20 | |
(1 row)
+-- Ensure that virtual generated columns in an ALTER COLUMN TYPE USING expression are expanded
+alter table gtest32 alter column e type bigint using b;
drop table gtest32;
diff --git a/src/test/regress/expected/horology.out b/src/test/regress/expected/horology.out
index b90bfcd794f..5ae93d8e8a5 100644
--- a/src/test/regress/expected/horology.out
+++ b/src/test/regress/expected/horology.out
@@ -467,6 +467,15 @@ SELECT timestamp with time zone 'Y2001M12D27H04MM05S06.789-08';
ERROR: invalid input syntax for type timestamp with time zone: "Y2001M12D27H04MM05S06.789-08"
LINE 1: SELECT timestamp with time zone 'Y2001M12D27H04MM05S06.789-0...
^
+-- More examples we used to accept and should not
+SELECT timestamp with time zone 'J2452271 T X03456-08';
+ERROR: invalid input syntax for type timestamp with time zone: "J2452271 T X03456-08"
+LINE 1: SELECT timestamp with time zone 'J2452271 T X03456-08';
+ ^
+SELECT timestamp with time zone 'J2452271 T X03456.001e6-08';
+ERROR: invalid input syntax for type timestamp with time zone: "J2452271 T X03456.001e6-08"
+LINE 1: SELECT timestamp with time zone 'J2452271 T X03456.001e6-08'...
+ ^
-- conflicting fields should throw errors
SELECT date '1995-08-06 epoch';
ERROR: invalid input syntax for type date: "1995-08-06 epoch"
diff --git a/src/test/regress/expected/inherit.out b/src/test/regress/expected/inherit.out
index f9b0c415cfd..78dead65325 100644
--- a/src/test/regress/expected/inherit.out
+++ b/src/test/regress/expected/inherit.out
@@ -2281,7 +2281,7 @@ Inherits: pp1,
create table cc3 (a2 int not null no inherit) inherits (cc1);
NOTICE: moving and merging column "a2" with inherited definition
DETAIL: User-specified column moved to the position of the inherited column.
-ERROR: cannot define not-null constraint on column "a2" with NO INHERIT
+ERROR: cannot define not-null constraint with NO INHERIT on column "a2"
DETAIL: The column has an inherited not-null constraint.
-- change NO INHERIT status of inherited constraint: no dice, it's inherited
alter table cc2 add not null a2 no inherit;
@@ -2530,7 +2530,7 @@ ERROR: conflicting NO INHERIT declaration for not-null constraint on column "a"
CREATE TABLE inh_nn1 (a int not null);
CREATE TABLE inh_nn2 (a int not null no inherit) INHERITS (inh_nn1);
NOTICE: merging column "a" with inherited definition
-ERROR: cannot define not-null constraint on column "a" with NO INHERIT
+ERROR: cannot define not-null constraint with NO INHERIT on column "a"
DETAIL: The column has an inherited not-null constraint.
CREATE TABLE inh_nn3 (a int not null, b int, not null a no inherit);
ERROR: conflicting NO INHERIT declaration for not-null constraint on column "a"
diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out
index f35a0b18c37..390aabfb34b 100644
--- a/src/test/regress/expected/join.out
+++ b/src/test/regress/expected/join.out
@@ -3946,6 +3946,59 @@ where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
(1 row)
-- variant that isn't quite a star-schema case
+explain (verbose, costs off)
+select ss1.d1 from
+ tenk1 as t1
+ inner join tenk1 as t2
+ on t1.tenthous = t2.ten
+ inner join
+ int8_tbl as i8
+ left join int4_tbl as i4
+ inner join (select 64::information_schema.cardinal_number as d1
+ from tenk1 t3,
+ lateral (select abs(t3.unique1) + random()) ss0(x)
+ where t3.fivethous < 0) as ss1
+ on i4.f1 = ss1.d1
+ on i8.q1 = i4.f1
+ on t1.tenthous = ss1.d1
+where t1.unique1 < i4.f1;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: (64)::information_schema.cardinal_number
+ Join Filter: (t1.tenthous = ((64)::information_schema.cardinal_number)::integer)
+ -> Seq Scan on public.tenk1 t3
+ Output: t3.unique1, t3.unique2, t3.two, t3.four, t3.ten, t3.twenty, t3.hundred, t3.thousand, t3.twothousand, t3.fivethous, t3.tenthous, t3.odd, t3.even, t3.stringu1, t3.stringu2, t3.string4
+ Filter: (t3.fivethous < 0)
+ -> Nested Loop
+ Output: t1.tenthous, t2.ten
+ -> Nested Loop
+ Output: t1.tenthous, t2.ten, i4.f1
+ Join Filter: (t1.unique1 < i4.f1)
+ -> Hash Join
+ Output: t1.tenthous, t1.unique1, t2.ten
+ Hash Cond: (t2.ten = t1.tenthous)
+ -> Seq Scan on public.tenk1 t2
+ Output: t2.unique1, t2.unique2, t2.two, t2.four, t2.ten, t2.twenty, t2.hundred, t2.thousand, t2.twothousand, t2.fivethous, t2.tenthous, t2.odd, t2.even, t2.stringu1, t2.stringu2, t2.string4
+ -> Hash
+ Output: t1.tenthous, t1.unique1
+ -> Nested Loop
+ Output: t1.tenthous, t1.unique1
+ -> Subquery Scan on ss0
+ Output: ss0.x, (64)::information_schema.cardinal_number
+ -> Result
+ Output: ((abs(t3.unique1))::double precision + random())
+ -> Index Scan using tenk1_thous_tenthous on public.tenk1 t1
+ Output: t1.unique1, t1.unique2, t1.two, t1.four, t1.ten, t1.twenty, t1.hundred, t1.thousand, t1.twothousand, t1.fivethous, t1.tenthous, t1.odd, t1.even, t1.stringu1, t1.stringu2, t1.string4
+ Index Cond: (t1.tenthous = (((64)::information_schema.cardinal_number))::integer)
+ -> Seq Scan on public.int4_tbl i4
+ Output: i4.f1
+ Filter: (i4.f1 = ((64)::information_schema.cardinal_number)::integer)
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ Filter: (i8.q1 = ((64)::information_schema.cardinal_number)::integer)
+(33 rows)
+
select ss1.d1 from
tenk1 as t1
inner join tenk1 as t2
@@ -4035,6 +4088,195 @@ select * from
1 | 2 | 2
(1 row)
+-- This example demonstrates the folly of our old "have_dangerous_phv" logic
+begin;
+set local from_collapse_limit to 2;
+explain (verbose, costs off)
+select * from int8_tbl t1
+ left join
+ (select coalesce(t2.q1 + x, 0) from int8_tbl t2,
+ lateral (select t3.q1 as x from int8_tbl t3,
+ lateral (select t2.q1, t3.q1 offset 0) s))
+ on true;
+ QUERY PLAN
+------------------------------------------------------------------
+ Nested Loop Left Join
+ Output: t1.q1, t1.q2, (COALESCE((t2.q1 + t3.q1), '0'::bigint))
+ -> Seq Scan on public.int8_tbl t1
+ Output: t1.q1, t1.q2
+ -> Materialize
+ Output: (COALESCE((t2.q1 + t3.q1), '0'::bigint))
+ -> Nested Loop
+ Output: COALESCE((t2.q1 + t3.q1), '0'::bigint)
+ -> Seq Scan on public.int8_tbl t2
+ Output: t2.q1, t2.q2
+ -> Nested Loop
+ Output: t3.q1
+ -> Seq Scan on public.int8_tbl t3
+ Output: t3.q1, t3.q2
+ -> Result
+ Output: NULL::bigint, NULL::bigint
+(16 rows)
+
+rollback;
+-- ... not that the initial replacement didn't have some bugs too
+begin;
+create temp table t(i int primary key);
+explain (verbose, costs off)
+select * from t t1
+ left join (select 1 as x, * from t t2(i2)) t2ss on t1.i = t2ss.i2
+ left join t t3(i3) on false
+ left join t t4(i4) on t4.i4 > t2ss.x;
+ QUERY PLAN
+----------------------------------------------------------
+ Nested Loop Left Join
+ Output: t1.i, (1), t2.i2, i3, t4.i4
+ -> Nested Loop Left Join
+ Output: t1.i, t2.i2, (1), i3
+ Join Filter: false
+ -> Hash Left Join
+ Output: t1.i, t2.i2, (1)
+ Inner Unique: true
+ Hash Cond: (t1.i = t2.i2)
+ -> Seq Scan on pg_temp.t t1
+ Output: t1.i
+ -> Hash
+ Output: t2.i2, (1)
+ -> Seq Scan on pg_temp.t t2
+ Output: t2.i2, 1
+ -> Result
+ Output: i3
+ One-Time Filter: false
+ -> Memoize
+ Output: t4.i4
+ Cache Key: (1)
+ Cache Mode: binary
+ -> Index Only Scan using t_pkey on pg_temp.t t4
+ Output: t4.i4
+ Index Cond: (t4.i4 > (1))
+(25 rows)
+
+explain (verbose, costs off)
+select * from
+ (select k from
+ (select i, coalesce(i, j) as k from
+ (select i from t union all select 0)
+ join (select 1 as j limit 1) on i = j)
+ right join (select 2 as x) on true
+ join (select 3 as y) on i is not null
+ ),
+ lateral (select k as kl limit 1);
+ QUERY PLAN
+-------------------------------------------------------------------
+ Nested Loop
+ Output: COALESCE(t.i, (1)), ((COALESCE(t.i, (1))))
+ -> Limit
+ Output: 1
+ -> Result
+ Output: 1
+ -> Nested Loop
+ Output: t.i, ((COALESCE(t.i, (1))))
+ -> Result
+ Output: t.i, COALESCE(t.i, (1))
+ -> Append
+ -> Index Only Scan using t_pkey on pg_temp.t
+ Output: t.i
+ Index Cond: (t.i = (1))
+ -> Result
+ Output: 0
+ One-Time Filter: ((1) = 0)
+ -> Limit
+ Output: ((COALESCE(t.i, (1))))
+ -> Result
+ Output: (COALESCE(t.i, (1)))
+(21 rows)
+
+rollback;
+-- PHVs containing SubLinks are quite tricky to get right
+explain (verbose, costs off)
+select *
+from int8_tbl i8
+ inner join
+ (select (select true) as x
+ from int4_tbl i4, lateral (select i4.f1 as y limit 1) ss1
+ where i4.f1 = 0) ss2 on true
+ right join (select false as z) ss3 on true,
+ lateral (select i8.q2 as q2l where x limit 1) ss4
+where i8.q2 = 123;
+ QUERY PLAN
+----------------------------------------------------------------
+ Nested Loop
+ Output: i8.q1, i8.q2, (InitPlan 1).col1, false, (i8.q2)
+ InitPlan 1
+ -> Result
+ Output: true
+ InitPlan 2
+ -> Result
+ Output: true
+ -> Seq Scan on public.int4_tbl i4
+ Output: i4.f1
+ Filter: (i4.f1 = 0)
+ -> Nested Loop
+ Output: i8.q1, i8.q2, (i8.q2)
+ -> Subquery Scan on ss1
+ Output: ss1.y, (InitPlan 1).col1
+ -> Limit
+ Output: NULL::integer
+ -> Result
+ Output: NULL::integer
+ -> Nested Loop
+ Output: i8.q1, i8.q2, (i8.q2)
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ Filter: (i8.q2 = 123)
+ -> Limit
+ Output: (i8.q2)
+ -> Result
+ Output: i8.q2
+ One-Time Filter: ((InitPlan 1).col1)
+(29 rows)
+
+explain (verbose, costs off)
+select *
+from int8_tbl i8
+ inner join
+ (select (select true) as x
+ from int4_tbl i4, lateral (select 1 as y limit 1) ss1
+ where i4.f1 = 0) ss2 on true
+ right join (select false as z) ss3 on true,
+ lateral (select i8.q2 as q2l where x limit 1) ss4
+where i8.q2 = 123;
+ QUERY PLAN
+----------------------------------------------------------------
+ Nested Loop
+ Output: i8.q1, i8.q2, (InitPlan 1).col1, false, (i8.q2)
+ InitPlan 1
+ -> Result
+ Output: true
+ InitPlan 2
+ -> Result
+ Output: true
+ -> Limit
+ Output: NULL::integer
+ -> Result
+ Output: NULL::integer
+ -> Nested Loop
+ Output: i8.q1, i8.q2, (i8.q2)
+ -> Seq Scan on public.int4_tbl i4
+ Output: i4.f1, (InitPlan 1).col1
+ Filter: (i4.f1 = 0)
+ -> Nested Loop
+ Output: i8.q1, i8.q2, (i8.q2)
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ Filter: (i8.q2 = 123)
+ -> Limit
+ Output: (i8.q2)
+ -> Result
+ Output: i8.q2
+ One-Time Filter: ((InitPlan 1).col1)
+(27 rows)
+
-- Test proper handling of appendrel PHVs during useless-RTE removal
explain (costs off)
select * from
diff --git a/src/test/regress/expected/limit.out b/src/test/regress/expected/limit.out
index f4267c002d7..e3bcc680653 100644
--- a/src/test/regress/expected/limit.out
+++ b/src/test/regress/expected/limit.out
@@ -647,7 +647,7 @@ View definition:
WHERE thousand < 995
ORDER BY thousand
OFFSET 10
- FETCH FIRST 5 ROWS WITH TIES;
+ FETCH FIRST (5) ROWS WITH TIES;
CREATE VIEW limit_thousand_v_2 AS SELECT thousand FROM onek WHERE thousand < 995
ORDER BY thousand OFFSET 10 FETCH FIRST 5 ROWS ONLY;
@@ -679,10 +679,10 @@ View definition:
FROM onek
WHERE thousand < 995
ORDER BY thousand
- FETCH FIRST (NULL::integer + 1) ROWS WITH TIES;
+ FETCH FIRST ((NULL::integer + 1)) ROWS WITH TIES;
CREATE VIEW limit_thousand_v_4 AS SELECT thousand FROM onek WHERE thousand < 995
- ORDER BY thousand FETCH FIRST NULL ROWS ONLY;
+ ORDER BY thousand FETCH FIRST (5::bigint) ROWS WITH TIES;
\d+ limit_thousand_v_4
View "public.limit_thousand_v_4"
Column | Type | Collation | Nullable | Default | Storage | Description
@@ -693,6 +693,20 @@ View definition:
FROM onek
WHERE thousand < 995
ORDER BY thousand
+ FETCH FIRST (5::bigint) ROWS WITH TIES;
+
+CREATE VIEW limit_thousand_v_5 AS SELECT thousand FROM onek WHERE thousand < 995
+ ORDER BY thousand FETCH FIRST NULL ROWS ONLY;
+\d+ limit_thousand_v_5
+ View "public.limit_thousand_v_5"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+----------+---------+-----------+----------+---------+---------+-------------
+ thousand | integer | | | | plain |
+View definition:
+ SELECT thousand
+ FROM onek
+ WHERE thousand < 995
+ ORDER BY thousand
LIMIT ALL;
-- leave these views
diff --git a/src/test/regress/expected/matview.out b/src/test/regress/expected/matview.out
index 54939ecc6b0..c56c9fa3a25 100644
--- a/src/test/regress/expected/matview.out
+++ b/src/test/regress/expected/matview.out
@@ -587,7 +587,7 @@ CREATE MATERIALIZED VIEW drop_idx_matview AS
NOTICE: index "mvtest_drop_idx" does not exist, skipping
CREATE UNIQUE INDEX mvtest_drop_idx ON drop_idx_matview (i);
REFRESH MATERIALIZED VIEW CONCURRENTLY drop_idx_matview;
-ERROR: could not find suitable unique index on materialized view
+ERROR: could not find suitable unique index on materialized view "drop_idx_matview"
DROP MATERIALIZED VIEW drop_idx_matview; -- clean up
RESET search_path;
-- make sure that create WITH NO DATA works via SPI
diff --git a/src/test/regress/expected/memoize.out b/src/test/regress/expected/memoize.out
index 38dfaf021c9..150dc1b44cf 100644
--- a/src/test/regress/expected/memoize.out
+++ b/src/test/regress/expected/memoize.out
@@ -25,6 +25,7 @@ begin
ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
ln := regexp_replace(ln, 'loops=\d+', 'loops=N');
ln := regexp_replace(ln, 'Index Searches: \d+', 'Index Searches: N');
+ ln := regexp_replace(ln, 'Memory: \d+kB', 'Memory: NkB');
return next ln;
end loop;
end;
@@ -500,3 +501,62 @@ RESET max_parallel_workers_per_gather;
RESET parallel_tuple_cost;
RESET parallel_setup_cost;
RESET min_parallel_table_scan_size;
+-- Ensure memoize works for ANTI joins
+CREATE TABLE tab_anti (a int, b boolean);
+INSERT INTO tab_anti SELECT i%3, false FROM generate_series(1,100)i;
+ANALYZE tab_anti;
+-- Ensure we get a Memoize plan for ANTI join
+SELECT explain_memoize('
+SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN
+LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2
+ON t1.a+1 = t2.a
+WHERE t2.a IS NULL;', false);
+ explain_memoize
+--------------------------------------------------------------------------------------------
+ Aggregate (actual rows=1.00 loops=N)
+ -> Nested Loop Anti Join (actual rows=33.00 loops=N)
+ -> Seq Scan on tab_anti t1 (actual rows=100.00 loops=N)
+ -> Memoize (actual rows=0.67 loops=N)
+ Cache Key: (t1.a + 1), t1.a
+ Cache Mode: binary
+ Hits: 97 Misses: 3 Evictions: Zero Overflows: 0 Memory Usage: NkB
+ -> Subquery Scan on t2 (actual rows=0.67 loops=N)
+ Filter: ((t1.a + 1) = t2.a)
+ Rows Removed by Filter: 2
+ -> Unique (actual rows=2.67 loops=N)
+ -> Sort (actual rows=67.33 loops=N)
+ Sort Key: t2_1.a
+ Sort Method: quicksort Memory: NkB
+ -> Seq Scan on tab_anti t2_1 (actual rows=100.00 loops=N)
+(15 rows)
+
+-- And check we get the expected results.
+SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN
+LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2
+ON t1.a+1 = t2.a
+WHERE t2.a IS NULL;
+ count
+-------
+ 33
+(1 row)
+
+-- Ensure we do not add memoize node for SEMI join
+EXPLAIN (COSTS OFF)
+SELECT * FROM tab_anti t1 WHERE t1.a IN
+ (SELECT a FROM tab_anti t2 WHERE t2.b IN
+ (SELECT t1.b FROM tab_anti t3 WHERE t2.a > 1 OFFSET 0));
+ QUERY PLAN
+-------------------------------------------------
+ Nested Loop Semi Join
+ -> Seq Scan on tab_anti t1
+ -> Nested Loop Semi Join
+ Join Filter: (t1.a = t2.a)
+ -> Seq Scan on tab_anti t2
+ -> Subquery Scan on "ANY_subquery"
+ Filter: (t2.b = "ANY_subquery".b)
+ -> Result
+ One-Time Filter: (t2.a > 1)
+ -> Seq Scan on tab_anti t3
+(10 rows)
+
+DROP TABLE tab_anti;
diff --git a/src/test/regress/expected/merge.out b/src/test/regress/expected/merge.out
index bcd29668297..cf2219df754 100644
--- a/src/test/regress/expected/merge.out
+++ b/src/test/regress/expected/merge.out
@@ -2702,6 +2702,76 @@ SELECT * FROM new_measurement ORDER BY city_id, logdate;
1 | 01-17-2007 | |
(2 rows)
+-- MERGE into inheritance root table
+DROP TRIGGER insert_measurement_trigger ON measurement;
+ALTER TABLE measurement ADD CONSTRAINT mcheck CHECK (city_id = 0) NO INHERIT;
+EXPLAIN (COSTS OFF)
+MERGE INTO measurement m
+ USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, 25, 100);
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Merge on measurement m
+ Merge on measurement_y2007m01 m_1
+ -> Nested Loop Left Join
+ -> Result
+ -> Seq Scan on measurement_y2007m01 m_1
+ Filter: ((city_id = 1) AND (logdate = '01-17-2007'::date))
+(6 rows)
+
+BEGIN;
+MERGE INTO measurement m
+ USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, 25, 100);
+SELECT * FROM ONLY measurement ORDER BY city_id, logdate;
+ city_id | logdate | peaktemp | unitsales
+---------+------------+----------+-----------
+ 0 | 07-21-2005 | 25 | 35
+ 0 | 01-17-2007 | 25 | 100
+(2 rows)
+
+ROLLBACK;
+ALTER TABLE measurement ENABLE ROW LEVEL SECURITY;
+ALTER TABLE measurement FORCE ROW LEVEL SECURITY;
+CREATE POLICY measurement_p ON measurement USING (peaktemp IS NOT NULL);
+MERGE INTO measurement m
+ USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, NULL, 100); -- should fail
+ERROR: new row violates row-level security policy for table "measurement"
+MERGE INTO measurement m
+ USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, 25, 100); -- ok
+SELECT * FROM ONLY measurement ORDER BY city_id, logdate;
+ city_id | logdate | peaktemp | unitsales
+---------+------------+----------+-----------
+ 0 | 07-21-2005 | 25 | 35
+ 0 | 01-17-2007 | 25 | 100
+(2 rows)
+
+MERGE INTO measurement m
+ USING (VALUES (1, '01-18-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, 25, 200)
+RETURNING merge_action(), m.*;
+ merge_action | city_id | logdate | peaktemp | unitsales
+--------------+---------+------------+----------+-----------
+ INSERT | 0 | 01-18-2007 | 25 | 200
+(1 row)
+
DROP TABLE measurement, new_measurement CASCADE;
NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table measurement_y2006m02
diff --git a/src/test/regress/expected/misc_functions.out b/src/test/regress/expected/misc_functions.out
index cc517ed5e90..c3b2b9d8603 100644
--- a/src/test/regress/expected/misc_functions.out
+++ b/src/test/regress/expected/misc_functions.out
@@ -890,17 +890,17 @@ SELECT pg_column_toast_chunk_id(a) IS NULL,
DROP TABLE test_chunk_id;
DROP FUNCTION explain_mask_costs(text, bool, bool, bool, bool);
--- test stratnum support functions
-SELECT gist_stratnum_common(7);
- gist_stratnum_common
-----------------------
- 3
+-- test stratnum translation support functions
+SELECT gist_translate_cmptype_common(7);
+ gist_translate_cmptype_common
+-------------------------------
+ 3
(1 row)
-SELECT gist_stratnum_common(3);
- gist_stratnum_common
-----------------------
- 18
+SELECT gist_translate_cmptype_common(3);
+ gist_translate_cmptype_common
+-------------------------------
+ 18
(1 row)
-- relpath tests
diff --git a/src/test/regress/expected/numeric.out b/src/test/regress/expected/numeric.out
index 072d76ce131..93e93be5668 100644
--- a/src/test/regress/expected/numeric.out
+++ b/src/test/regress/expected/numeric.out
@@ -1464,9 +1464,21 @@ ERROR: count must be greater than zero
SELECT width_bucket(3.5::float8, 3.0::float8, 3.0::float8, 888);
ERROR: lower bound cannot equal upper bound
SELECT width_bucket('NaN', 3.0, 4.0, 888);
-ERROR: operand, lower bound, and upper bound cannot be NaN
+ width_bucket
+--------------
+ 889
+(1 row)
+
+SELECT width_bucket('NaN'::float8, 3.0::float8, 4.0::float8, 888);
+ width_bucket
+--------------
+ 889
+(1 row)
+
+SELECT width_bucket(0, 'NaN', 4.0, 888);
+ERROR: lower and upper bounds cannot be NaN
SELECT width_bucket(0::float8, 'NaN', 4.0::float8, 888);
-ERROR: operand, lower bound, and upper bound cannot be NaN
+ERROR: lower and upper bounds cannot be NaN
SELECT width_bucket(2.0, 3.0, '-inf', 888);
ERROR: lower and upper bounds must be finite
SELECT width_bucket(0::float8, '-inf', 4.0::float8, 888);
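
The width_bucket() change above: NaN sorts above every ordinary value in PostgreSQL's sort ordering, so a NaN operand now falls into the overflow bucket (count + 1, hence 889) instead of raising an error, while NaN bounds are still rejected:

  SELECT width_bucket('NaN'::numeric, 3.0, 4.0, 888);  -- 889, the overflow bucket
  SELECT width_bucket(0, 'NaN', 4.0, 888);  -- ERROR: lower and upper bounds cannot be NaN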
diff --git a/src/test/regress/expected/partition_prune.out b/src/test/regress/expected/partition_prune.out
index 0bf35260b46..d1966cd7d82 100644
--- a/src/test/regress/expected/partition_prune.out
+++ b/src/test/regress/expected/partition_prune.out
@@ -4553,16 +4553,18 @@ create view part_abc_view as select * from part_abc where b <> 'a' with check op
prepare update_part_abc_view as update part_abc_view set b = $2 where a = $1 returning *;
-- Only the unpruned partition should be shown in the list of relations to be
-- updated
-explain (costs off) execute update_part_abc_view (1, 'd');
- QUERY PLAN
--------------------------------------------------------
- Update on part_abc
- Update on part_abc_1
+explain (verbose, costs off) execute update_part_abc_view (1, 'd');
+ QUERY PLAN
+-----------------------------------------------------------------------------
+ Update on public.part_abc
+ Output: part_abc_1.a, part_abc_1.b, part_abc_1.c
+ Update on public.part_abc_1
-> Append
Subplans Removed: 1
- -> Seq Scan on part_abc_1
- Filter: ((b <> 'a'::text) AND (a = $1))
-(6 rows)
+ -> Seq Scan on public.part_abc_1
+ Output: $2, part_abc_1.tableoid, part_abc_1.ctid
+ Filter: ((part_abc_1.b <> 'a'::text) AND (part_abc_1.a = $1))
+(8 rows)
execute update_part_abc_view (1, 'd');
a | b | c
@@ -4570,28 +4572,31 @@ execute update_part_abc_view (1, 'd');
1 | d | t
(1 row)
-explain (costs off) execute update_part_abc_view (2, 'a');
- QUERY PLAN
--------------------------------------------------------
- Update on part_abc
- Update on part_abc_2 part_abc_1
+explain (verbose, costs off) execute update_part_abc_view (2, 'a');
+ QUERY PLAN
+-----------------------------------------------------------------------------
+ Update on public.part_abc
+ Output: part_abc_1.a, part_abc_1.b, part_abc_1.c
+ Update on public.part_abc_2
-> Append
Subplans Removed: 1
- -> Seq Scan on part_abc_2 part_abc_1
- Filter: ((b <> 'a'::text) AND (a = $1))
-(6 rows)
+ -> Seq Scan on public.part_abc_2
+ Output: $2, part_abc_2.tableoid, part_abc_2.ctid
+ Filter: ((part_abc_2.b <> 'a'::text) AND (part_abc_2.a = $1))
+(8 rows)
execute update_part_abc_view (2, 'a');
ERROR: new row violates check option for view "part_abc_view"
DETAIL: Failing row contains (2, a, t).
-- All pruned.
-explain (costs off) execute update_part_abc_view (3, 'a');
- QUERY PLAN
------------------------------
- Update on part_abc
+explain (verbose, costs off) execute update_part_abc_view (3, 'a');
+ QUERY PLAN
+----------------------------------------------------
+ Update on public.part_abc
+ Output: part_abc_1.a, part_abc_1.b, part_abc_1.c
-> Append
Subplans Removed: 2
-(3 rows)
+(4 rows)
execute update_part_abc_view (3, 'a');
a | b | c
diff --git a/src/test/regress/expected/psql.out b/src/test/regress/expected/psql.out
index cf48ae6d0c2..236eba2540e 100644
--- a/src/test/regress/expected/psql.out
+++ b/src/test/regress/expected/psql.out
@@ -160,12 +160,12 @@ LINE 1: SELECT $1, $2
foo4 | bar4
(1 row)
--- \close (extended query protocol)
-\close
-\close: missing required argument
-\close ''
-\close stmt2
-\close stmt2
+-- \close_prepared (extended query protocol)
+\close_prepared
+\close_prepared: missing required argument
+\close_prepared ''
+\close_prepared stmt2
+\close_prepared stmt2
SELECT name, statement FROM pg_prepared_statements ORDER BY name;
name | statement
-------+----------------
@@ -4666,7 +4666,7 @@ bar 'bar' "bar"
\C arg1
\c arg1 arg2 arg3 arg4
\cd arg1
- \close stmt1
+ \close_prepared stmt1
\conninfo
\copy arg1 arg2 arg3 arg4 arg5 arg6
\copyright
diff --git a/src/test/regress/expected/psql_pipeline.out b/src/test/regress/expected/psql_pipeline.out
index a30dec088b9..a0816fb10b6 100644
--- a/src/test/regress/expected/psql_pipeline.out
+++ b/src/test/regress/expected/psql_pipeline.out
@@ -228,192 +228,6 @@ BEGIN \bind \sendpipeline
INSERT INTO psql_pipeline VALUES ($1) \bind 1 \sendpipeline
COMMIT \bind \sendpipeline
\endpipeline
--- COPY FROM STDIN
--- with \sendpipeline and \bind
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\endpipeline
- ?column?
-----------
- val1
-(1 row)
-
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\endpipeline
- ?column?
-----------
- val1
-(1 row)
-
--- COPY FROM STDIN with \flushrequest + \getresults
--- with \sendpipeline and \bind
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\flushrequest
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-message type 0x5a arrived from server while idle
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\flushrequest
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-message type 0x5a arrived from server while idle
-\endpipeline
--- COPY FROM STDIN with \syncpipeline + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\syncpipeline
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\syncpipeline
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-\endpipeline
--- COPY TO STDOUT
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\endpipeline
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\endpipeline
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
--- COPY TO STDOUT with \flushrequest + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\flushrequest
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\flushrequest
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
-\endpipeline
--- COPY TO STDOUT with \syncpipeline + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\syncpipeline
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\syncpipeline
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
-\endpipeline
-- Use \parse and \bind_named
\startpipeline
SELECT $1 \parse ''
@@ -740,7 +554,7 @@ SELECT COUNT(*) FROM psql_pipeline \bind \sendpipeline
count
-------
- 7
+ 1
(1 row)
-- After an error, pipeline is aborted and requires \syncpipeline to be
@@ -750,7 +564,7 @@ SELECT $1 \bind \sendpipeline
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
\flushrequest
\getresults
ERROR: bind message supplies 0 parameters, but prepared statement "" requires 1
@@ -758,7 +572,7 @@ ERROR: bind message supplies 0 parameters, but prepared statement "" requires 1
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
-- Sync allows pipeline to recover.
\syncpipeline
\getresults
@@ -766,7 +580,7 @@ Pipeline aborted, command did not run
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
\flushrequest
\getresults
?column?
diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out
index 4de96c04f9d..3a2eacd793f 100644
--- a/src/test/regress/expected/publication.out
+++ b/src/test/regress/expected/publication.out
@@ -34,7 +34,8 @@ ERROR: conflicting or redundant options
LINE 1: ...pub_xxx WITH (publish_generated_columns = stored, publish_ge...
^
CREATE PUBLICATION testpub_xxx WITH (publish_generated_columns = foo);
-ERROR: publish_generated_columns requires a "none" or "stored" value
+ERROR: invalid value for publication parameter "publish_generated_columns": "foo"
+DETAIL: Valid values are "none" and "stored".
\dRp
List of publications
Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
@@ -524,16 +525,22 @@ Tables from schemas:
"testpub_rf_schema2"
-- fail - virtual generated column uses user-defined function
+-- (Actually, this already fails at CREATE TABLE rather than at CREATE
+-- PUBLICATION, but let's keep the test in case the former gets
+-- relaxed sometime.)
CREATE TABLE testpub_rf_tbl6 (id int PRIMARY KEY, x int, y int GENERATED ALWAYS AS (x * testpub_rf_func2()) VIRTUAL);
+ERROR: generation expression uses user-defined function
+LINE 1: ...RIMARY KEY, x int, y int GENERATED ALWAYS AS (x * testpub_rf...
+ ^
+DETAIL: Virtual generated columns that make use of user-defined functions are not yet supported.
CREATE PUBLICATION testpub7 FOR TABLE testpub_rf_tbl6 WHERE (y > 100);
-ERROR: invalid publication WHERE expression
-DETAIL: User-defined or built-in mutable functions are not allowed.
+ERROR: relation "testpub_rf_tbl6" does not exist
-- test that SET EXPRESSION is rejected, because it could affect a row filter
SET client_min_messages = 'ERROR';
CREATE TABLE testpub_rf_tbl7 (id int PRIMARY KEY, x int, y int GENERATED ALWAYS AS (x * 111) VIRTUAL);
CREATE PUBLICATION testpub8 FOR TABLE testpub_rf_tbl7 WHERE (y > 100);
ALTER TABLE testpub_rf_tbl7 ALTER COLUMN y SET EXPRESSION AS (x * testpub_rf_func2());
-ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables that are part of a publication
+ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables that are part of a publication
DETAIL: Column "y" of relation "testpub_rf_tbl7" is a virtual generated column.
RESET client_min_messages;
DROP TABLE testpub_rf_tbl1;
@@ -541,7 +548,7 @@ DROP TABLE testpub_rf_tbl2;
DROP TABLE testpub_rf_tbl3;
DROP TABLE testpub_rf_tbl4;
DROP TABLE testpub_rf_tbl5;
-DROP TABLE testpub_rf_tbl6;
+--DROP TABLE testpub_rf_tbl6;
DROP TABLE testpub_rf_schema1.testpub_rf_tbl5;
DROP TABLE testpub_rf_schema2.testpub_rf_tbl6;
DROP SCHEMA testpub_rf_schema1;
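
For context on the reworked parameter, a minimal sketch of the values it now accepts (the publication name is illustrative, not from the tests):

CREATE PUBLICATION pub_demo FOR ALL TABLES
    WITH (publish_generated_columns = stored);  -- 'none' is the other valid value
-- any other value now fails with the "invalid value for publication
-- parameter" error shown in the hunk above
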
diff --git a/src/test/regress/expected/regproc.out b/src/test/regress/expected/regproc.out
index 97b917502ca..84c84aef420 100644
--- a/src/test/regress/expected/regproc.out
+++ b/src/test/regress/expected/regproc.out
@@ -192,6 +192,18 @@ SELECT regnamespace('"pg_catalog"');
pg_catalog
(1 row)
+SELECT regdatabase('template1');
+ regdatabase
+-------------
+ template1
+(1 row)
+
+SELECT regdatabase('"template1"');
+ regdatabase
+-------------
+ template1
+(1 row)
+
SELECT to_regrole('regress_regrole_test');
to_regrole
----------------------
@@ -216,6 +228,132 @@ SELECT to_regnamespace('"pg_catalog"');
pg_catalog
(1 row)
+SELECT to_regdatabase('template1');
+ to_regdatabase
+----------------
+ template1
+(1 row)
+
+SELECT to_regdatabase('"template1"');
+ to_regdatabase
+----------------
+ template1
+(1 row)
+
+-- special "single dash" case
+SELECT regproc('-')::oid;
+ regproc
+---------
+ 0
+(1 row)
+
+SELECT regprocedure('-')::oid;
+ regprocedure
+--------------
+ 0
+(1 row)
+
+SELECT regclass('-')::oid;
+ regclass
+----------
+ 0
+(1 row)
+
+SELECT regcollation('-')::oid;
+ regcollation
+--------------
+ 0
+(1 row)
+
+SELECT regtype('-')::oid;
+ regtype
+---------
+ 0
+(1 row)
+
+SELECT regconfig('-')::oid;
+ regconfig
+-----------
+ 0
+(1 row)
+
+SELECT regdictionary('-')::oid;
+ regdictionary
+---------------
+ 0
+(1 row)
+
+SELECT regrole('-')::oid;
+ regrole
+---------
+ 0
+(1 row)
+
+SELECT regnamespace('-')::oid;
+ regnamespace
+--------------
+ 0
+(1 row)
+
+SELECT regdatabase('-')::oid;
+ regdatabase
+-------------
+ 0
+(1 row)
+
+SELECT to_regproc('-')::oid;
+ to_regproc
+------------
+ 0
+(1 row)
+
+SELECT to_regprocedure('-')::oid;
+ to_regprocedure
+-----------------
+ 0
+(1 row)
+
+SELECT to_regclass('-')::oid;
+ to_regclass
+-------------
+ 0
+(1 row)
+
+SELECT to_regcollation('-')::oid;
+ to_regcollation
+-----------------
+ 0
+(1 row)
+
+SELECT to_regtype('-')::oid;
+ to_regtype
+------------
+ 0
+(1 row)
+
+SELECT to_regrole('-')::oid;
+ to_regrole
+------------
+ 0
+(1 row)
+
+SELECT to_regnamespace('-')::oid;
+ to_regnamespace
+-----------------
+ 0
+(1 row)
+
+SELECT to_regdatabase('-')::oid;
+ to_regdatabase
+----------------
+ 0
+(1 row)
+
+-- constant cannot be used here
+CREATE TABLE regrole_test (rolid OID DEFAULT 'regress_regrole_test'::regrole);
+ERROR: constant of the type regrole cannot be used here
+CREATE TABLE regdatabase_test (datid OID DEFAULT 'template1'::regdatabase);
+ERROR: constant of the type regdatabase cannot be used here
/* If objects don't exist, raise errors. */
DROP ROLE regress_regrole_test;
-- without schemaname
@@ -305,6 +443,18 @@ SELECT regnamespace('foo.bar');
ERROR: invalid name syntax
LINE 1: SELECT regnamespace('foo.bar');
^
+SELECT regdatabase('Nonexistent');
+ERROR: database "nonexistent" does not exist
+LINE 1: SELECT regdatabase('Nonexistent');
+ ^
+SELECT regdatabase('"Nonexistent"');
+ERROR: database "Nonexistent" does not exist
+LINE 1: SELECT regdatabase('"Nonexistent"');
+ ^
+SELECT regdatabase('foo.bar');
+ERROR: invalid name syntax
+LINE 1: SELECT regdatabase('foo.bar');
+ ^
/* If objects don't exist, return NULL with no error. */
-- without schemaname
SELECT to_regoper('||//');
@@ -447,6 +597,24 @@ SELECT to_regnamespace('foo.bar');
(1 row)
+SELECT to_regdatabase('Nonexistent');
+ to_regdatabase
+----------------
+
+(1 row)
+
+SELECT to_regdatabase('"Nonexistent"');
+ to_regdatabase
+----------------
+
+(1 row)
+
+SELECT to_regdatabase('foo.bar');
+ to_regdatabase
+----------------
+
+(1 row)
+
-- Test to_regtypemod
SELECT to_regtypemod('text');
to_regtypemod
@@ -569,6 +737,12 @@ SELECT * FROM pg_input_error_info('no_such_type', 'regtype');
type "no_such_type" does not exist | | | 42704
(1 row)
+SELECT * FROM pg_input_error_info('Nonexistent', 'regdatabase');
+ message | detail | hint | sql_error_code
+---------------------------------------+--------+------+----------------
+ database "nonexistent" does not exist | | | 42704
+(1 row)
+
-- Some cases that should be soft errors, but are not yet
SELECT * FROM pg_input_error_info('incorrect type name syntax', 'regtype');
ERROR: syntax error at or near "type"
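
The new alias type follows the same pattern as regrole and regnamespace; a minimal sketch of the three behaviors exercised above (the missing database name is illustrative):

SELECT 'template1'::regdatabase::oid;   -- resolves the name to the database OID
SELECT to_regdatabase('no_such_db');    -- soft failure: returns NULL
SELECT 'no_such_db'::regdatabase;       -- hard failure: "database ... does not exist"
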
diff --git a/src/test/regress/expected/strings.out b/src/test/regress/expected/strings.out
index 174f0a68331..788844abd20 100644
--- a/src/test/regress/expected/strings.out
+++ b/src/test/regress/expected/strings.out
@@ -614,6 +614,73 @@ SELECT 'abcdefg' SIMILAR TO '_bcd%' ESCAPE NULL AS null;
SELECT 'abcdefg' SIMILAR TO '_bcd#%' ESCAPE '##' AS error;
ERROR: invalid escape string
HINT: Escape string must be empty or one character.
+-- Characters that should be left alone in character classes when a
+-- SIMILAR TO regexp pattern is converted to POSIX style.
+-- Underscore "_"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '_[_[:alpha:]_]_';
+ QUERY PLAN
+------------------------------------------------
+ Seq Scan on text_tbl
+ Filter: (f1 ~ '^(?:.[_[:alpha:]_].)$'::text)
+(2 rows)
+
+-- Percentage "%"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '%[%[:alnum:]%]%';
+ QUERY PLAN
+--------------------------------------------------
+ Seq Scan on text_tbl
+ Filter: (f1 ~ '^(?:.*[%[:alnum:]%].*)$'::text)
+(2 rows)
+
+-- Dot "."
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '.[.[:alnum:].].';
+ QUERY PLAN
+--------------------------------------------------
+ Seq Scan on text_tbl
+ Filter: (f1 ~ '^(?:\.[.[:alnum:].]\.)$'::text)
+(2 rows)
+
+-- Dollar "$"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '$[$[:alnum:]$]$';
+ QUERY PLAN
+--------------------------------------------------
+ Seq Scan on text_tbl
+ Filter: (f1 ~ '^(?:\$[$[:alnum:]$]\$)$'::text)
+(2 rows)
+
+-- Opening parenthesis "("
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '()[([:alnum:](]()';
+ QUERY PLAN
+------------------------------------------------------
+ Seq Scan on text_tbl
+ Filter: (f1 ~ '^(?:(?:)[([:alnum:](](?:))$'::text)
+(2 rows)
+
+-- Caret "^"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '^[^[:alnum:]^[^^][[^^]][\^][[\^]]\^]^';
+ QUERY PLAN
+------------------------------------------------------------------------
+ Seq Scan on text_tbl
+ Filter: (f1 ~ '^(?:\^[^[:alnum:]^[^^][[^^]][\^][[\^]]\^]\^)$'::text)
+(2 rows)
+
+-- Closing square bracket "]" at the beginning of character class
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '[]%][^]%][^%]%';
+ QUERY PLAN
+------------------------------------------------
+ Seq Scan on text_tbl
+ Filter: (f1 ~ '^(?:[]%][^]%][^%].*)$'::text)
+(2 rows)
+
+-- Closing square bracket effective after two carets at the beginning
+-- of character class.
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '[^^]^';
+ QUERY PLAN
+---------------------------------------
+ Seq Scan on text_tbl
+ Filter: (f1 ~ '^(?:[^^]\^)$'::text)
+(2 rows)
+
-- Test backslash escapes in regexp_replace's replacement string
SELECT regexp_replace('1112223333', E'(\\d{3})(\\d{3})(\\d{4})', E'(\\1) \\2-\\3');
regexp_replace
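
The POSIX patterns in the filters above come from the SIMILAR TO conversion; assuming the SQL-callable similar_to_escape() helper, the same conversion can be inspected directly:

SELECT similar_to_escape('_[_[:alpha:]_]_');
-- expected to yield the regex shown in the first plan: ^(?:.[_[:alpha:]_].)$
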
diff --git a/src/test/regress/expected/sysviews.out b/src/test/regress/expected/sysviews.out
index ae17d028ed3..83228cfca29 100644
--- a/src/test/regress/expected/sysviews.out
+++ b/src/test/regress/expected/sysviews.out
@@ -232,22 +232,3 @@ select * from pg_timezone_abbrevs where abbrev = 'LMT';
LMT | @ 7 hours 52 mins 58 secs ago | f
(1 row)
-DO $$
-DECLARE
- bg_writer_pid int;
- r RECORD;
-BEGIN
- SELECT pid from pg_stat_activity where backend_type='background writer'
- INTO bg_writer_pid;
-
- select type, name, ident
- from pg_get_process_memory_contexts(bg_writer_pid, false, 20)
- where path = '{1}' into r;
- RAISE NOTICE '%', r;
- select type, name, ident
- from pg_get_process_memory_contexts(pg_backend_pid(), false, 20)
- where path = '{1}' into r;
- RAISE NOTICE '%', r;
-END $$;
-NOTICE: (AllocSet,TopMemoryContext,)
-NOTICE: (AllocSet,TopMemoryContext,)
diff --git a/src/test/regress/expected/triggers.out b/src/test/regress/expected/triggers.out
index f245d7f1549..872b9100e1a 100644
--- a/src/test/regress/expected/triggers.out
+++ b/src/test/regress/expected/triggers.out
@@ -2280,6 +2280,27 @@ select * from parted;
drop table parted;
drop function parted_trigfunc();
--
+-- Constraint triggers
+--
+create constraint trigger crtr
+ after insert on foo not valid
+ for each row execute procedure foo ();
+ERROR: constraint triggers cannot be marked NOT VALID
+LINE 2: after insert on foo not valid
+ ^
+create constraint trigger crtr
+ after insert on foo no inherit
+ for each row execute procedure foo ();
+ERROR: constraint triggers cannot be marked NO INHERIT
+LINE 2: after insert on foo no inherit
+ ^
+create constraint trigger crtr
+ after insert on foo not enforced
+ for each row execute procedure foo ();
+ERROR: constraint triggers cannot be marked NOT ENFORCED
+LINE 2: after insert on foo not enforced
+ ^
+--
-- Constraint triggers and partitioned tables
create table parted_constr_ancestor (a int, b text)
partition by range (b);
@@ -2294,7 +2315,7 @@ create constraint trigger parted_trig after insert on parted_constr_ancestor
deferrable
for each row execute procedure trigger_notice_ab();
create constraint trigger parted_trig_two after insert on parted_constr
- deferrable initially deferred
+ deferrable initially deferred enforced
for each row when (bark(new.b) AND new.a % 2 = 1)
execute procedure trigger_notice_ab();
-- The immediate constraint is fired immediately; the WHEN clause of the
@@ -3535,8 +3556,8 @@ drop table parent, child;
drop function f();
-- Test who runs deferred trigger functions
-- setup
-create role regress_groot;
-create role regress_outis;
+create role regress_caller;
+create role regress_fn_owner;
create function whoami() returns trigger language plpgsql
as $$
begin
@@ -3544,7 +3565,7 @@ begin
return null;
end;
$$;
-alter function whoami() owner to regress_outis;
+alter function whoami() owner to regress_fn_owner;
create table defer_trig (id integer);
grant insert on defer_trig to public;
create constraint trigger whoami after insert on defer_trig
@@ -3553,23 +3574,23 @@ create constraint trigger whoami after insert on defer_trig
execute function whoami();
-- deferred triggers must run as the user that queued the trigger
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (1);
reset role;
-set role regress_outis;
+set role regress_fn_owner;
insert into defer_trig values (2);
reset role;
commit;
-NOTICE: I am regress_groot
-NOTICE: I am regress_outis
+NOTICE: I am regress_caller
+NOTICE: I am regress_fn_owner
-- security definer functions override the user who queued the trigger
alter function whoami() security definer;
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (3);
reset role;
commit;
-NOTICE: I am regress_outis
+NOTICE: I am regress_fn_owner
alter function whoami() security invoker;
-- make sure the current user is restored after error
create or replace function whoami() returns trigger language plpgsql
@@ -3581,11 +3602,11 @@ begin
end;
$$;
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (4);
reset role;
commit; -- error expected
-NOTICE: I am regress_groot
+NOTICE: I am regress_caller
ERROR: division by zero
CONTEXT: SQL statement "SELECT 1 / 0"
PL/pgSQL function whoami() line 4 at PERFORM
@@ -3598,5 +3619,5 @@ select current_user = session_user;
-- clean up
drop table defer_trig;
drop function whoami();
-drop role regress_outis;
-drop role regress_groot;
+drop role regress_fn_owner;
+drop role regress_caller;
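
In brief, the rule these tests pin down, as a sketch reusing the table and trigger defined above (the role name here is illustrative):

begin;
set role some_caller;           -- hypothetical role with INSERT privilege
insert into defer_trig values (42);
reset role;
commit;  -- the deferred whoami() trigger fires now, running as some_caller;
         -- after "alter function whoami() security definer" it would run as
         -- the function owner instead
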
diff --git a/src/test/regress/expected/type_sanity.out b/src/test/regress/expected/type_sanity.out
index dd0c52ab08b..943e56506bf 100644
--- a/src/test/regress/expected/type_sanity.out
+++ b/src/test/regress/expected/type_sanity.out
@@ -711,6 +711,7 @@ CREATE TABLE tab_core_types AS SELECT
'regtype'::regtype type,
'pg_monitor'::regrole,
'pg_class'::regclass::oid,
+ 'template1'::regdatabase,
'(1,1)'::tid, '2'::xid, '3'::cid,
'10:20:10,14,15'::txid_snapshot,
'10:20:10,14,15'::pg_snapshot,
diff --git a/src/test/regress/expected/without_overlaps.out b/src/test/regress/expected/without_overlaps.out
index ea607bed0a4..f3144bdc39c 100644
--- a/src/test/regress/expected/without_overlaps.out
+++ b/src/test/regress/expected/without_overlaps.out
@@ -1426,7 +1426,7 @@ CREATE TABLE temporal_fk_rng2rng (
CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, valid_at)
REFERENCES temporal_rng (id, valid_at)
);
-ERROR: foreign key must use PERIOD when referencing a primary using WITHOUT OVERLAPS
+ERROR: foreign key must use PERIOD when referencing a primary key using WITHOUT OVERLAPS
-- (parent_id, valid_at) REFERENCES (id, PERIOD valid_at)
-- FOREIGN KEY part should specify PERIOD
CREATE TABLE temporal_fk_rng2rng (
@@ -1900,7 +1900,7 @@ CREATE TABLE temporal_fk_mltrng2mltrng (
CONSTRAINT temporal_fk_mltrng2mltrng_fk FOREIGN KEY (parent_id, valid_at)
REFERENCES temporal_mltrng (id, valid_at)
);
-ERROR: foreign key must use PERIOD when referencing a primary using WITHOUT OVERLAPS
+ERROR: foreign key must use PERIOD when referencing a primary key using WITHOUT OVERLAPS
-- (parent_id, valid_at) REFERENCES (id, PERIOD valid_at)
-- FOREIGN KEY part should specify PERIOD
CREATE TABLE temporal_fk_mltrng2mltrng (
diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql
index 5ce9d1e429f..41cff198e18 100644
--- a/src/test/regress/sql/alter_table.sql
+++ b/src/test/regress/sql/alter_table.sql
@@ -3069,6 +3069,15 @@ drop table attbl, atref;
/* End test case for bug #17409 */
+/* Test case for bug #18970 */
+
+create table attbl(a int);
+create table atref(b attbl check ((b).a is not null));
+alter table attbl alter column a type numeric; -- someday this should work
+drop table attbl, atref;
+
+/* End test case for bug #18970 */
+
-- Test that ALTER TABLE rewrite preserves a clustered index
-- for normal indexes and indexes on constraints.
create table alttype_cluster (a int);
diff --git a/src/test/regress/sql/btree_index.sql b/src/test/regress/sql/btree_index.sql
index 68c61dbc7d1..6aaaa386abc 100644
--- a/src/test/regress/sql/btree_index.sql
+++ b/src/test/regress/sql/btree_index.sql
@@ -143,38 +143,83 @@ SELECT proname, proargtypes, pronamespace
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC LIMIT 1;
--
--- Add coverage for RowCompare quals whose rhs row has a NULL that ends scan
+-- Forwards scan RowCompare qual whose row arg has a NULL that affects our
+-- initial positioning strategy
--
explain (costs off)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
- WHERE proname = 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ WHERE (proname, proargtypes) >= ('abs', NULL) AND proname <= 'abs'
ORDER BY proname, proargtypes, pronamespace;
SELECT proname, proargtypes, pronamespace
FROM pg_proc
- WHERE proname = 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ WHERE (proname, proargtypes) >= ('abs', NULL) AND proname <= 'abs'
ORDER BY proname, proargtypes, pronamespace;
--
--- Add coverage for backwards scan RowCompare quals whose rhs row has a NULL
--- that ends scan
+-- Forwards scan RowCompare quals whose row arg has a NULL that ends scan
--
explain (costs off)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
- WHERE proname = 'abs' AND (proname, proargtypes) > ('abs', NULL)
+ WHERE proname >= 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ORDER BY proname, proargtypes, pronamespace;
+
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname >= 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ORDER BY proname, proargtypes, pronamespace;
+
+--
+-- Backwards scan RowCompare qual whose row arg has a NULL that affects our
+-- initial positioning strategy
+--
+explain (costs off)
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname >= 'abs' AND (proname, proargtypes) <= ('abs', NULL)
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
SELECT proname, proargtypes, pronamespace
FROM pg_proc
- WHERE proname = 'abs' AND (proname, proargtypes) > ('abs', NULL)
+ WHERE proname >= 'abs' AND (proname, proargtypes) <= ('abs', NULL)
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
--
--- Add coverage for recheck of > key following array advancement on previous
--- (left sibling) page that used a high key whose attribute value corresponding
--- to the > key was -inf (due to being truncated when the high key was created).
+-- Backwards scan RowCompare qual whose row arg has a NULL that ends scan
+--
+explain (costs off)
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE (proname, proargtypes) > ('abs', NULL) AND proname <= 'abs'
+ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
+
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE (proname, proargtypes) > ('abs', NULL) AND proname <= 'abs'
+ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
+
+-- Makes B-Tree preprocessing deal with unmarking redundant keys that were
+-- initially marked required (test case relies on current row compare
+-- preprocessing limitations)
+explain (costs off)
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname = 'zzzzzz' AND (proname, proargtypes) > ('abs', NULL)
+ AND pronamespace IN (1, 2, 3) AND proargtypes IN ('26 23', '5077')
+ORDER BY proname, proargtypes, pronamespace;
+
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname = 'zzzzzz' AND (proname, proargtypes) > ('abs', NULL)
+ AND pronamespace IN (1, 2, 3) AND proargtypes IN ('26 23', '5077')
+ORDER BY proname, proargtypes, pronamespace;
+
+--
+-- Performs a recheck of > key following array advancement on previous (left
+-- sibling) page that used a high key whose attribute value corresponding to
+-- the > key was -inf (due to being truncated when the high key was created).
--
-- XXX This relies on the assumption that tenk1_thous_tenthous has a truncated
-- high key "(183, -inf)" on the first page that we'll scan. The test will only
diff --git a/src/test/regress/sql/constraints.sql b/src/test/regress/sql/constraints.sql
index 337baab7ced..7487723ab84 100644
--- a/src/test/regress/sql/constraints.sql
+++ b/src/test/regress/sql/constraints.sql
@@ -537,6 +537,9 @@ CREATE TABLE UNIQUE_NOTEN_TBL(i int UNIQUE NOT ENFORCED);
ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key ENFORCED;
ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT ENFORCED;
+-- can't make an existing constraint NOT VALID
+ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT VALID;
+
DROP TABLE unique_tbl;
--
@@ -997,6 +1000,9 @@ create table constr_parent3 (a int not null);
create table constr_child3 () inherits (constr_parent2, constr_parent3);
EXECUTE get_nnconstraint_info('{constr_parent3, constr_child3}');
+COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_parent2 IS 'this constraint is invalid';
+COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_child2 IS 'this constraint is valid';
+
DEALLOCATE get_nnconstraint_info;
-- end NOT NULL NOT VALID
diff --git a/src/test/regress/sql/copy.sql b/src/test/regress/sql/copy.sql
index f0b88a23db8..a1316c73bac 100644
--- a/src/test/regress/sql/copy.sql
+++ b/src/test/regress/sql/copy.sql
@@ -94,6 +94,36 @@ this is just a line full of junk that would error out if parsed
copy copytest4 to stdout (header);
+-- test multi-line header line feature
+
+create temp table copytest5 (c1 int);
+
+copy copytest5 from stdin (format csv, header 2);
+this is a first header line.
+this is a second header line.
+1
+2
+\.
+copy copytest5 to stdout (header);
+
+truncate copytest5;
+copy copytest5 from stdin (format csv, header 4);
+this is a first header line.
+this is a second header line.
+1
+2
+\.
+select count(*) from copytest5;
+
+truncate copytest5;
+copy copytest5 from stdin (format csv, header 5);
+this is a first header line.
+this is a second header line.
+1
+2
+\.
+select count(*) from copytest5;
+
-- test copy from with a partitioned table
create table parted_copytest (
a int,
diff --git a/src/test/regress/sql/copy2.sql b/src/test/regress/sql/copy2.sql
index 45273557ce0..cef45868db5 100644
--- a/src/test/regress/sql/copy2.sql
+++ b/src/test/regress/sql/copy2.sql
@@ -90,6 +90,9 @@ COPY x to stdout (format BINARY, on_error unsupported);
COPY x from stdin (log_verbosity unsupported);
COPY x from stdin with (reject_limit 1);
COPY x from stdin with (on_error ignore, reject_limit 0);
+COPY x from stdin with (header -1);
+COPY x from stdin with (header 2.5);
+COPY x to stdout with (header 2);
-- too many columns in column list: should fail
COPY x (a, b, c, d, e, d, c) from stdin;
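
Taken together with the copy.sql hunk above, the HEADER rules under test reduce to this sketch (table names as in the tests):

COPY copytest5 FROM STDIN (FORMAT csv, HEADER 2);  -- skip two header lines
COPY x FROM stdin WITH (HEADER -1);                -- fails: negative count
COPY x TO stdout WITH (HEADER 2);                  -- fails, per the
                                                   -- option-failure block above
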
diff --git a/src/test/regress/sql/create_table_like.sql b/src/test/regress/sql/create_table_like.sql
index 6e21722aaeb..bf8702116a7 100644
--- a/src/test/regress/sql/create_table_like.sql
+++ b/src/test/regress/sql/create_table_like.sql
@@ -143,9 +143,10 @@ COMMENT ON INDEX ctlt1_pkey IS 'index pkey';
COMMENT ON INDEX ctlt1_b_key IS 'index b_key';
ALTER TABLE ctlt1 ALTER COLUMN a SET STORAGE MAIN;
-CREATE TABLE ctlt2 (c text);
+CREATE TABLE ctlt2 (c text NOT NULL);
ALTER TABLE ctlt2 ALTER COLUMN c SET STORAGE EXTERNAL;
COMMENT ON COLUMN ctlt2.c IS 'C';
+COMMENT ON CONSTRAINT ctlt2_c_not_null ON ctlt2 IS 't2_c_not_null';
CREATE TABLE ctlt3 (a text CHECK (length(a) < 5), c text CHECK (length(c) < 7));
ALTER TABLE ctlt3 ALTER COLUMN c SET STORAGE EXTERNAL;
@@ -162,6 +163,7 @@ CREATE TABLE ctlt12_storage (LIKE ctlt1 INCLUDING STORAGE, LIKE ctlt2 INCLUDING
\d+ ctlt12_storage
CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDING COMMENTS);
\d+ ctlt12_comments
+SELECT conname, description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt12_comments'::regclass;
CREATE TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (ctlt1);
\d+ ctlt1_inh
SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt1_inh'::regclass;
@@ -197,9 +199,19 @@ DROP TABLE ctlt1, ctlt2, ctlt3, ctlt4, ctlt12_storage, ctlt12_comments, ctlt1_in
-- LIKE must respect NO INHERIT property of constraints
CREATE TABLE noinh_con_copy (a int CHECK (a > 0) NO INHERIT, b int not null,
c int not null no inherit);
-CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS);
+
+COMMENT ON CONSTRAINT noinh_con_copy_b_not_null ON noinh_con_copy IS 'not null b';
+COMMENT ON CONSTRAINT noinh_con_copy_c_not_null ON noinh_con_copy IS 'not null c no inherit';
+
+CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS INCLUDING COMMENTS);
\d+ noinh_con_copy1
+SELECT conname, description
+FROM pg_description, pg_constraint c
+WHERE classoid = 'pg_constraint'::regclass
+AND objoid = c.oid AND c.conrelid = 'noinh_con_copy1'::regclass
+ORDER BY conname COLLATE "C";
+
-- fail, as partitioned tables don't allow NO INHERIT constraints
CREATE TABLE noinh_con_copy1_parted (LIKE noinh_con_copy INCLUDING ALL)
PARTITION BY LIST (a);
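
A sketch of what INCLUDING COMMENTS now copies for not-null constraints (the new table name is illustrative):

CREATE TABLE ctlt2_copy (LIKE ctlt2 INCLUDING CONSTRAINTS INCLUDING COMMENTS);
-- the comment 't2_c_not_null' attached to ctlt2's not-null constraint is
-- carried over to the corresponding constraint on ctlt2_copy
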
diff --git a/src/test/regress/sql/domain.sql b/src/test/regress/sql/domain.sql
index b752a63ab5f..b8f5a639712 100644
--- a/src/test/regress/sql/domain.sql
+++ b/src/test/regress/sql/domain.sql
@@ -602,6 +602,9 @@ insert into domain_test values (1, 2);
-- should fail
alter table domain_test add column c str_domain;
+-- disallow duplicated not-null constraints
+create domain int_domain1 as int constraint nn1 not null constraint nn2 not null;
+
create domain str_domain2 as text check (value <> 'foo') default 'foo';
-- should fail
diff --git a/src/test/regress/sql/foreign_key.sql b/src/test/regress/sql/foreign_key.sql
index 8159e363022..cfcecb4e911 100644
--- a/src/test/regress/sql/foreign_key.sql
+++ b/src/test/regress/sql/foreign_key.sql
@@ -1389,22 +1389,44 @@ WHERE conrelid::regclass::text like 'fk_partitioned_fk%' ORDER BY oid::regclass:
DROP TABLE fk_partitioned_fk, fk_notpartitioned_pk;
--- NOT VALID foreign key on a non-partitioned table referencing a partitioned table
+-- NOT VALID and NOT ENFORCED foreign key on a non-partitioned table
+-- referencing a partitioned table
CREATE TABLE fk_partitioned_pk (a int, b int, PRIMARY KEY (a, b)) PARTITION BY RANGE (a, b);
CREATE TABLE fk_partitioned_pk_1 PARTITION OF fk_partitioned_pk FOR VALUES FROM (0,0) TO (1000,1000);
+CREATE TABLE fk_partitioned_pk_2 PARTITION OF fk_partitioned_pk FOR VALUES FROM (1000,1000) TO (2000,2000);
CREATE TABLE fk_notpartitioned_fk (b int, a int);
-ALTER TABLE fk_notpartitioned_fk ADD FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
-
--- Constraint will be invalid.
-SELECT conname, convalidated FROM pg_constraint
+INSERT INTO fk_partitioned_pk VALUES(100,100), (1000,1000);
+INSERT INTO fk_notpartitioned_fk VALUES(100,100), (1000,1000);
+ALTER TABLE fk_notpartitioned_fk ADD CONSTRAINT fk_notpartitioned_fk_a_b_fkey
+ FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
+ALTER TABLE fk_notpartitioned_fk ADD CONSTRAINT fk_notpartitioned_fk_a_b_fkey2
+ FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT ENFORCED;
+
+-- All constraints will be invalid, and _fkey2 constraints will not be enforced.
+SELECT conname, conenforced, convalidated FROM pg_constraint
WHERE conrelid = 'fk_notpartitioned_fk'::regclass ORDER BY oid::regclass::text;
ALTER TABLE fk_notpartitioned_fk VALIDATE CONSTRAINT fk_notpartitioned_fk_a_b_fkey;
+ALTER TABLE fk_notpartitioned_fk ALTER CONSTRAINT fk_notpartitioned_fk_a_b_fkey2 ENFORCED;
--- All constraints are now valid.
-SELECT conname, convalidated FROM pg_constraint
+-- All constraints are now valid and enforced.
+SELECT conname, conenforced, convalidated FROM pg_constraint
WHERE conrelid = 'fk_notpartitioned_fk'::regclass ORDER BY oid::regclass::text;
+-- test a self-referential FK
+ALTER TABLE fk_partitioned_pk ADD CONSTRAINT selffk FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
+CREATE TABLE fk_partitioned_pk_3 PARTITION OF fk_partitioned_pk FOR VALUES FROM (2000,2000) TO (3000,3000)
+ PARTITION BY RANGE (a);
+CREATE TABLE fk_partitioned_pk_3_1 PARTITION OF fk_partitioned_pk_3 FOR VALUES FROM (2000) TO (2100);
+SELECT conname, conenforced, convalidated FROM pg_constraint
+WHERE conrelid = 'fk_partitioned_pk'::regclass AND contype = 'f'
+ORDER BY oid::regclass::text;
+ALTER TABLE fk_partitioned_pk_2 VALIDATE CONSTRAINT selffk;
+ALTER TABLE fk_partitioned_pk VALIDATE CONSTRAINT selffk;
+SELECT conname, conenforced, convalidated FROM pg_constraint
+WHERE conrelid = 'fk_partitioned_pk'::regclass AND contype = 'f'
+ORDER BY oid::regclass::text;
+
DROP TABLE fk_notpartitioned_fk, fk_partitioned_pk;
-- Test some other exotic foreign key features: MATCH SIMPLE, ON UPDATE/DELETE
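
The NOT ENFORCED lifecycle exercised above, reduced to its two steps (the constraint name is illustrative):

ALTER TABLE fk_notpartitioned_fk ADD CONSTRAINT fk_demo
    FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT ENFORCED;
-- starts with conenforced = false and convalidated = false
ALTER TABLE fk_notpartitioned_fk ALTER CONSTRAINT fk_demo ENFORCED;
-- per the expected output, enforcing also validates the existing rows
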
diff --git a/src/test/regress/sql/generated_virtual.sql b/src/test/regress/sql/generated_virtual.sql
index b4eedeee2fb..6fa986515b9 100644
--- a/src/test/regress/sql/generated_virtual.sql
+++ b/src/test/regress/sql/generated_virtual.sql
@@ -253,10 +253,10 @@ CREATE TABLE gtest4 (
a int,
b double_int GENERATED ALWAYS AS ((a * 2, a * 3)) VIRTUAL
);
-INSERT INTO gtest4 VALUES (1), (6);
-SELECT * FROM gtest4;
+--INSERT INTO gtest4 VALUES (1), (6);
+--SELECT * FROM gtest4;
-DROP TABLE gtest4;
+--DROP TABLE gtest4;
DROP TYPE double_int;
-- using tableoid is allowed
@@ -290,20 +290,21 @@ GRANT SELECT (a, c) ON gtest11 TO regress_user11;
CREATE FUNCTION gf1(a int) RETURNS int AS $$ SELECT a * 3 $$ IMMUTABLE LANGUAGE SQL;
REVOKE ALL ON FUNCTION gf1(int) FROM PUBLIC;
-CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL);
-INSERT INTO gtest12 VALUES (1, 10), (2, 20);
-GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11;
+CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL); -- fails, user-defined function
+--INSERT INTO gtest12 VALUES (1, 10), (2, 20);
+--GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11;
SET ROLE regress_user11;
SELECT a, b FROM gtest11; -- not allowed
SELECT a, c FROM gtest11; -- allowed
SELECT gf1(10); -- not allowed
-INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function)
-SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed
+--INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function)
+--SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed
RESET ROLE;
-DROP FUNCTION gf1(int); -- fail
-DROP TABLE gtest11, gtest12;
+--DROP FUNCTION gf1(int); -- fail
+DROP TABLE gtest11;
+--DROP TABLE gtest12;
DROP FUNCTION gf1(int);
DROP USER regress_user11;
@@ -453,11 +454,19 @@ CREATE TABLE gtest24r (a int PRIMARY KEY, b gtestdomain1range GENERATED ALWAYS A
--INSERT INTO gtest24r (a) VALUES (4); -- ok
--INSERT INTO gtest24r (a) VALUES (6); -- error
+CREATE TABLE gtest24at (a int PRIMARY KEY);
+ALTER TABLE gtest24at ADD COLUMN b gtestdomain1 GENERATED ALWAYS AS (a * 2) VIRTUAL; -- error
+CREATE TABLE gtest24ata (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) VIRTUAL);
+ALTER TABLE gtest24ata ALTER COLUMN b TYPE gtestdomain1; -- error
+
CREATE DOMAIN gtestdomainnn AS int CHECK (VALUE IS NOT NULL);
CREATE TABLE gtest24nn (a int, b gtestdomainnn GENERATED ALWAYS AS (a * 2) VIRTUAL);
--INSERT INTO gtest24nn (a) VALUES (4); -- ok
--INSERT INTO gtest24nn (a) VALUES (NULL); -- error
+-- using user-defined type not yet supported
+CREATE TABLE gtest24xxx (a gtestdomain1, b gtestdomain1, c int GENERATED ALWAYS AS (greatest(a, b)) VIRTUAL); -- error
+
-- typed tables (currently not supported)
CREATE TYPE gtest_type AS (f1 integer, f2 text, f3 bigint);
CREATE TABLE gtest28 OF gtest_type (f1 WITH OPTIONS GENERATED ALWAYS AS (f2 *2) VIRTUAL);
@@ -788,7 +797,8 @@ create table gtest32 (
a int primary key,
b int generated always as (a * 2),
c int generated always as (10 + 10),
- d int generated always as (coalesce(a, 100))
+ d int generated always as (coalesce(a, 100)),
+ e int
);
insert into gtest32 values (1), (2);
@@ -829,7 +839,10 @@ select t2.* from gtest32 t1 left join gtest32 t2 on false;
select t2.* from gtest32 t1 left join gtest32 t2 on false;
explain (verbose, costs off)
-select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20;
-select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20;
+select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20;
+select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20;
+
+-- Ensure that the virtual generated columns in ALTER COLUMN TYPE USING expression are expanded
+alter table gtest32 alter column e type bigint using b;
drop table gtest32;
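
The restriction that forced most of the commenting-out above, shown in isolation (the table name is illustrative; gf1() is the test's user-defined function):

CREATE TABLE gtest_demo (a int, b int GENERATED ALWAYS AS (gf1(a)) VIRTUAL);
-- ERROR:  generation expression uses user-defined function
-- DETAIL:  Virtual generated columns that make use of user-defined functions
--          are not yet supported.
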
diff --git a/src/test/regress/sql/horology.sql b/src/test/regress/sql/horology.sql
index 1310b432773..8978249a5dc 100644
--- a/src/test/regress/sql/horology.sql
+++ b/src/test/regress/sql/horology.sql
@@ -102,6 +102,10 @@ SELECT date 'J J 1520447';
SELECT timestamp with time zone 'Y2001M12D27H04M05S06.789+08';
SELECT timestamp with time zone 'Y2001M12D27H04MM05S06.789-08';
+-- More examples we used to accept and should not
+SELECT timestamp with time zone 'J2452271 T X03456-08';
+SELECT timestamp with time zone 'J2452271 T X03456.001e6-08';
+
-- conflicting fields should throw errors
SELECT date '1995-08-06 epoch';
SELECT date '1995-08-06 infinity';
diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql
index cc5128add4d..f6e7070db65 100644
--- a/src/test/regress/sql/join.sql
+++ b/src/test/regress/sql/join.sql
@@ -1277,6 +1277,23 @@ where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
-- variant that isn't quite a star-schema case
+explain (verbose, costs off)
+select ss1.d1 from
+ tenk1 as t1
+ inner join tenk1 as t2
+ on t1.tenthous = t2.ten
+ inner join
+ int8_tbl as i8
+ left join int4_tbl as i4
+ inner join (select 64::information_schema.cardinal_number as d1
+ from tenk1 t3,
+ lateral (select abs(t3.unique1) + random()) ss0(x)
+ where t3.fivethous < 0) as ss1
+ on i4.f1 = ss1.d1
+ on i8.q1 = i4.f1
+ on t1.tenthous = ss1.d1
+where t1.unique1 < i4.f1;
+
select ss1.d1 from
tenk1 as t1
inner join tenk1 as t2
@@ -1332,6 +1349,64 @@ select * from
(select 1 as x) ss1 left join (select 2 as y) ss2 on (true),
lateral (select ss2.y as z limit 1) ss3;
+-- This example demonstrates the folly of our old "have_dangerous_phv" logic
+begin;
+set local from_collapse_limit to 2;
+explain (verbose, costs off)
+select * from int8_tbl t1
+ left join
+ (select coalesce(t2.q1 + x, 0) from int8_tbl t2,
+ lateral (select t3.q1 as x from int8_tbl t3,
+ lateral (select t2.q1, t3.q1 offset 0) s))
+ on true;
+rollback;
+
+-- ... not that the initial replacement didn't have some bugs too
+begin;
+create temp table t(i int primary key);
+
+explain (verbose, costs off)
+select * from t t1
+ left join (select 1 as x, * from t t2(i2)) t2ss on t1.i = t2ss.i2
+ left join t t3(i3) on false
+ left join t t4(i4) on t4.i4 > t2ss.x;
+
+explain (verbose, costs off)
+select * from
+ (select k from
+ (select i, coalesce(i, j) as k from
+ (select i from t union all select 0)
+ join (select 1 as j limit 1) on i = j)
+ right join (select 2 as x) on true
+ join (select 3 as y) on i is not null
+ ),
+ lateral (select k as kl limit 1);
+
+rollback;
+
+-- PHVs containing SubLinks are quite tricky to get right
+explain (verbose, costs off)
+select *
+from int8_tbl i8
+ inner join
+ (select (select true) as x
+ from int4_tbl i4, lateral (select i4.f1 as y limit 1) ss1
+ where i4.f1 = 0) ss2 on true
+ right join (select false as z) ss3 on true,
+ lateral (select i8.q2 as q2l where x limit 1) ss4
+where i8.q2 = 123;
+
+explain (verbose, costs off)
+select *
+from int8_tbl i8
+ inner join
+ (select (select true) as x
+ from int4_tbl i4, lateral (select 1 as y limit 1) ss1
+ where i4.f1 = 0) ss2 on true
+ right join (select false as z) ss3 on true,
+ lateral (select i8.q2 as q2l where x limit 1) ss4
+where i8.q2 = 123;
+
-- Test proper handling of appendrel PHVs during useless-RTE removal
explain (costs off)
select * from
diff --git a/src/test/regress/sql/limit.sql b/src/test/regress/sql/limit.sql
index 6f0cda98701..603910fe6d1 100644
--- a/src/test/regress/sql/limit.sql
+++ b/src/test/regress/sql/limit.sql
@@ -196,6 +196,9 @@ CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995
ORDER BY thousand FETCH FIRST (NULL+1) ROWS WITH TIES;
\d+ limit_thousand_v_3
CREATE VIEW limit_thousand_v_4 AS SELECT thousand FROM onek WHERE thousand < 995
- ORDER BY thousand FETCH FIRST NULL ROWS ONLY;
+ ORDER BY thousand FETCH FIRST (5::bigint) ROWS WITH TIES;
\d+ limit_thousand_v_4
+CREATE VIEW limit_thousand_v_5 AS SELECT thousand FROM onek WHERE thousand < 995
+ ORDER BY thousand FETCH FIRST NULL ROWS ONLY;
+\d+ limit_thousand_v_5
-- leave these views
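
The new v_4 case boils down to accepting a non-constant count expression with WITH TIES; as a standalone sketch:

SELECT thousand FROM onek
ORDER BY thousand
FETCH FIRST (5::bigint) ROWS WITH TIES;  -- parenthesized expression as count
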
diff --git a/src/test/regress/sql/memoize.sql b/src/test/regress/sql/memoize.sql
index c0d47fa875a..8d1cdd6990c 100644
--- a/src/test/regress/sql/memoize.sql
+++ b/src/test/regress/sql/memoize.sql
@@ -26,6 +26,7 @@ begin
ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
ln := regexp_replace(ln, 'loops=\d+', 'loops=N');
ln := regexp_replace(ln, 'Index Searches: \d+', 'Index Searches: N');
+ ln := regexp_replace(ln, 'Memory: \d+kB', 'Memory: NkB');
return next ln;
end loop;
end;
@@ -244,3 +245,29 @@ RESET max_parallel_workers_per_gather;
RESET parallel_tuple_cost;
RESET parallel_setup_cost;
RESET min_parallel_table_scan_size;
+
+-- Ensure memoize works for ANTI joins
+CREATE TABLE tab_anti (a int, b boolean);
+INSERT INTO tab_anti SELECT i%3, false FROM generate_series(1,100)i;
+ANALYZE tab_anti;
+
+-- Ensure we get a Memoize plan for ANTI join
+SELECT explain_memoize('
+SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN
+LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2
+ON t1.a+1 = t2.a
+WHERE t2.a IS NULL;', false);
+
+-- And check we get the expected results.
+SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN
+LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2
+ON t1.a+1 = t2.a
+WHERE t2.a IS NULL;
+
+-- Ensure we do not add memoize node for SEMI join
+EXPLAIN (COSTS OFF)
+SELECT * FROM tab_anti t1 WHERE t1.a IN
+ (SELECT a FROM tab_anti t2 WHERE t2.b IN
+ (SELECT t1.b FROM tab_anti t3 WHERE t2.a > 1 OFFSET 0));
+
+DROP TABLE tab_anti;
diff --git a/src/test/regress/sql/merge.sql b/src/test/regress/sql/merge.sql
index f7a19c0e7dd..2660b19f238 100644
--- a/src/test/regress/sql/merge.sql
+++ b/src/test/regress/sql/merge.sql
@@ -1722,6 +1722,55 @@ WHEN MATCHED THEN DELETE;
SELECT * FROM new_measurement ORDER BY city_id, logdate;
+-- MERGE into inheritance root table
+DROP TRIGGER insert_measurement_trigger ON measurement;
+ALTER TABLE measurement ADD CONSTRAINT mcheck CHECK (city_id = 0) NO INHERIT;
+
+EXPLAIN (COSTS OFF)
+MERGE INTO measurement m
+ USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, 25, 100);
+
+BEGIN;
+MERGE INTO measurement m
+ USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, 25, 100);
+SELECT * FROM ONLY measurement ORDER BY city_id, logdate;
+ROLLBACK;
+
+ALTER TABLE measurement ENABLE ROW LEVEL SECURITY;
+ALTER TABLE measurement FORCE ROW LEVEL SECURITY;
+CREATE POLICY measurement_p ON measurement USING (peaktemp IS NOT NULL);
+
+MERGE INTO measurement m
+ USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, NULL, 100); -- should fail
+
+MERGE INTO measurement m
+ USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, 25, 100); -- ok
+SELECT * FROM ONLY measurement ORDER BY city_id, logdate;
+
+MERGE INTO measurement m
+ USING (VALUES (1, '01-18-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, 25, 200)
+RETURNING merge_action(), m.*;
+
DROP TABLE measurement, new_measurement CASCADE;
DROP FUNCTION measurement_insert_trigger();
diff --git a/src/test/regress/sql/misc_functions.sql b/src/test/regress/sql/misc_functions.sql
index 5f9c77512d1..23792c4132a 100644
--- a/src/test/regress/sql/misc_functions.sql
+++ b/src/test/regress/sql/misc_functions.sql
@@ -400,9 +400,9 @@ SELECT pg_column_toast_chunk_id(a) IS NULL,
DROP TABLE test_chunk_id;
DROP FUNCTION explain_mask_costs(text, bool, bool, bool, bool);
--- test stratnum support functions
-SELECT gist_stratnum_common(7);
-SELECT gist_stratnum_common(3);
+-- test stratnum translation support functions
+SELECT gist_translate_cmptype_common(7);
+SELECT gist_translate_cmptype_common(3);
-- relpath tests
diff --git a/src/test/regress/sql/numeric.sql b/src/test/regress/sql/numeric.sql
index b98ae27df56..640c6d92f4c 100644
--- a/src/test/regress/sql/numeric.sql
+++ b/src/test/regress/sql/numeric.sql
@@ -869,6 +869,8 @@ SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, 0);
SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, -5);
SELECT width_bucket(3.5::float8, 3.0::float8, 3.0::float8, 888);
SELECT width_bucket('NaN', 3.0, 4.0, 888);
+SELECT width_bucket('NaN'::float8, 3.0::float8, 4.0::float8, 888);
+SELECT width_bucket(0, 'NaN', 4.0, 888);
SELECT width_bucket(0::float8, 'NaN', 4.0::float8, 888);
SELECT width_bucket(2.0, 3.0, '-inf', 888);
SELECT width_bucket(0::float8, '-inf', 4.0::float8, 888);
diff --git a/src/test/regress/sql/partition_prune.sql b/src/test/regress/sql/partition_prune.sql
index f6db9479f54..d93c0c03bab 100644
--- a/src/test/regress/sql/partition_prune.sql
+++ b/src/test/regress/sql/partition_prune.sql
@@ -1371,12 +1371,12 @@ create view part_abc_view as select * from part_abc where b <> 'a' with check op
prepare update_part_abc_view as update part_abc_view set b = $2 where a = $1 returning *;
-- Only the unpruned partition should be shown in the list of relations to be
-- updated
-explain (costs off) execute update_part_abc_view (1, 'd');
+explain (verbose, costs off) execute update_part_abc_view (1, 'd');
execute update_part_abc_view (1, 'd');
-explain (costs off) execute update_part_abc_view (2, 'a');
+explain (verbose, costs off) execute update_part_abc_view (2, 'a');
execute update_part_abc_view (2, 'a');
-- All pruned.
-explain (costs off) execute update_part_abc_view (3, 'a');
+explain (verbose, costs off) execute update_part_abc_view (3, 'a');
execute update_part_abc_view (3, 'a');
deallocate update_part_abc_view;
diff --git a/src/test/regress/sql/psql.sql b/src/test/regress/sql/psql.sql
index 1a8a83462f0..e2e31245439 100644
--- a/src/test/regress/sql/psql.sql
+++ b/src/test/regress/sql/psql.sql
@@ -68,11 +68,11 @@ SELECT $1, $2 \parse stmt3
-- Multiple \g calls mean multiple executions
\bind_named stmt2 'foo3' \g \bind_named stmt3 'foo4' 'bar4' \g
--- \close (extended query protocol)
-\close
-\close ''
-\close stmt2
-\close stmt2
+-- \close_prepared (extended query protocol)
+\close_prepared
+\close_prepared ''
+\close_prepared stmt2
+\close_prepared stmt2
SELECT name, statement FROM pg_prepared_statements ORDER BY name;
-- \bind (extended query protocol)
@@ -1035,7 +1035,7 @@ select \if false \\ (bogus \else \\ 42 \endif \\ forty_two;
\C arg1
\c arg1 arg2 arg3 arg4
\cd arg1
- \close stmt1
+ \close_prepared stmt1
\conninfo
\copy arg1 arg2 arg3 arg4 arg5 arg6
\copyright
diff --git a/src/test/regress/sql/psql_pipeline.sql b/src/test/regress/sql/psql_pipeline.sql
index 16e1e1e84cd..6788dceee2e 100644
--- a/src/test/regress/sql/psql_pipeline.sql
+++ b/src/test/regress/sql/psql_pipeline.sql
@@ -105,106 +105,6 @@ INSERT INTO psql_pipeline VALUES ($1) \bind 1 \sendpipeline
COMMIT \bind \sendpipeline
\endpipeline
--- COPY FROM STDIN
--- with \sendpipeline and \bind
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\endpipeline
-2 test2
-\.
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\endpipeline
-20 test2
-\.
-
--- COPY FROM STDIN with \flushrequest + \getresults
--- with \sendpipeline and \bind
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\flushrequest
-\getresults
-3 test3
-\.
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\flushrequest
-\getresults
-30 test3
-\.
-\endpipeline
-
--- COPY FROM STDIN with \syncpipeline + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\syncpipeline
-\getresults
-4 test4
-\.
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\syncpipeline
-\getresults
-40 test4
-\.
-\endpipeline
-
--- COPY TO STDOUT
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\endpipeline
-
--- COPY TO STDOUT with \flushrequest + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\flushrequest
-\getresults
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\flushrequest
-\getresults
-\endpipeline
-
--- COPY TO STDOUT with \syncpipeline + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\syncpipeline
-\getresults
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\syncpipeline
-\getresults
-\endpipeline
-
-- Use \parse and \bind_named
\startpipeline
SELECT $1 \parse ''
@@ -406,21 +306,21 @@ SELECT $1 \bind \sendpipeline
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
\flushrequest
\getresults
-- Pipeline is aborted.
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
-- Sync allows pipeline to recover.
\syncpipeline
\getresults
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
\flushrequest
\getresults
\endpipeline
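
The renamed metacommand in isolation, outside a pipeline (the statement name is illustrative):

SELECT $1 \parse stmt_demo
\close_prepared stmt_demo
-- same behavior as the old \close: sends a Close message for the named
-- prepared statement over the extended query protocol
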
diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/sql/publication.sql
index 68001de4000..c9e309190df 100644
--- a/src/test/regress/sql/publication.sql
+++ b/src/test/regress/sql/publication.sql
@@ -262,6 +262,9 @@ ALTER PUBLICATION testpub6 SET TABLES IN SCHEMA testpub_rf_schema2, TABLE testpu
RESET client_min_messages;
\dRp+ testpub6
-- fail - virtual generated column uses user-defined function
+-- (Actually, this already fails at CREATE TABLE rather than at CREATE
+-- PUBLICATION, but let's keep the test in case the former gets
+-- relaxed sometime.)
CREATE TABLE testpub_rf_tbl6 (id int PRIMARY KEY, x int, y int GENERATED ALWAYS AS (x * testpub_rf_func2()) VIRTUAL);
CREATE PUBLICATION testpub7 FOR TABLE testpub_rf_tbl6 WHERE (y > 100);
-- test that SET EXPRESSION is rejected, because it could affect a row filter
@@ -276,7 +279,7 @@ DROP TABLE testpub_rf_tbl2;
DROP TABLE testpub_rf_tbl3;
DROP TABLE testpub_rf_tbl4;
DROP TABLE testpub_rf_tbl5;
-DROP TABLE testpub_rf_tbl6;
+--DROP TABLE testpub_rf_tbl6;
DROP TABLE testpub_rf_schema1.testpub_rf_tbl5;
DROP TABLE testpub_rf_schema2.testpub_rf_tbl6;
DROP SCHEMA testpub_rf_schema1;
diff --git a/src/test/regress/sql/regproc.sql b/src/test/regress/sql/regproc.sql
index 232289ac398..cfec8f8c754 100644
--- a/src/test/regress/sql/regproc.sql
+++ b/src/test/regress/sql/regproc.sql
@@ -47,11 +47,42 @@ SELECT regrole('regress_regrole_test');
SELECT regrole('"regress_regrole_test"');
SELECT regnamespace('pg_catalog');
SELECT regnamespace('"pg_catalog"');
+SELECT regdatabase('template1');
+SELECT regdatabase('"template1"');
SELECT to_regrole('regress_regrole_test');
SELECT to_regrole('"regress_regrole_test"');
SELECT to_regnamespace('pg_catalog');
SELECT to_regnamespace('"pg_catalog"');
+SELECT to_regdatabase('template1');
+SELECT to_regdatabase('"template1"');
+
+-- special "single dash" case
+
+SELECT regproc('-')::oid;
+SELECT regprocedure('-')::oid;
+SELECT regclass('-')::oid;
+SELECT regcollation('-')::oid;
+SELECT regtype('-')::oid;
+SELECT regconfig('-')::oid;
+SELECT regdictionary('-')::oid;
+SELECT regrole('-')::oid;
+SELECT regnamespace('-')::oid;
+SELECT regdatabase('-')::oid;
+
+SELECT to_regproc('-')::oid;
+SELECT to_regprocedure('-')::oid;
+SELECT to_regclass('-')::oid;
+SELECT to_regcollation('-')::oid;
+SELECT to_regtype('-')::oid;
+SELECT to_regrole('-')::oid;
+SELECT to_regnamespace('-')::oid;
+SELECT to_regdatabase('-')::oid;
+
+-- constant cannot be used here
+
+CREATE TABLE regrole_test (rolid OID DEFAULT 'regress_regrole_test'::regrole);
+CREATE TABLE regdatabase_test (datid OID DEFAULT 'template1'::regdatabase);
/* If objects don't exist, raise errors. */
@@ -88,6 +119,9 @@ SELECT regrole('foo.bar');
SELECT regnamespace('Nonexistent');
SELECT regnamespace('"Nonexistent"');
SELECT regnamespace('foo.bar');
+SELECT regdatabase('Nonexistent');
+SELECT regdatabase('"Nonexistent"');
+SELECT regdatabase('foo.bar');
/* If objects don't exist, return NULL with no error. */
@@ -122,6 +156,9 @@ SELECT to_regrole('foo.bar');
SELECT to_regnamespace('Nonexistent');
SELECT to_regnamespace('"Nonexistent"');
SELECT to_regnamespace('foo.bar');
+SELECT to_regdatabase('Nonexistent');
+SELECT to_regdatabase('"Nonexistent"');
+SELECT to_regdatabase('foo.bar');
-- Test to_regtypemod
SELECT to_regtypemod('text');
@@ -147,6 +184,7 @@ SELECT * FROM pg_input_error_info('ng_catalog.abs(numeric)', 'regprocedure');
SELECT * FROM pg_input_error_info('ng_catalog.abs(numeric', 'regprocedure');
SELECT * FROM pg_input_error_info('regress_regrole_test', 'regrole');
SELECT * FROM pg_input_error_info('no_such_type', 'regtype');
+SELECT * FROM pg_input_error_info('Nonexistent', 'regdatabase');
-- Some cases that should be soft errors, but are not yet
SELECT * FROM pg_input_error_info('incorrect type name syntax', 'regtype');
diff --git a/src/test/regress/sql/strings.sql b/src/test/regress/sql/strings.sql
index f7b325baadf..2577a42987d 100644
--- a/src/test/regress/sql/strings.sql
+++ b/src/test/regress/sql/strings.sql
@@ -197,6 +197,26 @@ SELECT 'abcd\efg' SIMILAR TO '_bcd\%' ESCAPE '' AS true;
SELECT 'abcdefg' SIMILAR TO '_bcd%' ESCAPE NULL AS null;
SELECT 'abcdefg' SIMILAR TO '_bcd#%' ESCAPE '##' AS error;
+-- Characters that should be left alone in character classes when a
+-- SIMILAR TO regexp pattern is converted to POSIX style.
+-- Underscore "_"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '_[_[:alpha:]_]_';
+-- Percentage "%"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '%[%[:alnum:]%]%';
+-- Dot "."
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '.[.[:alnum:].].';
+-- Dollar "$"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '$[$[:alnum:]$]$';
+-- Opening parenthesis "("
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '()[([:alnum:](]()';
+-- Caret "^"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '^[^[:alnum:]^[^^][[^^]][\^][[\^]]\^]^';
+-- Closing square bracket "]" at the beginning of character class
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '[]%][^]%][^%]%';
+-- Closing square bracket effective after two carets at the beginning
+-- of character class.
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '[^^]^';
+
-- Test backslash escapes in regexp_replace's replacement string
SELECT regexp_replace('1112223333', E'(\\d{3})(\\d{3})(\\d{4})', E'(\\1) \\2-\\3');
SELECT regexp_replace('foobarrbazz', E'(.)\\1', E'X\\&Y', 'g');
diff --git a/src/test/regress/sql/sysviews.sql b/src/test/regress/sql/sysviews.sql
index d0917b6868e..66179f026b3 100644
--- a/src/test/regress/sql/sysviews.sql
+++ b/src/test/regress/sql/sysviews.sql
@@ -101,21 +101,3 @@ select count(distinct utc_offset) >= 24 as ok from pg_timezone_abbrevs;
-- One specific case we can check without much fear of breakage
-- is the historical local-mean-time value used for America/Los_Angeles.
select * from pg_timezone_abbrevs where abbrev = 'LMT';
-
-DO $$
-DECLARE
- bg_writer_pid int;
- r RECORD;
-BEGIN
- SELECT pid from pg_stat_activity where backend_type='background writer'
- INTO bg_writer_pid;
-
- select type, name, ident
- from pg_get_process_memory_contexts(bg_writer_pid, false, 20)
- where path = '{1}' into r;
- RAISE NOTICE '%', r;
- select type, name, ident
- from pg_get_process_memory_contexts(pg_backend_pid(), false, 20)
- where path = '{1}' into r;
- RAISE NOTICE '%', r;
-END $$;
diff --git a/src/test/regress/sql/triggers.sql b/src/test/regress/sql/triggers.sql
index d3d242dd29b..d674b25c83b 100644
--- a/src/test/regress/sql/triggers.sql
+++ b/src/test/regress/sql/triggers.sql
@@ -1577,6 +1577,19 @@ drop table parted;
drop function parted_trigfunc();
--
+-- Constraint triggers
+--
+create constraint trigger crtr
+ after insert on foo not valid
+ for each row execute procedure foo ();
+create constraint trigger crtr
+ after insert on foo no inherit
+ for each row execute procedure foo ();
+create constraint trigger crtr
+ after insert on foo not enforced
+ for each row execute procedure foo ();
+
+--
-- Constraint triggers and partitioned tables
create table parted_constr_ancestor (a int, b text)
partition by range (b);
@@ -1591,7 +1604,7 @@ create constraint trigger parted_trig after insert on parted_constr_ancestor
deferrable
for each row execute procedure trigger_notice_ab();
create constraint trigger parted_trig_two after insert on parted_constr
- deferrable initially deferred
+ deferrable initially deferred enforced
for each row when (bark(new.b) AND new.a % 2 = 1)
execute procedure trigger_notice_ab();
@@ -2701,8 +2714,8 @@ drop function f();
-- Test who runs deferred trigger functions
-- setup
-create role regress_groot;
-create role regress_outis;
+create role regress_caller;
+create role regress_fn_owner;
create function whoami() returns trigger language plpgsql
as $$
begin
@@ -2710,7 +2723,7 @@ begin
return null;
end;
$$;
-alter function whoami() owner to regress_outis;
+alter function whoami() owner to regress_fn_owner;
create table defer_trig (id integer);
grant insert on defer_trig to public;
@@ -2721,10 +2734,10 @@ create constraint trigger whoami after insert on defer_trig
-- deferred triggers must run as the user that queued the trigger
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (1);
reset role;
-set role regress_outis;
+set role regress_fn_owner;
insert into defer_trig values (2);
reset role;
commit;
@@ -2732,7 +2745,7 @@ commit;
-- security definer functions override the user who queued the trigger
alter function whoami() security definer;
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (3);
reset role;
commit;
@@ -2749,7 +2762,7 @@ end;
$$;
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (4);
reset role;
commit; -- error expected
@@ -2758,5 +2771,5 @@ select current_user = session_user;
-- clean up
drop table defer_trig;
drop function whoami();
-drop role regress_outis;
-drop role regress_groot;
+drop role regress_fn_owner;
+drop role regress_caller;
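The regress_caller/regress_fn_owner renames make the two roles self-describing: one queues the trigger, the other owns the trigger function. The behavior under test, as a minimal sketch reusing the test's own names:

  begin;
  set role regress_caller;
  insert into defer_trig values (1);  -- queues the deferred constraint trigger
  reset role;
  commit;  -- whoami() fires here, and current_user is regress_caller
  -- With "alter function whoami() security definer", the function instead
  -- runs as its owner, regress_fn_owner, when the trigger fires at COMMIT.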
diff --git a/src/test/regress/sql/type_sanity.sql b/src/test/regress/sql/type_sanity.sql
index c94dd83d306..df795759bb4 100644
--- a/src/test/regress/sql/type_sanity.sql
+++ b/src/test/regress/sql/type_sanity.sql
@@ -539,6 +539,7 @@ CREATE TABLE tab_core_types AS SELECT
'regtype'::regtype type,
'pg_monitor'::regrole,
'pg_class'::regclass::oid,
+ 'template1'::regdatabase,
'(1,1)'::tid, '2'::xid, '3'::cid,
'10:20:10,14,15'::txid_snapshot,
'10:20:10,14,15'::pg_snapshot,
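regdatabase is the OID-alias type for databases being added to the core-types table here; a sketch of the round trip it should support, by analogy with the other reg* types (output formatting assumed):

  SELECT 'template1'::regdatabase;        -- template1
  SELECT 'template1'::regdatabase::oid;   -- 1 (template1's hard-wired OID)
  SELECT 1::oid::regdatabase;             -- template1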
diff --git a/src/test/ssl/meson.build b/src/test/ssl/meson.build
index cf8b2b9303a..d8e0fb518e0 100644
--- a/src/test/ssl/meson.build
+++ b/src/test/ssl/meson.build
@@ -7,7 +7,7 @@ tests += {
'tap': {
'env': {
'with_ssl': ssl_library,
- 'OPENSSL': openssl.found() ? openssl.path() : '',
+ 'OPENSSL': openssl.found() ? openssl.full_path() : '',
},
'tests': [
't/001_ssltests.pl',
diff --git a/src/test/ssl/t/SSL/Server.pm b/src/test/ssl/t/SSL/Server.pm
index 33975b28e8c..efbd0dafaf6 100644
--- a/src/test/ssl/t/SSL/Server.pm
+++ b/src/test/ssl/t/SSL/Server.pm
@@ -200,7 +200,7 @@ sub configure_test_server_for_ssl
$node->append_conf(
'postgresql.conf', <<EOF
fsync=off
-log_connections=on
+log_connections=all
log_hostname=on
listen_addresses='$serverhost'
log_statement=all
@@ -318,7 +318,8 @@ sub switch_server_cert
$node->append_conf('sslconfig.conf', "ssl=on");
$node->append_conf('sslconfig.conf', $backend->set_server_cert(\%params));
# use lists of ECDH curves and cipher suites for syntax testing
- $node->append_conf('sslconfig.conf', 'ssl_groups=X25519:prime256v1:secp521r1');
+ $node->append_conf('sslconfig.conf',
+ 'ssl_groups=X25519:prime256v1:secp521r1');
$node->append_conf('sslconfig.conf',
'ssl_tls13_ciphers=TLS_AES_256_GCM_SHA384:TLS_AES_128_GCM_SHA256');
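The log_connections change tracks the setting's evolution from a boolean into a comma-separated list of connection-logging stages, with 'all' as the catch-all; a sketch of the newer spellings (the individual stage names are taken from the development documentation and may vary):

  ALTER SYSTEM SET log_connections = 'all';
  ALTER SYSTEM SET log_connections = 'receipt,authentication,authorization';
  SELECT pg_reload_conf();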
diff --git a/src/test/subscription/t/007_ddl.pl b/src/test/subscription/t/007_ddl.pl
index 7d12bcbddb6..2a45fb13739 100644
--- a/src/test/subscription/t/007_ddl.pl
+++ b/src/test/subscription/t/007_ddl.pl
@@ -70,7 +70,8 @@ ok( $stderr =~
);
# Cleanup
-$node_publisher->safe_psql('postgres', qq[
+$node_publisher->safe_psql(
+ 'postgres', qq[
DROP PUBLICATION mypub;
SELECT pg_drop_replication_slot('mysub');
]);
@@ -86,32 +87,38 @@ sub test_swap
my ($table_name, $pubname, $appname) = @_;
# Confirms tuples can be replicated
- $node_publisher->safe_psql('postgres', "INSERT INTO $table_name VALUES (1);");
+ $node_publisher->safe_psql('postgres',
+ "INSERT INTO $table_name VALUES (1);");
$node_publisher->wait_for_catchup($appname);
my $result =
- $node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name");
- is($result, qq(1), 'check replication worked well before renaming a publication');
+ $node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name");
+ is($result, qq(1),
+ 'check replication worked well before renaming a publication');
	# Swap the publication names; $pubname <-> pub_empty
- $node_publisher->safe_psql('postgres', qq[
+ $node_publisher->safe_psql(
+ 'postgres', qq[
ALTER PUBLICATION $pubname RENAME TO tap_pub_tmp;
ALTER PUBLICATION pub_empty RENAME TO $pubname;
ALTER PUBLICATION tap_pub_tmp RENAME TO pub_empty;
]);
# Insert the data again
- $node_publisher->safe_psql('postgres', "INSERT INTO $table_name VALUES (2);");
+ $node_publisher->safe_psql('postgres',
+ "INSERT INTO $table_name VALUES (2);");
$node_publisher->wait_for_catchup($appname);
	# Confirms the second tuple won't be replicated because $pubname no longer
	# contains any relations.
$result =
- $node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name ORDER BY a");
+ $node_subscriber->safe_psql('postgres',
+ "SELECT a FROM $table_name ORDER BY a");
is($result, qq(1),
'check the tuple inserted after the RENAME was not replicated');
	# Restore the publication names, because this routine can be called several times
- $node_publisher->safe_psql('postgres', qq[
+ $node_publisher->safe_psql(
+ 'postgres', qq[
ALTER PUBLICATION $pubname RENAME TO tap_pub_tmp;
ALTER PUBLICATION pub_empty RENAME TO $pubname;
ALTER PUBLICATION tap_pub_tmp RENAME TO pub_empty;
@@ -124,7 +131,8 @@ $node_publisher->safe_psql('postgres', $ddl);
$node_subscriber->safe_psql('postgres', $ddl);
# Create publications and a subscription
-$node_publisher->safe_psql('postgres', qq[
+$node_publisher->safe_psql(
+ 'postgres', qq[
CREATE PUBLICATION pub_empty;
CREATE PUBLICATION pub_for_tab FOR TABLE test1;
CREATE PUBLICATION pub_for_all_tables FOR ALL TABLES;
@@ -139,19 +147,20 @@ test_swap('test1', 'pub_for_tab', 'tap_sub');
# Switches a publication which includes all tables
$node_subscriber->safe_psql('postgres',
- "ALTER SUBSCRIPTION tap_sub SET PUBLICATION pub_for_all_tables;"
-);
+ "ALTER SUBSCRIPTION tap_sub SET PUBLICATION pub_for_all_tables;");
$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub');
# Confirms the RENAME command works for an ALL TABLES publication
test_swap('test2', 'pub_for_all_tables', 'tap_sub');
# Cleanup
-$node_publisher->safe_psql('postgres', qq[
+$node_publisher->safe_psql(
+ 'postgres', qq[
DROP PUBLICATION pub_empty, pub_for_tab, pub_for_all_tables;
DROP TABLE test1, test2;
]);
-$node_subscriber->safe_psql('postgres', qq[
+$node_subscriber->safe_psql(
+ 'postgres', qq[
DROP SUBSCRIPTION tap_sub;
DROP TABLE test1, test2;
]);
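The swap dance in test_swap works because a subscription references its publications by name, not by OID; a hedged illustration of the failure mode being exercised:

  -- Publisher side: rename the subscribed publication out of the way.
  ALTER PUBLICATION pub_for_tab RENAME TO tap_pub_tmp;
  -- Rows inserted now are not replicated: the walsender resolves
  -- "pub_for_tab" by name and finds only the empty publication that
  -- was renamed into its place.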
diff --git a/src/test/subscription/t/013_partition.pl b/src/test/subscription/t/013_partition.pl
index 61b0cb4aa1a..4f78dd48815 100644
--- a/src/test/subscription/t/013_partition.pl
+++ b/src/test/subscription/t/013_partition.pl
@@ -51,8 +51,7 @@ $node_subscriber1->safe_psql('postgres',
);
# Make a BRIN index to test the aminsertcleanup logic in the subscriber
$node_subscriber1->safe_psql('postgres',
- "CREATE INDEX tab1_c_brin_idx ON tab1 USING brin (c)"
-);
+ "CREATE INDEX tab1_c_brin_idx ON tab1 USING brin (c)");
$node_subscriber1->safe_psql('postgres',
"CREATE TABLE tab1_1 (b text, c text DEFAULT 'sub1_tab1', a int NOT NULL)"
);
diff --git a/src/test/subscription/t/021_twophase.pl b/src/test/subscription/t/021_twophase.pl
index 61c427aed21..b8e4242d1f1 100644
--- a/src/test/subscription/t/021_twophase.pl
+++ b/src/test/subscription/t/021_twophase.pl
@@ -373,7 +373,14 @@ $result =
$node_publisher->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
is($result, qq(6), 'publisher inserted data');
+# Wait for both subscribers to catch up
$node_publisher->wait_for_catchup($appname_copy);
+$node_publisher->wait_for_catchup($appname);
+
+# Make sure there are no prepared transactions on the subscriber
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(0), 'should be no prepared transactions on subscriber');
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
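The new zero-row assertion reads pg_prepared_xacts, the view of transactions currently sitting in the prepared state; a sketch of what a leftover two-phase apply transaction would look like there (the gid format is an assumption about the apply worker's naming scheme):

  SELECT gid, prepared, owner, database FROM pg_prepared_xacts;
  --        gid         |     prepared     | owner | database
  -- -------------------+------------------+-------+----------
  --  pg_gid_16390_756  | 2025-01-01 ...   | ...   | postgres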
diff --git a/src/test/subscription/t/024_add_drop_pub.pl b/src/test/subscription/t/024_add_drop_pub.pl
index e995d8b3839..b396abe5599 100644
--- a/src/test/subscription/t/024_add_drop_pub.pl
+++ b/src/test/subscription/t/024_add_drop_pub.pl
@@ -108,11 +108,12 @@ $node_publisher->poll_query_until('postgres',
my $offset = -s $node_publisher->logfile;
-$node_publisher->safe_psql('postgres',"INSERT INTO tab_3 values(1)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab_3 values(1)");
# Verify that a warning is logged.
$node_publisher->wait_for_log(
- qr/WARNING: ( [A-Z0-9]+:)? skipped loading publication: tap_pub_3/, $offset);
+ qr/WARNING: ( [A-Z0-9]+:)? skipped loading publication "tap_pub_3"/,
+ $offset);
$node_publisher->safe_psql('postgres',
"CREATE PUBLICATION tap_pub_3 FOR TABLE tab_3");
@@ -128,10 +129,11 @@ $node_publisher->wait_for_catchup('tap_sub');
# Verify that the insert operation gets replicated to the subscriber after
# the publication is created.
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT * FROM tab_3");
-is($result, qq(1
-2), 'check that the incremental data is replicated after the publication is created');
+$result = $node_subscriber->safe_psql('postgres', "SELECT * FROM tab_3");
+is( $result, qq(1
+2),
+ 'check that the incremental data is replicated after the publication is created'
+);
# shutdown
$node_subscriber->stop('fast');
diff --git a/src/test/subscription/t/035_conflicts.pl b/src/test/subscription/t/035_conflicts.pl
index 2a7a8239a29..d78a6bac16a 100644
--- a/src/test/subscription/t/035_conflicts.pl
+++ b/src/test/subscription/t/035_conflicts.pl
@@ -26,7 +26,8 @@ $node_publisher->safe_psql('postgres',
"CREATE TABLE conf_tab (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);");
$node_publisher->safe_psql('postgres',
- "CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);");
+ "CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);"
+);
# Create same table on subscriber
$node_subscriber->safe_psql('postgres',