path: root/src/test
Diffstat (limited to 'src/test')
-rw-r--r--  src/test/authentication/t/001_password.pl | 23
-rw-r--r--  src/test/authentication/t/003_peer.pl | 18
-rw-r--r--  src/test/isolation/expected/merge-match-recheck.out | 27
-rw-r--r--  src/test/isolation/specs/merge-match-recheck.spec | 22
-rw-r--r--  src/test/modules/Makefile | 1
-rw-r--r--  src/test/modules/commit_ts/t/001_base.pl | 3
-rw-r--r--  src/test/modules/injection_points/Makefile | 2
-rw-r--r--  src/test/modules/injection_points/expected/injection_points.out | 16
-rw-r--r--  src/test/modules/injection_points/expected/vacuum.out | 122
-rw-r--r--  src/test/modules/injection_points/injection_points--1.0.sql | 12
-rw-r--r--  src/test/modules/injection_points/injection_points.c | 39
-rw-r--r--  src/test/modules/injection_points/injection_stats.c | 2
-rw-r--r--  src/test/modules/injection_points/injection_stats_fixed.c | 2
-rw-r--r--  src/test/modules/injection_points/meson.build | 3
-rw-r--r--  src/test/modules/injection_points/sql/injection_points.sql | 7
-rw-r--r--  src/test/modules/injection_points/sql/vacuum.sql | 47
-rw-r--r--  src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl | 6
-rw-r--r--  src/test/modules/meson.build | 1
-rw-r--r--  src/test/modules/oauth_validator/meson.build | 2
-rw-r--r--  src/test/modules/test_aio/t/001_aio.pl | 46
-rw-r--r--  src/test/modules/test_aio/test_aio.c | 4
-rw-r--r--  src/test/modules/test_binaryheap/.gitignore | 4
-rw-r--r--  src/test/modules/test_binaryheap/Makefile | 24
-rw-r--r--  src/test/modules/test_binaryheap/expected/test_binaryheap.out | 12
-rw-r--r--  src/test/modules/test_binaryheap/meson.build | 33
-rw-r--r--  src/test/modules/test_binaryheap/sql/test_binaryheap.sql | 8
-rw-r--r--  src/test/modules/test_binaryheap/test_binaryheap--1.0.sql | 7
-rw-r--r--  src/test/modules/test_binaryheap/test_binaryheap.c | 275
-rw-r--r--  src/test/modules/test_binaryheap/test_binaryheap.control | 5
-rw-r--r--  src/test/modules/test_dsm_registry/expected/test_dsm_registry.out | 30
-rw-r--r--  src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql | 9
-rw-r--r--  src/test/modules/test_dsm_registry/test_dsm_registry--1.0.sql | 6
-rw-r--r--  src/test/modules/test_dsm_registry/test_dsm_registry.c | 103
-rw-r--r--  src/test/modules/test_shm_mq/worker.c | 2
-rw-r--r--  src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm | 26
-rw-r--r--  src/test/perl/PostgreSQL/Test/Cluster.pm | 37
-rw-r--r--  src/test/perl/PostgreSQL/Test/Utils.pm | 1
-rw-r--r--  src/test/postmaster/t/002_connection_limits.pl | 3
-rw-r--r--  src/test/recovery/meson.build | 3
-rw-r--r--  src/test/recovery/t/003_recovery_targets.pl | 50
-rw-r--r--  src/test/recovery/t/013_crash_restart.pl | 7
-rw-r--r--  src/test/recovery/t/016_min_consistency.pl | 2
-rw-r--r--  src/test/recovery/t/027_stream_regress.pl | 11
-rw-r--r--  src/test/recovery/t/040_standby_failover_slots_sync.pl | 3
-rw-r--r--  src/test/recovery/t/041_checkpoint_at_promote.pl | 2
-rw-r--r--  src/test/recovery/t/046_checkpoint_logical_slot.pl | 142
-rw-r--r--  src/test/recovery/t/047_checkpoint_physical_slot.pl | 133
-rw-r--r--  src/test/recovery/t/048_vacuum_horizon_floor.pl | 288
-rw-r--r--  src/test/regress/expected/alter_table.out | 15
-rw-r--r--  src/test/regress/expected/btree_index.out | 101
-rw-r--r--  src/test/regress/expected/compression.out | 235
-rw-r--r--  src/test/regress/expected/compression_1.out | 360
-rw-r--r--  src/test/regress/expected/compression_lz4.out | 249
-rw-r--r--  src/test/regress/expected/compression_lz4_1.out | 7
-rw-r--r--  src/test/regress/expected/constraints.out | 11
-rw-r--r--  src/test/regress/expected/copy.out | 25
-rw-r--r--  src/test/regress/expected/copy2.out | 6
-rw-r--r--  src/test/regress/expected/create_table_like.out | 30
-rw-r--r--  src/test/regress/expected/domain.out | 5
-rw-r--r--  src/test/regress/expected/foreign_key.out | 4
-rw-r--r--  src/test/regress/expected/generated_stored.out | 12
-rw-r--r--  src/test/regress/expected/generated_virtual.out | 126
-rw-r--r--  src/test/regress/expected/incremental_sort.out | 40
-rw-r--r--  src/test/regress/expected/inherit.out | 14
-rw-r--r--  src/test/regress/expected/join.out | 280
-rw-r--r--  src/test/regress/expected/matview.out | 2
-rw-r--r--  src/test/regress/expected/memoize.out | 60
-rw-r--r--  src/test/regress/expected/numeric.out | 28
-rw-r--r--  src/test/regress/expected/pg_lsn.out | 240
-rw-r--r--  src/test/regress/expected/predicate.out | 54
-rw-r--r--  src/test/regress/expected/privileges.out | 35
-rw-r--r--  src/test/regress/expected/psql.out | 14
-rw-r--r--  src/test/regress/expected/psql_pipeline.out | 194
-rw-r--r--  src/test/regress/expected/publication.out | 38
-rw-r--r--  src/test/regress/expected/regproc.out | 174
-rw-r--r--  src/test/regress/expected/rules.out | 7
-rw-r--r--  src/test/regress/expected/stats.out | 15
-rw-r--r--  src/test/regress/expected/strings.out | 12
-rw-r--r--  src/test/regress/expected/subscription.out | 168
-rw-r--r--  src/test/regress/expected/subselect.out | 46
-rw-r--r--  src/test/regress/expected/triggers.out | 23
-rw-r--r--  src/test/regress/expected/type_sanity.out | 1
-rw-r--r--  src/test/regress/expected/without_overlaps.out | 4
-rw-r--r--  src/test/regress/parallel_schedule | 2
-rw-r--r--  src/test/regress/sql/alter_table.sql | 17
-rw-r--r--  src/test/regress/sql/btree_index.sql | 65
-rw-r--r--  src/test/regress/sql/compression.sql | 84
-rw-r--r--  src/test/regress/sql/compression_lz4.sql | 129
-rw-r--r--  src/test/regress/sql/constraints.sql | 12
-rw-r--r--  src/test/regress/sql/copy.sql | 30
-rw-r--r--  src/test/regress/sql/copy2.sql | 3
-rw-r--r--  src/test/regress/sql/create_table_like.sql | 16
-rw-r--r--  src/test/regress/sql/domain.sql | 3
-rw-r--r--  src/test/regress/sql/foreign_key.sql | 2
-rw-r--r--  src/test/regress/sql/generated_stored.sql | 13
-rw-r--r--  src/test/regress/sql/generated_virtual.sql | 61
-rw-r--r--  src/test/regress/sql/incremental_sort.sql | 24
-rw-r--r--  src/test/regress/sql/join.sql | 85
-rw-r--r--  src/test/regress/sql/memoize.sql | 27
-rw-r--r--  src/test/regress/sql/numeric.sql | 2
-rw-r--r--  src/test/regress/sql/predicate.sql | 18
-rw-r--r--  src/test/regress/sql/privileges.sql | 13
-rw-r--r--  src/test/regress/sql/psql.sql | 12
-rw-r--r--  src/test/regress/sql/psql_pipeline.sql | 106
-rw-r--r--  src/test/regress/sql/publication.sql | 27
-rw-r--r--  src/test/regress/sql/regproc.sql | 38
-rw-r--r--  src/test/regress/sql/stats.sql | 9
-rw-r--r--  src/test/regress/sql/strings.sql | 2
-rw-r--r--  src/test/regress/sql/subscription.sql | 11
-rw-r--r--  src/test/regress/sql/subselect.sql | 8
-rw-r--r--  src/test/regress/sql/triggers.sql | 15
-rw-r--r--  src/test/regress/sql/type_sanity.sql | 1
-rw-r--r--  src/test/ssl/meson.build | 2
-rw-r--r--  src/test/ssl/t/001_ssltests.pl | 7
-rw-r--r--  src/test/ssl/t/SSL/Server.pm | 3
-rw-r--r--  src/test/subscription/t/007_ddl.pl | 35
-rw-r--r--  src/test/subscription/t/013_partition.pl | 3
-rw-r--r--  src/test/subscription/t/024_add_drop_pub.pl | 14
-rw-r--r--  src/test/subscription/t/035_conflicts.pl | 270
119 files changed, 3991 insertions, 1444 deletions
diff --git a/src/test/authentication/t/001_password.pl b/src/test/authentication/t/001_password.pl
index 37d96d95a1a..a16e9a563f3 100644
--- a/src/test/authentication/t/001_password.pl
+++ b/src/test/authentication/t/001_password.pl
@@ -79,39 +79,40 @@ $node->start;
# other tests are added to this file in the future
$node->safe_psql('postgres', "CREATE DATABASE test_log_connections");
-my $log_connections = $node->safe_psql('test_log_connections', q(SHOW log_connections;));
+my $log_connections =
+ $node->safe_psql('test_log_connections', q(SHOW log_connections;));
is($log_connections, 'on', qq(check log connections has expected value 'on'));
-$node->connect_ok('test_log_connections',
+$node->connect_ok(
+ 'test_log_connections',
qq(log_connections 'on' works as expected for backwards compatibility),
log_like => [
qr/connection received/,
qr/connection authenticated/,
qr/connection authorized: user=\S+ database=test_log_connections/,
],
- log_unlike => [
- qr/connection ready/,
- ],);
+ log_unlike => [ qr/connection ready/, ],);
-$node->safe_psql('test_log_connections',
+$node->safe_psql(
+ 'test_log_connections',
q[ALTER SYSTEM SET log_connections = receipt,authorization,setup_durations;
SELECT pg_reload_conf();]);
-$node->connect_ok('test_log_connections',
+$node->connect_ok(
+ 'test_log_connections',
q(log_connections with subset of specified options logs only those aspects),
log_like => [
qr/connection received/,
qr/connection authorized: user=\S+ database=test_log_connections/,
qr/connection ready/,
],
- log_unlike => [
- qr/connection authenticated/,
- ],);
+ log_unlike => [ qr/connection authenticated/, ],);
$node->safe_psql('test_log_connections',
qq(ALTER SYSTEM SET log_connections = 'all'; SELECT pg_reload_conf();));
-$node->connect_ok('test_log_connections',
+$node->connect_ok(
+ 'test_log_connections',
qq(log_connections 'all' logs all available connection aspects),
log_like => [
qr/connection received/,
diff --git a/src/test/authentication/t/003_peer.pl b/src/test/authentication/t/003_peer.pl
index f2320b62c87..c751fbdbaa5 100644
--- a/src/test/authentication/t/003_peer.pl
+++ b/src/test/authentication/t/003_peer.pl
@@ -171,7 +171,8 @@ test_role(
# Test with regular expression in user name map.
# Extract the last 3 characters from the system_user
-# or the entire system_user (if its length is <= -3).
+# or the entire system_user name (if its length is <= 3).
+# We trust this will not include any regex metacharacters.
my $regex_test_string = substr($system_user, -3);
# Success as the system user regular expression matches.
@@ -210,12 +211,17 @@ test_role(
log_like =>
[qr/connection authenticated: identity="$system_user" method=peer/]);
+# Create target role for \1 tests.
+my $mapped_name = "test${regex_test_string}map${regex_test_string}user";
+$node->safe_psql('postgres', "CREATE ROLE $mapped_name LOGIN");
+
# Success as the regular expression matches and \1 is replaced in the given
# subexpression.
-reset_pg_ident($node, 'mypeermap', qq{/^$system_user(.*)\$}, 'test\1mapuser');
+reset_pg_ident($node, 'mypeermap', qq{/^.*($regex_test_string)\$},
+ 'test\1map\1user');
test_role(
$node,
- qq{testmapuser},
+ $mapped_name,
'peer',
0,
'with regular expression in user name map with \1 replaced',
@@ -224,11 +230,11 @@ test_role(
# Success as the regular expression matches and \1 is replaced in the given
# subexpression, even if quoted.
-reset_pg_ident($node, 'mypeermap', qq{/^$system_user(.*)\$},
- '"test\1mapuser"');
+reset_pg_ident($node, 'mypeermap', qq{/^.*($regex_test_string)\$},
+ '"test\1map\1user"');
test_role(
$node,
- qq{testmapuser},
+ $mapped_name,
'peer',
0,
'with regular expression in user name map with quoted \1 replaced',
diff --git a/src/test/isolation/expected/merge-match-recheck.out b/src/test/isolation/expected/merge-match-recheck.out
index 9a44a595927..90300f1db5a 100644
--- a/src/test/isolation/expected/merge-match-recheck.out
+++ b/src/test/isolation/expected/merge-match-recheck.out
@@ -241,19 +241,28 @@ starting permutation: update_bal1_tg merge_bal_tg c2 select1_tg c1
s2: NOTICE: Update: (1,160,s1,setup) -> (1,50,s1,"setup updated by update_bal1_tg")
step update_bal1_tg: UPDATE target_tg t SET balance = 50, val = t.val || ' updated by update_bal1_tg' WHERE t.key = 1;
step merge_bal_tg:
- MERGE INTO target_tg t
- USING (SELECT 1 as key) s
- ON s.key = t.key
- WHEN MATCHED AND balance < 100 THEN
- UPDATE SET balance = balance * 2, val = t.val || ' when1'
- WHEN MATCHED AND balance < 200 THEN
- UPDATE SET balance = balance * 4, val = t.val || ' when2'
- WHEN MATCHED AND balance < 300 THEN
- UPDATE SET balance = balance * 8, val = t.val || ' when3';
+ WITH t AS (
+ MERGE INTO target_tg t
+ USING (SELECT 1 as key) s
+ ON s.key = t.key
+ WHEN MATCHED AND balance < 100 THEN
+ UPDATE SET balance = balance * 2, val = t.val || ' when1'
+ WHEN MATCHED AND balance < 200 THEN
+ UPDATE SET balance = balance * 4, val = t.val || ' when2'
+ WHEN MATCHED AND balance < 300 THEN
+ UPDATE SET balance = balance * 8, val = t.val || ' when3'
+ RETURNING t.*
+ )
+ SELECT * FROM t;
<waiting ...>
step c2: COMMIT;
s1: NOTICE: Update: (1,50,s1,"setup updated by update_bal1_tg") -> (1,100,s1,"setup updated by update_bal1_tg when1")
step merge_bal_tg: <... completed>
+key|balance|status|val
+---+-------+------+-------------------------------------
+ 1| 100|s1 |setup updated by update_bal1_tg when1
+(1 row)
+
step select1_tg: SELECT * FROM target_tg;
key|balance|status|val
---+-------+------+-------------------------------------
diff --git a/src/test/isolation/specs/merge-match-recheck.spec b/src/test/isolation/specs/merge-match-recheck.spec
index 26266b8c297..15226e40c9e 100644
--- a/src/test/isolation/specs/merge-match-recheck.spec
+++ b/src/test/isolation/specs/merge-match-recheck.spec
@@ -99,15 +99,19 @@ step "merge_bal_pa"
}
step "merge_bal_tg"
{
- MERGE INTO target_tg t
- USING (SELECT 1 as key) s
- ON s.key = t.key
- WHEN MATCHED AND balance < 100 THEN
- UPDATE SET balance = balance * 2, val = t.val || ' when1'
- WHEN MATCHED AND balance < 200 THEN
- UPDATE SET balance = balance * 4, val = t.val || ' when2'
- WHEN MATCHED AND balance < 300 THEN
- UPDATE SET balance = balance * 8, val = t.val || ' when3';
+ WITH t AS (
+ MERGE INTO target_tg t
+ USING (SELECT 1 as key) s
+ ON s.key = t.key
+ WHEN MATCHED AND balance < 100 THEN
+ UPDATE SET balance = balance * 2, val = t.val || ' when1'
+ WHEN MATCHED AND balance < 200 THEN
+ UPDATE SET balance = balance * 4, val = t.val || ' when2'
+ WHEN MATCHED AND balance < 300 THEN
+ UPDATE SET balance = balance * 8, val = t.val || ' when3'
+ RETURNING t.*
+ )
+ SELECT * FROM t;
}
step "merge_delete"
diff --git a/src/test/modules/Makefile b/src/test/modules/Makefile
index aa1d27bbed3..7d3d3d52b45 100644
--- a/src/test/modules/Makefile
+++ b/src/test/modules/Makefile
@@ -15,6 +15,7 @@ SUBDIRS = \
plsample \
spgist_name_ops \
test_aio \
+ test_binaryheap \
test_bloomfilter \
test_copy_callbacks \
test_custom_rmgrs \
diff --git a/src/test/modules/commit_ts/t/001_base.pl b/src/test/modules/commit_ts/t/001_base.pl
index 1953b18f6b3..50e79ce6409 100644
--- a/src/test/modules/commit_ts/t/001_base.pl
+++ b/src/test/modules/commit_ts/t/001_base.pl
@@ -11,8 +11,7 @@ use Test::More;
use PostgreSQL::Test::Cluster;
my $node = PostgreSQL::Test::Cluster->new('foxtrot');
-$node->init;
-$node->append_conf('postgresql.conf', 'track_commit_timestamp = on');
+$node->init(extra => [ '-c', "track_commit_timestamp=on" ]);
$node->start;
# Create a table, compare "now()" to the commit TS of its xmin
diff --git a/src/test/modules/injection_points/Makefile b/src/test/modules/injection_points/Makefile
index e680991f8d4..fc82cd67f6c 100644
--- a/src/test/modules/injection_points/Makefile
+++ b/src/test/modules/injection_points/Makefile
@@ -11,7 +11,7 @@ EXTENSION = injection_points
DATA = injection_points--1.0.sql
PGFILEDESC = "injection_points - facility for injection points"
-REGRESS = injection_points hashagg reindex_conc
+REGRESS = injection_points hashagg reindex_conc vacuum
REGRESS_OPTS = --dlpath=$(top_builddir)/src/test/regress
ISOLATION = basic inplace syscache-update-pruned
diff --git a/src/test/modules/injection_points/expected/injection_points.out b/src/test/modules/injection_points/expected/injection_points.out
index 43bcdd01582..382f3b0bf88 100644
--- a/src/test/modules/injection_points/expected/injection_points.out
+++ b/src/test/modules/injection_points/expected/injection_points.out
@@ -39,6 +39,15 @@ SELECT injection_points_attach('TestInjectionLog2', 'notice');
(1 row)
+SELECT point_name, library, function FROM injection_points_list()
+ ORDER BY point_name COLLATE "C";
+ point_name | library | function
+--------------------+------------------+------------------
+ TestInjectionError | injection_points | injection_error
+ TestInjectionLog | injection_points | injection_notice
+ TestInjectionLog2 | injection_points | injection_notice
+(3 rows)
+
SELECT injection_points_run('TestInjectionBooh'); -- nothing
injection_points_run
----------------------
@@ -298,5 +307,12 @@ SELECT injection_points_detach('TestConditionLocal1');
(1 row)
+-- No points should be left around.
+SELECT point_name, library, function FROM injection_points_list()
+ ORDER BY point_name COLLATE "C";
+ point_name | library | function
+------------+---------+----------
+(0 rows)
+
DROP EXTENSION injection_points;
DROP FUNCTION wait_pid;
diff --git a/src/test/modules/injection_points/expected/vacuum.out b/src/test/modules/injection_points/expected/vacuum.out
new file mode 100644
index 00000000000..58df59fa927
--- /dev/null
+++ b/src/test/modules/injection_points/expected/vacuum.out
@@ -0,0 +1,122 @@
+-- Tests for VACUUM
+CREATE EXTENSION injection_points;
+SELECT injection_points_set_local();
+ injection_points_set_local
+----------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-index-cleanup-auto', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-index-cleanup-disabled', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-index-cleanup-enabled', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-truncate-auto', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-truncate-disabled', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-truncate-enabled', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+-- Check state of index_cleanup and truncate in VACUUM.
+CREATE TABLE vac_tab_on_toast_off(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=true, toast.vacuum_index_cleanup=false,
+ vacuum_truncate=true, toast.vacuum_truncate=false);
+CREATE TABLE vac_tab_off_toast_on(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=false, toast.vacuum_index_cleanup=true,
+ vacuum_truncate=false, toast.vacuum_truncate=true);
+-- Multiple relations should use their options in isolation.
+VACUUM vac_tab_on_toast_off, vac_tab_off_toast_on;
+NOTICE: notice triggered for injection point vacuum-index-cleanup-enabled
+NOTICE: notice triggered for injection point vacuum-truncate-enabled
+NOTICE: notice triggered for injection point vacuum-index-cleanup-disabled
+NOTICE: notice triggered for injection point vacuum-truncate-disabled
+NOTICE: notice triggered for injection point vacuum-index-cleanup-disabled
+NOTICE: notice triggered for injection point vacuum-truncate-disabled
+NOTICE: notice triggered for injection point vacuum-index-cleanup-enabled
+NOTICE: notice triggered for injection point vacuum-truncate-enabled
+-- Check "auto" case of index_cleanup and "truncate" controlled by
+-- its GUC.
+CREATE TABLE vac_tab_auto(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=auto, toast.vacuum_index_cleanup=auto);
+SET vacuum_truncate = false;
+VACUUM vac_tab_auto;
+NOTICE: notice triggered for injection point vacuum-index-cleanup-auto
+NOTICE: notice triggered for injection point vacuum-truncate-disabled
+NOTICE: notice triggered for injection point vacuum-index-cleanup-auto
+NOTICE: notice triggered for injection point vacuum-truncate-disabled
+SET vacuum_truncate = true;
+VACUUM vac_tab_auto;
+NOTICE: notice triggered for injection point vacuum-index-cleanup-auto
+NOTICE: notice triggered for injection point vacuum-truncate-enabled
+NOTICE: notice triggered for injection point vacuum-index-cleanup-auto
+NOTICE: notice triggered for injection point vacuum-truncate-enabled
+RESET vacuum_truncate;
+DROP TABLE vac_tab_auto;
+DROP TABLE vac_tab_on_toast_off;
+DROP TABLE vac_tab_off_toast_on;
+-- Cleanup
+SELECT injection_points_detach('vacuum-index-cleanup-auto');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_detach('vacuum-index-cleanup-disabled');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_detach('vacuum-index-cleanup-enabled');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_detach('vacuum-truncate-auto');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_detach('vacuum-truncate-disabled');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_detach('vacuum-truncate-enabled');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+DROP EXTENSION injection_points;
diff --git a/src/test/modules/injection_points/injection_points--1.0.sql b/src/test/modules/injection_points/injection_points--1.0.sql
index cc76b1bf99a..5f5657b2043 100644
--- a/src/test/modules/injection_points/injection_points--1.0.sql
+++ b/src/test/modules/injection_points/injection_points--1.0.sql
@@ -78,6 +78,18 @@ AS 'MODULE_PATHNAME', 'injection_points_detach'
LANGUAGE C STRICT PARALLEL UNSAFE;
--
+-- injection_points_list()
+--
+-- List of all the injection points currently attached.
+--
+CREATE FUNCTION injection_points_list(OUT point_name text,
+ OUT library text,
+ OUT function text)
+RETURNS SETOF record
+AS 'MODULE_PATHNAME', 'injection_points_list'
+LANGUAGE C STRICT VOLATILE PARALLEL RESTRICTED;
+
+--
-- injection_points_stats_numcalls()
--
-- Reports statistics, if any, related to the given injection point.
diff --git a/src/test/modules/injection_points/injection_points.c b/src/test/modules/injection_points/injection_points.c
index 3da0cbc10e0..31138301117 100644
--- a/src/test/modules/injection_points/injection_points.c
+++ b/src/test/modules/injection_points/injection_points.c
@@ -18,6 +18,7 @@
#include "postgres.h"
#include "fmgr.h"
+#include "funcapi.h"
#include "injection_stats.h"
#include "miscadmin.h"
#include "nodes/pg_list.h"
@@ -545,6 +546,44 @@ injection_points_detach(PG_FUNCTION_ARGS)
PG_RETURN_VOID();
}
+/*
+ * SQL function for listing all the injection points attached.
+ */
+PG_FUNCTION_INFO_V1(injection_points_list);
+Datum
+injection_points_list(PG_FUNCTION_ARGS)
+{
+#define NUM_INJECTION_POINTS_LIST 3
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ List *inj_points;
+ ListCell *lc;
+
+ /* Build a tuplestore to return our results in */
+ InitMaterializedSRF(fcinfo, 0);
+
+ inj_points = InjectionPointList();
+
+ foreach(lc, inj_points)
+ {
+ Datum values[NUM_INJECTION_POINTS_LIST];
+ bool nulls[NUM_INJECTION_POINTS_LIST];
+ InjectionPointData *inj_point = lfirst(lc);
+
+ memset(values, 0, sizeof(values));
+ memset(nulls, 0, sizeof(nulls));
+
+ values[0] = PointerGetDatum(cstring_to_text(inj_point->name));
+ values[1] = PointerGetDatum(cstring_to_text(inj_point->library));
+ values[2] = PointerGetDatum(cstring_to_text(inj_point->function));
+
+ /* shove row into tuplestore */
+ tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
+ }
+
+ return (Datum) 0;
+#undef NUM_INJECTION_POINTS_LIST
+}
+
void
_PG_init(void)
diff --git a/src/test/modules/injection_points/injection_stats.c b/src/test/modules/injection_points/injection_stats.c
index 14903c629e0..e3947b23ba5 100644
--- a/src/test/modules/injection_points/injection_stats.c
+++ b/src/test/modules/injection_points/injection_stats.c
@@ -59,7 +59,7 @@ static const PgStat_KindInfo injection_stats = {
/*
* Kind ID reserved for statistics of injection points.
*/
-#define PGSTAT_KIND_INJECTION 129
+#define PGSTAT_KIND_INJECTION 25
/* Track if stats are loaded */
static bool inj_stats_loaded = false;
diff --git a/src/test/modules/injection_points/injection_stats_fixed.c b/src/test/modules/injection_points/injection_stats_fixed.c
index 3d0c01bdd05..bc54c79d190 100644
--- a/src/test/modules/injection_points/injection_stats_fixed.c
+++ b/src/test/modules/injection_points/injection_stats_fixed.c
@@ -64,7 +64,7 @@ static const PgStat_KindInfo injection_stats_fixed = {
/*
* Kind ID reserved for statistics of injection points.
*/
-#define PGSTAT_KIND_INJECTION_FIXED 130
+#define PGSTAT_KIND_INJECTION_FIXED 26
/* Track if fixed-numbered stats are loaded */
static bool inj_fixed_loaded = false;
diff --git a/src/test/modules/injection_points/meson.build b/src/test/modules/injection_points/meson.build
index d61149712fd..20390d6b4bf 100644
--- a/src/test/modules/injection_points/meson.build
+++ b/src/test/modules/injection_points/meson.build
@@ -37,8 +37,9 @@ tests += {
'injection_points',
'hashagg',
'reindex_conc',
+ 'vacuum',
],
- 'regress_args': ['--dlpath', meson.build_root() / 'src/test/regress'],
+ 'regress_args': ['--dlpath', meson.project_build_root() / 'src/test/regress'],
# The injection points are cluster-wide, so disable installcheck
'runningcheck': false,
},
diff --git a/src/test/modules/injection_points/sql/injection_points.sql b/src/test/modules/injection_points/sql/injection_points.sql
index d9748331c77..874421e9c11 100644
--- a/src/test/modules/injection_points/sql/injection_points.sql
+++ b/src/test/modules/injection_points/sql/injection_points.sql
@@ -18,6 +18,9 @@ SELECT injection_points_attach('TestInjectionError', 'error');
SELECT injection_points_attach('TestInjectionLog', 'notice');
SELECT injection_points_attach('TestInjectionLog2', 'notice');
+SELECT point_name, library, function FROM injection_points_list()
+ ORDER BY point_name COLLATE "C";
+
SELECT injection_points_run('TestInjectionBooh'); -- nothing
SELECT injection_points_run('TestInjectionLog2'); -- notice
SELECT injection_points_run('TestInjectionLog2', NULL); -- notice
@@ -85,5 +88,9 @@ SELECT injection_points_detach('TestConditionError');
SELECT injection_points_attach('TestConditionLocal1', 'error');
SELECT injection_points_detach('TestConditionLocal1');
+-- No points should be left around.
+SELECT point_name, library, function FROM injection_points_list()
+ ORDER BY point_name COLLATE "C";
+
DROP EXTENSION injection_points;
DROP FUNCTION wait_pid;
diff --git a/src/test/modules/injection_points/sql/vacuum.sql b/src/test/modules/injection_points/sql/vacuum.sql
new file mode 100644
index 00000000000..23760dd0f38
--- /dev/null
+++ b/src/test/modules/injection_points/sql/vacuum.sql
@@ -0,0 +1,47 @@
+-- Tests for VACUUM
+
+CREATE EXTENSION injection_points;
+
+SELECT injection_points_set_local();
+SELECT injection_points_attach('vacuum-index-cleanup-auto', 'notice');
+SELECT injection_points_attach('vacuum-index-cleanup-disabled', 'notice');
+SELECT injection_points_attach('vacuum-index-cleanup-enabled', 'notice');
+SELECT injection_points_attach('vacuum-truncate-auto', 'notice');
+SELECT injection_points_attach('vacuum-truncate-disabled', 'notice');
+SELECT injection_points_attach('vacuum-truncate-enabled', 'notice');
+
+-- Check state of index_cleanup and truncate in VACUUM.
+CREATE TABLE vac_tab_on_toast_off(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=true, toast.vacuum_index_cleanup=false,
+ vacuum_truncate=true, toast.vacuum_truncate=false);
+CREATE TABLE vac_tab_off_toast_on(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=false, toast.vacuum_index_cleanup=true,
+ vacuum_truncate=false, toast.vacuum_truncate=true);
+-- Multiple relations should use their options in isolation.
+VACUUM vac_tab_on_toast_off, vac_tab_off_toast_on;
+
+-- Check "auto" case of index_cleanup and "truncate" controlled by
+-- its GUC.
+CREATE TABLE vac_tab_auto(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=auto, toast.vacuum_index_cleanup=auto);
+SET vacuum_truncate = false;
+VACUUM vac_tab_auto;
+SET vacuum_truncate = true;
+VACUUM vac_tab_auto;
+RESET vacuum_truncate;
+
+DROP TABLE vac_tab_auto;
+DROP TABLE vac_tab_on_toast_off;
+DROP TABLE vac_tab_off_toast_on;
+
+-- Cleanup
+SELECT injection_points_detach('vacuum-index-cleanup-auto');
+SELECT injection_points_detach('vacuum-index-cleanup-disabled');
+SELECT injection_points_detach('vacuum-index-cleanup-enabled');
+SELECT injection_points_detach('vacuum-truncate-auto');
+SELECT injection_points_detach('vacuum-truncate-disabled');
+SELECT injection_points_detach('vacuum-truncate-enabled');
+DROP EXTENSION injection_points;
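The new test drives the vacuum-index-cleanup-* and vacuum-truncate-* injection points through the per-table reloptions and, for the "auto" case, through the vacuum_truncate GUC. For context, these settings sit below the command level: an explicit option on the VACUUM command overrides both the reloption and the GUC. A hedged sketch of that precedence, reusing table names like those above before they are dropped (not part of the committed test):

    -- Command-level options win over reloptions and the GUC.
    VACUUM (INDEX_CLEANUP ON, TRUNCATE OFF) vac_tab_on_toast_off;
    -- Without command-level options, "auto" falls back to the reloption,
    -- and truncation falls back to the vacuum_truncate GUC.
    SET vacuum_truncate = false;
    VACUUM vac_tab_auto;
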
diff --git a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
index 61524bdbd8f..f9678853070 100644
--- a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
+++ b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
@@ -53,7 +53,8 @@ for my $testname (@tests)
$node->command_ok(
[
'libpq_pipeline', @extraargs,
- $testname, $node->connstr('postgres') . " max_protocol_version=latest"
+ $testname,
+ $node->connstr('postgres') . " max_protocol_version=latest"
],
"libpq_pipeline $testname");
@@ -76,7 +77,8 @@ for my $testname (@tests)
# test separately that it still works the old protocol version too.
$node->command_ok(
[
- 'libpq_pipeline', 'cancel', $node->connstr('postgres') . " max_protocol_version=3.0"
+ 'libpq_pipeline', 'cancel',
+ $node->connstr('postgres') . " max_protocol_version=3.0"
],
"libpq_pipeline cancel with protocol 3.0");
diff --git a/src/test/modules/meson.build b/src/test/modules/meson.build
index 9de0057bd1d..dd5cd065ba1 100644
--- a/src/test/modules/meson.build
+++ b/src/test/modules/meson.build
@@ -14,6 +14,7 @@ subdir('plsample')
subdir('spgist_name_ops')
subdir('ssl_passphrase_callback')
subdir('test_aio')
+subdir('test_binaryheap')
subdir('test_bloomfilter')
subdir('test_copy_callbacks')
subdir('test_custom_rmgrs')
diff --git a/src/test/modules/oauth_validator/meson.build b/src/test/modules/oauth_validator/meson.build
index e190f9cf15a..a6f937fd7d7 100644
--- a/src/test/modules/oauth_validator/meson.build
+++ b/src/test/modules/oauth_validator/meson.build
@@ -77,7 +77,7 @@ tests += {
't/002_client.pl',
],
'env': {
- 'PYTHON': python.path(),
+ 'PYTHON': python.full_path(),
'with_libcurl': oauth_flow_supported ? 'yes' : 'no',
'with_python': 'yes',
},
diff --git a/src/test/modules/test_aio/t/001_aio.pl b/src/test/modules/test_aio/t/001_aio.pl
index 4527c70785d..82ffffc058f 100644
--- a/src/test/modules/test_aio/t/001_aio.pl
+++ b/src/test/modules/test_aio/t/001_aio.pl
@@ -1123,7 +1123,8 @@ COMMIT;
{
# Create a corruption and then read the block without waiting for
# completion.
- $psql_a->query(qq(
+ $psql_a->query(
+ qq(
SELECT modify_rel_block('tbl_zero', 1, corrupt_header=>true);
SELECT read_rel_block_ll('tbl_zero', 1, wait_complete=>false, zero_on_error=>true)
));
@@ -1133,7 +1134,8 @@ SELECT read_rel_block_ll('tbl_zero', 1, wait_complete=>false, zero_on_error=>tru
$psql_b,
"$persistency: test completing read by other session doesn't generate warning",
qq(SELECT count(*) > 0 FROM tbl_zero;),
- qr/^t$/, qr/^$/);
+ qr/^t$/,
+ qr/^$/);
}
# Clean up
@@ -1355,18 +1357,24 @@ SELECT modify_rel_block('tbl_cs_fail', 6, corrupt_checksum=>true);
));
$psql->query_safe($invalidate_sql);
- psql_like($io_method, $psql,
+ psql_like(
+ $io_method,
+ $psql,
"reading block w/ wrong checksum with ignore_checksum_failure=off fails",
- $count_sql, qr/^$/, qr/ERROR: invalid page in block/);
+ $count_sql,
+ qr/^$/,
+ qr/ERROR: invalid page in block/);
$psql->query_safe("SET ignore_checksum_failure=on");
$psql->query_safe($invalidate_sql);
- psql_like($io_method, $psql,
- "reading block w/ wrong checksum with ignore_checksum_failure=off succeeds",
- $count_sql,
- qr/^$expect$/,
- qr/WARNING: ignoring (checksum failure|\d checksum failures)/);
+ psql_like(
+ $io_method,
+ $psql,
+ "reading block w/ wrong checksum with ignore_checksum_failure=off succeeds",
+ $count_sql,
+ qr/^$expect$/,
+ qr/WARNING: ignoring (checksum failure|\d checksum failures)/);
# Verify that ignore_checksum_failure=off works in multi-block reads
@@ -1432,19 +1440,22 @@ SELECT read_rel_block_ll('tbl_cs_fail', 1, nblocks=>5, zero_on_error=>true);),
# file.
$node->wait_for_log(qr/LOG: ignoring checksum failure in block 2/,
- $log_location);
+ $log_location);
ok(1, "$io_method: found information about checksum failure in block 2");
- $node->wait_for_log(qr/LOG: invalid page in block 3 of relation base.*; zeroing out page/,
- $log_location);
+ $node->wait_for_log(
+ qr/LOG: invalid page in block 3 of relation base.*; zeroing out page/,
+ $log_location);
ok(1, "$io_method: found information about invalid page in block 3");
- $node->wait_for_log(qr/LOG: invalid page in block 4 of relation base.*; zeroing out page/,
- $log_location);
+ $node->wait_for_log(
+ qr/LOG: invalid page in block 4 of relation base.*; zeroing out page/,
+ $log_location);
ok(1, "$io_method: found information about checksum failure in block 4");
- $node->wait_for_log(qr/LOG: invalid page in block 5 of relation base.*; zeroing out page/,
- $log_location);
+ $node->wait_for_log(
+ qr/LOG: invalid page in block 5 of relation base.*; zeroing out page/,
+ $log_location);
ok(1, "$io_method: found information about checksum failure in block 5");
@@ -1462,8 +1473,7 @@ SELECT modify_rel_block('tbl_cs_fail', 3, corrupt_checksum=>true, corrupt_header
qq(
SELECT read_rel_block_ll('tbl_cs_fail', 3, nblocks=>1, zero_on_error=>false);),
qr/^$/,
- qr/^psql:<stdin>:\d+: ERROR: invalid page in block 3 of relation/
- );
+ qr/^psql:<stdin>:\d+: ERROR: invalid page in block 3 of relation/);
psql_like(
$io_method,
diff --git a/src/test/modules/test_aio/test_aio.c b/src/test/modules/test_aio/test_aio.c
index 5cdfb89210b..c55cf6c0aac 100644
--- a/src/test/modules/test_aio/test_aio.c
+++ b/src/test/modules/test_aio/test_aio.c
@@ -42,9 +42,9 @@ typedef struct InjIoErrorState
bool short_read_result_set;
int short_read_result;
-} InjIoErrorState;
+} InjIoErrorState;
-static InjIoErrorState * inj_io_error_state;
+static InjIoErrorState *inj_io_error_state;
/* Shared memory init callbacks */
static shmem_request_hook_type prev_shmem_request_hook = NULL;
diff --git a/src/test/modules/test_binaryheap/.gitignore b/src/test/modules/test_binaryheap/.gitignore
new file mode 100644
index 00000000000..5dcb3ff9723
--- /dev/null
+++ b/src/test/modules/test_binaryheap/.gitignore
@@ -0,0 +1,4 @@
+# Generated subdirectories
+/log/
+/results/
+/tmp_check/
diff --git a/src/test/modules/test_binaryheap/Makefile b/src/test/modules/test_binaryheap/Makefile
new file mode 100644
index 00000000000..d310fbc9e88
--- /dev/null
+++ b/src/test/modules/test_binaryheap/Makefile
@@ -0,0 +1,24 @@
+# src/test/modules/test_binaryheap/Makefile
+
+MODULE_big = test_binaryheap
+OBJS = \
+ $(WIN32RES) \
+ test_binaryheap.o
+
+PGFILEDESC = "test_binaryheap - test code for binaryheap"
+
+EXTENSION = test_binaryheap
+DATA = test_binaryheap--1.0.sql
+
+REGRESS = test_binaryheap
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = src/test/modules/test_binaryheap
+top_builddir = ../../../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/src/test/modules/test_binaryheap/expected/test_binaryheap.out b/src/test/modules/test_binaryheap/expected/test_binaryheap.out
new file mode 100644
index 00000000000..16ce07875e3
--- /dev/null
+++ b/src/test/modules/test_binaryheap/expected/test_binaryheap.out
@@ -0,0 +1,12 @@
+CREATE EXTENSION test_binaryheap;
+--
+-- These tests don't produce any interesting output. We're checking that
+-- the operations complete without crashing or hanging and that none of their
+-- internal sanity tests fail.
+--
+SELECT test_binaryheap();
+ test_binaryheap
+-----------------
+
+(1 row)
+
diff --git a/src/test/modules/test_binaryheap/meson.build b/src/test/modules/test_binaryheap/meson.build
new file mode 100644
index 00000000000..816a43c93e9
--- /dev/null
+++ b/src/test/modules/test_binaryheap/meson.build
@@ -0,0 +1,33 @@
+# Copyright (c) 2025, PostgreSQL Global Development Group
+
+test_binaryheap_sources = files(
+ 'test_binaryheap.c',
+)
+
+if host_system == 'windows'
+ test_binaryheap_sources += rc_lib_gen.process(win32ver_rc, extra_args: [
+ '--NAME', 'test_binaryheap',
+ '--FILEDESC', 'test_binaryheap - test code for binaryheap',])
+endif
+
+test_binaryheap = shared_module('test_binaryheap',
+ test_binaryheap_sources,
+ kwargs: pg_test_mod_args,
+)
+test_install_libs += test_binaryheap
+
+test_install_data += files(
+ 'test_binaryheap.control',
+ 'test_binaryheap--1.0.sql',
+)
+
+tests += {
+ 'name': 'test_binaryheap',
+ 'sd': meson.current_source_dir(),
+ 'bd': meson.current_build_dir(),
+ 'regress': {
+ 'sql': [
+ 'test_binaryheap',
+ ],
+ },
+}
diff --git a/src/test/modules/test_binaryheap/sql/test_binaryheap.sql b/src/test/modules/test_binaryheap/sql/test_binaryheap.sql
new file mode 100644
index 00000000000..8439545815b
--- /dev/null
+++ b/src/test/modules/test_binaryheap/sql/test_binaryheap.sql
@@ -0,0 +1,8 @@
+CREATE EXTENSION test_binaryheap;
+
+--
+-- These tests don't produce any interesting output. We're checking that
+-- the operations complete without crashing or hanging and that none of their
+-- internal sanity tests fail.
+--
+SELECT test_binaryheap();
diff --git a/src/test/modules/test_binaryheap/test_binaryheap--1.0.sql b/src/test/modules/test_binaryheap/test_binaryheap--1.0.sql
new file mode 100644
index 00000000000..cddceeee603
--- /dev/null
+++ b/src/test/modules/test_binaryheap/test_binaryheap--1.0.sql
@@ -0,0 +1,7 @@
+/* src/test/modules/test_binaryheap/test_binaryheap--1.0.sql */
+
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION test_binaryheap" to load this file. \quit
+
+CREATE FUNCTION test_binaryheap() RETURNS VOID
+ AS 'MODULE_PATHNAME' LANGUAGE C;
diff --git a/src/test/modules/test_binaryheap/test_binaryheap.c b/src/test/modules/test_binaryheap/test_binaryheap.c
new file mode 100644
index 00000000000..583dae1da30
--- /dev/null
+++ b/src/test/modules/test_binaryheap/test_binaryheap.c
@@ -0,0 +1,275 @@
+/*--------------------------------------------------------------------------
+ *
+ * test_binaryheap.c
+ * Test correctness of binary heap implementation.
+ *
+ * Copyright (c) 2025, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ * src/test/modules/test_binaryheap/test_binaryheap.c
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "common/int.h"
+#include "common/pg_prng.h"
+#include "fmgr.h"
+#include "lib/binaryheap.h"
+
+PG_MODULE_MAGIC;
+
+/*
+ * Test binaryheap_comparator for max-heap of integers.
+ */
+static int
+int_cmp(Datum a, Datum b, void *arg)
+{
+ return pg_cmp_s32(DatumGetInt32(a), DatumGetInt32(b));
+}
+
+/*
+ * Loops through all nodes and returns the maximum value.
+ */
+static int
+get_max_from_heap(binaryheap *heap)
+{
+ int max = -1;
+
+ for (int i = 0; i < binaryheap_size(heap); i++)
+ max = Max(max, DatumGetInt32(binaryheap_get_node(heap, i)));
+
+ return max;
+}
+
+/*
+ * Generate a random permutation of the integers 0..size-1.
+ */
+static int *
+get_permutation(int size)
+{
+ int *permutation = (int *) palloc(size * sizeof(int));
+
+ permutation[0] = 0;
+
+ /*
+ * This is the "inside-out" variant of the Fisher-Yates shuffle algorithm.
+ * Notionally, we append each new value to the array and then swap it with
+ * a randomly-chosen array element (possibly including itself, else we
+ * fail to generate permutations with the last integer last). The swap
+ * step can be optimized by combining it with the insertion.
+ */
+ for (int i = 1; i < size; i++)
+ {
+ int j = pg_prng_uint64_range(&pg_global_prng_state, 0, i);
+
+ if (j < i) /* avoid fetching undefined data if j=i */
+ permutation[i] = permutation[j];
+ permutation[j] = i;
+ }
+
+ return permutation;
+}
+
+/*
+ * Ensure that the heap property holds for the given heap, i.e., each parent is
+ * greater than or equal to its children.
+ */
+static void
+verify_heap_property(binaryheap *heap)
+{
+ for (int i = 0; i < binaryheap_size(heap); i++)
+ {
+ int left = 2 * i + 1;
+ int right = 2 * i + 2;
+ int parent_val = DatumGetInt32(binaryheap_get_node(heap, i));
+
+ if (left < binaryheap_size(heap) &&
+ parent_val < DatumGetInt32(binaryheap_get_node(heap, left)))
+ elog(ERROR, "parent node less than left child");
+
+ if (right < binaryheap_size(heap) &&
+ parent_val < DatumGetInt32(binaryheap_get_node(heap, right)))
+ elog(ERROR, "parent node less than right child");
+ }
+}
+
+/*
+ * Check correctness of basic operations.
+ */
+static void
+test_basic(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+ int *permutation = get_permutation(size);
+
+ if (!binaryheap_empty(heap))
+ elog(ERROR, "new heap not empty");
+ if (binaryheap_size(heap) != 0)
+ elog(ERROR, "wrong size for new heap");
+
+ for (int i = 0; i < size; i++)
+ {
+ binaryheap_add(heap, Int32GetDatum(permutation[i]));
+ verify_heap_property(heap);
+ }
+
+ if (binaryheap_empty(heap))
+ elog(ERROR, "heap empty after adding values");
+ if (binaryheap_size(heap) != size)
+ elog(ERROR, "wrong size for heap after adding values");
+
+ if (DatumGetInt32(binaryheap_first(heap)) != get_max_from_heap(heap))
+ elog(ERROR, "incorrect root node after adding values");
+
+ for (int i = 0; i < size; i++)
+ {
+ int expected = get_max_from_heap(heap);
+ int actual = DatumGetInt32(binaryheap_remove_first(heap));
+
+ if (actual != expected)
+ elog(ERROR, "incorrect root node after removing root");
+ verify_heap_property(heap);
+ }
+
+ if (!binaryheap_empty(heap))
+ elog(ERROR, "heap not empty after removing all nodes");
+}
+
+/*
+ * Test building heap after unordered additions.
+ */
+static void
+test_build(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+ int *permutation = get_permutation(size);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add_unordered(heap, Int32GetDatum(permutation[i]));
+
+ if (binaryheap_size(heap) != size)
+ elog(ERROR, "wrong size for heap after unordered additions");
+
+ binaryheap_build(heap);
+ verify_heap_property(heap);
+}
+
+/*
+ * Test removing nodes.
+ */
+static void
+test_remove_node(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+ int *permutation = get_permutation(size);
+ int remove_count = pg_prng_uint64_range(&pg_global_prng_state,
+ 0, size - 1);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add(heap, Int32GetDatum(permutation[i]));
+
+ for (int i = 0; i < remove_count; i++)
+ {
+ int idx = pg_prng_uint64_range(&pg_global_prng_state,
+ 0, binaryheap_size(heap) - 1);
+
+ binaryheap_remove_node(heap, idx);
+ verify_heap_property(heap);
+ }
+
+ if (binaryheap_size(heap) != size - remove_count)
+ elog(ERROR, "wrong size after removing nodes");
+}
+
+/*
+ * Test replacing the root node.
+ */
+static void
+test_replace_first(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add(heap, Int32GetDatum(i));
+
+ /*
+ * Replace root with a value smaller than everything in the heap.
+ */
+ binaryheap_replace_first(heap, Int32GetDatum(-1));
+ verify_heap_property(heap);
+
+ /*
+ * Replace root with a value in the middle of the heap.
+ */
+ binaryheap_replace_first(heap, Int32GetDatum(size / 2));
+ verify_heap_property(heap);
+
+ /*
+ * Replace root with a larger value than everything in the heap.
+ */
+ binaryheap_replace_first(heap, Int32GetDatum(size + 1));
+ verify_heap_property(heap);
+}
+
+/*
+ * Test duplicate values.
+ */
+static void
+test_duplicates(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+ int dup = pg_prng_uint64_range(&pg_global_prng_state, 0, size - 1);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add(heap, Int32GetDatum(dup));
+
+ for (int i = 0; i < size; i++)
+ {
+ if (DatumGetInt32(binaryheap_remove_first(heap)) != dup)
+ elog(ERROR, "unexpected value in heap with duplicates");
+ }
+}
+
+/*
+ * Test resetting.
+ */
+static void
+test_reset(int size)
+{
+ binaryheap *heap = binaryheap_allocate(size, int_cmp, NULL);
+
+ for (int i = 0; i < size; i++)
+ binaryheap_add(heap, Int32GetDatum(i));
+
+ binaryheap_reset(heap);
+
+ if (!binaryheap_empty(heap))
+ elog(ERROR, "heap not empty after resetting");
+}
+
+/*
+ * SQL-callable entry point to perform all tests.
+ */
+PG_FUNCTION_INFO_V1(test_binaryheap);
+
+Datum
+test_binaryheap(PG_FUNCTION_ARGS)
+{
+ static const int test_sizes[] = {1, 2, 3, 10, 100, 1000};
+
+ for (int i = 0; i < sizeof(test_sizes) / sizeof(int); i++)
+ {
+ int size = test_sizes[i];
+
+ test_basic(size);
+ test_build(size);
+ test_remove_node(size);
+ test_replace_first(size);
+ test_duplicates(size);
+ test_reset(size);
+ }
+
+ PG_RETURN_VOID();
+}
diff --git a/src/test/modules/test_binaryheap/test_binaryheap.control b/src/test/modules/test_binaryheap/test_binaryheap.control
new file mode 100644
index 00000000000..dd0785e05bd
--- /dev/null
+++ b/src/test/modules/test_binaryheap/test_binaryheap.control
@@ -0,0 +1,5 @@
+# test_binaryheap extension
+comment = 'Test code for binaryheap'
+default_version = '1.0'
+module_pathname = '$libdir/test_binaryheap'
+relocatable = true
diff --git a/src/test/modules/test_dsm_registry/expected/test_dsm_registry.out b/src/test/modules/test_dsm_registry/expected/test_dsm_registry.out
index 8ffbd343a05..ca8abbb377e 100644
--- a/src/test/modules/test_dsm_registry/expected/test_dsm_registry.out
+++ b/src/test/modules/test_dsm_registry/expected/test_dsm_registry.out
@@ -1,3 +1,10 @@
+SELECT name, type, size IS DISTINCT FROM 0 AS size
+FROM pg_dsm_registry_allocations
+WHERE name like 'test_dsm_registry%' ORDER BY name;
+ name | type | size
+------+------+------
+(0 rows)
+
CREATE EXTENSION test_dsm_registry;
SELECT set_val_in_shmem(1236);
set_val_in_shmem
@@ -5,6 +12,12 @@ SELECT set_val_in_shmem(1236);
(1 row)
+SELECT set_val_in_hash('test', '1414');
+ set_val_in_hash
+-----------------
+
+(1 row)
+
\c
SELECT get_val_in_shmem();
get_val_in_shmem
@@ -12,3 +25,20 @@ SELECT get_val_in_shmem();
1236
(1 row)
+SELECT get_val_in_hash('test');
+ get_val_in_hash
+-----------------
+ 1414
+(1 row)
+
+\c
+SELECT name, type, size IS DISTINCT FROM 0 AS size
+FROM pg_dsm_registry_allocations
+WHERE name like 'test_dsm_registry%' ORDER BY name;
+ name | type | size
+------------------------+---------+------
+ test_dsm_registry_dsa | area | t
+ test_dsm_registry_dsm | segment | t
+ test_dsm_registry_hash | hash | t
+(3 rows)
+
diff --git a/src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql b/src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql
index b3351be0a16..965a3f1ebb6 100644
--- a/src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql
+++ b/src/test/modules/test_dsm_registry/sql/test_dsm_registry.sql
@@ -1,4 +1,13 @@
+SELECT name, type, size IS DISTINCT FROM 0 AS size
+FROM pg_dsm_registry_allocations
+WHERE name like 'test_dsm_registry%' ORDER BY name;
CREATE EXTENSION test_dsm_registry;
SELECT set_val_in_shmem(1236);
+SELECT set_val_in_hash('test', '1414');
\c
SELECT get_val_in_shmem();
+SELECT get_val_in_hash('test');
+\c
+SELECT name, type, size IS DISTINCT FROM 0 AS size
+FROM pg_dsm_registry_allocations
+WHERE name like 'test_dsm_registry%' ORDER BY name;
diff --git a/src/test/modules/test_dsm_registry/test_dsm_registry--1.0.sql b/src/test/modules/test_dsm_registry/test_dsm_registry--1.0.sql
index 8c55b0919b1..5da45155be9 100644
--- a/src/test/modules/test_dsm_registry/test_dsm_registry--1.0.sql
+++ b/src/test/modules/test_dsm_registry/test_dsm_registry--1.0.sql
@@ -8,3 +8,9 @@ CREATE FUNCTION set_val_in_shmem(val INT) RETURNS VOID
CREATE FUNCTION get_val_in_shmem() RETURNS INT
AS 'MODULE_PATHNAME' LANGUAGE C;
+
+CREATE FUNCTION set_val_in_hash(key TEXT, val TEXT) RETURNS VOID
+ AS 'MODULE_PATHNAME' LANGUAGE C;
+
+CREATE FUNCTION get_val_in_hash(key TEXT) RETURNS TEXT
+ AS 'MODULE_PATHNAME' LANGUAGE C;
diff --git a/src/test/modules/test_dsm_registry/test_dsm_registry.c b/src/test/modules/test_dsm_registry/test_dsm_registry.c
index 96a890be228..141c8ed1b34 100644
--- a/src/test/modules/test_dsm_registry/test_dsm_registry.c
+++ b/src/test/modules/test_dsm_registry/test_dsm_registry.c
@@ -15,6 +15,7 @@
#include "fmgr.h"
#include "storage/dsm_registry.h"
#include "storage/lwlock.h"
+#include "utils/builtins.h"
PG_MODULE_MAGIC;
@@ -24,15 +25,31 @@ typedef struct TestDSMRegistryStruct
LWLock lck;
} TestDSMRegistryStruct;
-static TestDSMRegistryStruct *tdr_state;
+typedef struct TestDSMRegistryHashEntry
+{
+ char key[64];
+ dsa_pointer val;
+} TestDSMRegistryHashEntry;
+
+static TestDSMRegistryStruct *tdr_dsm;
+static dsa_area *tdr_dsa;
+static dshash_table *tdr_hash;
+
+static const dshash_parameters dsh_params = {
+ offsetof(TestDSMRegistryHashEntry, val),
+ sizeof(TestDSMRegistryHashEntry),
+ dshash_strcmp,
+ dshash_strhash,
+ dshash_strcpy
+};
static void
-tdr_init_shmem(void *ptr)
+init_tdr_dsm(void *ptr)
{
- TestDSMRegistryStruct *state = (TestDSMRegistryStruct *) ptr;
+ TestDSMRegistryStruct *dsm = (TestDSMRegistryStruct *) ptr;
- LWLockInitialize(&state->lck, LWLockNewTrancheId());
- state->val = 0;
+ LWLockInitialize(&dsm->lck, LWLockNewTrancheId());
+ dsm->val = 0;
}
static void
@@ -40,11 +57,17 @@ tdr_attach_shmem(void)
{
bool found;
- tdr_state = GetNamedDSMSegment("test_dsm_registry",
- sizeof(TestDSMRegistryStruct),
- tdr_init_shmem,
- &found);
- LWLockRegisterTranche(tdr_state->lck.tranche, "test_dsm_registry");
+ tdr_dsm = GetNamedDSMSegment("test_dsm_registry_dsm",
+ sizeof(TestDSMRegistryStruct),
+ init_tdr_dsm,
+ &found);
+ LWLockRegisterTranche(tdr_dsm->lck.tranche, "test_dsm_registry");
+
+ if (tdr_dsa == NULL)
+ tdr_dsa = GetNamedDSA("test_dsm_registry_dsa", &found);
+
+ if (tdr_hash == NULL)
+ tdr_hash = GetNamedDSHash("test_dsm_registry_hash", &dsh_params, &found);
}
PG_FUNCTION_INFO_V1(set_val_in_shmem);
@@ -53,9 +76,9 @@ set_val_in_shmem(PG_FUNCTION_ARGS)
{
tdr_attach_shmem();
- LWLockAcquire(&tdr_state->lck, LW_EXCLUSIVE);
- tdr_state->val = PG_GETARG_INT32(0);
- LWLockRelease(&tdr_state->lck);
+ LWLockAcquire(&tdr_dsm->lck, LW_EXCLUSIVE);
+ tdr_dsm->val = PG_GETARG_INT32(0);
+ LWLockRelease(&tdr_dsm->lck);
PG_RETURN_VOID();
}
@@ -68,9 +91,57 @@ get_val_in_shmem(PG_FUNCTION_ARGS)
tdr_attach_shmem();
- LWLockAcquire(&tdr_state->lck, LW_SHARED);
- ret = tdr_state->val;
- LWLockRelease(&tdr_state->lck);
+ LWLockAcquire(&tdr_dsm->lck, LW_SHARED);
+ ret = tdr_dsm->val;
+ LWLockRelease(&tdr_dsm->lck);
PG_RETURN_INT32(ret);
}
+
+PG_FUNCTION_INFO_V1(set_val_in_hash);
+Datum
+set_val_in_hash(PG_FUNCTION_ARGS)
+{
+ TestDSMRegistryHashEntry *entry;
+ char *key = TextDatumGetCString(PG_GETARG_DATUM(0));
+ char *val = TextDatumGetCString(PG_GETARG_DATUM(1));
+ bool found;
+
+ if (strlen(key) >= offsetof(TestDSMRegistryHashEntry, val))
+ ereport(ERROR,
+ (errmsg("key too long")));
+
+ tdr_attach_shmem();
+
+ entry = dshash_find_or_insert(tdr_hash, key, &found);
+ if (found)
+ dsa_free(tdr_dsa, entry->val);
+
+ entry->val = dsa_allocate(tdr_dsa, strlen(val) + 1);
+ strcpy(dsa_get_address(tdr_dsa, entry->val), val);
+
+ dshash_release_lock(tdr_hash, entry);
+
+ PG_RETURN_VOID();
+}
+
+PG_FUNCTION_INFO_V1(get_val_in_hash);
+Datum
+get_val_in_hash(PG_FUNCTION_ARGS)
+{
+ TestDSMRegistryHashEntry *entry;
+ char *key = TextDatumGetCString(PG_GETARG_DATUM(0));
+ text *val = NULL;
+
+ tdr_attach_shmem();
+
+ entry = dshash_find(tdr_hash, key, false);
+ if (entry == NULL)
+ PG_RETURN_NULL();
+
+ val = cstring_to_text(dsa_get_address(tdr_dsa, entry->val));
+
+ dshash_release_lock(tdr_hash, entry);
+
+ PG_RETURN_TEXT_P(val);
+}
diff --git a/src/test/modules/test_shm_mq/worker.c b/src/test/modules/test_shm_mq/worker.c
index 96cd304dbbc..c1d321b69a4 100644
--- a/src/test/modules/test_shm_mq/worker.c
+++ b/src/test/modules/test_shm_mq/worker.c
@@ -77,7 +77,7 @@ test_shm_mq_main(Datum main_arg)
* exit, which is fine. If there were a ResourceOwner, it would acquire
* ownership of the mapping, but we have no need for that.
*/
- seg = dsm_attach(DatumGetInt32(main_arg));
+ seg = dsm_attach(DatumGetUInt32(main_arg));
if (seg == NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
diff --git a/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
index 1725fe2f948..7224c286e1d 100644
--- a/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
+++ b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
@@ -251,6 +251,32 @@ sub adjust_database_contents
'drop operator if exists public.=> (bigint, NONE)');
}
+ # Version 19 changed the output format of pg_lsn. To avoid output
+ # differences, set all pg_lsn columns to NULL if the old version is
+ # older than 19.
+ if ($old_version < 19)
+ {
+ if ($old_version >= '9.5')
+ {
+ _add_st($result, 'regression',
+ "update brintest set lsncol = NULL");
+ }
+
+ if ($old_version >= 12)
+ {
+ _add_st($result, 'regression',
+ "update tab_core_types set pg_lsn = NULL");
+ }
+
+ if ($old_version >= 14)
+ {
+ _add_st($result, 'regression',
+ "update brintest_multi set lsncol = NULL");
+ _add_st($result, 'regression',
+ "update brintest_bloom set lsncol = NULL");
+ }
+ }
+
return $result;
}
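The pg_lsn adjustment above pairs with the sprintf change in 016_min_consistency.pl later in this diff, where "%X/%X" becomes "%X/%08X": the low 32-bit half of an LSN is now zero-padded to eight hex digits. A hedged illustration of the resulting output difference (assuming the type's text output follows that same convention, which is what the comment in AdjustUpgrade.pm implies):

    SELECT '3/4'::pg_lsn;
    -- old output: 3/4
    -- new output: 3/00000004
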
diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm
index 1c11750ac1d..35413f14019 100644
--- a/src/test/perl/PostgreSQL/Test/Cluster.pm
+++ b/src/test/perl/PostgreSQL/Test/Cluster.pm
@@ -290,6 +290,33 @@ sub connstr
=pod
+=item $node->is_alive()
+
+Check if the node is alive, using pg_isready.
+Returns 1 if successful, 0 on failure.
+
+=cut
+
+sub is_alive
+{
+ my ($self) = @_;
+ local %ENV = $self->_get_env();
+
+ my $ret = PostgreSQL::Test::Utils::system_log(
+ 'pg_isready',
+ '--timeout' => $PostgreSQL::Test::Utils::timeout_default,
+ '--host' => $self->host,
+ '--port' => $self->port);
+
+ if ($ret != 0)
+ {
+ return 0;
+ }
+ return 1;
+}
+
+=pod
+
=item $node->raw_connect()
Open a raw TCP or Unix domain socket connection to the server. This is
@@ -684,7 +711,7 @@ sub init
print $conf "\n# Added by PostgreSQL::Test::Cluster.pm\n";
print $conf "fsync = off\n";
print $conf "restart_after_crash = off\n";
- print $conf "log_line_prefix = '%m [%p] %q%a '\n";
+ print $conf "log_line_prefix = '%m %b[%p] %q%a '\n";
print $conf "log_statement = all\n";
print $conf "log_replication_commands = on\n";
print $conf "wal_retrieve_retry_interval = '500ms'\n";
@@ -2199,6 +2226,14 @@ sub psql
$ret = $?;
};
my $exc_save = $@;
+
+ # we need a dummy $stderr from hereon, if we didn't collect it
+ if (! defined $stderr)
+ {
+ my $errtxt = "<not collected>";
+ $stderr = \$errtxt;
+ }
+
if ($exc_save)
{
diff --git a/src/test/perl/PostgreSQL/Test/Utils.pm b/src/test/perl/PostgreSQL/Test/Utils.pm
index 7d7ca83495f..85d36a3171e 100644
--- a/src/test/perl/PostgreSQL/Test/Utils.pm
+++ b/src/test/perl/PostgreSQL/Test/Utils.pm
@@ -108,6 +108,7 @@ BEGIN
delete $ENV{LANGUAGE};
delete $ENV{LC_ALL};
$ENV{LC_MESSAGES} = 'C';
+ $ENV{LC_NUMERIC} = 'C';
setlocale(LC_ALL, "");
# This list should be kept in sync with pg_regress.c.
diff --git a/src/test/postmaster/t/002_connection_limits.pl b/src/test/postmaster/t/002_connection_limits.pl
index 6442500fc37..4a7fb16261f 100644
--- a/src/test/postmaster/t/002_connection_limits.pl
+++ b/src/test/postmaster/t/002_connection_limits.pl
@@ -68,7 +68,8 @@ sub connect_fails_wait
my $log_location = -s $node->logfile;
$node->connect_fails($connstr, $test_name, %params);
- $node->wait_for_log(qr/DEBUG: (00000: )?client backend.*exited with exit code 1/,
+ $node->wait_for_log(
+ qr/DEBUG: (00000: )?client backend.*exited with exit code 1/,
$log_location);
ok(1, "$test_name: client backend process exited");
}
diff --git a/src/test/recovery/meson.build b/src/test/recovery/meson.build
index cb983766c67..52993c32dbb 100644
--- a/src/test/recovery/meson.build
+++ b/src/test/recovery/meson.build
@@ -54,6 +54,9 @@ tests += {
't/043_no_contrecord_switch.pl',
't/044_invalidate_inactive_slots.pl',
't/045_archive_restartpoint.pl',
+ 't/046_checkpoint_logical_slot.pl',
+ 't/047_checkpoint_physical_slot.pl',
+ 't/048_vacuum_horizon_floor.pl'
],
},
}
diff --git a/src/test/recovery/t/003_recovery_targets.pl b/src/test/recovery/t/003_recovery_targets.pl
index 0ae2e982727..f2109efa9b1 100644
--- a/src/test/recovery/t/003_recovery_targets.pl
+++ b/src/test/recovery/t/003_recovery_targets.pl
@@ -187,4 +187,54 @@ ok( $logfile =~
qr/FATAL: .* recovery ended before configured recovery target was reached/,
'recovery end before target reached is a fatal error');
+# Invalid timeline target
+$node_standby = PostgreSQL::Test::Cluster->new('standby_9');
+$node_standby->init_from_backup($node_primary, 'my_backup',
+ has_restoring => 1);
+$node_standby->append_conf('postgresql.conf',
+ "recovery_target_timeline = 'bogus'");
+
+$res = run_log(
+ [
+ 'pg_ctl',
+ '--pgdata' => $node_standby->data_dir,
+ '--log' => $node_standby->logfile,
+ 'start',
+ ]);
+ok(!$res, 'invalid timeline target (bogus value)');
+
+my $log_start = $node_standby->wait_for_log("is not a valid number");
+
+# Timeline target out of min range
+$node_standby->append_conf('postgresql.conf',
+ "recovery_target_timeline = '0'");
+
+$res = run_log(
+ [
+ 'pg_ctl',
+ '--pgdata' => $node_standby->data_dir,
+ '--log' => $node_standby->logfile,
+ 'start',
+ ]);
+ok(!$res, 'invalid timeline target (lower bound check)');
+
+$log_start =
+ $node_standby->wait_for_log("must be between 1 and 4294967295", $log_start);
+
+# Timeline target out of max range
+$node_standby->append_conf('postgresql.conf',
+ "recovery_target_timeline = '4294967296'");
+
+$res = run_log(
+ [
+ 'pg_ctl',
+ '--pgdata' => $node_standby->data_dir,
+ '--log' => $node_standby->logfile,
+ 'start',
+ ]);
+ok(!$res, 'invalid timeline target (upper bound check)');
+
+$log_start =
+ $node_standby->wait_for_log("must be between 1 and 4294967295", $log_start);
+
done_testing();
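
For contrast with the three invalid settings exercised above, the accepted forms of recovery_target_timeline are 'current', 'latest', or a numeric timeline ID between 1 and 4294967295. A minimal sketch (not part of the patch, reusing the node name from the test above):

    # Valid counterparts to the invalid settings tested above:
    $node_standby->append_conf('postgresql.conf',
        "recovery_target_timeline = 'latest'");    # follow newly created timelines
    $node_standby->append_conf('postgresql.conf',
        "recovery_target_timeline = '1'");         # a specific numeric timeline ID
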
diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl
index debfa635c36..4c5af018ee4 100644
--- a/src/test/recovery/t/013_crash_restart.pl
+++ b/src/test/recovery/t/013_crash_restart.pl
@@ -228,6 +228,13 @@ is( $node->safe_psql(
'before-orderly-restart',
'can still write after crash restart');
+# Confirm that the logical replication launcher, a background worker
+# without the never-restart flag, has also restarted successfully.
+is($node->poll_query_until('postgres',
+ "SELECT count(*) = 1 FROM pg_stat_activity WHERE backend_type = 'logical replication launcher'"),
+ '1',
+ 'logical replication launcher restarted after crash');
+
# Just to be sure, check that an orderly restart now still works
$node->restart();
diff --git a/src/test/recovery/t/016_min_consistency.pl b/src/test/recovery/t/016_min_consistency.pl
index 9a3b4866fce..b381d0c21b5 100644
--- a/src/test/recovery/t/016_min_consistency.pl
+++ b/src/test/recovery/t/016_min_consistency.pl
@@ -39,7 +39,7 @@ sub find_largest_lsn
defined($len) or die "read error on $filename: $!";
close($fh);
- return sprintf("%X/%X", $max_hi, $max_lo);
+ return sprintf("%X/%08X", $max_hi, $max_lo);
}
# Initialize primary node
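
The padded format matters because the low half of an LSN is a 32-bit quantity; a minimal sketch of the difference (plain Perl, not part of the patch):

    my ($hi, $lo) = (22, 0x28);
    printf("%X/%X\n", $hi, $lo);      # old format: 16/28
    printf("%X/%08X\n", $hi, $lo);    # new format: 16/00000028
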
diff --git a/src/test/recovery/t/027_stream_regress.pl b/src/test/recovery/t/027_stream_regress.pl
index 83def062d11..5d2c06ba06e 100644
--- a/src/test/recovery/t/027_stream_regress.pl
+++ b/src/test/recovery/t/027_stream_regress.pl
@@ -81,7 +81,14 @@ my $rc =
. "--max-concurrent-tests=20 "
. "--inputdir=../regress "
. "--outputdir=\"$outputdir\"");
-if ($rc != 0)
+
+# Regression diffs are only meaningful if both the primary and the standby
+# are still alive after a regression test failure. A crash would merely
+# bloat the logs with errors from queries that could not run.
+my $primary_alive = $node_primary->is_alive;
+my $standby_alive = $node_standby_1->is_alive;
+if ($rc != 0 && $primary_alive && $standby_alive)
{
# Dump out the regression diffs file, if there is one
my $diffs = "$outputdir/regression.diffs";
@@ -93,6 +100,8 @@ if ($rc != 0)
}
}
is($rc, 0, 'regression tests pass');
+is($primary_alive, 1, 'primary alive after regression test run');
+is($standby_alive, 1, 'standby alive after regression test run');
# Clobber all sequences with their next value, so that we don't have
# differences between nodes due to caching.
diff --git a/src/test/recovery/t/040_standby_failover_slots_sync.pl b/src/test/recovery/t/040_standby_failover_slots_sync.pl
index 9c8b49e942d..2c61c51e914 100644
--- a/src/test/recovery/t/040_standby_failover_slots_sync.pl
+++ b/src/test/recovery/t/040_standby_failover_slots_sync.pl
@@ -941,8 +941,7 @@ is( $standby1->safe_psql(
'synced slot retained on the new primary');
# Commit the prepared transaction
-$standby1->safe_psql('postgres',
- "COMMIT PREPARED 'test_twophase_slotsync';");
+$standby1->safe_psql('postgres', "COMMIT PREPARED 'test_twophase_slotsync';");
$standby1->wait_for_catchup('regress_mysub1');
# Confirm that the prepared transaction is replicated to the subscriber
diff --git a/src/test/recovery/t/041_checkpoint_at_promote.pl b/src/test/recovery/t/041_checkpoint_at_promote.pl
index cb63ac8d5c9..12750ff7d4f 100644
--- a/src/test/recovery/t/041_checkpoint_at_promote.pl
+++ b/src/test/recovery/t/041_checkpoint_at_promote.pl
@@ -91,7 +91,7 @@ $node_standby->wait_for_event('checkpointer', 'create-restart-point');
# Check the logs that the restart point has started on standby. This is
# optional, but let's be sure.
ok( $node_standby->log_contains(
- "restartpoint starting: immediate wait", $logstart),
+ "restartpoint starting: fast wait", $logstart),
"restartpoint has started");
# Trigger promotion during the restart point.
diff --git a/src/test/recovery/t/046_checkpoint_logical_slot.pl b/src/test/recovery/t/046_checkpoint_logical_slot.pl
new file mode 100644
index 00000000000..4fd709e3a03
--- /dev/null
+++ b/src/test/recovery/t/046_checkpoint_logical_slot.pl
@@ -0,0 +1,142 @@
+# Copyright (c) 2025, PostgreSQL Global Development Group
+#
+# This test verifies the case where the logical slot is advanced during a
+# checkpoint. The test checks that the logical slot's restart_lsn still refers
+# to an existing WAL segment after an immediate restart.
+#
+use strict;
+use warnings FATAL => 'all';
+
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+
+use Test::More;
+
+if ($ENV{enable_injection_points} ne 'yes')
+{
+ plan skip_all => 'Injection points not supported by this build';
+}
+
+my ($node, $result);
+
+$node = PostgreSQL::Test::Cluster->new('mike');
+$node->init;
+$node->append_conf('postgresql.conf', "wal_level = 'logical'");
+$node->start;
+
+# Check if the extension injection_points is available, as this script may
+# be run with installcheck, where the module would not be installed by
+# default.
+if (!$node->check_extension('injection_points'))
+{
+ plan skip_all => 'Extension injection_points not installed';
+}
+
+$node->safe_psql('postgres', q(CREATE EXTENSION injection_points));
+
+# Create the two slots we'll need.
+$node->safe_psql('postgres',
+ q{select pg_create_logical_replication_slot('slot_logical', 'test_decoding')}
+);
+$node->safe_psql('postgres',
+ q{select pg_create_physical_replication_slot('slot_physical', true)});
+
+# Advance both slots to the current position just to have everything "valid".
+$node->safe_psql('postgres',
+ q{select count(*) from pg_logical_slot_get_changes('slot_logical', null, null)}
+);
+$node->safe_psql('postgres',
+ q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Run checkpoint to flush current state to disk and set a baseline.
+$node->safe_psql('postgres', q{checkpoint});
+
+# Generate some transactions to get RUNNING_XACTS.
+my $xacts = $node->background_psql('postgres');
+$xacts->query_until(
+ qr/run_xacts/,
+ q(\echo run_xacts
+SELECT 1 \watch 0.1
+\q
+));
+
+$node->advance_wal(20);
+
+# Run another checkpoint to set a new baseline for WAL removal.
+$node->safe_psql('postgres', q{checkpoint});
+
+$node->advance_wal(20);
+
+# Run another checkpoint, this time in the background, and make it wait
+# on the injection point so that the checkpoint stops right before
+# removing old WAL segments.
+note('starting checkpoint');
+
+my $checkpoint = $node->background_psql('postgres');
+$checkpoint->query_safe(
+ q(select injection_points_attach('checkpoint-before-old-wal-removal','wait'))
+);
+$checkpoint->query_until(
+ qr/starting_checkpoint/,
+ q(\echo starting_checkpoint
+checkpoint;
+\q
+));
+
+# Wait until the checkpoint stops right before removing WAL segments.
+note('waiting for injection_point');
+$node->wait_for_event('checkpointer', 'checkpoint-before-old-wal-removal');
+note('injection_point is reached');
+
+# Try to advance the logical slot, but make it stop when it moves to the next
+# WAL segment (this has to happen in the background, too).
+my $logical = $node->background_psql('postgres');
+$logical->query_safe(
+ q{select injection_points_attach('logical-replication-slot-advance-segment','wait');}
+);
+$logical->query_until(
+ qr/get_changes/,
+ q(
+\echo get_changes
+select count(*) from pg_logical_slot_get_changes('slot_logical', null, null) \watch 1
+\q
+));
+
+# Wait until the slot's restart_lsn points to the next WAL segment.
+note('waiting for injection_point');
+$node->wait_for_event('client backend',
+ 'logical-replication-slot-advance-segment');
+note('injection_point is reached');
+
+# OK, we're in the right situation: time to advance the physical slot, which
+# recalculates the required LSN, and then unblock the checkpoint, which
+# removes the WAL still needed by the logical slot.
+$node->safe_psql('postgres',
+ q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Generate a long WAL record, spanning at least two pages, for the follow-up
+# post-recovery check.
+$node->safe_psql('postgres',
+ q{select pg_logical_emit_message(false, '', repeat('123456789', 1000))});
+
+# Continue the checkpoint and wait for its completion.
+my $log_offset = -s $node->logfile;
+$node->safe_psql('postgres',
+ q{select injection_points_wakeup('checkpoint-before-old-wal-removal')});
+$node->wait_for_log(qr/checkpoint complete/, $log_offset);
+
+# Abruptly stop the server.
+$node->stop('immediate');
+
+$node->start;
+
+eval {
+ $node->safe_psql('postgres',
+ q{select count(*) from pg_logical_slot_get_changes('slot_logical', null, null);}
+ );
+};
+is($@, '', "Logical slot still valid");
+
+done_testing();
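
The attach/wait/wakeup choreography used above is the general injection_points pattern; a minimal sketch, assuming a build with injection points and the extension already created ('some-point' is a placeholder name):

    # Suspend whichever process reaches the named point, act while it is
    # stalled, then release it.
    $node->safe_psql('postgres',
        q{SELECT injection_points_attach('some-point', 'wait')});
    # ... trigger the code path that runs through 'some-point' ...
    $node->wait_for_event('checkpointer', 'some-point');
    # ... do work while the process is stopped at the point ...
    $node->safe_psql('postgres',
        q{SELECT injection_points_wakeup('some-point')});
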
diff --git a/src/test/recovery/t/047_checkpoint_physical_slot.pl b/src/test/recovery/t/047_checkpoint_physical_slot.pl
new file mode 100644
index 00000000000..9e98383e30e
--- /dev/null
+++ b/src/test/recovery/t/047_checkpoint_physical_slot.pl
@@ -0,0 +1,133 @@
+# Copyright (c) 2025, PostgreSQL Global Development Group
+#
+# This test verifies the case where the physical slot is advanced during a
+# checkpoint. The test checks that the physical slot's restart_lsn still refers
+# to an existing WAL segment after an immediate restart.
+#
+use strict;
+use warnings FATAL => 'all';
+
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+
+use Test::More;
+
+if ($ENV{enable_injection_points} ne 'yes')
+{
+ plan skip_all => 'Injection points not supported by this build';
+}
+
+my ($node, $result);
+
+$node = PostgreSQL::Test::Cluster->new('mike');
+$node->init;
+$node->append_conf('postgresql.conf', "wal_level = 'replica'");
+$node->start;
+
+# Check if the extension injection_points is available, as this script may
+# be run with installcheck, where the module would not be installed by
+# default.
+if (!$node->check_extension('injection_points'))
+{
+ plan skip_all => 'Extension injection_points not installed';
+}
+
+$node->safe_psql('postgres', q(CREATE EXTENSION injection_points));
+
+# Create a physical replication slot.
+$node->safe_psql('postgres',
+ q{select pg_create_physical_replication_slot('slot_physical', true)});
+
+# Advance slot to the current position, just to have everything "valid".
+$node->safe_psql('postgres',
+ q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Run checkpoint to flush current state to disk and set a baseline.
+$node->safe_psql('postgres', q{checkpoint});
+
+# Advance the WAL by about 20 segments.
+$node->advance_wal(20);
+
+# Advance slot to the current position, just to have everything "valid".
+$node->safe_psql('postgres',
+ q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Run another checkpoint to set a new baseline for WAL removal.
+$node->safe_psql('postgres', q{checkpoint});
+
+# Advance the WAL by about 20 more segments.
+$node->advance_wal(20);
+
+my $restart_lsn_init = $node->safe_psql('postgres',
+ q{select restart_lsn from pg_replication_slots where slot_name = 'slot_physical'}
+);
+chomp($restart_lsn_init);
+note("restart lsn before checkpoint: $restart_lsn_init");
+
+# Run another checkpoint, this time in the background, and make it wait
+# on the injection point so that the checkpoint stops right before
+# removing old WAL segments.
+note('starting checkpoint');
+
+my $checkpoint = $node->background_psql('postgres');
+$checkpoint->query_safe(
+ q{select injection_points_attach('checkpoint-before-old-wal-removal','wait')}
+);
+$checkpoint->query_until(
+ qr/starting_checkpoint/,
+ q(\echo starting_checkpoint
+checkpoint;
+\q
+));
+
+# Wait until the checkpoint stops right before removing WAL segments.
+note('waiting for injection_point');
+$node->wait_for_event('checkpointer', 'checkpoint-before-old-wal-removal');
+note('injection_point is reached');
+
+# OK, we're in the right situation: time to advance the physical slot, which
+# recalculates the required LSN, and then unblock the checkpoint, which
+# removes the WAL still needed by the physical slot.
+$node->safe_psql('postgres',
+ q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Continue the checkpoint and wait for its completion.
+my $log_offset = -s $node->logfile;
+$node->safe_psql('postgres',
+ q{select injection_points_wakeup('checkpoint-before-old-wal-removal')});
+$node->wait_for_log(qr/checkpoint complete/, $log_offset);
+
+my $restart_lsn_old = $node->safe_psql('postgres',
+ q{select restart_lsn from pg_replication_slots where slot_name = 'slot_physical'}
+);
+chomp($restart_lsn_old);
+note("restart lsn before stop: $restart_lsn_old");
+
+# Abruptly stop the server.
+$node->stop('immediate');
+
+$node->start;
+
+# Get the restart_lsn of the slot right after restarting.
+my $restart_lsn = $node->safe_psql('postgres',
+ q{select restart_lsn from pg_replication_slots where slot_name = 'slot_physical'}
+);
+chomp($restart_lsn);
+note("restart lsn: $restart_lsn");
+
+# Get the WAL segment name for the slot's restart_lsn.
+my $restart_lsn_segment = $node->safe_psql('postgres',
+ "SELECT pg_walfile_name('$restart_lsn'::pg_lsn)");
+chomp($restart_lsn_segment);
+
+# Check that the required WAL segment still exists.
+note("required by slot segment name: $restart_lsn_segment");
+my $datadir = $node->data_dir;
+ok( -f "$datadir/pg_wal/$restart_lsn_segment",
+ "WAL segment $restart_lsn_segment for physical slot's restart_lsn $restart_lsn exists"
+);
+
+done_testing();
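
The final ok() check depends on the fixed mapping from an LSN to a WAL file name; a worked sketch, assuming the default 16MB segment size and timeline 1 (not part of the patch):

    # pg_walfile_name('0/5000028') computed by hand: the file name is the
    # timeline plus the segment number split into two 8-hex-digit halves.
    my ($tli, $hi, $lo) = (1, 0, 0x05000028);
    my $segsize = 16 * 1024 * 1024;                      # wal_segment_size
    my $segno = int(($hi * 2**32 + $lo) / $segsize);     # 5
    my $per_id = int(2**32 / $segsize);                  # 256 segments per xlogid
    printf("%08X%08X%08X\n",
        $tli, int($segno / $per_id), $segno % $per_id);  # 000000010000000000000005
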
diff --git a/src/test/recovery/t/048_vacuum_horizon_floor.pl b/src/test/recovery/t/048_vacuum_horizon_floor.pl
new file mode 100644
index 00000000000..e56fce59d58
--- /dev/null
+++ b/src/test/recovery/t/048_vacuum_horizon_floor.pl
@@ -0,0 +1,288 @@
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use Test::More;
+
+# Test that vacuum prunes away all dead tuples killed before OldestXmin
+#
+# This test creates a table on a primary, updates the table to generate dead
+# tuples for vacuum, and then, during the vacuum, uses the replica to force
+# GlobalVisState->maybe_needed on the primary to move backwards and precede
+# the value of OldestXmin set at the beginning of vacuuming the table.
+
+# Set up nodes
+my $node_primary = PostgreSQL::Test::Cluster->new('primary');
+$node_primary->init(allows_streaming => 'physical');
+
+# io_combine_limit is set to 1 to avoid pinning more than one buffer at a time
+# to ensure test determinism.
+$node_primary->append_conf(
+ 'postgresql.conf', qq[
+hot_standby_feedback = on
+autovacuum = off
+log_min_messages = INFO
+maintenance_work_mem = 64
+io_combine_limit = 1
+]);
+$node_primary->start;
+
+my $node_replica = PostgreSQL::Test::Cluster->new('standby');
+
+$node_primary->backup('my_backup');
+$node_replica->init_from_backup($node_primary, 'my_backup',
+ has_streaming => 1);
+
+$node_replica->start;
+
+my $test_db = "test_db";
+$node_primary->safe_psql('postgres', "CREATE DATABASE $test_db");
+
+# Save the original connection info for later use
+my $orig_conninfo = $node_primary->connstr();
+
+my $table1 = "vac_horizon_floor_table";
+
+# Long-running Primary Session A
+my $psql_primaryA =
+ $node_primary->background_psql($test_db, on_error_stop => 1);
+
+# Long-running Primary Session B
+my $psql_primaryB =
+ $node_primary->background_psql($test_db, on_error_stop => 1);
+
+# Our test relies on two rounds of index vacuuming for reasons elaborated
+# later. To trigger two rounds of index vacuuming, we must fill up the
+# TIDStore with dead items partway through a vacuum of the table. The number
+# of rows is just enough to ensure we exceed maintenance_work_mem on all
+# supported platforms, while keeping test runtime as short as we can.
+my $nrows = 2000;
+
+# Because vacuum's first pass, pruning, is where we use the GlobalVisState to
+# check tuple visibility, GlobalVisState->maybe_needed must move backwards
+# during pruning before checking the visibility for a tuple which would have
+# been considered HEAPTUPLE_DEAD prior to maybe_needed moving backwards but
+# HEAPTUPLE_RECENTLY_DEAD compared to the new, older value of maybe_needed.
+#
+# We must not only force the horizon on the primary to move backwards but also
+# force the vacuuming backend's GlobalVisState to be updated. GlobalVisState
+# is forced to update during index vacuuming.
+#
+# _bt_pendingfsm_finalize() calls GetOldestNonRemovableTransactionId() at the
+# end of a round of index vacuuming, updating the backend's GlobalVisState
+# and, in our case, moving maybe_needed backwards.
+#
+# Then vacuum's first (pruning) pass will continue and pruning will find our
+# later inserted and updated tuple HEAPTUPLE_RECENTLY_DEAD when compared to
+# maybe_needed but HEAPTUPLE_DEAD when compared to OldestXmin.
+#
+# Thus, we must force at least two rounds of index vacuuming to ensure that
+# some tuple visibility checks will happen after a round of index vacuuming.
+# To accomplish this, we set maintenance_work_mem to its minimum value and
+# insert and delete enough rows that we force at least one round of index
+# vacuuming before getting to a dead tuple which was killed after the standby
+# is disconnected.
+$node_primary->safe_psql(
+ $test_db, qq[
+ CREATE TABLE ${table1}(col1 int)
+ WITH (autovacuum_enabled=false, fillfactor=10);
+ INSERT INTO $table1 VALUES(7);
+ INSERT INTO $table1 SELECT generate_series(1, $nrows) % 3;
+ CREATE INDEX on ${table1}(col1);
+ DELETE FROM $table1 WHERE col1 = 0;
+ INSERT INTO $table1 VALUES(7);
+]);
+
+# We will later move the primary forward while the standby is disconnected.
+# For now, however, there is no reason not to wait for the standby to catch
+# up.
+my $primary_lsn = $node_primary->lsn('flush');
+$node_primary->wait_for_catchup($node_replica, 'replay', $primary_lsn);
+
+# Test that the WAL receiver is up and running.
+$node_replica->poll_query_until(
+ $test_db, qq[
+ SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 't');
+
+# Set primary_conninfo to something invalid on the replica and reload the
+# config. Once the config is reloaded, the startup process will force the WAL
+# receiver to restart and it will be unable to reconnect because of the
+# invalid connection information.
+$node_replica->safe_psql(
+ $test_db, qq[
+ ALTER SYSTEM SET primary_conninfo = '';
+ SELECT pg_reload_conf();
+ ]);
+
+# Wait until the WAL receiver has shut down and been unable to start up again.
+$node_replica->poll_query_until(
+ $test_db, qq[
+ SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 'f');
+
+# Now insert and update a tuple which will be visible to the vacuum on the
+# primary but which will have xmax newer than the oldest xmin on the standby
+# that was recently disconnected.
+my $res = $psql_primaryA->query_safe(
+ qq[
+ INSERT INTO $table1 VALUES (99);
+ UPDATE $table1 SET col1 = 100 WHERE col1 = 99;
+ SELECT 'after_update';
+ ]
+);
+
+# Make sure the UPDATE finished
+like($res, qr/^after_update$/m, "UPDATE occurred on primary session A");
+
+# Open a cursor on the primary whose pin will keep VACUUM from getting a
+# cleanup lock on the first page of the relation. We want VACUUM to be able to
+# start, calculate initial values for OldestXmin and GlobalVisState and then
+# be unable to proceed with pruning our dead tuples. This will allow us to
+# reconnect the standby and push the horizon back before we start actual
+# pruning and vacuuming.
+my $primary_cursor1 = "vac_horizon_floor_cursor1";
+
+# The first value inserted into the table was a 7, so FETCH FORWARD should
+# return a 7. That's how we know the cursor has a pin.
+# Disable index scans so the cursor pins heap pages and not index pages.
+$res = $psql_primaryB->query_safe(
+ qq[
+ BEGIN;
+ SET enable_bitmapscan = off;
+ SET enable_indexscan = off;
+ SET enable_indexonlyscan = off;
+ DECLARE $primary_cursor1 CURSOR FOR SELECT * FROM $table1 WHERE col1 = 7;
+ FETCH $primary_cursor1;
+ ]
+);
+
+is($res, 7, qq[Cursor query returned $res. Expected value 7.]);
+
+# Get the PID of the session which will run the VACUUM FREEZE so that we can
+# use it to filter pg_stat_activity later.
+my $vacuum_pid = $psql_primaryA->query_safe("SELECT pg_backend_pid();");
+
+# Now start a VACUUM FREEZE on the primary. It will call vacuum_get_cutoffs()
+# and establish values of OldestXmin and GlobalVisState which are newer than
+# all of our dead tuples. Then it will be unable to get a cleanup lock to
+# start pruning, so it will hang.
+#
+# We use VACUUM FREEZE because it will wait for a cleanup lock instead of
+# skipping the page pinned by the cursor. Note that this works because the
+# target tuple's xmax precedes OldestXmin, which ensures that
+# lazy_scan_noprune() will return false and we will wait for the cleanup lock.
+#
+# Disable any prefetching, parallelism, or other concurrent I/O by vacuum. The
+# pages of the heap must be processed in order by a single worker to ensure
+# test stability (PARALLEL 0 shouldn't be necessary but guards against the
+# possibility of parallel heap vacuuming).
+$psql_primaryA->{stdin} .= qq[
+ SET maintenance_io_concurrency = 0;
+ VACUUM (VERBOSE, FREEZE, PARALLEL 0) $table1;
+ \\echo VACUUM
+ ];
+
+# Make sure the VACUUM command makes it to the server.
+$psql_primaryA->{run}->pump_nb();
+
+# Make sure that the VACUUM has already called vacuum_get_cutoffs() and is
+# just waiting on the lock to start vacuuming. We don't want the standby to
+# re-establish a connection to the primary and push the horizon back until
+# we've saved initial values in GlobalVisState and calculated OldestXmin.
+$node_primary->poll_query_until(
+ $test_db,
+ qq[
+ SELECT count(*) >= 1 FROM pg_stat_activity
+ WHERE pid = $vacuum_pid
+ AND wait_event = 'BufferPin';
+ ],
+ 't');
+
+# Ensure the WAL receiver is still not active on the replica.
+$node_replica->poll_query_until(
+ $test_db, qq[
+ SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 'f');
+
+# Allow the WAL receiver connection to re-establish.
+$node_replica->safe_psql(
+ $test_db, qq[
+ ALTER SYSTEM SET primary_conninfo = '$orig_conninfo';
+ SELECT pg_reload_conf();
+ ]);
+
+# Ensure the new WAL receiver has connected.
+$node_replica->poll_query_until(
+ $test_db, qq[
+ SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 't');
+
+# Once the WAL sender is shown on the primary, the replica should have
+# connected with the primary and pushed the horizon backward. Primary Session
+# A won't see that until the VACUUM FREEZE proceeds and does its first round
+# of index vacuuming.
+$node_primary->poll_query_until(
+ $test_db, qq[
+ SELECT EXISTS (SELECT * FROM pg_stat_replication);], 't');
+
+# Move the cursor forward to the next 7. We inserted the 7 much later, so
+# advancing the cursor should allow vacuum to proceed vacuuming most pages of
+# the relation. Because we set maintenance_work_mem sufficiently low, we
+# expect that a round of index vacuuming has happened and that the vacuum is
+# now waiting for the cursor to release its pin on the last page of the
+# relation.
+$res = $psql_primaryB->query_safe("FETCH $primary_cursor1");
+is($res, 7,
+ qq[Cursor query returned $res from second fetch. Expected value 7.]);
+
+# Prevent the test from incorrectly passing by confirming that we did indeed
+# do a pass of index vacuuming.
+$node_primary->poll_query_until(
+ $test_db, qq[
+ SELECT index_vacuum_count > 0
+ FROM pg_stat_progress_vacuum
+ WHERE datname='$test_db' AND relid::regclass = '$table1'::regclass;
+ ], 't');
+
+# Commit the transaction with the open cursor so that the VACUUM can finish.
+$psql_primaryB->query_until(
+ qr/^commit$/m,
+ qq[
+ COMMIT;
+ \\echo commit
+ ]
+);
+
+# VACUUM proceeds with pruning and does a visibility check on each tuple. In
+# older versions of Postgres, pruning found our final dead tuple
+# non-removable (HEAPTUPLE_RECENTLY_DEAD) since its xmax is after the new
+# value of maybe_needed. Then heap_prepare_freeze_tuple() would decide the
+# tuple xmax should be frozen because it precedes OldestXmin. Vacuum would
+# then error out in heap_pre_freeze_checks() with "cannot freeze committed
+# xmax". This was fixed by changing pruning to find all
+# HEAPTUPLE_RECENTLY_DEAD tuples with xmaxes preceding OldestXmin
+# HEAPTUPLE_DEAD and removing them.
+
+# With the fix, VACUUM should finish successfully, incrementing the table
+# vacuum_count.
+$node_primary->poll_query_until(
+ $test_db,
+ qq[
+ SELECT vacuum_count > 0
+ FROM pg_stat_all_tables WHERE relname = '${table1}';
+ ]
+ , 't');
+
+$primary_lsn = $node_primary->lsn('flush');
+
+# Make sure something causes us to flush
+$node_primary->safe_psql($test_db, "INSERT INTO $table1 VALUES (1);");
+
+# Nothing on the replica should cause a recovery conflict, so this should
+# finish successfully.
+$node_primary->wait_for_catchup($node_replica, 'replay', $primary_lsn);
+
+## Shut down psqls
+$psql_primaryA->quit;
+$psql_primaryB->quit;
+
+$node_replica->stop();
+$node_primary->stop();
+
+done_testing();
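
The horizon movement this test engineers is observable directly on the primary; a sketch (not part of the patch, reusing the node and database names from above) of how one could watch the xmin that hot_standby_feedback publishes:

    # Once the standby reconnects, its oldest xmin shows up on the primary as
    # the walsender's backend_xmin, which is what drags the horizon backwards.
    my $fb_xmin = $node_primary->safe_psql($test_db,
        q{SELECT backend_xmin FROM pg_stat_replication});
    note("walsender backend_xmin after reconnect: $fb_xmin");
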
diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out
index 476266e3f4b..08984dd98f1 100644
--- a/src/test/regress/expected/alter_table.out
+++ b/src/test/regress/expected/alter_table.out
@@ -4745,6 +4745,21 @@ alter table attbl alter column p1 set data type bigint;
alter table atref alter column c1 set data type bigint;
drop table attbl, atref;
/* End test case for bug #17409 */
+/* Test case for bug #18970 */
+create table attbl(a int);
+create table atref(b attbl check ((b).a is not null));
+alter table attbl alter column a type numeric; -- someday this should work
+ERROR: cannot alter table "attbl" because column "atref.b" uses its row type
+alter table atref drop constraint atref_b_check;
+create statistics atref_stat on ((b).a is not null) from atref;
+alter table attbl alter column a type numeric; -- someday this should work
+ERROR: cannot alter table "attbl" because column "atref.b" uses its row type
+drop statistics atref_stat;
+create index atref_idx on atref (((b).a));
+alter table attbl alter column a type numeric; -- someday this should work
+ERROR: cannot alter table "attbl" because column "atref.b" uses its row type
+drop table attbl, atref;
+/* End test case for bug #18970 */
-- Test that ALTER TABLE rewrite preserves a clustered index
-- for normal indexes and indexes on constraints.
create table alttype_cluster (a int);
diff --git a/src/test/regress/expected/btree_index.out b/src/test/regress/expected/btree_index.out
index bfb1a286ea4..21dc9b5783a 100644
--- a/src/test/regress/expected/btree_index.out
+++ b/src/test/regress/expected/btree_index.out
@@ -195,54 +195,123 @@ ORDER BY proname DESC, proargtypes DESC, pronamespace DESC LIMIT 1;
(1 row)
--
--- Add coverage for RowCompare quals whose rhs row has a NULL that ends scan
+-- Forwards scan RowCompare qual whose row arg has a NULL that affects our
+-- initial positioning strategy
--
explain (costs off)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
- WHERE proname = 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ WHERE (proname, proargtypes) >= ('abs', NULL) AND proname <= 'abs'
ORDER BY proname, proargtypes, pronamespace;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------
Index Only Scan using pg_proc_proname_args_nsp_index on pg_proc
- Index Cond: ((ROW(proname, proargtypes) < ROW('abs'::name, NULL::oidvector)) AND (proname = 'abs'::name))
+ Index Cond: ((ROW(proname, proargtypes) >= ROW('abs'::name, NULL::oidvector)) AND (proname <= 'abs'::name))
(2 rows)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
- WHERE proname = 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ WHERE (proname, proargtypes) >= ('abs', NULL) AND proname <= 'abs'
ORDER BY proname, proargtypes, pronamespace;
proname | proargtypes | pronamespace
---------+-------------+--------------
(0 rows)
--
--- Add coverage for backwards scan RowCompare quals whose rhs row has a NULL
--- that ends scan
+-- Forwards scan RowCompare quals whose row arg has a NULL that ends scan
--
explain (costs off)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
- WHERE proname = 'abs' AND (proname, proargtypes) > ('abs', NULL)
+ WHERE proname >= 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ORDER BY proname, proargtypes, pronamespace;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------
+ Index Only Scan using pg_proc_proname_args_nsp_index on pg_proc
+ Index Cond: ((proname >= 'abs'::name) AND (ROW(proname, proargtypes) < ROW('abs'::name, NULL::oidvector)))
+(2 rows)
+
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname >= 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ORDER BY proname, proargtypes, pronamespace;
+ proname | proargtypes | pronamespace
+---------+-------------+--------------
+(0 rows)
+
+--
+-- Backwards scan RowCompare qual whose row arg has a NULL that affects our
+-- initial positioning strategy
+--
+explain (costs off)
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname >= 'abs' AND (proname, proargtypes) <= ('abs', NULL)
+ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
+ QUERY PLAN
+---------------------------------------------------------------------------------------------------------------
+ Index Only Scan Backward using pg_proc_proname_args_nsp_index on pg_proc
+ Index Cond: ((proname >= 'abs'::name) AND (ROW(proname, proargtypes) <= ROW('abs'::name, NULL::oidvector)))
+(2 rows)
+
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname >= 'abs' AND (proname, proargtypes) <= ('abs', NULL)
+ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
+ proname | proargtypes | pronamespace
+---------+-------------+--------------
+(0 rows)
+
+--
+-- Backwards scan RowCompare qual whose row arg has a NULL that ends scan
+--
+explain (costs off)
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE (proname, proargtypes) > ('abs', NULL) AND proname <= 'abs'
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
- QUERY PLAN
--------------------------------------------------------------------------------------------------------------
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------
Index Only Scan Backward using pg_proc_proname_args_nsp_index on pg_proc
- Index Cond: ((ROW(proname, proargtypes) > ROW('abs'::name, NULL::oidvector)) AND (proname = 'abs'::name))
+ Index Cond: ((ROW(proname, proargtypes) > ROW('abs'::name, NULL::oidvector)) AND (proname <= 'abs'::name))
(2 rows)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
- WHERE proname = 'abs' AND (proname, proargtypes) > ('abs', NULL)
+ WHERE (proname, proargtypes) > ('abs', NULL) AND proname <= 'abs'
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
proname | proargtypes | pronamespace
---------+-------------+--------------
(0 rows)
+-- Makes B-Tree preprocessing deal with unmarking redundant keys that were
+-- initially marked required (test case relies on current row compare
+-- preprocessing limitations)
+explain (costs off)
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname = 'zzzzzz' AND (proname, proargtypes) > ('abs', NULL)
+ AND pronamespace IN (1, 2, 3) AND proargtypes IN ('26 23', '5077')
+ORDER BY proname, proargtypes, pronamespace;
+ QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Index Only Scan using pg_proc_proname_args_nsp_index on pg_proc
+ Index Cond: ((ROW(proname, proargtypes) > ROW('abs'::name, NULL::oidvector)) AND (proname = 'zzzzzz'::name) AND (proargtypes = ANY ('{"26 23",5077}'::oidvector[])) AND (pronamespace = ANY ('{1,2,3}'::oid[])))
+(2 rows)
+
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname = 'zzzzzz' AND (proname, proargtypes) > ('abs', NULL)
+ AND pronamespace IN (1, 2, 3) AND proargtypes IN ('26 23', '5077')
+ORDER BY proname, proargtypes, pronamespace;
+ proname | proargtypes | pronamespace
+---------+-------------+--------------
+(0 rows)
+
--
--- Add coverage for recheck of > key following array advancement on previous
--- (left sibling) page that used a high key whose attribute value corresponding
--- to the > key was -inf (due to being truncated when the high key was created).
+-- Performs a recheck of > key following array advancement on previous (left
+-- sibling) page that used a high key whose attribute value corresponding to
+-- the > key was -inf (due to being truncated when the high key was created).
--
-- XXX This relies on the assumption that tenk1_thous_tenthous has a truncated
-- high key "(183, -inf)" on the first page that we'll scan. The test will only
diff --git a/src/test/regress/expected/compression.out b/src/test/regress/expected/compression.out
index 4dd9ee7200d..09f198149aa 100644
--- a/src/test/regress/expected/compression.out
+++ b/src/test/regress/expected/compression.out
@@ -1,3 +1,7 @@
+-- Default set of tests for TOAST compression, independent of the compression
+-- methods supported by the build.
+CREATE SCHEMA pglz;
+SET search_path TO pglz, public;
\set HIDE_TOAST_COMPRESSION false
-- ensure we get stable results regardless of installation's default
SET default_toast_compression = 'pglz';
@@ -6,21 +10,13 @@ CREATE TABLE cmdata(f1 text COMPRESSION pglz);
CREATE INDEX idx ON cmdata(f1);
INSERT INTO cmdata VALUES(repeat('1234567890', 1000));
\d+ cmdata
- Table "public.cmdata"
+ Table "pglz.cmdata"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
f1 | text | | | | extended | pglz | |
Indexes:
"idx" btree (f1)
-CREATE TABLE cmdata1(f1 TEXT COMPRESSION lz4);
-INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004));
-\d+ cmdata1
- Table "public.cmdata1"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | lz4 | |
-
-- verify stored compression method in the data
SELECT pg_column_compression(f1) FROM cmdata;
pg_column_compression
@@ -28,12 +24,6 @@ SELECT pg_column_compression(f1) FROM cmdata;
pglz
(1 row)
-SELECT pg_column_compression(f1) FROM cmdata1;
- pg_column_compression
------------------------
- lz4
-(1 row)
-
-- decompress data slice
SELECT SUBSTR(f1, 200, 5) FROM cmdata;
substr
@@ -41,16 +31,10 @@ SELECT SUBSTR(f1, 200, 5) FROM cmdata;
01234
(1 row)
-SELECT SUBSTR(f1, 2000, 50) FROM cmdata1;
- substr
-----------------------------------------------------
- 01234567890123456789012345678901234567890123456789
-(1 row)
-
-- copy with table creation
SELECT * INTO cmmove1 FROM cmdata;
\d+ cmmove1
- Table "public.cmmove1"
+ Table "pglz.cmmove1"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
f1 | text | | | | extended | | |
@@ -61,45 +45,9 @@ SELECT pg_column_compression(f1) FROM cmmove1;
pglz
(1 row)
--- copy to existing table
-CREATE TABLE cmmove3(f1 text COMPRESSION pglz);
-INSERT INTO cmmove3 SELECT * FROM cmdata;
-INSERT INTO cmmove3 SELECT * FROM cmdata1;
-SELECT pg_column_compression(f1) FROM cmmove3;
- pg_column_compression
------------------------
- pglz
- lz4
-(2 rows)
-
--- test LIKE INCLUDING COMPRESSION
-CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION);
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | lz4 | |
-
-DROP TABLE cmdata2;
-- try setting compression for incompressible data type
CREATE TABLE cmdata2 (f1 int COMPRESSION pglz);
ERROR: column data type integer does not support compression
--- update using datum from different table
-CREATE TABLE cmmove2(f1 text COMPRESSION pglz);
-INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004));
-SELECT pg_column_compression(f1) FROM cmmove2;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
-UPDATE cmmove2 SET f1 = cmdata1.f1 FROM cmdata1;
-SELECT pg_column_compression(f1) FROM cmmove2;
- pg_column_compression
------------------------
- lz4
-(1 row)
-
-- test externally stored compressed data
CREATE OR REPLACE FUNCTION large_val() RETURNS TEXT LANGUAGE SQL AS
'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g';
@@ -111,21 +59,6 @@ SELECT pg_column_compression(f1) FROM cmdata2;
pglz
(1 row)
-INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000);
-SELECT pg_column_compression(f1) FROM cmdata1;
- pg_column_compression
------------------------
- lz4
- lz4
-(2 rows)
-
-SELECT SUBSTR(f1, 200, 5) FROM cmdata1;
- substr
---------
- 01234
- 79026
-(2 rows)
-
SELECT SUBSTR(f1, 200, 5) FROM cmdata2;
substr
--------
@@ -136,21 +69,21 @@ DROP TABLE cmdata2;
--test column type update varlena/non-varlena
CREATE TABLE cmdata2 (f1 int);
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+---------+-----------+----------+---------+---------+-------------+--------------+-------------
f1 | integer | | | | plain | | |
ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar;
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+-------------------+-----------+----------+---------+----------+-------------+--------------+-------------
f1 | character varying | | | | extended | | |
ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE int USING f1::integer;
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+---------+-----------+----------+---------+---------+-------------+--------------+-------------
f1 | integer | | | | plain | | |
@@ -160,14 +93,14 @@ ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE int USING f1::integer;
ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar;
ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION pglz;
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+-------------------+-----------+----------+---------+----------+-------------+--------------+-------------
f1 | character varying | | | | extended | pglz | |
ALTER TABLE cmdata2 ALTER COLUMN f1 SET STORAGE plain;
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+-------------------+-----------+----------+---------+---------+-------------+--------------+-------------
f1 | character varying | | | | plain | pglz | |
@@ -179,164 +112,47 @@ SELECT pg_column_compression(f1) FROM cmdata2;
(1 row)
--- test compression with materialized view
-CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1;
-\d+ compressmv
- Materialized view "public.compressmv"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- x | text | | | | extended | | |
-View definition:
- SELECT f1 AS x
- FROM cmdata1;
-
-SELECT pg_column_compression(f1) FROM cmdata1;
- pg_column_compression
------------------------
- lz4
- lz4
-(2 rows)
-
-SELECT pg_column_compression(x) FROM compressmv;
- pg_column_compression
------------------------
- lz4
- lz4
-(2 rows)
-
--- test compression with partition
-CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1);
-CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0);
-CREATE TABLE cmpart2(f1 text COMPRESSION pglz);
-ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-SELECT pg_column_compression(f1) FROM cmpart1;
- pg_column_compression
------------------------
- lz4
-(1 row)
-
-SELECT pg_column_compression(f1) FROM cmpart2;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
-- test compression with inheritance
-CREATE TABLE cminh() INHERITS(cmdata, cmdata1); -- error
-NOTICE: merging multiple inherited definitions of column "f1"
-ERROR: column "f1" has a compression method conflict
-DETAIL: pglz versus lz4
-CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata); -- error
-NOTICE: merging column "f1" with inherited definition
-ERROR: column "f1" has a compression method conflict
-DETAIL: pglz versus lz4
CREATE TABLE cmdata3(f1 text);
CREATE TABLE cminh() INHERITS (cmdata, cmdata3);
NOTICE: merging multiple inherited definitions of column "f1"
-- test default_toast_compression GUC
+-- suppress machine-dependent details
+\set VERBOSITY terse
SET default_toast_compression = '';
ERROR: invalid value for parameter "default_toast_compression": ""
-HINT: Available values: pglz, lz4.
SET default_toast_compression = 'I do not exist compression';
ERROR: invalid value for parameter "default_toast_compression": "I do not exist compression"
-HINT: Available values: pglz, lz4.
-SET default_toast_compression = 'lz4';
SET default_toast_compression = 'pglz';
--- test alter compression method
-ALTER TABLE cmdata ALTER COLUMN f1 SET COMPRESSION lz4;
-INSERT INTO cmdata VALUES (repeat('123456789', 4004));
-\d+ cmdata
- Table "public.cmdata"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | lz4 | |
-Indexes:
- "idx" btree (f1)
-Child tables: cminh
-
-SELECT pg_column_compression(f1) FROM cmdata;
- pg_column_compression
------------------------
- pglz
- lz4
-(2 rows)
-
+\set VERBOSITY default
ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION default;
\d+ cmdata2
- Table "public.cmdata2"
+ Table "pglz.cmdata2"
Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
--------+-------------------+-----------+----------+---------+---------+-------------+--------------+-------------
f1 | character varying | | | | plain | | |
--- test alter compression method for materialized views
-ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4;
-\d+ compressmv
- Materialized view "public.compressmv"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- x | text | | | | extended | lz4 | |
-View definition:
- SELECT f1 AS x
- FROM cmdata1;
-
--- test alter compression method for partitioned tables
-ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz;
-ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4;
--- new data should be compressed with the current compression method
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-SELECT pg_column_compression(f1) FROM cmpart1;
- pg_column_compression
------------------------
- lz4
- pglz
-(2 rows)
-
-SELECT pg_column_compression(f1) FROM cmpart2;
- pg_column_compression
------------------------
- pglz
- lz4
-(2 rows)
-
+DROP TABLE cmdata2;
-- VACUUM FULL does not recompress
SELECT pg_column_compression(f1) FROM cmdata;
pg_column_compression
-----------------------
pglz
- lz4
-(2 rows)
+(1 row)
VACUUM FULL cmdata;
SELECT pg_column_compression(f1) FROM cmdata;
pg_column_compression
-----------------------
pglz
- lz4
-(2 rows)
+(1 row)
--- test expression index
-DROP TABLE cmdata2;
-CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4);
-CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2));
-INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM
-generate_series(1, 50) g), VERSION());
-- check data is ok
SELECT length(f1) FROM cmdata;
length
--------
10000
- 36036
-(2 rows)
-
-SELECT length(f1) FROM cmdata1;
- length
---------
- 10040
- 12449
-(2 rows)
+(1 row)
SELECT length(f1) FROM cmmove1;
length
@@ -344,19 +160,6 @@ SELECT length(f1) FROM cmmove1;
10000
(1 row)
-SELECT length(f1) FROM cmmove2;
- length
---------
- 10040
-(1 row)
-
-SELECT length(f1) FROM cmmove3;
- length
---------
- 10000
- 10040
-(2 rows)
-
CREATE TABLE badcompresstbl (a text COMPRESSION I_Do_Not_Exist_Compression); -- fails
ERROR: invalid compression method "i_do_not_exist_compression"
CREATE TABLE badcompresstbl (a text);
diff --git a/src/test/regress/expected/compression_1.out b/src/test/regress/expected/compression_1.out
deleted file mode 100644
index 7bd7642b4b9..00000000000
--- a/src/test/regress/expected/compression_1.out
+++ /dev/null
@@ -1,360 +0,0 @@
-\set HIDE_TOAST_COMPRESSION false
--- ensure we get stable results regardless of installation's default
-SET default_toast_compression = 'pglz';
--- test creating table with compression method
-CREATE TABLE cmdata(f1 text COMPRESSION pglz);
-CREATE INDEX idx ON cmdata(f1);
-INSERT INTO cmdata VALUES(repeat('1234567890', 1000));
-\d+ cmdata
- Table "public.cmdata"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | pglz | |
-Indexes:
- "idx" btree (f1)
-
-CREATE TABLE cmdata1(f1 TEXT COMPRESSION lz4);
-ERROR: compression method lz4 not supported
-DETAIL: This functionality requires the server to be built with lz4 support.
-INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004));
-ERROR: relation "cmdata1" does not exist
-LINE 1: INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004));
- ^
-\d+ cmdata1
--- verify stored compression method in the data
-SELECT pg_column_compression(f1) FROM cmdata;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
-SELECT pg_column_compression(f1) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT pg_column_compression(f1) FROM cmdata1;
- ^
--- decompress data slice
-SELECT SUBSTR(f1, 200, 5) FROM cmdata;
- substr
---------
- 01234
-(1 row)
-
-SELECT SUBSTR(f1, 2000, 50) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT SUBSTR(f1, 2000, 50) FROM cmdata1;
- ^
--- copy with table creation
-SELECT * INTO cmmove1 FROM cmdata;
-\d+ cmmove1
- Table "public.cmmove1"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | | |
-
-SELECT pg_column_compression(f1) FROM cmmove1;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
--- copy to existing table
-CREATE TABLE cmmove3(f1 text COMPRESSION pglz);
-INSERT INTO cmmove3 SELECT * FROM cmdata;
-INSERT INTO cmmove3 SELECT * FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: INSERT INTO cmmove3 SELECT * FROM cmdata1;
- ^
-SELECT pg_column_compression(f1) FROM cmmove3;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
--- test LIKE INCLUDING COMPRESSION
-CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION);
-ERROR: relation "cmdata1" does not exist
-LINE 1: CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION);
- ^
-\d+ cmdata2
-DROP TABLE cmdata2;
-ERROR: table "cmdata2" does not exist
--- try setting compression for incompressible data type
-CREATE TABLE cmdata2 (f1 int COMPRESSION pglz);
-ERROR: column data type integer does not support compression
--- update using datum from different table
-CREATE TABLE cmmove2(f1 text COMPRESSION pglz);
-INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004));
-SELECT pg_column_compression(f1) FROM cmmove2;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
-UPDATE cmmove2 SET f1 = cmdata1.f1 FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: UPDATE cmmove2 SET f1 = cmdata1.f1 FROM cmdata1;
- ^
-SELECT pg_column_compression(f1) FROM cmmove2;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
--- test externally stored compressed data
-CREATE OR REPLACE FUNCTION large_val() RETURNS TEXT LANGUAGE SQL AS
-'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g';
-CREATE TABLE cmdata2 (f1 text COMPRESSION pglz);
-INSERT INTO cmdata2 SELECT large_val() || repeat('a', 4000);
-SELECT pg_column_compression(f1) FROM cmdata2;
- pg_column_compression
------------------------
- pglz
-(1 row)
-
-INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000);
-ERROR: relation "cmdata1" does not exist
-LINE 1: INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000);
- ^
-SELECT pg_column_compression(f1) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT pg_column_compression(f1) FROM cmdata1;
- ^
-SELECT SUBSTR(f1, 200, 5) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT SUBSTR(f1, 200, 5) FROM cmdata1;
- ^
-SELECT SUBSTR(f1, 200, 5) FROM cmdata2;
- substr
---------
- 79026
-(1 row)
-
-DROP TABLE cmdata2;
---test column type update varlena/non-varlena
-CREATE TABLE cmdata2 (f1 int);
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+---------+-----------+----------+---------+---------+-------------+--------------+-------------
- f1 | integer | | | | plain | | |
-
-ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar;
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+-------------------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | character varying | | | | extended | | |
-
-ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE int USING f1::integer;
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+---------+-----------+----------+---------+---------+-------------+--------------+-------------
- f1 | integer | | | | plain | | |
-
---changing column storage should not impact the compression method
---but the data should not be compressed
-ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar;
-ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION pglz;
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+-------------------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | character varying | | | | extended | pglz | |
-
-ALTER TABLE cmdata2 ALTER COLUMN f1 SET STORAGE plain;
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+-------------------+-----------+----------+---------+---------+-------------+--------------+-------------
- f1 | character varying | | | | plain | pglz | |
-
-INSERT INTO cmdata2 VALUES (repeat('123456789', 800));
-SELECT pg_column_compression(f1) FROM cmdata2;
- pg_column_compression
------------------------
-
-(1 row)
-
--- test compression with materialized view
-CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: ...TE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1;
- ^
-\d+ compressmv
-SELECT pg_column_compression(f1) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT pg_column_compression(f1) FROM cmdata1;
- ^
-SELECT pg_column_compression(x) FROM compressmv;
-ERROR: relation "compressmv" does not exist
-LINE 1: SELECT pg_column_compression(x) FROM compressmv;
- ^
--- test compression with partition
-CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1);
-ERROR: compression method lz4 not supported
-DETAIL: This functionality requires the server to be built with lz4 support.
-CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0);
-ERROR: relation "cmpart" does not exist
-CREATE TABLE cmpart2(f1 text COMPRESSION pglz);
-ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
-ERROR: relation "cmpart" does not exist
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-ERROR: relation "cmpart" does not exist
-LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 1004));
- ^
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-ERROR: relation "cmpart" does not exist
-LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 4004));
- ^
-SELECT pg_column_compression(f1) FROM cmpart1;
-ERROR: relation "cmpart1" does not exist
-LINE 1: SELECT pg_column_compression(f1) FROM cmpart1;
- ^
-SELECT pg_column_compression(f1) FROM cmpart2;
- pg_column_compression
------------------------
-(0 rows)
-
--- test compression with inheritance
-CREATE TABLE cminh() INHERITS(cmdata, cmdata1); -- error
-ERROR: relation "cmdata1" does not exist
-CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata); -- error
-NOTICE: merging column "f1" with inherited definition
-ERROR: column "f1" has a compression method conflict
-DETAIL: pglz versus lz4
-CREATE TABLE cmdata3(f1 text);
-CREATE TABLE cminh() INHERITS (cmdata, cmdata3);
-NOTICE: merging multiple inherited definitions of column "f1"
--- test default_toast_compression GUC
-SET default_toast_compression = '';
-ERROR: invalid value for parameter "default_toast_compression": ""
-HINT: Available values: pglz.
-SET default_toast_compression = 'I do not exist compression';
-ERROR: invalid value for parameter "default_toast_compression": "I do not exist compression"
-HINT: Available values: pglz.
-SET default_toast_compression = 'lz4';
-ERROR: invalid value for parameter "default_toast_compression": "lz4"
-HINT: Available values: pglz.
-SET default_toast_compression = 'pglz';
--- test alter compression method
-ALTER TABLE cmdata ALTER COLUMN f1 SET COMPRESSION lz4;
-ERROR: compression method lz4 not supported
-DETAIL: This functionality requires the server to be built with lz4 support.
-INSERT INTO cmdata VALUES (repeat('123456789', 4004));
-\d+ cmdata
- Table "public.cmdata"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+------+-----------+----------+---------+----------+-------------+--------------+-------------
- f1 | text | | | | extended | pglz | |
-Indexes:
- "idx" btree (f1)
-Child tables: cminh
-
-SELECT pg_column_compression(f1) FROM cmdata;
- pg_column_compression
------------------------
- pglz
- pglz
-(2 rows)
-
-ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION default;
-\d+ cmdata2
- Table "public.cmdata2"
- Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
---------+-------------------+-----------+----------+---------+---------+-------------+--------------+-------------
- f1 | character varying | | | | plain | | |
-
--- test alter compression method for materialized views
-ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4;
-ERROR: relation "compressmv" does not exist
-\d+ compressmv
--- test alter compression method for partitioned tables
-ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz;
-ERROR: relation "cmpart1" does not exist
-ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4;
-ERROR: compression method lz4 not supported
-DETAIL: This functionality requires the server to be built with lz4 support.
--- new data should be compressed with the current compression method
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-ERROR: relation "cmpart" does not exist
-LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 1004));
- ^
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-ERROR: relation "cmpart" does not exist
-LINE 1: INSERT INTO cmpart VALUES (repeat('123456789', 4004));
- ^
-SELECT pg_column_compression(f1) FROM cmpart1;
-ERROR: relation "cmpart1" does not exist
-LINE 1: SELECT pg_column_compression(f1) FROM cmpart1;
- ^
-SELECT pg_column_compression(f1) FROM cmpart2;
- pg_column_compression
------------------------
-(0 rows)
-
--- VACUUM FULL does not recompress
-SELECT pg_column_compression(f1) FROM cmdata;
- pg_column_compression
------------------------
- pglz
- pglz
-(2 rows)
-
-VACUUM FULL cmdata;
-SELECT pg_column_compression(f1) FROM cmdata;
- pg_column_compression
------------------------
- pglz
- pglz
-(2 rows)
-
--- test expression index
-DROP TABLE cmdata2;
-CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4);
-ERROR: compression method lz4 not supported
-DETAIL: This functionality requires the server to be built with lz4 support.
-CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2));
-ERROR: relation "cmdata2" does not exist
-INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM
-generate_series(1, 50) g), VERSION());
-ERROR: relation "cmdata2" does not exist
-LINE 1: INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEX...
- ^
--- check data is ok
-SELECT length(f1) FROM cmdata;
- length
---------
- 10000
- 36036
-(2 rows)
-
-SELECT length(f1) FROM cmdata1;
-ERROR: relation "cmdata1" does not exist
-LINE 1: SELECT length(f1) FROM cmdata1;
- ^
-SELECT length(f1) FROM cmmove1;
- length
---------
- 10000
-(1 row)
-
-SELECT length(f1) FROM cmmove2;
- length
---------
- 10040
-(1 row)
-
-SELECT length(f1) FROM cmmove3;
- length
---------
- 10000
-(1 row)
-
-CREATE TABLE badcompresstbl (a text COMPRESSION I_Do_Not_Exist_Compression); -- fails
-ERROR: invalid compression method "i_do_not_exist_compression"
-CREATE TABLE badcompresstbl (a text);
-ALTER TABLE badcompresstbl ALTER a SET COMPRESSION I_Do_Not_Exist_Compression; -- fails
-ERROR: invalid compression method "i_do_not_exist_compression"
-DROP TABLE badcompresstbl;
-\set HIDE_TOAST_COMPRESSION true
diff --git a/src/test/regress/expected/compression_lz4.out b/src/test/regress/expected/compression_lz4.out
new file mode 100644
index 00000000000..068dd7c3674
--- /dev/null
+++ b/src/test/regress/expected/compression_lz4.out
@@ -0,0 +1,249 @@
+-- Tests for TOAST compression with lz4
+SELECT NOT(enumvals @> '{lz4}') AS skip_test FROM pg_settings WHERE
+ name = 'default_toast_compression' \gset
+\if :skip_test
+ \echo '*** skipping TOAST tests with lz4 (not supported) ***'
+ \quit
+\endif
+CREATE SCHEMA lz4;
+SET search_path TO lz4, public;
+\set HIDE_TOAST_COMPRESSION false
+-- Ensure we get stable results regardless of the installation's default.
+-- We rely on this GUC value for a few tests.
+SET default_toast_compression = 'pglz';
+-- test creating table with compression method
+CREATE TABLE cmdata_pglz(f1 text COMPRESSION pglz);
+CREATE INDEX idx ON cmdata_pglz(f1);
+INSERT INTO cmdata_pglz VALUES(repeat('1234567890', 1000));
+\d+ cmdata_pglz
+CREATE TABLE cmdata_lz4(f1 TEXT COMPRESSION lz4);
+INSERT INTO cmdata_lz4 VALUES(repeat('1234567890', 1004));
+\d+ cmdata_lz4
+-- verify stored compression method in the data
+SELECT pg_column_compression(f1) FROM cmdata_lz4;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+-- decompress data slice
+SELECT SUBSTR(f1, 200, 5) FROM cmdata_pglz;
+ substr
+--------
+ 01234
+(1 row)
+
+SELECT SUBSTR(f1, 2000, 50) FROM cmdata_lz4;
+ substr
+----------------------------------------------------
+ 01234567890123456789012345678901234567890123456789
+(1 row)
+
+-- copy with table creation
+SELECT * INTO cmmove1 FROM cmdata_lz4;
+\d+ cmmove1
+ Table "lz4.cmmove1"
+ Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
+--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
+ f1 | text | | | | extended | | |
+
+SELECT pg_column_compression(f1) FROM cmmove1;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+-- test LIKE INCLUDING COMPRESSION. The GUC default_toast_compression
+-- has no effect; the compression method comes from the table being copied.
+CREATE TABLE cmdata2 (LIKE cmdata_lz4 INCLUDING COMPRESSION);
+\d+ cmdata2
+ Table "lz4.cmdata2"
+ Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
+--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
+ f1 | text | | | | extended | lz4 | |
+
+DROP TABLE cmdata2;
+-- copy to existing table
+CREATE TABLE cmmove3(f1 text COMPRESSION pglz);
+INSERT INTO cmmove3 SELECT * FROM cmdata_pglz;
+INSERT INTO cmmove3 SELECT * FROM cmdata_lz4;
+SELECT pg_column_compression(f1) FROM cmmove3;
+ pg_column_compression
+-----------------------
+ pglz
+ lz4
+(2 rows)
+
+-- update using a datum from a different table that contains LZ4 data.
+CREATE TABLE cmmove2(f1 text COMPRESSION pglz);
+INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004));
+SELECT pg_column_compression(f1) FROM cmmove2;
+ pg_column_compression
+-----------------------
+ pglz
+(1 row)
+
+UPDATE cmmove2 SET f1 = cmdata_lz4.f1 FROM cmdata_lz4;
+SELECT pg_column_compression(f1) FROM cmmove2;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+-- test externally stored compressed data
+CREATE OR REPLACE FUNCTION large_val_lz4() RETURNS TEXT LANGUAGE SQL AS
+'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g';
+CREATE TABLE cmdata2 (f1 text COMPRESSION lz4);
+INSERT INTO cmdata2 SELECT large_val_lz4() || repeat('a', 4000);
+SELECT pg_column_compression(f1) FROM cmdata2;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+SELECT SUBSTR(f1, 200, 5) FROM cmdata2;
+ substr
+--------
+ 79026
+(1 row)
+
+DROP TABLE cmdata2;
+DROP FUNCTION large_val_lz4;
+-- test compression with materialized view
+CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata_lz4;
+\d+ compressmv
+ Materialized view "lz4.compressmv"
+ Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
+--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
+ x | text | | | | extended | | |
+View definition:
+ SELECT f1 AS x
+ FROM cmdata_lz4;
+
+SELECT pg_column_compression(f1) FROM cmdata_lz4;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+SELECT pg_column_compression(x) FROM compressmv;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+-- test compression with partition
+CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1);
+CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0);
+CREATE TABLE cmpart2(f1 text COMPRESSION pglz);
+ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
+INSERT INTO cmpart VALUES (repeat('123456789', 1004));
+INSERT INTO cmpart VALUES (repeat('123456789', 4004));
+SELECT pg_column_compression(f1) FROM cmpart1;
+ pg_column_compression
+-----------------------
+ lz4
+(1 row)
+
+SELECT pg_column_compression(f1) FROM cmpart2;
+ pg_column_compression
+-----------------------
+ pglz
+(1 row)
+
+-- test compression with inheritance
+CREATE TABLE cminh() INHERITS(cmdata_pglz, cmdata_lz4); -- error
+NOTICE: merging multiple inherited definitions of column "f1"
+ERROR: column "f1" has a compression method conflict
+DETAIL: pglz versus lz4
+CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata_pglz); -- error
+NOTICE: merging column "f1" with inherited definition
+ERROR: column "f1" has a compression method conflict
+DETAIL: pglz versus lz4
+CREATE TABLE cmdata3(f1 text);
+CREATE TABLE cminh() INHERITS (cmdata_pglz, cmdata3);
+NOTICE: merging multiple inherited definitions of column "f1"
+-- test default_toast_compression GUC
+SET default_toast_compression = 'lz4';
+-- test alter compression method
+ALTER TABLE cmdata_pglz ALTER COLUMN f1 SET COMPRESSION lz4;
+INSERT INTO cmdata_pglz VALUES (repeat('123456789', 4004));
+\d+ cmdata_pglz
+SELECT pg_column_compression(f1) FROM cmdata_pglz;
+ pg_column_compression
+-----------------------
+ pglz
+ lz4
+(2 rows)
+
+ALTER TABLE cmdata_pglz ALTER COLUMN f1 SET COMPRESSION pglz;
+-- test alter compression method for materialized views
+ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4;
+\d+ compressmv
+ Materialized view "lz4.compressmv"
+ Column | Type | Collation | Nullable | Default | Storage | Compression | Stats target | Description
+--------+------+-----------+----------+---------+----------+-------------+--------------+-------------
+ x | text | | | | extended | lz4 | |
+View definition:
+ SELECT f1 AS x
+ FROM cmdata_lz4;
+
+-- test alter compression method for partitioned tables
+ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz;
+ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4;
+-- new data should be compressed with the current compression method
+INSERT INTO cmpart VALUES (repeat('123456789', 1004));
+INSERT INTO cmpart VALUES (repeat('123456789', 4004));
+SELECT pg_column_compression(f1) FROM cmpart1;
+ pg_column_compression
+-----------------------
+ lz4
+ pglz
+(2 rows)
+
+SELECT pg_column_compression(f1) FROM cmpart2;
+ pg_column_compression
+-----------------------
+ pglz
+ lz4
+(2 rows)
+
+-- test expression index
+CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4);
+CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2));
+INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM
+generate_series(1, 50) g), VERSION());
+-- check data is ok
+SELECT length(f1) FROM cmdata_pglz;
+ length
+--------
+ 10000
+ 36036
+(2 rows)
+
+SELECT length(f1) FROM cmdata_lz4;
+ length
+--------
+ 10040
+(1 row)
+
+SELECT length(f1) FROM cmmove1;
+ length
+--------
+ 10040
+(1 row)
+
+SELECT length(f1) FROM cmmove2;
+ length
+--------
+ 10040
+(1 row)
+
+SELECT length(f1) FROM cmmove3;
+ length
+--------
+ 10000
+ 10040
+(2 rows)
+
+\set HIDE_TOAST_COMPRESSION true
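The workflow this new test file locks down can be reproduced by hand. A minimal sketch, assuming a server built with lz4 support (the cm_demo table is illustrative, not part of the test):

    CREATE TABLE cm_demo (t text COMPRESSION lz4);
    INSERT INTO cm_demo VALUES (repeat('x', 10000));
    SELECT pg_column_compression(t) FROM cm_demo;        -- lz4
    ALTER TABLE cm_demo ALTER COLUMN t SET COMPRESSION pglz;
    INSERT INTO cm_demo VALUES (repeat('y', 10000));
    SELECT pg_column_compression(t) FROM cm_demo;        -- lz4, then pglz:
                                                         -- old rows are not rewritten
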
diff --git a/src/test/regress/expected/compression_lz4_1.out b/src/test/regress/expected/compression_lz4_1.out
new file mode 100644
index 00000000000..198056fa224
--- /dev/null
+++ b/src/test/regress/expected/compression_lz4_1.out
@@ -0,0 +1,7 @@
+-- Tests for TOAST compression with lz4
+SELECT NOT(enumvals @> '{lz4}') AS skip_test FROM pg_settings WHERE
+ name = 'default_toast_compression' \gset
+\if :skip_test
+ \echo '*** skipping TOAST tests with lz4 (not supported) ***'
+*** skipping TOAST tests with lz4 (not supported) ***
+ \quit
diff --git a/src/test/regress/expected/constraints.out b/src/test/regress/expected/constraints.out
index ad6aaab7385..3590d3274f0 100644
--- a/src/test/regress/expected/constraints.out
+++ b/src/test/regress/expected/constraints.out
@@ -748,6 +748,11 @@ ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key ENFORCED;
ERROR: cannot alter enforceability of constraint "unique_tbl_i_key" of relation "unique_tbl"
ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT ENFORCED;
ERROR: cannot alter enforceability of constraint "unique_tbl_i_key" of relation "unique_tbl"
+-- can't make an existing constraint NOT VALID
+ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT VALID;
+ERROR: constraints cannot be altered to be NOT VALID
+LINE 1: ...ABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT VALID;
+ ^
DROP TABLE unique_tbl;
--
-- EXCLUDE constraints
@@ -1659,6 +1664,8 @@ EXECUTE get_nnconstraint_info('{constr_parent3, constr_child3}');
constr_parent3 | constr_parent3_a_not_null | t | t | 0
(2 rows)
+COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_parent2 IS 'this constraint is invalid';
+COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_child2 IS 'this constraint is valid';
DEALLOCATE get_nnconstraint_info;
-- end NOT NULL NOT VALID
-- Comments
@@ -1694,3 +1701,7 @@ DROP TABLE constraint_comments_tbl;
DROP DOMAIN constraint_comments_dom;
DROP ROLE regress_constraint_comments;
DROP ROLE regress_constraint_comments_noaccess;
+-- Leave some constraints for the pg_upgrade test to pick up
+CREATE DOMAIN constraint_comments_dom AS int;
+ALTER DOMAIN constraint_comments_dom ADD CONSTRAINT inv_ck CHECK (value > 0) NOT VALID;
+COMMENT ON CONSTRAINT inv_ck ON DOMAIN constraint_comments_dom IS 'comment on invalid constraint';
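These leftover objects exercise NOT VALID constraint handling across pg_upgrade. As a behavioral sketch (names hypothetical): a NOT VALID check is enforced for newly written values only, until VALIDATE CONSTRAINT verifies the existing data.

    CREATE DOMAIN d_pos AS int;
    CREATE TABLE d_tab (v d_pos);
    INSERT INTO d_tab VALUES (-1);                    -- ok, no constraint yet
    ALTER DOMAIN d_pos ADD CONSTRAINT d_pos_ck CHECK (VALUE > 0) NOT VALID;
    INSERT INTO d_tab VALUES (-2);                    -- fails: new values are checked
    ALTER DOMAIN d_pos VALIDATE CONSTRAINT d_pos_ck;  -- fails until -1 is cleaned up
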
diff --git a/src/test/regress/expected/copy.out b/src/test/regress/expected/copy.out
index 8d5a06563c4..ac66eb55aee 100644
--- a/src/test/regress/expected/copy.out
+++ b/src/test/regress/expected/copy.out
@@ -81,6 +81,29 @@ copy copytest4 to stdout (header);
c1 colname with tab: \t
1 a
2 b
+-- test the multi-line header feature
+create temp table copytest5 (c1 int);
+copy copytest5 from stdin (format csv, header 2);
+copy copytest5 to stdout (header);
+c1
+1
+2
+truncate copytest5;
+copy copytest5 from stdin (format csv, header 4);
+select count(*) from copytest5;
+ count
+-------
+ 0
+(1 row)
+
+truncate copytest5;
+copy copytest5 from stdin (format csv, header 5);
+select count(*) from copytest5;
+ count
+-------
+ 0
+(1 row)
+
-- test copy from with a partitioned table
create table parted_copytest (
a int,
@@ -224,7 +247,7 @@ alter table header_copytest add column c text;
copy header_copytest to stdout with (header match);
ERROR: cannot use "match" with HEADER in COPY TO
copy header_copytest from stdin with (header wrong_choice);
-ERROR: header requires a Boolean value or "match"
+ERROR: header requires a Boolean value, a non-negative integer, or the string "match"
-- works
copy header_copytest from stdin with (header match);
copy header_copytest (c, a, b) from stdin with (header match);
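The new HEADER variant accepts a non-negative integer: COPY FROM skips that many initial lines, while COPY TO still rejects anything beyond a single header line. A minimal sketch:

    CREATE TEMP TABLE hdr_demo (c1 int);
    COPY hdr_demo FROM stdin (FORMAT csv, HEADER 2);  -- discard two header lines
    first header line
    second header line
    1
    2
    \.
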
diff --git a/src/test/regress/expected/copy2.out b/src/test/regress/expected/copy2.out
index 64ea33aeae8..caa3c44f0d0 100644
--- a/src/test/regress/expected/copy2.out
+++ b/src/test/regress/expected/copy2.out
@@ -132,6 +132,12 @@ COPY x from stdin with (reject_limit 1);
ERROR: COPY REJECT_LIMIT requires ON_ERROR to be set to IGNORE
COPY x from stdin with (on_error ignore, reject_limit 0);
ERROR: REJECT_LIMIT (0) must be greater than zero
+COPY x from stdin with (header -1);
+ERROR: a negative integer value cannot be specified for header
+COPY x from stdin with (header 2.5);
+ERROR: header requires a Boolean value, a non-negative integer, or the string "match"
+COPY x to stdout with (header 2);
+ERROR: cannot use multi-line header in COPY TO
-- too many columns in column list: should fail
COPY x (a, b, c, d, e, d, c) from stdin;
ERROR: column "d" specified more than once
diff --git a/src/test/regress/expected/create_table_like.out b/src/test/regress/expected/create_table_like.out
index bf34289e984..29a779c2e90 100644
--- a/src/test/regress/expected/create_table_like.out
+++ b/src/test/regress/expected/create_table_like.out
@@ -332,9 +332,10 @@ COMMENT ON CONSTRAINT ctlt1_a_check ON ctlt1 IS 't1_a_check';
COMMENT ON INDEX ctlt1_pkey IS 'index pkey';
COMMENT ON INDEX ctlt1_b_key IS 'index b_key';
ALTER TABLE ctlt1 ALTER COLUMN a SET STORAGE MAIN;
-CREATE TABLE ctlt2 (c text);
+CREATE TABLE ctlt2 (c text NOT NULL);
ALTER TABLE ctlt2 ALTER COLUMN c SET STORAGE EXTERNAL;
COMMENT ON COLUMN ctlt2.c IS 'C';
+COMMENT ON CONSTRAINT ctlt2_c_not_null ON ctlt2 IS 't2_c_not_null';
CREATE TABLE ctlt3 (a text CHECK (length(a) < 5), c text CHECK (length(c) < 7));
ALTER TABLE ctlt3 ALTER COLUMN c SET STORAGE EXTERNAL;
ALTER TABLE ctlt3 ALTER COLUMN a SET STORAGE MAIN;
@@ -351,9 +352,10 @@ CREATE TABLE ctlt12_storage (LIKE ctlt1 INCLUDING STORAGE, LIKE ctlt2 INCLUDING
--------+------+-----------+----------+---------+----------+--------------+-------------
a | text | | not null | | main | |
b | text | | | | extended | |
- c | text | | | | external | |
+ c | text | | not null | | external | |
Not-null constraints:
"ctlt1_a_not_null" NOT NULL "a"
+ "ctlt2_c_not_null" NOT NULL "c"
CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDING COMMENTS);
\d+ ctlt12_comments
@@ -362,9 +364,16 @@ CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDIN
--------+------+-----------+----------+---------+----------+--------------+-------------
a | text | | not null | | extended | | A
b | text | | | | extended | | B
- c | text | | | | extended | | C
+ c | text | | not null | | extended | | C
Not-null constraints:
"ctlt1_a_not_null" NOT NULL "a"
+ "ctlt2_c_not_null" NOT NULL "c"
+
+SELECT conname, description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt12_comments'::regclass;
+ conname | description
+------------------+---------------
+ ctlt2_c_not_null | t2_c_not_null
+(1 row)
CREATE TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (ctlt1);
NOTICE: merging column "a" with inherited definition
@@ -529,7 +538,9 @@ NOTICE: drop cascades to table inhe
-- LIKE must respect NO INHERIT property of constraints
CREATE TABLE noinh_con_copy (a int CHECK (a > 0) NO INHERIT, b int not null,
c int not null no inherit);
-CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS);
+COMMENT ON CONSTRAINT noinh_con_copy_b_not_null ON noinh_con_copy IS 'not null b';
+COMMENT ON CONSTRAINT noinh_con_copy_c_not_null ON noinh_con_copy IS 'not null c no inherit';
+CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS INCLUDING COMMENTS);
\d+ noinh_con_copy1
Table "public.noinh_con_copy1"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
@@ -543,6 +554,17 @@ Not-null constraints:
"noinh_con_copy_b_not_null" NOT NULL "b"
"noinh_con_copy_c_not_null" NOT NULL "c" NO INHERIT
+SELECT conname, description
+FROM pg_description, pg_constraint c
+WHERE classoid = 'pg_constraint'::regclass
+AND objoid = c.oid AND c.conrelid = 'noinh_con_copy1'::regclass
+ORDER BY conname COLLATE "C";
+ conname | description
+---------------------------+-----------------------
+ noinh_con_copy_b_not_null | not null b
+ noinh_con_copy_c_not_null | not null c no inherit
+(2 rows)
+
-- fail, as partitioned tables don't allow NO INHERIT constraints
CREATE TABLE noinh_con_copy1_parted (LIKE noinh_con_copy INCLUDING ALL)
PARTITION BY LIST (a);
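The underlying change: not-null constraints are now copied by LIKE, and INCLUDING COMMENTS carries their comments along, preserving the source constraint's name. A sketch with hypothetical names (the auto-generated constraint name follows the <table>_<column>_not_null pattern):

    CREATE TABLE src (a int NOT NULL);
    COMMENT ON CONSTRAINT src_a_not_null ON src IS 'demo';
    CREATE TABLE dst (LIKE src INCLUDING CONSTRAINTS INCLUDING COMMENTS);
    SELECT c.conname, d.description
      FROM pg_description d
      JOIN pg_constraint c ON c.oid = d.objoid
     WHERE d.classoid = 'pg_constraint'::regclass
       AND c.conrelid = 'dst'::regclass;     -- returns 'demo'
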
diff --git a/src/test/regress/expected/domain.out b/src/test/regress/expected/domain.out
index ba6f05eeb7d..b5ea707df31 100644
--- a/src/test/regress/expected/domain.out
+++ b/src/test/regress/expected/domain.out
@@ -1019,6 +1019,11 @@ insert into domain_test values (1, 2);
-- should fail
alter table domain_test add column c str_domain;
ERROR: domain str_domain does not allow null values
+-- disallow duplicated not-null constraints
+create domain int_domain1 as int constraint nn1 not null constraint nn2 not null;
+ERROR: redundant NOT NULL constraint definition
+LINE 1: ...domain int_domain1 as int constraint nn1 not null constraint...
+ ^
create domain str_domain2 as text check (value <> 'foo') default 'foo';
-- should fail
alter table domain_test add column d str_domain2;
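Sketch of the newly rejected shape; a single not-null marker per domain remains fine:

    CREATE DOMAIN d_nn  AS int NOT NULL;     -- ok
    CREATE DOMAIN d_bad AS int
      CONSTRAINT nn1 NOT NULL
      CONSTRAINT nn2 NOT NULL;               -- now fails as redundant
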
diff --git a/src/test/regress/expected/foreign_key.out b/src/test/regress/expected/foreign_key.out
index 6a8f3959345..dc541d61adf 100644
--- a/src/test/regress/expected/foreign_key.out
+++ b/src/test/regress/expected/foreign_key.out
@@ -1359,7 +1359,7 @@ LINE 1: ...e ALTER CONSTRAINT fktable_fk_fkey NOT DEFERRABLE INITIALLY ...
ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NO INHERIT;
ERROR: constraint "fktable_fk_fkey" of relation "fktable" is not a not-null constraint
ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT VALID;
-ERROR: FOREIGN KEY constraints cannot be marked NOT VALID
+ERROR: constraints cannot be altered to be NOT VALID
LINE 1: ...ER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey NOT VALID;
^
ALTER TABLE fktable ALTER CONSTRAINT fktable_fk_fkey ENFORCED NOT ENFORCED;
@@ -1750,7 +1750,7 @@ Indexes:
Referenced by:
TABLE "fk_partitioned_fk" CONSTRAINT "fk_partitioned_fk_a_b_fkey" FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b)
--- Check the exsting FK trigger
+-- Check the existing FK trigger
SELECT conname, tgrelid::regclass as tgrel, regexp_replace(tgname, '[0-9]+', 'N') as tgname, tgtype
FROM pg_trigger t JOIN pg_constraint c ON (t.tgconstraint = c.oid)
WHERE tgrelid IN (SELECT relid FROM pg_partition_tree('fk_partitioned_fk'::regclass)
diff --git a/src/test/regress/expected/generated_stored.out b/src/test/regress/expected/generated_stored.out
index 16de30ab191..adac2cedfb2 100644
--- a/src/test/regress/expected/generated_stored.out
+++ b/src/test/regress/expected/generated_stored.out
@@ -1313,6 +1313,18 @@ CREATE TABLE gtest31_1 (a int, b text GENERATED ALWAYS AS ('hello') STORED, c te
CREATE TABLE gtest31_2 (x int, y gtest31_1);
ALTER TABLE gtest31_1 ALTER COLUMN b TYPE varchar; -- fails
ERROR: cannot alter table "gtest31_1" because column "gtest31_2.y" uses its row type
+-- bug #18970: these cases are unsupported, but make sure they fail cleanly
+ALTER TABLE gtest31_2 ADD CONSTRAINT cc CHECK ((y).b IS NOT NULL);
+ALTER TABLE gtest31_1 ALTER COLUMN b SET EXPRESSION AS ('hello1');
+ERROR: cannot alter table "gtest31_1" because column "gtest31_2.y" uses its row type
+ALTER TABLE gtest31_2 DROP CONSTRAINT cc;
+CREATE STATISTICS gtest31_2_stat ON ((y).b is not null) FROM gtest31_2;
+ALTER TABLE gtest31_1 ALTER COLUMN b SET EXPRESSION AS ('hello2');
+ERROR: cannot alter table "gtest31_1" because column "gtest31_2.y" uses its row type
+DROP STATISTICS gtest31_2_stat;
+CREATE INDEX gtest31_2_y_idx ON gtest31_2(((y).b));
+ALTER TABLE gtest31_1 ALTER COLUMN b SET EXPRESSION AS ('hello3');
+ERROR: cannot alter table "gtest31_1" because column "gtest31_2.y" uses its row type
DROP TABLE gtest31_1, gtest31_2;
-- Check it for a partitioned table, too
CREATE TABLE gtest31_1 (a int, b text GENERATED ALWAYS AS ('hello') STORED, c text) PARTITION BY LIST (a);
diff --git a/src/test/regress/expected/generated_virtual.out b/src/test/regress/expected/generated_virtual.out
index 6300e7c1d96..aca6347babe 100644
--- a/src/test/regress/expected/generated_virtual.out
+++ b/src/test/regress/expected/generated_virtual.out
@@ -553,15 +553,11 @@ CREATE TABLE gtest4 (
a int,
b double_int GENERATED ALWAYS AS ((a * 2, a * 3)) VIRTUAL
);
-INSERT INTO gtest4 VALUES (1), (6);
-SELECT * FROM gtest4;
- a | b
----+---------
- 1 | (2,3)
- 6 | (12,18)
-(2 rows)
-
-DROP TABLE gtest4;
+ERROR: virtual generated column "b" cannot have a user-defined type
+DETAIL: Virtual generated columns that make use of user-defined types are not yet supported.
+--INSERT INTO gtest4 VALUES (1), (6);
+--SELECT * FROM gtest4;
+--DROP TABLE gtest4;
DROP TYPE double_int;
-- using tableoid is allowed
CREATE TABLE gtest_tableoid (
@@ -604,9 +600,13 @@ INSERT INTO gtest11 VALUES (1, 10), (2, 20);
GRANT SELECT (a, c) ON gtest11 TO regress_user11;
CREATE FUNCTION gf1(a int) RETURNS int AS $$ SELECT a * 3 $$ IMMUTABLE LANGUAGE SQL;
REVOKE ALL ON FUNCTION gf1(int) FROM PUBLIC;
-CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL);
-INSERT INTO gtest12 VALUES (1, 10), (2, 20);
-GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11;
+CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL); -- fails, user-defined function
+ERROR: generation expression uses user-defined function
+LINE 1: ...nt PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VI...
+ ^
+DETAIL: Virtual generated columns that make use of user-defined functions are not yet supported.
+--INSERT INTO gtest12 VALUES (1, 10), (2, 20);
+--GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11;
SET ROLE regress_user11;
SELECT a, b FROM gtest11; -- not allowed
ERROR: permission denied for table gtest11
@@ -619,15 +619,12 @@ SELECT a, c FROM gtest11; -- allowed
SELECT gf1(10); -- not allowed
ERROR: permission denied for function gf1
-INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function)
-SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed
-ERROR: permission denied for function gf1
+--INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function)
+--SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed
RESET ROLE;
-DROP FUNCTION gf1(int); -- fail
-ERROR: cannot drop function gf1(integer) because other objects depend on it
-DETAIL: column c of table gtest12 depends on function gf1(integer)
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE gtest11, gtest12;
+--DROP FUNCTION gf1(int); -- fail
+DROP TABLE gtest11;
+--DROP TABLE gtest12;
DROP FUNCTION gf1(int);
DROP USER regress_user11;
-- check constraints
@@ -637,10 +634,10 @@ INSERT INTO gtest20 (a) VALUES (30); -- violates constraint
ERROR: new row for relation "gtest20" violates check constraint "gtest20_b_check"
DETAIL: Failing row contains (30, virtual).
ALTER TABLE gtest20 ALTER COLUMN b SET EXPRESSION AS (a * 100); -- violates constraint (currently not supported)
-ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables with check constraints
+ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables with check constraints
DETAIL: Column "b" of relation "gtest20" is a virtual generated column.
ALTER TABLE gtest20 ALTER COLUMN b SET EXPRESSION AS (a * 3); -- ok (currently not supported)
-ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables with check constraints
+ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables with check constraints
DETAIL: Column "b" of relation "gtest20" is a virtual generated column.
CREATE TABLE gtest20a (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) VIRTUAL);
INSERT INTO gtest20a (a) VALUES (10);
@@ -800,11 +797,23 @@ CREATE TABLE gtest24r (a int PRIMARY KEY, b gtestdomain1range GENERATED ALWAYS A
ERROR: virtual generated column "b" cannot have a domain type
--INSERT INTO gtest24r (a) VALUES (4); -- ok
--INSERT INTO gtest24r (a) VALUES (6); -- error
+CREATE TABLE gtest24at (a int PRIMARY KEY);
+ALTER TABLE gtest24at ADD COLUMN b gtestdomain1 GENERATED ALWAYS AS (a * 2) VIRTUAL; -- error
+ERROR: virtual generated column "b" cannot have a domain type
+CREATE TABLE gtest24ata (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) VIRTUAL);
+ALTER TABLE gtest24ata ALTER COLUMN b TYPE gtestdomain1; -- error
+ERROR: virtual generated column "b" cannot have a domain type
CREATE DOMAIN gtestdomainnn AS int CHECK (VALUE IS NOT NULL);
CREATE TABLE gtest24nn (a int, b gtestdomainnn GENERATED ALWAYS AS (a * 2) VIRTUAL);
ERROR: virtual generated column "b" cannot have a domain type
--INSERT INTO gtest24nn (a) VALUES (4); -- ok
--INSERT INTO gtest24nn (a) VALUES (NULL); -- error
+-- using user-defined type not yet supported
+CREATE TABLE gtest24xxx (a gtestdomain1, b gtestdomain1, c int GENERATED ALWAYS AS (greatest(a, b)) VIRTUAL); -- error
+ERROR: generation expression uses user-defined type
+LINE 1: ...main1, b gtestdomain1, c int GENERATED ALWAYS AS (greatest(a...
+ ^
+DETAIL: Virtual generated columns that make use of user-defined types are not yet supported.
-- typed tables (currently not supported)
CREATE TYPE gtest_type AS (f1 integer, f2 text, f3 bigint);
CREATE TABLE gtest28 OF gtest_type (f1 WITH OPTIONS GENERATED ALWAYS AS (f2 *2) VIRTUAL);
@@ -1274,6 +1283,15 @@ CREATE TABLE gtest31_1 (a int, b text GENERATED ALWAYS AS ('hello') VIRTUAL, c t
CREATE TABLE gtest31_2 (x int, y gtest31_1);
ALTER TABLE gtest31_1 ALTER COLUMN b TYPE varchar; -- fails
ERROR: cannot alter table "gtest31_1" because column "gtest31_2.y" uses its row type
+-- bug #18970
+ALTER TABLE gtest31_2 ADD CONSTRAINT cc CHECK ((y).b IS NOT NULL);
+ALTER TABLE gtest31_1 ALTER COLUMN b SET EXPRESSION AS ('hello1');
+ALTER TABLE gtest31_2 DROP CONSTRAINT cc;
+CREATE STATISTICS gtest31_2_stat ON ((y).b is not null) FROM gtest31_2;
+ALTER TABLE gtest31_1 ALTER COLUMN b SET EXPRESSION AS ('hello2');
+DROP STATISTICS gtest31_2_stat;
+CREATE INDEX gtest31_2_y_idx ON gtest31_2(((y).b));
+ALTER TABLE gtest31_1 ALTER COLUMN b SET EXPRESSION AS ('hello3');
DROP TABLE gtest31_1, gtest31_2;
-- Check it for a partitioned table, too
CREATE TABLE gtest31_1 (a int, b text GENERATED ALWAYS AS ('hello') VIRTUAL, c text) PARTITION BY LIST (a);
@@ -1470,7 +1488,8 @@ create table gtest32 (
a int primary key,
b int generated always as (a * 2),
c int generated always as (10 + 10),
- d int generated always as (coalesce(a, 100))
+ d int generated always as (coalesce(a, 100)),
+ e int
);
insert into gtest32 values (1), (2);
analyze gtest32;
@@ -1531,11 +1550,11 @@ where coalesce(t2.b, 1) = 2;
explain (costs off)
select t1.a from gtest32 t1 left join gtest32 t2 on t1.a = t2.a
where coalesce(t2.b, 1) = 2 or t1.a is null;
- QUERY PLAN
--------------------------------------------------------------
+ QUERY PLAN
+-----------------------------------------
Hash Left Join
Hash Cond: (t1.a = t2.a)
- Filter: ((COALESCE((t2.a * 2), 1) = 2) OR (t1.a IS NULL))
+ Filter: (COALESCE((t2.a * 2), 1) = 2)
-> Seq Scan on gtest32 t1
-> Hash
-> Seq Scan on gtest32 t2
@@ -1554,41 +1573,66 @@ select t2.* from gtest32 t1 left join gtest32 t2 on false;
QUERY PLAN
------------------------------------------------------
Nested Loop Left Join
- Output: a, (a * 2), (20), (COALESCE(a, 100))
+ Output: a, (a * 2), (20), (COALESCE(a, 100)), e
Join Filter: false
-> Seq Scan on generated_virtual_tests.gtest32 t1
- Output: t1.a, t1.b, t1.c, t1.d
+ Output: t1.a, t1.b, t1.c, t1.d, t1.e
-> Result
- Output: a, 20, COALESCE(a, 100)
+ Output: a, e, 20, COALESCE(a, 100)
One-Time Filter: false
(8 rows)
select t2.* from gtest32 t1 left join gtest32 t2 on false;
- a | b | c | d
----+---+---+---
- | | |
- | | |
+ a | b | c | d | e
+---+---+---+---+---
+ | | | |
+ | | | |
(2 rows)
explain (verbose, costs off)
-select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20;
+select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20;
QUERY PLAN
-----------------------------------------------------
HashAggregate
- Output: a, ((a * 2)), (20), (COALESCE(a, 100))
+ Output: a, ((a * 2)), (20), (COALESCE(a, 100)), e
Hash Key: t.a
Hash Key: (t.a * 2)
Hash Key: 20
Hash Key: COALESCE(t.a, 100)
+ Hash Key: t.e
Filter: ((20) = 20)
-> Seq Scan on generated_virtual_tests.gtest32 t
- Output: a, (a * 2), 20, COALESCE(a, 100)
-(9 rows)
+ Output: a, (a * 2), 20, COALESCE(a, 100), e
+(10 rows)
+
+select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20;
+ a | b | c | d | e
+---+---+----+---+---
+ | | 20 | |
+(1 row)
+
+-- Ensure that virtual generated columns in an ALTER COLUMN TYPE USING expression are expanded
+alter table gtest32 alter column e type bigint using b;
+-- Ensure that virtual generated column references within SubLinks that should
+-- be transformed into joins can get expanded
+explain (costs off)
+select 1 from gtest32 t1 where exists
+ (select 1 from gtest32 t2 where t1.a > t2.a and t2.b = 2);
+ QUERY PLAN
+-------------------------------------
+ Nested Loop Semi Join
+ Join Filter: (t1.a > t2.a)
+ -> Seq Scan on gtest32 t1
+ -> Materialize
+ -> Seq Scan on gtest32 t2
+ Filter: ((a * 2) = 2)
+(6 rows)
-select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20;
- a | b | c | d
----+---+----+---
- | | 20 |
+select 1 from gtest32 t1 where exists
+ (select 1 from gtest32 t2 where t1.a > t2.a and t2.b = 2);
+ ?column?
+----------
+ 1
(1 row)
drop table gtest32;
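For orientation, the virtual-column basics these hunks build on, as a sketch (gv_demo is hypothetical): a VIRTUAL column stores nothing and is computed on read, so SET EXPRESSION is a catalog-only change when no check constraints are involved.

    CREATE TABLE gv_demo (a int, b int GENERATED ALWAYS AS (a * 2) VIRTUAL);
    INSERT INTO gv_demo (a) VALUES (1), (2);
    SELECT * FROM gv_demo;                             -- b reads as 2, 4
    ALTER TABLE gv_demo ALTER COLUMN b SET EXPRESSION AS (a * 10);
    SELECT * FROM gv_demo;                             -- b now reads as 10, 20
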
diff --git a/src/test/regress/expected/incremental_sort.out b/src/test/regress/expected/incremental_sort.out
index b00219643b9..5a1dd9fc022 100644
--- a/src/test/regress/expected/incremental_sort.out
+++ b/src/test/regress/expected/incremental_sort.out
@@ -1722,3 +1722,43 @@ order by t1.four, t1.two limit 1;
-> Seq Scan on tenk1 t2
(12 rows)
+--
+-- Test incremental sort for Append/MergeAppend
+--
+create table prt_tbl (a int, b int) partition by range (a);
+create table prt_tbl_1 partition of prt_tbl for values from (0) to (100);
+create table prt_tbl_2 partition of prt_tbl for values from (100) to (200);
+insert into prt_tbl select i%200, i from generate_series(1,1000)i;
+create index on prt_tbl_1(a);
+create index on prt_tbl_2(a, b);
+analyze prt_tbl;
+set enable_seqscan to off;
+set enable_bitmapscan to off;
+-- Ensure we get an incremental sort for the subpath of Append
+explain (costs off) select * from prt_tbl order by a, b;
+ QUERY PLAN
+------------------------------------------------------------
+ Append
+ -> Incremental Sort
+ Sort Key: prt_tbl_1.a, prt_tbl_1.b
+ Presorted Key: prt_tbl_1.a
+ -> Index Scan using prt_tbl_1_a_idx on prt_tbl_1
+ -> Index Only Scan using prt_tbl_2_a_b_idx on prt_tbl_2
+(6 rows)
+
+-- Ensure we get an incremental sort for the subpath of MergeAppend
+explain (costs off) select * from prt_tbl_1 union all select * from prt_tbl_2 order by a, b;
+ QUERY PLAN
+------------------------------------------------------------
+ Merge Append
+ Sort Key: prt_tbl_1.a, prt_tbl_1.b
+ -> Incremental Sort
+ Sort Key: prt_tbl_1.a, prt_tbl_1.b
+ Presorted Key: prt_tbl_1.a
+ -> Index Scan using prt_tbl_1_a_idx on prt_tbl_1
+ -> Index Only Scan using prt_tbl_2_a_b_idx on prt_tbl_2
+(7 rows)
+
+reset enable_bitmapscan;
+reset enable_seqscan;
+drop table prt_tbl;
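The plan shapes above can be reproduced by hand; the point is that an Append/MergeAppend child presorted on a key prefix (the index on (a)) now gets an Incremental Sort on top instead of a full Sort, while the child indexed on (a, b) needs no sort at all. A sketch mirroring the test's setup:

    CREATE TABLE p (a int, b int) PARTITION BY RANGE (a);
    CREATE TABLE p1 PARTITION OF p FOR VALUES FROM (0) TO (100);
    CREATE TABLE p2 PARTITION OF p FOR VALUES FROM (100) TO (200);
    CREATE INDEX ON p1 (a);           -- prefix only: Incremental Sort candidate
    CREATE INDEX ON p2 (a, b);        -- full sort key: no sort needed
    INSERT INTO p SELECT i % 200, i FROM generate_series(1, 1000) i;
    ANALYZE p;
    SET enable_seqscan = off; SET enable_bitmapscan = off;
    EXPLAIN (COSTS OFF) SELECT * FROM p ORDER BY a, b;
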
diff --git a/src/test/regress/expected/inherit.out b/src/test/regress/expected/inherit.out
index f9b0c415cfd..5b5055babdc 100644
--- a/src/test/regress/expected/inherit.out
+++ b/src/test/regress/expected/inherit.out
@@ -1898,10 +1898,11 @@ ORDER BY thousand, tenthous;
Merge Append
Sort Key: tenk1.thousand, tenk1.tenthous
-> Index Only Scan using tenk1_thous_tenthous on tenk1
- -> Sort
+ -> Incremental Sort
Sort Key: tenk1_1.thousand, tenk1_1.thousand
+ Presorted Key: tenk1_1.thousand
-> Index Only Scan using tenk1_thous_tenthous on tenk1 tenk1_1
-(6 rows)
+(7 rows)
explain (costs off)
SELECT thousand, tenthous, thousand+tenthous AS x FROM tenk1
@@ -1982,10 +1983,11 @@ ORDER BY x, y;
Merge Append
Sort Key: a.thousand, a.tenthous
-> Index Only Scan using tenk1_thous_tenthous on tenk1 a
- -> Sort
+ -> Incremental Sort
Sort Key: b.unique2, b.unique2
+ Presorted Key: b.unique2
-> Index Only Scan using tenk1_unique2 on tenk1 b
-(6 rows)
+(7 rows)
-- exercise rescan code path via a repeatedly-evaluated subquery
explain (costs off)
@@ -2281,7 +2283,7 @@ Inherits: pp1,
create table cc3 (a2 int not null no inherit) inherits (cc1);
NOTICE: moving and merging column "a2" with inherited definition
DETAIL: User-specified column moved to the position of the inherited column.
-ERROR: cannot define not-null constraint on column "a2" with NO INHERIT
+ERROR: cannot define not-null constraint with NO INHERIT on column "a2"
DETAIL: The column has an inherited not-null constraint.
-- change NO INHERIT status of inherited constraint: no dice, it's inherited
alter table cc2 add not null a2 no inherit;
@@ -2530,7 +2532,7 @@ ERROR: conflicting NO INHERIT declaration for not-null constraint on column "a"
CREATE TABLE inh_nn1 (a int not null);
CREATE TABLE inh_nn2 (a int not null no inherit) INHERITS (inh_nn1);
NOTICE: merging column "a" with inherited definition
-ERROR: cannot define not-null constraint on column "a" with NO INHERIT
+ERROR: cannot define not-null constraint with NO INHERIT on column "a"
DETAIL: The column has an inherited not-null constraint.
CREATE TABLE inh_nn3 (a int not null, b int, not null a no inherit);
ERROR: conflicting NO INHERIT declaration for not-null constraint on column "a"
diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out
index f35a0b18c37..4d5d35d0727 100644
--- a/src/test/regress/expected/join.out
+++ b/src/test/regress/expected/join.out
@@ -3639,8 +3639,8 @@ from nt3 as nt3
) as ss2
on ss2.id = nt3.nt2_id
where nt3.id = 1 and ss2.b3;
- QUERY PLAN
------------------------------------------------
+ QUERY PLAN
+----------------------------------------------
Nested Loop
-> Nested Loop
-> Index Scan using nt3_pkey on nt3
@@ -3649,7 +3649,7 @@ where nt3.id = 1 and ss2.b3;
Index Cond: (id = nt3.nt2_id)
-> Index Only Scan using nt1_pkey on nt1
Index Cond: (id = nt2.nt1_id)
- Filter: (nt2.b1 AND (id IS NOT NULL))
+ Filter: (nt2.b1 AND true)
(9 rows)
select nt3.id
@@ -3946,6 +3946,59 @@ where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
(1 row)
-- variant that isn't quite a star-schema case
+explain (verbose, costs off)
+select ss1.d1 from
+ tenk1 as t1
+ inner join tenk1 as t2
+ on t1.tenthous = t2.ten
+ inner join
+ int8_tbl as i8
+ left join int4_tbl as i4
+ inner join (select 64::information_schema.cardinal_number as d1
+ from tenk1 t3,
+ lateral (select abs(t3.unique1) + random()) ss0(x)
+ where t3.fivethous < 0) as ss1
+ on i4.f1 = ss1.d1
+ on i8.q1 = i4.f1
+ on t1.tenthous = ss1.d1
+where t1.unique1 < i4.f1;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: (64)::information_schema.cardinal_number
+ Join Filter: (t1.tenthous = ((64)::information_schema.cardinal_number)::integer)
+ -> Seq Scan on public.tenk1 t3
+ Output: t3.unique1, t3.unique2, t3.two, t3.four, t3.ten, t3.twenty, t3.hundred, t3.thousand, t3.twothousand, t3.fivethous, t3.tenthous, t3.odd, t3.even, t3.stringu1, t3.stringu2, t3.string4
+ Filter: (t3.fivethous < 0)
+ -> Nested Loop
+ Output: t1.tenthous, t2.ten
+ -> Nested Loop
+ Output: t1.tenthous, t2.ten, i4.f1
+ Join Filter: (t1.unique1 < i4.f1)
+ -> Hash Join
+ Output: t1.tenthous, t1.unique1, t2.ten
+ Hash Cond: (t2.ten = t1.tenthous)
+ -> Seq Scan on public.tenk1 t2
+ Output: t2.unique1, t2.unique2, t2.two, t2.four, t2.ten, t2.twenty, t2.hundred, t2.thousand, t2.twothousand, t2.fivethous, t2.tenthous, t2.odd, t2.even, t2.stringu1, t2.stringu2, t2.string4
+ -> Hash
+ Output: t1.tenthous, t1.unique1
+ -> Nested Loop
+ Output: t1.tenthous, t1.unique1
+ -> Subquery Scan on ss0
+ Output: ss0.x, (64)::information_schema.cardinal_number
+ -> Result
+ Output: ((abs(t3.unique1))::double precision + random())
+ -> Index Scan using tenk1_thous_tenthous on public.tenk1 t1
+ Output: t1.unique1, t1.unique2, t1.two, t1.four, t1.ten, t1.twenty, t1.hundred, t1.thousand, t1.twothousand, t1.fivethous, t1.tenthous, t1.odd, t1.even, t1.stringu1, t1.stringu2, t1.string4
+ Index Cond: (t1.tenthous = (((64)::information_schema.cardinal_number))::integer)
+ -> Seq Scan on public.int4_tbl i4
+ Output: i4.f1
+ Filter: (i4.f1 = ((64)::information_schema.cardinal_number)::integer)
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ Filter: (i8.q1 = ((64)::information_schema.cardinal_number)::integer)
+(33 rows)
+
select ss1.d1 from
tenk1 as t1
inner join tenk1 as t2
@@ -4035,6 +4088,195 @@ select * from
1 | 2 | 2
(1 row)
+-- This example demonstrates the folly of our old "have_dangerous_phv" logic
+begin;
+set local from_collapse_limit to 2;
+explain (verbose, costs off)
+select * from int8_tbl t1
+ left join
+ (select coalesce(t2.q1 + x, 0) from int8_tbl t2,
+ lateral (select t3.q1 as x from int8_tbl t3,
+ lateral (select t2.q1, t3.q1 offset 0) s))
+ on true;
+ QUERY PLAN
+------------------------------------------------------------------
+ Nested Loop Left Join
+ Output: t1.q1, t1.q2, (COALESCE((t2.q1 + t3.q1), '0'::bigint))
+ -> Seq Scan on public.int8_tbl t1
+ Output: t1.q1, t1.q2
+ -> Materialize
+ Output: (COALESCE((t2.q1 + t3.q1), '0'::bigint))
+ -> Nested Loop
+ Output: COALESCE((t2.q1 + t3.q1), '0'::bigint)
+ -> Seq Scan on public.int8_tbl t2
+ Output: t2.q1, t2.q2
+ -> Nested Loop
+ Output: t3.q1
+ -> Seq Scan on public.int8_tbl t3
+ Output: t3.q1, t3.q2
+ -> Result
+ Output: NULL::bigint, NULL::bigint
+(16 rows)
+
+rollback;
+-- ... not that the initial replacement didn't have some bugs too
+begin;
+create temp table t(i int primary key);
+explain (verbose, costs off)
+select * from t t1
+ left join (select 1 as x, * from t t2(i2)) t2ss on t1.i = t2ss.i2
+ left join t t3(i3) on false
+ left join t t4(i4) on t4.i4 > t2ss.x;
+ QUERY PLAN
+----------------------------------------------------------
+ Nested Loop Left Join
+ Output: t1.i, (1), t2.i2, i3, t4.i4
+ -> Nested Loop Left Join
+ Output: t1.i, t2.i2, (1), i3
+ Join Filter: false
+ -> Hash Left Join
+ Output: t1.i, t2.i2, (1)
+ Inner Unique: true
+ Hash Cond: (t1.i = t2.i2)
+ -> Seq Scan on pg_temp.t t1
+ Output: t1.i
+ -> Hash
+ Output: t2.i2, (1)
+ -> Seq Scan on pg_temp.t t2
+ Output: t2.i2, 1
+ -> Result
+ Output: i3
+ One-Time Filter: false
+ -> Memoize
+ Output: t4.i4
+ Cache Key: (1)
+ Cache Mode: binary
+ -> Index Only Scan using t_pkey on pg_temp.t t4
+ Output: t4.i4
+ Index Cond: (t4.i4 > (1))
+(25 rows)
+
+explain (verbose, costs off)
+select * from
+ (select k from
+ (select i, coalesce(i, j) as k from
+ (select i from t union all select 0)
+ join (select 1 as j limit 1) on i = j)
+ right join (select 2 as x) on true
+ join (select 3 as y) on i is not null
+ ),
+ lateral (select k as kl limit 1);
+ QUERY PLAN
+-------------------------------------------------------------------
+ Nested Loop
+ Output: COALESCE(t.i, (1)), ((COALESCE(t.i, (1))))
+ -> Limit
+ Output: 1
+ -> Result
+ Output: 1
+ -> Nested Loop
+ Output: t.i, ((COALESCE(t.i, (1))))
+ -> Result
+ Output: t.i, COALESCE(t.i, (1))
+ -> Append
+ -> Index Only Scan using t_pkey on pg_temp.t
+ Output: t.i
+ Index Cond: (t.i = (1))
+ -> Result
+ Output: 0
+ One-Time Filter: ((1) = 0)
+ -> Limit
+ Output: ((COALESCE(t.i, (1))))
+ -> Result
+ Output: (COALESCE(t.i, (1)))
+(21 rows)
+
+rollback;
+-- PHVs containing SubLinks are quite tricky to get right
+explain (verbose, costs off)
+select *
+from int8_tbl i8
+ inner join
+ (select (select true) as x
+ from int4_tbl i4, lateral (select i4.f1 as y limit 1) ss1
+ where i4.f1 = 0) ss2 on true
+ right join (select false as z) ss3 on true,
+ lateral (select i8.q2 as q2l where x limit 1) ss4
+where i8.q2 = 123;
+ QUERY PLAN
+----------------------------------------------------------------
+ Nested Loop
+ Output: i8.q1, i8.q2, (InitPlan 1).col1, false, (i8.q2)
+ InitPlan 1
+ -> Result
+ Output: true
+ InitPlan 2
+ -> Result
+ Output: true
+ -> Seq Scan on public.int4_tbl i4
+ Output: i4.f1
+ Filter: (i4.f1 = 0)
+ -> Nested Loop
+ Output: i8.q1, i8.q2, (i8.q2)
+ -> Subquery Scan on ss1
+ Output: ss1.y, (InitPlan 1).col1
+ -> Limit
+ Output: NULL::integer
+ -> Result
+ Output: NULL::integer
+ -> Nested Loop
+ Output: i8.q1, i8.q2, (i8.q2)
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ Filter: (i8.q2 = 123)
+ -> Limit
+ Output: (i8.q2)
+ -> Result
+ Output: i8.q2
+ One-Time Filter: ((InitPlan 1).col1)
+(29 rows)
+
+explain (verbose, costs off)
+select *
+from int8_tbl i8
+ inner join
+ (select (select true) as x
+ from int4_tbl i4, lateral (select 1 as y limit 1) ss1
+ where i4.f1 = 0) ss2 on true
+ right join (select false as z) ss3 on true,
+ lateral (select i8.q2 as q2l where x limit 1) ss4
+where i8.q2 = 123;
+ QUERY PLAN
+----------------------------------------------------------------
+ Nested Loop
+ Output: i8.q1, i8.q2, (InitPlan 1).col1, false, (i8.q2)
+ InitPlan 1
+ -> Result
+ Output: true
+ InitPlan 2
+ -> Result
+ Output: true
+ -> Limit
+ Output: NULL::integer
+ -> Result
+ Output: NULL::integer
+ -> Nested Loop
+ Output: i8.q1, i8.q2, (i8.q2)
+ -> Seq Scan on public.int4_tbl i4
+ Output: i4.f1, (InitPlan 1).col1
+ Filter: (i4.f1 = 0)
+ -> Nested Loop
+ Output: i8.q1, i8.q2, (i8.q2)
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ Filter: (i8.q2 = 123)
+ -> Limit
+ Output: (i8.q2)
+ -> Result
+ Output: i8.q2
+ One-Time Filter: ((InitPlan 1).col1)
+(27 rows)
+
-- Test proper handling of appendrel PHVs during useless-RTE removal
explain (costs off)
select * from
@@ -5384,14 +5626,14 @@ select * from
(select 1 as id) as xx
left join
(tenk1 as a1 full join (select 1 as id) as yy on (a1.unique1 = yy.id))
- on (xx.id = coalesce(yy.id));
- QUERY PLAN
----------------------------------------
+ on (xx.id = coalesce(yy.id, yy.id));
+ QUERY PLAN
+------------------------------------------
Nested Loop Left Join
-> Result
-> Hash Full Join
Hash Cond: (a1.unique1 = (1))
- Filter: (1 = COALESCE((1)))
+ Filter: (1 = COALESCE((1), (1)))
-> Seq Scan on tenk1 a1
-> Hash
-> Result
@@ -5401,7 +5643,7 @@ select * from
(select 1 as id) as xx
left join
(tenk1 as a1 full join (select 1 as id) as yy on (a1.unique1 = yy.id))
- on (xx.id = coalesce(yy.id));
+ on (xx.id = coalesce(yy.id, yy.id));
id | unique1 | unique2 | two | four | ten | twenty | hundred | thousand | twothousand | fivethous | tenthous | odd | even | stringu1 | stringu2 | string4 | id
----+---------+---------+-----+------+-----+--------+---------+----------+-------------+-----------+----------+-----+------+----------+----------+---------+----
1 | 1 | 2838 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 2 | 3 | BAAAAA | EFEAAA | OOOOxx | 1
@@ -8169,20 +8411,20 @@ select * from int4_tbl i left join
explain (verbose, costs off)
select * from int4_tbl i left join
- lateral (select coalesce(i) from int2_tbl j where i.f1 = j.f1) k on true;
- QUERY PLAN
--------------------------------------
+ lateral (select coalesce(i, i) from int2_tbl j where i.f1 = j.f1) k on true;
+ QUERY PLAN
+------------------------------------------
Nested Loop Left Join
- Output: i.f1, (COALESCE(i.*))
+ Output: i.f1, (COALESCE(i.*, i.*))
-> Seq Scan on public.int4_tbl i
Output: i.f1, i.*
-> Seq Scan on public.int2_tbl j
- Output: j.f1, COALESCE(i.*)
+ Output: j.f1, COALESCE(i.*, i.*)
Filter: (i.f1 = j.f1)
(7 rows)
select * from int4_tbl i left join
- lateral (select coalesce(i) from int2_tbl j where i.f1 = j.f1) k on true;
+ lateral (select coalesce(i, i) from int2_tbl j where i.f1 = j.f1) k on true;
f1 | coalesce
-------------+----------
0 | (0)
@@ -9351,14 +9593,14 @@ CREATE STATISTICS group_tbl_stat (ndistinct) ON a, b FROM group_tbl;
ANALYZE group_tbl;
EXPLAIN (COSTS OFF)
SELECT 1 FROM group_tbl t1
- LEFT JOIN (SELECT a c1, COALESCE(a) c2 FROM group_tbl t2) s ON TRUE
+ LEFT JOIN (SELECT a c1, COALESCE(a, a) c2 FROM group_tbl t2) s ON TRUE
GROUP BY s.c1, s.c2;
- QUERY PLAN
---------------------------------------------
+ QUERY PLAN
+------------------------------------------------
Group
- Group Key: t2.a, (COALESCE(t2.a))
+ Group Key: t2.a, (COALESCE(t2.a, t2.a))
-> Sort
- Sort Key: t2.a, (COALESCE(t2.a))
+ Sort Key: t2.a, (COALESCE(t2.a, t2.a))
-> Nested Loop Left Join
-> Seq Scan on group_tbl t1
-> Seq Scan on group_tbl t2
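A recurring edit in this file is COALESCE(x) becoming COALESCE(x, x). That appears to track a planner change simplifying single-argument COALESCE to its bare argument, so tests that used it as a non-strict wrapper now need two arguments to keep the expression opaque. Roughly:

    EXPLAIN (VERBOSE, COSTS OFF)
    SELECT COALESCE(q1) FROM int8_tbl;        -- output list shows plain q1
    EXPLAIN (VERBOSE, COSTS OFF)
    SELECT COALESCE(q1, q1) FROM int8_tbl;    -- COALESCE survives into the plan
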
diff --git a/src/test/regress/expected/matview.out b/src/test/regress/expected/matview.out
index 54939ecc6b0..c56c9fa3a25 100644
--- a/src/test/regress/expected/matview.out
+++ b/src/test/regress/expected/matview.out
@@ -587,7 +587,7 @@ CREATE MATERIALIZED VIEW drop_idx_matview AS
NOTICE: index "mvtest_drop_idx" does not exist, skipping
CREATE UNIQUE INDEX mvtest_drop_idx ON drop_idx_matview (i);
REFRESH MATERIALIZED VIEW CONCURRENTLY drop_idx_matview;
-ERROR: could not find suitable unique index on materialized view
+ERROR: could not find suitable unique index on materialized view "drop_idx_matview"
DROP MATERIALIZED VIEW drop_idx_matview; -- clean up
RESET search_path;
-- make sure that create WITH NO DATA works via SPI
diff --git a/src/test/regress/expected/memoize.out b/src/test/regress/expected/memoize.out
index 38dfaf021c9..150dc1b44cf 100644
--- a/src/test/regress/expected/memoize.out
+++ b/src/test/regress/expected/memoize.out
@@ -25,6 +25,7 @@ begin
ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
ln := regexp_replace(ln, 'loops=\d+', 'loops=N');
ln := regexp_replace(ln, 'Index Searches: \d+', 'Index Searches: N');
+ ln := regexp_replace(ln, 'Memory: \d+kB', 'Memory: NkB');
return next ln;
end loop;
end;
@@ -500,3 +501,62 @@ RESET max_parallel_workers_per_gather;
RESET parallel_tuple_cost;
RESET parallel_setup_cost;
RESET min_parallel_table_scan_size;
+-- Ensure memoize works for ANTI joins
+CREATE TABLE tab_anti (a int, b boolean);
+INSERT INTO tab_anti SELECT i%3, false FROM generate_series(1,100)i;
+ANALYZE tab_anti;
+-- Ensure we get a Memoize plan for ANTI join
+SELECT explain_memoize('
+SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN
+LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2
+ON t1.a+1 = t2.a
+WHERE t2.a IS NULL;', false);
+ explain_memoize
+--------------------------------------------------------------------------------------------
+ Aggregate (actual rows=1.00 loops=N)
+ -> Nested Loop Anti Join (actual rows=33.00 loops=N)
+ -> Seq Scan on tab_anti t1 (actual rows=100.00 loops=N)
+ -> Memoize (actual rows=0.67 loops=N)
+ Cache Key: (t1.a + 1), t1.a
+ Cache Mode: binary
+ Hits: 97 Misses: 3 Evictions: Zero Overflows: 0 Memory Usage: NkB
+ -> Subquery Scan on t2 (actual rows=0.67 loops=N)
+ Filter: ((t1.a + 1) = t2.a)
+ Rows Removed by Filter: 2
+ -> Unique (actual rows=2.67 loops=N)
+ -> Sort (actual rows=67.33 loops=N)
+ Sort Key: t2_1.a
+ Sort Method: quicksort Memory: NkB
+ -> Seq Scan on tab_anti t2_1 (actual rows=100.00 loops=N)
+(15 rows)
+
+-- And check we get the expected results.
+SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN
+LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2
+ON t1.a+1 = t2.a
+WHERE t2.a IS NULL;
+ count
+-------
+ 33
+(1 row)
+
+-- Ensure we do not add memoize node for SEMI join
+EXPLAIN (COSTS OFF)
+SELECT * FROM tab_anti t1 WHERE t1.a IN
+ (SELECT a FROM tab_anti t2 WHERE t2.b IN
+ (SELECT t1.b FROM tab_anti t3 WHERE t2.a > 1 OFFSET 0));
+ QUERY PLAN
+-------------------------------------------------
+ Nested Loop Semi Join
+ -> Seq Scan on tab_anti t1
+ -> Nested Loop Semi Join
+ Join Filter: (t1.a = t2.a)
+ -> Seq Scan on tab_anti t2
+ -> Subquery Scan on "ANY_subquery"
+ Filter: (t2.b = "ANY_subquery".b)
+ -> Result
+ One-Time Filter: (t2.a > 1)
+ -> Seq Scan on tab_anti t3
+(10 rows)
+
+DROP TABLE tab_anti;
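Re-running the anti-join case by hand (the table is dropped just above, so recreate it first; whether Memoize is chosen remains a cost-based decision):

    CREATE TABLE tab_anti (a int, b boolean);
    INSERT INTO tab_anti SELECT i % 3, false FROM generate_series(1, 100) i;
    ANALYZE tab_anti;
    EXPLAIN (COSTS OFF)
    SELECT count(*) FROM tab_anti t1 LEFT JOIN
      LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2
      ON t1.a + 1 = t2.a
    WHERE t2.a IS NULL;
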
diff --git a/src/test/regress/expected/numeric.out b/src/test/regress/expected/numeric.out
index 072d76ce131..c58e232a263 100644
--- a/src/test/regress/expected/numeric.out
+++ b/src/test/regress/expected/numeric.out
@@ -1464,9 +1464,21 @@ ERROR: count must be greater than zero
SELECT width_bucket(3.5::float8, 3.0::float8, 3.0::float8, 888);
ERROR: lower bound cannot equal upper bound
SELECT width_bucket('NaN', 3.0, 4.0, 888);
-ERROR: operand, lower bound, and upper bound cannot be NaN
+ width_bucket
+--------------
+ 889
+(1 row)
+
+SELECT width_bucket('NaN'::float8, 3.0::float8, 4.0::float8, 888);
+ width_bucket
+--------------
+ 889
+(1 row)
+
+SELECT width_bucket(0, 'NaN', 4.0, 888);
+ERROR: lower and upper bounds cannot be NaN
SELECT width_bucket(0::float8, 'NaN', 4.0::float8, 888);
-ERROR: operand, lower bound, and upper bound cannot be NaN
+ERROR: lower and upper bounds cannot be NaN
SELECT width_bucket(2.0, 3.0, '-inf', 888);
ERROR: lower and upper bounds must be finite
SELECT width_bucket(0::float8, '-inf', 4.0::float8, 888);
@@ -3860,15 +3872,15 @@ ERROR: factorial of a negative number is undefined
-- Tests for pg_lsn()
--
SELECT pg_lsn(23783416::numeric);
- pg_lsn
------------
- 0/16AE7F8
+ pg_lsn
+------------
+ 0/016AE7F8
(1 row)
SELECT pg_lsn(0::numeric);
- pg_lsn
---------
- 0/0
+ pg_lsn
+------------
+ 0/00000000
(1 row)
SELECT pg_lsn(18446744073709551615::numeric);
diff --git a/src/test/regress/expected/pg_lsn.out b/src/test/regress/expected/pg_lsn.out
index b27eec7c015..8ab59b2e445 100644
--- a/src/test/regress/expected/pg_lsn.out
+++ b/src/test/regress/expected/pg_lsn.out
@@ -41,9 +41,9 @@ SELECT * FROM pg_input_error_info('16AE7F7', 'pg_lsn');
-- Min/Max aggregation
SELECT MIN(f1), MAX(f1) FROM PG_LSN_TBL;
- min | max
------+-------------------
- 0/0 | FFFFFFFF/FFFFFFFF
+ min | max
+------------+-------------------
+ 0/00000000 | FFFFFFFF/FFFFFFFF
(1 row)
DROP TABLE PG_LSN_TBL;
@@ -85,21 +85,21 @@ SELECT '0/16AE7F8'::pg_lsn - '0/16AE7F7'::pg_lsn;
(1 row)
SELECT '0/16AE7F7'::pg_lsn + 16::numeric;
- ?column?
------------
- 0/16AE807
+ ?column?
+------------
+ 0/016AE807
(1 row)
SELECT 16::numeric + '0/16AE7F7'::pg_lsn;
- ?column?
------------
- 0/16AE807
+ ?column?
+------------
+ 0/016AE807
(1 row)
SELECT '0/16AE7F7'::pg_lsn - 16::numeric;
- ?column?
------------
- 0/16AE7E7
+ ?column?
+------------
+ 0/016AE7E7
(1 row)
SELECT 'FFFFFFFF/FFFFFFFE'::pg_lsn + 1::numeric;
@@ -111,9 +111,9 @@ SELECT 'FFFFFFFF/FFFFFFFE'::pg_lsn + 1::numeric;
SELECT 'FFFFFFFF/FFFFFFFE'::pg_lsn + 2::numeric; -- out of range error
ERROR: pg_lsn out of range
SELECT '0/1'::pg_lsn - 1::numeric;
- ?column?
-----------
- 0/0
+ ?column?
+------------
+ 0/00000000
(1 row)
SELECT '0/1'::pg_lsn - 2::numeric; -- out of range error
@@ -125,9 +125,9 @@ SELECT '0/0'::pg_lsn + ('FFFFFFFF/FFFFFFFF'::pg_lsn - '0/0'::pg_lsn);
(1 row)
SELECT 'FFFFFFFF/FFFFFFFF'::pg_lsn - ('FFFFFFFF/FFFFFFFF'::pg_lsn - '0/0'::pg_lsn);
- ?column?
-----------
- 0/0
+ ?column?
+------------
+ 0/00000000
(1 row)
SELECT '0/16AE7F7'::pg_lsn + 'NaN'::numeric;
@@ -164,107 +164,107 @@ SELECT DISTINCT (i || '/' || j)::pg_lsn f
generate_series(1, 5) k
WHERE i <= 10 AND j > 0 AND j <= 10
ORDER BY f;
- f
--------
- 1/1
- 1/2
- 1/3
- 1/4
- 1/5
- 1/6
- 1/7
- 1/8
- 1/9
- 1/10
- 2/1
- 2/2
- 2/3
- 2/4
- 2/5
- 2/6
- 2/7
- 2/8
- 2/9
- 2/10
- 3/1
- 3/2
- 3/3
- 3/4
- 3/5
- 3/6
- 3/7
- 3/8
- 3/9
- 3/10
- 4/1
- 4/2
- 4/3
- 4/4
- 4/5
- 4/6
- 4/7
- 4/8
- 4/9
- 4/10
- 5/1
- 5/2
- 5/3
- 5/4
- 5/5
- 5/6
- 5/7
- 5/8
- 5/9
- 5/10
- 6/1
- 6/2
- 6/3
- 6/4
- 6/5
- 6/6
- 6/7
- 6/8
- 6/9
- 6/10
- 7/1
- 7/2
- 7/3
- 7/4
- 7/5
- 7/6
- 7/7
- 7/8
- 7/9
- 7/10
- 8/1
- 8/2
- 8/3
- 8/4
- 8/5
- 8/6
- 8/7
- 8/8
- 8/9
- 8/10
- 9/1
- 9/2
- 9/3
- 9/4
- 9/5
- 9/6
- 9/7
- 9/8
- 9/9
- 9/10
- 10/1
- 10/2
- 10/3
- 10/4
- 10/5
- 10/6
- 10/7
- 10/8
- 10/9
- 10/10
+ f
+-------------
+ 1/00000001
+ 1/00000002
+ 1/00000003
+ 1/00000004
+ 1/00000005
+ 1/00000006
+ 1/00000007
+ 1/00000008
+ 1/00000009
+ 1/00000010
+ 2/00000001
+ 2/00000002
+ 2/00000003
+ 2/00000004
+ 2/00000005
+ 2/00000006
+ 2/00000007
+ 2/00000008
+ 2/00000009
+ 2/00000010
+ 3/00000001
+ 3/00000002
+ 3/00000003
+ 3/00000004
+ 3/00000005
+ 3/00000006
+ 3/00000007
+ 3/00000008
+ 3/00000009
+ 3/00000010
+ 4/00000001
+ 4/00000002
+ 4/00000003
+ 4/00000004
+ 4/00000005
+ 4/00000006
+ 4/00000007
+ 4/00000008
+ 4/00000009
+ 4/00000010
+ 5/00000001
+ 5/00000002
+ 5/00000003
+ 5/00000004
+ 5/00000005
+ 5/00000006
+ 5/00000007
+ 5/00000008
+ 5/00000009
+ 5/00000010
+ 6/00000001
+ 6/00000002
+ 6/00000003
+ 6/00000004
+ 6/00000005
+ 6/00000006
+ 6/00000007
+ 6/00000008
+ 6/00000009
+ 6/00000010
+ 7/00000001
+ 7/00000002
+ 7/00000003
+ 7/00000004
+ 7/00000005
+ 7/00000006
+ 7/00000007
+ 7/00000008
+ 7/00000009
+ 7/00000010
+ 8/00000001
+ 8/00000002
+ 8/00000003
+ 8/00000004
+ 8/00000005
+ 8/00000006
+ 8/00000007
+ 8/00000008
+ 8/00000009
+ 8/00000010
+ 9/00000001
+ 9/00000002
+ 9/00000003
+ 9/00000004
+ 9/00000005
+ 9/00000006
+ 9/00000007
+ 9/00000008
+ 9/00000009
+ 9/00000010
+ 10/00000001
+ 10/00000002
+ 10/00000003
+ 10/00000004
+ 10/00000005
+ 10/00000006
+ 10/00000007
+ 10/00000008
+ 10/00000009
+ 10/00000010
(100 rows)
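Every hunk in this file reflects one formatting change: pg_lsn output now zero-pads the offset part to eight hex digits, giving it a fixed width; input parsing is unchanged. For example:

    SELECT '0/16AE7F8'::pg_lsn;    -- prints 0/016AE7F8
    SELECT '1/10'::pg_lsn;         -- prints 1/00000010
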
diff --git a/src/test/regress/expected/predicate.out b/src/test/regress/expected/predicate.out
index b79037748b7..59bfe33bb1c 100644
--- a/src/test/regress/expected/predicate.out
+++ b/src/test/regress/expected/predicate.out
@@ -84,10 +84,10 @@ SELECT * FROM pred_tab t WHERE t.a IS NULL OR t.c IS NULL;
-- are provably false
EXPLAIN (COSTS OFF)
SELECT * FROM pred_tab t WHERE t.b IS NULL OR t.c IS NULL;
- QUERY PLAN
-----------------------------------------
+ QUERY PLAN
+------------------------
Seq Scan on pred_tab t
- Filter: ((b IS NULL) OR (c IS NULL))
+ Filter: (b IS NULL)
(2 rows)
--
@@ -231,6 +231,54 @@ SELECT * FROM pred_tab t1
-> Seq Scan on pred_tab t3
(9 rows)
+--
+-- Tests for NullTest reduction in EXISTS sublink
+--
+-- Ensure the IS_NOT_NULL qual is ignored
+EXPLAIN (COSTS OFF)
+SELECT * FROM pred_tab t1
+ LEFT JOIN pred_tab t2 ON EXISTS
+ (SELECT 1 FROM pred_tab t3, pred_tab t4, pred_tab t5, pred_tab t6
+ WHERE t1.a = t3.a AND t6.a IS NOT NULL);
+ QUERY PLAN
+---------------------------------------------------------
+ Nested Loop Left Join
+ Join Filter: EXISTS(SubPlan 1)
+ -> Seq Scan on pred_tab t1
+ -> Materialize
+ -> Seq Scan on pred_tab t2
+ SubPlan 1
+ -> Nested Loop
+ -> Nested Loop
+ -> Nested Loop
+ -> Seq Scan on pred_tab t4
+ -> Materialize
+ -> Seq Scan on pred_tab t3
+ Filter: (t1.a = a)
+ -> Materialize
+ -> Seq Scan on pred_tab t5
+ -> Materialize
+ -> Seq Scan on pred_tab t6
+(17 rows)
+
+-- Ensure the IS_NULL qual is reduced to constant-FALSE
+EXPLAIN (COSTS OFF)
+SELECT * FROM pred_tab t1
+ LEFT JOIN pred_tab t2 ON EXISTS
+ (SELECT 1 FROM pred_tab t3, pred_tab t4, pred_tab t5, pred_tab t6
+ WHERE t1.a = t3.a AND t6.a IS NULL);
+ QUERY PLAN
+-------------------------------------
+ Nested Loop Left Join
+ Join Filter: (InitPlan 1).col1
+ InitPlan 1
+ -> Result
+ One-Time Filter: false
+ -> Seq Scan on pred_tab t1
+ -> Materialize
+ -> Seq Scan on pred_tab t2
+(8 rows)
+
DROP TABLE pred_tab;
-- Validate we handle IS NULL and IS NOT NULL quals correctly with inheritance
-- parents.
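
The reduction shown in the first hunk relies on the planner proving a NullTest false from a NOT NULL column constraint: only the qual on the nullable column survives, and an IS NULL qual that can never hold collapses to a constant-false one-time filter. A minimal sketch, assuming a hypothetical table with one nullable and one non-nullable column:

    CREATE TABLE null_reduce_demo (b int, c int NOT NULL);
    EXPLAIN (COSTS OFF)
    SELECT * FROM null_reduce_demo WHERE b IS NULL OR c IS NULL;
    -- expected Filter: (b IS NULL); the c IS NULL arm is provably false
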
diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out
index c25062c288f..602a6b255bc 100644
--- a/src/test/regress/expected/privileges.out
+++ b/src/test/regress/expected/privileges.out
@@ -2568,6 +2568,26 @@ SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole,
SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole,
'SELECT, fake_privilege', FALSE); -- error
ERROR: unrecognized privilege type: "fake_privilege"
+-- Test quoting and dequoting of user names in ACLs
+CREATE ROLE "regress_""quoted";
+SELECT makeaclitem('regress_"quoted'::regrole, 'regress_"quoted'::regrole,
+ 'SELECT', TRUE);
+ makeaclitem
+------------------------------------------
+ "regress_""quoted"=r*/"regress_""quoted"
+(1 row)
+
+SELECT '"regress_""quoted"=r*/"regress_""quoted"'::aclitem;
+ aclitem
+------------------------------------------
+ "regress_""quoted"=r*/"regress_""quoted"
+(1 row)
+
+SELECT '""=r*/""'::aclitem; -- used to be misparsed as """"
+ERROR: a name must follow the "/" sign
+LINE 1: SELECT '""=r*/""'::aclitem;
+ ^
+DROP ROLE "regress_""quoted";
-- Test non-throwing aclitem I/O
SELECT pg_input_is_valid('regress_priv_user1=r/regress_priv_user2', 'aclitem');
pg_input_is_valid
@@ -3220,7 +3240,8 @@ REVOKE MAINTAIN ON lock_table FROM regress_locktable_user;
DROP TABLE lock_table;
DROP USER regress_locktable_user;
-- test to check privileges of system views pg_shmem_allocations,
--- pg_shmem_allocations_numa and pg_backend_memory_contexts.
+-- pg_shmem_allocations_numa, pg_dsm_registry_allocations, and
+-- pg_backend_memory_contexts.
-- switch to superuser
\c -
CREATE ROLE regress_readallstats;
@@ -3248,6 +3269,12 @@ SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations_numa','S
f
(1 row)
+SELECT has_table_privilege('regress_readallstats','pg_dsm_registry_allocations','SELECT'); -- no
+ has_table_privilege
+---------------------
+ f
+(1 row)
+
GRANT pg_read_all_stats TO regress_readallstats;
SELECT has_table_privilege('regress_readallstats','pg_aios','SELECT'); -- yes
has_table_privilege
@@ -3273,6 +3300,12 @@ SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations_numa','S
t
(1 row)
+SELECT has_table_privilege('regress_readallstats','pg_dsm_registry_allocations','SELECT'); -- yes
+ has_table_privilege
+---------------------
+ t
+(1 row)
+
-- run query to ensure that functions within views can be executed
SET ROLE regress_readallstats;
SELECT COUNT(*) >= 0 AS ok FROM pg_aios;
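
The new aclitem cases above pin down the quoting rules: a double quote embedded in a role name is doubled inside the quoted form on output, the same form is accepted back on input, and an empty quoted name after the "/" grantor separator is rejected instead of being misparsed. A small round-trip sketch with an illustrative role name:

    CREATE ROLE "odd""name";
    SELECT makeaclitem('odd"name'::regrole, 'odd"name'::regrole, 'SELECT', true);
    -- "odd""name"=r*/"odd""name"
    DROP ROLE "odd""name";
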
diff --git a/src/test/regress/expected/psql.out b/src/test/regress/expected/psql.out
index cf48ae6d0c2..236eba2540e 100644
--- a/src/test/regress/expected/psql.out
+++ b/src/test/regress/expected/psql.out
@@ -160,12 +160,12 @@ LINE 1: SELECT $1, $2
foo4 | bar4
(1 row)
--- \close (extended query protocol)
-\close
-\close: missing required argument
-\close ''
-\close stmt2
-\close stmt2
+-- \close_prepared (extended query protocol)
+\close_prepared
+\close_prepared: missing required argument
+\close_prepared ''
+\close_prepared stmt2
+\close_prepared stmt2
SELECT name, statement FROM pg_prepared_statements ORDER BY name;
name | statement
-------+----------------
@@ -4666,7 +4666,7 @@ bar 'bar' "bar"
\C arg1
\c arg1 arg2 arg3 arg4
\cd arg1
- \close stmt1
+ \close_prepared stmt1
\conninfo
\copy arg1 arg2 arg3 arg4 arg5 arg6
\copyright
diff --git a/src/test/regress/expected/psql_pipeline.out b/src/test/regress/expected/psql_pipeline.out
index a30dec088b9..a0816fb10b6 100644
--- a/src/test/regress/expected/psql_pipeline.out
+++ b/src/test/regress/expected/psql_pipeline.out
@@ -228,192 +228,6 @@ BEGIN \bind \sendpipeline
INSERT INTO psql_pipeline VALUES ($1) \bind 1 \sendpipeline
COMMIT \bind \sendpipeline
\endpipeline
--- COPY FROM STDIN
--- with \sendpipeline and \bind
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\endpipeline
- ?column?
-----------
- val1
-(1 row)
-
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\endpipeline
- ?column?
-----------
- val1
-(1 row)
-
--- COPY FROM STDIN with \flushrequest + \getresults
--- with \sendpipeline and \bind
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\flushrequest
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-message type 0x5a arrived from server while idle
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\flushrequest
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-message type 0x5a arrived from server while idle
-\endpipeline
--- COPY FROM STDIN with \syncpipeline + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\syncpipeline
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\syncpipeline
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-\endpipeline
--- COPY TO STDOUT
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\endpipeline
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\endpipeline
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
--- COPY TO STDOUT with \flushrequest + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\flushrequest
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\flushrequest
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
-\endpipeline
--- COPY TO STDOUT with \syncpipeline + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\syncpipeline
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\syncpipeline
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
-\endpipeline
-- Use \parse and \bind_named
\startpipeline
SELECT $1 \parse ''
@@ -740,7 +554,7 @@ SELECT COUNT(*) FROM psql_pipeline \bind \sendpipeline
count
-------
- 7
+ 1
(1 row)
-- After an error, pipeline is aborted and requires \syncpipeline to be
@@ -750,7 +564,7 @@ SELECT $1 \bind \sendpipeline
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
\flushrequest
\getresults
ERROR: bind message supplies 0 parameters, but prepared statement "" requires 1
@@ -758,7 +572,7 @@ ERROR: bind message supplies 0 parameters, but prepared statement "" requires 1
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
-- Sync allows pipeline to recover.
\syncpipeline
\getresults
@@ -766,7 +580,7 @@ Pipeline aborted, command did not run
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
\flushrequest
\getresults
?column?
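
The \close meta-command has been renamed to \close_prepared, which better matches what it does: it closes a named prepared statement over the extended query protocol, not a connection or a portal. A minimal usage sketch outside a pipeline (assuming the extended-protocol meta-commands behave as in the tests above):

    SELECT $1 \parse stmt1
    \bind_named stmt1 42 \g
    \close_prepared stmt1
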
diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out
index 4de96c04f9d..1ec3fa34a2d 100644
--- a/src/test/regress/expected/publication.out
+++ b/src/test/regress/expected/publication.out
@@ -34,7 +34,8 @@ ERROR: conflicting or redundant options
LINE 1: ...pub_xxx WITH (publish_generated_columns = stored, publish_ge...
^
CREATE PUBLICATION testpub_xxx WITH (publish_generated_columns = foo);
-ERROR: publish_generated_columns requires a "none" or "stored" value
+ERROR: invalid value for publication parameter "publish_generated_columns": "foo"
+DETAIL: Valid values are "none" and "stored".
\dRp
List of publications
Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
@@ -524,16 +525,22 @@ Tables from schemas:
"testpub_rf_schema2"
-- fail - virtual generated column uses user-defined function
+-- (Actually, this already fails at CREATE TABLE rather than at CREATE
+-- PUBLICATION, but let's keep the test in case the former gets
+-- relaxed sometime.)
CREATE TABLE testpub_rf_tbl6 (id int PRIMARY KEY, x int, y int GENERATED ALWAYS AS (x * testpub_rf_func2()) VIRTUAL);
+ERROR: generation expression uses user-defined function
+LINE 1: ...RIMARY KEY, x int, y int GENERATED ALWAYS AS (x * testpub_rf...
+ ^
+DETAIL: Virtual generated columns that make use of user-defined functions are not yet supported.
CREATE PUBLICATION testpub7 FOR TABLE testpub_rf_tbl6 WHERE (y > 100);
-ERROR: invalid publication WHERE expression
-DETAIL: User-defined or built-in mutable functions are not allowed.
+ERROR: relation "testpub_rf_tbl6" does not exist
-- test that SET EXPRESSION is rejected, because it could affect a row filter
SET client_min_messages = 'ERROR';
CREATE TABLE testpub_rf_tbl7 (id int PRIMARY KEY, x int, y int GENERATED ALWAYS AS (x * 111) VIRTUAL);
CREATE PUBLICATION testpub8 FOR TABLE testpub_rf_tbl7 WHERE (y > 100);
ALTER TABLE testpub_rf_tbl7 ALTER COLUMN y SET EXPRESSION AS (x * testpub_rf_func2());
-ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables that are part of a publication
+ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables that are part of a publication
DETAIL: Column "y" of relation "testpub_rf_tbl7" is a virtual generated column.
RESET client_min_messages;
DROP TABLE testpub_rf_tbl1;
@@ -541,7 +548,7 @@ DROP TABLE testpub_rf_tbl2;
DROP TABLE testpub_rf_tbl3;
DROP TABLE testpub_rf_tbl4;
DROP TABLE testpub_rf_tbl5;
-DROP TABLE testpub_rf_tbl6;
+--DROP TABLE testpub_rf_tbl6;
DROP TABLE testpub_rf_schema1.testpub_rf_tbl5;
DROP TABLE testpub_rf_schema2.testpub_rf_tbl6;
DROP SCHEMA testpub_rf_schema1;
@@ -1927,3 +1934,24 @@ RESET client_min_messages;
RESET SESSION AUTHORIZATION;
DROP ROLE regress_publication_user, regress_publication_user2;
DROP ROLE regress_publication_user_dummy;
+-- stage objects for pg_dump tests
+CREATE SCHEMA pubme CREATE TABLE t0 (c int, d int) CREATE TABLE t1 (c int);
+CREATE SCHEMA pubme2 CREATE TABLE t0 (c int, d int);
+SET client_min_messages = 'ERROR';
+CREATE PUBLICATION dump_pub_qual_1ct FOR
+ TABLE ONLY pubme.t0 (c, d) WHERE (c > 0);
+CREATE PUBLICATION dump_pub_qual_2ct FOR
+ TABLE ONLY pubme.t0 (c) WHERE (c > 0),
+ TABLE ONLY pubme.t1 (c);
+CREATE PUBLICATION dump_pub_nsp_1ct FOR
+ TABLES IN SCHEMA pubme;
+CREATE PUBLICATION dump_pub_nsp_2ct FOR
+ TABLES IN SCHEMA pubme,
+ TABLES IN SCHEMA pubme2;
+CREATE PUBLICATION dump_pub_all FOR
+ TABLE ONLY pubme.t0,
+ TABLE ONLY pubme.t1 WHERE (c < 0),
+ TABLES IN SCHEMA pubme,
+ TABLES IN SCHEMA pubme2
+ WITH (publish_via_partition_root = true);
+RESET client_min_messages;
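
The reworded error above shows that publish_generated_columns is now validated as an enumerated publication parameter with values "none" and "stored". A minimal valid usage sketch (publication name and the FOR ALL TABLES form are illustrative):

    CREATE PUBLICATION demo_gen_pub FOR ALL TABLES
      WITH (publish_generated_columns = stored);
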
diff --git a/src/test/regress/expected/regproc.out b/src/test/regress/expected/regproc.out
index 97b917502ca..84c84aef420 100644
--- a/src/test/regress/expected/regproc.out
+++ b/src/test/regress/expected/regproc.out
@@ -192,6 +192,18 @@ SELECT regnamespace('"pg_catalog"');
pg_catalog
(1 row)
+SELECT regdatabase('template1');
+ regdatabase
+-------------
+ template1
+(1 row)
+
+SELECT regdatabase('"template1"');
+ regdatabase
+-------------
+ template1
+(1 row)
+
SELECT to_regrole('regress_regrole_test');
to_regrole
----------------------
@@ -216,6 +228,132 @@ SELECT to_regnamespace('"pg_catalog"');
pg_catalog
(1 row)
+SELECT to_regdatabase('template1');
+ to_regdatabase
+----------------
+ template1
+(1 row)
+
+SELECT to_regdatabase('"template1"');
+ to_regdatabase
+----------------
+ template1
+(1 row)
+
+-- special "single dash" case
+SELECT regproc('-')::oid;
+ regproc
+---------
+ 0
+(1 row)
+
+SELECT regprocedure('-')::oid;
+ regprocedure
+--------------
+ 0
+(1 row)
+
+SELECT regclass('-')::oid;
+ regclass
+----------
+ 0
+(1 row)
+
+SELECT regcollation('-')::oid;
+ regcollation
+--------------
+ 0
+(1 row)
+
+SELECT regtype('-')::oid;
+ regtype
+---------
+ 0
+(1 row)
+
+SELECT regconfig('-')::oid;
+ regconfig
+-----------
+ 0
+(1 row)
+
+SELECT regdictionary('-')::oid;
+ regdictionary
+---------------
+ 0
+(1 row)
+
+SELECT regrole('-')::oid;
+ regrole
+---------
+ 0
+(1 row)
+
+SELECT regnamespace('-')::oid;
+ regnamespace
+--------------
+ 0
+(1 row)
+
+SELECT regdatabase('-')::oid;
+ regdatabase
+-------------
+ 0
+(1 row)
+
+SELECT to_regproc('-')::oid;
+ to_regproc
+------------
+ 0
+(1 row)
+
+SELECT to_regprocedure('-')::oid;
+ to_regprocedure
+-----------------
+ 0
+(1 row)
+
+SELECT to_regclass('-')::oid;
+ to_regclass
+-------------
+ 0
+(1 row)
+
+SELECT to_regcollation('-')::oid;
+ to_regcollation
+-----------------
+ 0
+(1 row)
+
+SELECT to_regtype('-')::oid;
+ to_regtype
+------------
+ 0
+(1 row)
+
+SELECT to_regrole('-')::oid;
+ to_regrole
+------------
+ 0
+(1 row)
+
+SELECT to_regnamespace('-')::oid;
+ to_regnamespace
+-----------------
+ 0
+(1 row)
+
+SELECT to_regdatabase('-')::oid;
+ to_regdatabase
+----------------
+ 0
+(1 row)
+
+-- constant cannot be used here
+CREATE TABLE regrole_test (rolid OID DEFAULT 'regress_regrole_test'::regrole);
+ERROR: constant of the type regrole cannot be used here
+CREATE TABLE regdatabase_test (datid OID DEFAULT 'template1'::regdatabase);
+ERROR: constant of the type regdatabase cannot be used here
/* If objects don't exist, raise errors. */
DROP ROLE regress_regrole_test;
-- without schemaname
@@ -305,6 +443,18 @@ SELECT regnamespace('foo.bar');
ERROR: invalid name syntax
LINE 1: SELECT regnamespace('foo.bar');
^
+SELECT regdatabase('Nonexistent');
+ERROR: database "nonexistent" does not exist
+LINE 1: SELECT regdatabase('Nonexistent');
+ ^
+SELECT regdatabase('"Nonexistent"');
+ERROR: database "Nonexistent" does not exist
+LINE 1: SELECT regdatabase('"Nonexistent"');
+ ^
+SELECT regdatabase('foo.bar');
+ERROR: invalid name syntax
+LINE 1: SELECT regdatabase('foo.bar');
+ ^
/* If objects don't exist, return NULL with no error. */
-- without schemaname
SELECT to_regoper('||//');
@@ -447,6 +597,24 @@ SELECT to_regnamespace('foo.bar');
(1 row)
+SELECT to_regdatabase('Nonexistent');
+ to_regdatabase
+----------------
+
+(1 row)
+
+SELECT to_regdatabase('"Nonexistent"');
+ to_regdatabase
+----------------
+
+(1 row)
+
+SELECT to_regdatabase('foo.bar');
+ to_regdatabase
+----------------
+
+(1 row)
+
-- Test to_regtypemod
SELECT to_regtypemod('text');
to_regtypemod
@@ -569,6 +737,12 @@ SELECT * FROM pg_input_error_info('no_such_type', 'regtype');
type "no_such_type" does not exist | | | 42704
(1 row)
+SELECT * FROM pg_input_error_info('Nonexistent', 'regdatabase');
+ message | detail | hint | sql_error_code
+---------------------------------------+--------+------+----------------
+ database "nonexistent" does not exist | | | 42704
+(1 row)
+
-- Some cases that should be soft errors, but are not yet
SELECT * FROM pg_input_error_info('incorrect type name syntax', 'regtype');
ERROR: syntax error at or near "type"
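
As the new tests show, regdatabase follows the established reg* conventions: quoted names fold like identifiers, '-' maps to OID 0, a qualified name is a syntax error, a cast of a nonexistent name raises an error, and to_regdatabase returns NULL instead. A compact sketch of the soft-error side:

    SELECT to_regdatabase('no_such_db') IS NULL;            -- t
    SELECT pg_input_is_valid('no_such_db', 'regdatabase');  -- f, soft error
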
diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out
index 6cf828ca8d0..6509fda77a9 100644
--- a/src/test/regress/expected/rules.out
+++ b/src/test/regress/expected/rules.out
@@ -1340,6 +1340,10 @@ pg_cursors| SELECT name,
is_scrollable,
creation_time
FROM pg_cursor() c(name, statement, is_holdable, is_binary, is_scrollable, creation_time);
+pg_dsm_registry_allocations| SELECT name,
+ type,
+ size
+ FROM pg_get_dsm_registry_allocations() pg_get_dsm_registry_allocations(name, type, size);
pg_file_settings| SELECT sourcefile,
sourceline,
seqno,
@@ -2175,13 +2179,14 @@ pg_stat_subscription_stats| SELECT ss.subid,
ss.confl_insert_exists,
ss.confl_update_origin_differs,
ss.confl_update_exists,
+ ss.confl_update_deleted,
ss.confl_update_missing,
ss.confl_delete_origin_differs,
ss.confl_delete_missing,
ss.confl_multiple_unique_conflicts,
ss.stats_reset
FROM pg_subscription s,
- LATERAL pg_stat_get_subscription_stats(s.oid) ss(subid, apply_error_count, sync_error_count, confl_insert_exists, confl_update_origin_differs, confl_update_exists, confl_update_missing, confl_delete_origin_differs, confl_delete_missing, confl_multiple_unique_conflicts, stats_reset);
+ LATERAL pg_stat_get_subscription_stats(s.oid) ss(subid, apply_error_count, sync_error_count, confl_insert_exists, confl_update_origin_differs, confl_update_exists, confl_update_deleted, confl_update_missing, confl_delete_origin_differs, confl_delete_missing, confl_multiple_unique_conflicts, stats_reset);
pg_stat_sys_indexes| SELECT relid,
indexrelid,
schemaname,
diff --git a/src/test/regress/expected/stats.out b/src/test/regress/expected/stats.out
index 776f1ad0e53..605f5070376 100644
--- a/src/test/regress/expected/stats.out
+++ b/src/test/regress/expected/stats.out
@@ -926,8 +926,19 @@ DROP TABLE test_stats_temp;
-- Checkpoint twice: The checkpointer reports stats after reporting completion
-- of the checkpoint. But after a second checkpoint we'll see at least the
-- results of the first.
-CHECKPOINT;
-CHECKPOINT;
+--
+-- While at it, test checkpoint options. Note that we don't test MODE SPREAD
+-- because it would prolong the test.
+CHECKPOINT (WRONG);
+ERROR: unrecognized CHECKPOINT option "wrong"
+LINE 1: CHECKPOINT (WRONG);
+ ^
+CHECKPOINT (MODE WRONG);
+ERROR: unrecognized MODE option "wrong"
+LINE 1: CHECKPOINT (MODE WRONG);
+ ^
+CHECKPOINT (MODE FAST, FLUSH_UNLOGGED FALSE);
+CHECKPOINT (FLUSH_UNLOGGED);
SELECT num_requested > :rqst_ckpts_before FROM pg_stat_checkpointer;
?column?
----------
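
CHECKPOINT now takes a parenthesized option list: MODE (FAST or SPREAD) and FLUSH_UNLOGGED, where a bare FLUSH_UNLOGGED defaults to TRUE following the usual option-list convention. The accepted forms exercised above, plus the spread mode the test skips for runtime reasons:

    CHECKPOINT (MODE FAST);
    CHECKPOINT (FLUSH_UNLOGGED);                   -- same as FLUSH_UNLOGGED TRUE
    CHECKPOINT (MODE FAST, FLUSH_UNLOGGED FALSE);
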
diff --git a/src/test/regress/expected/strings.out b/src/test/regress/expected/strings.out
index 788844abd20..1bfd33de3f3 100644
--- a/src/test/regress/expected/strings.out
+++ b/src/test/regress/expected/strings.out
@@ -236,6 +236,12 @@ SELECT E'De\\678dBeEf'::bytea;
ERROR: invalid input syntax for type bytea
LINE 1: SELECT E'De\\678dBeEf'::bytea;
^
+SELECT E'DeAd\\\\BeEf'::bytea;
+ bytea
+----------------------
+ \x446541645c42654566
+(1 row)
+
SELECT reverse(''::bytea);
reverse
---------
@@ -291,6 +297,12 @@ SELECT E'De\\123dBeEf'::bytea;
DeSdBeEf
(1 row)
+SELECT E'DeAd\\\\BeEf'::bytea;
+ bytea
+------------
+ DeAd\\BeEf
+(1 row)
+
-- Test non-error-throwing API too
SELECT pg_input_is_valid(E'\\xDeAdBeE', 'bytea');
pg_input_is_valid
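
The added E'DeAd\\\\BeEf' cases pin down backslash handling: the string literal collapses the four backslashes to two, and bytea escape input then reads \\ as a single backslash byte (0x5c), so the stored value is 44 65 41 64 5c 42 65 45 66, shown as hex in the first hunk and re-escaped as DeAd\\BeEf under escape output in the second. Step by step:

    SET bytea_output = 'hex';
    SELECT E'DeAd\\\\BeEf'::bytea;   -- \x446541645c42654566
    SET bytea_output = 'escape';
    SELECT E'DeAd\\\\BeEf'::bytea;   -- DeAd\\BeEf
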
diff --git a/src/test/regress/expected/subscription.out b/src/test/regress/expected/subscription.out
index 1443e1d9292..a98c97f7616 100644
--- a/src/test/regress/expected/subscription.out
+++ b/src/test/regress/expected/subscription.out
@@ -116,18 +116,18 @@ CREATE SUBSCRIPTION regress_testsub4 CONNECTION 'dbname=regress_doesnotexist' PU
WARNING: subscription was created, but is not connected
HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
\dRs+ regress_testsub4
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
-------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub4 | regress_subscription_user | f | {testpub} | f | parallel | d | f | none | t | f | f | off | dbname=regress_doesnotexist | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------
+ regress_testsub4 | regress_subscription_user | f | {testpub} | f | parallel | d | f | none | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000
(1 row)
ALTER SUBSCRIPTION regress_testsub4 SET (origin = any);
\dRs+ regress_testsub4
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
-------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub4 | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------
+ regress_testsub4 | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000
(1 row)
DROP SUBSCRIPTION regress_testsub3;
@@ -145,10 +145,10 @@ ALTER SUBSCRIPTION regress_testsub CONNECTION 'foobar';
ERROR: invalid connection string syntax: missing "=" after "foobar" in connection info string
\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------
+ regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000
(1 row)
ALTER SUBSCRIPTION regress_testsub SET PUBLICATION testpub2, testpub3 WITH (refresh = false);
@@ -157,10 +157,10 @@ ALTER SUBSCRIPTION regress_testsub SET (slot_name = 'newname');
ALTER SUBSCRIPTION regress_testsub SET (password_required = false);
ALTER SUBSCRIPTION regress_testsub SET (run_as_owner = true);
\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | f | t | f | off | dbname=regress_doesnotexist2 | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+-----------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+------------------------------+------------
+ regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | f | t | f | f | off | dbname=regress_doesnotexist2 | 0/00000000
(1 row)
ALTER SUBSCRIPTION regress_testsub SET (password_required = true);
@@ -176,10 +176,10 @@ ERROR: unrecognized subscription parameter: "create_slot"
-- ok
ALTER SUBSCRIPTION regress_testsub SKIP (lsn = '0/12345');
\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist2 | 0/12345
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+-----------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+------------------------------+------------
+ regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist2 | 0/00012345
(1 row)
-- ok - with lsn = NONE
@@ -188,10 +188,10 @@ ALTER SUBSCRIPTION regress_testsub SKIP (lsn = NONE);
ALTER SUBSCRIPTION regress_testsub SKIP (lsn = '0/0');
ERROR: invalid WAL location (LSN): 0/0
\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist2 | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+-----------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+------------------------------+------------
+ regress_testsub | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist2 | 0/00000000
(1 row)
BEGIN;
@@ -223,10 +223,10 @@ ALTER SUBSCRIPTION regress_testsub_foo SET (synchronous_commit = foobar);
ERROR: invalid value for parameter "synchronous_commit": "foobar"
HINT: Available values: local, remote_write, remote_apply, on, off.
\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
----------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+------------------------------+----------
- regress_testsub_foo | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | local | dbname=regress_doesnotexist2 | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+---------------------+---------------------------+---------+---------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+------------------------------+------------
+ regress_testsub_foo | regress_subscription_user | f | {testpub2,testpub3} | f | parallel | d | f | any | t | f | f | f | local | dbname=regress_doesnotexist2 | 0/00000000
(1 row)
-- rename back to keep the rest simple
@@ -255,19 +255,19 @@ CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUB
WARNING: subscription was created, but is not connected
HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | t | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------
+ regress_testsub | regress_subscription_user | f | {testpub} | t | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000
(1 row)
ALTER SUBSCRIPTION regress_testsub SET (binary = false);
ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------
+ regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000
(1 row)
DROP SUBSCRIPTION regress_testsub;
@@ -279,27 +279,27 @@ CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUB
WARNING: subscription was created, but is not connected
HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | on | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------
+ regress_testsub | regress_subscription_user | f | {testpub} | f | on | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000
(1 row)
ALTER SUBSCRIPTION regress_testsub SET (streaming = parallel);
\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------
+ regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000
(1 row)
ALTER SUBSCRIPTION regress_testsub SET (streaming = false);
ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------
+ regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000
(1 row)
-- fail - publication already exists
@@ -314,10 +314,10 @@ ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub2 WITH (refr
ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub2 WITH (refresh = false);
ERROR: publication "testpub1" is already in subscription "regress_testsub"
\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-----------------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub,testpub1,testpub2} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+-----------------+---------------------------+---------+-----------------------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------
+ regress_testsub | regress_subscription_user | f | {testpub,testpub1,testpub2} | f | off | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000
(1 row)
-- fail - publication used more than once
@@ -332,10 +332,10 @@ ERROR: publication "testpub3" is not in subscription "regress_testsub"
-- ok - delete publications
ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub1, testpub2 WITH (refresh = false);
\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------
+ regress_testsub | regress_subscription_user | f | {testpub} | f | off | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000
(1 row)
DROP SUBSCRIPTION regress_testsub;
@@ -371,19 +371,19 @@ CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUB
WARNING: subscription was created, but is not connected
HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | p | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------
+ regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | p | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000
(1 row)
-- we can alter streaming when two_phase enabled
ALTER SUBSCRIPTION regress_testsub SET (streaming = true);
\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | on | p | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------
+ regress_testsub | regress_subscription_user | f | {testpub} | f | on | p | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000
(1 row)
ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
@@ -393,10 +393,10 @@ CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUB
WARNING: subscription was created, but is not connected
HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | on | p | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------
+ regress_testsub | regress_subscription_user | f | {testpub} | f | on | p | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000
(1 row)
ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
@@ -409,18 +409,34 @@ CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUB
WARNING: subscription was created, but is not connected
HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------
+ regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000
(1 row)
ALTER SUBSCRIPTION regress_testsub SET (disable_on_error = true);
\dRs+
- List of subscriptions
- Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Synchronous commit | Conninfo | Skip LSN
------------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+-----------------------------+----------
- regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | t | any | t | f | f | off | dbname=regress_doesnotexist | 0/0
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------
+ regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | t | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000
+(1 row)
+
+ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
+DROP SUBSCRIPTION regress_testsub;
+-- fail - retain_dead_tuples must be boolean
+CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, retain_dead_tuples = foo);
+ERROR: retain_dead_tuples requires a Boolean value
+-- ok
+CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, retain_dead_tuples = false);
+WARNING: subscription was created, but is not connected
+HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
+\dRs+
+ List of subscriptions
+ Name | Owner | Enabled | Publication | Binary | Streaming | Two-phase commit | Disable on error | Origin | Password required | Run as owner? | Failover | Retain dead tuples | Synchronous commit | Conninfo | Skip LSN
+-----------------+---------------------------+---------+-------------+--------+-----------+------------------+------------------+--------+-------------------+---------------+----------+--------------------+--------------------+-----------------------------+------------
+ regress_testsub | regress_subscription_user | f | {testpub} | f | parallel | d | f | any | t | f | f | f | off | dbname=regress_doesnotexist | 0/00000000
(1 row)
ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
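
retain_dead_tuples is the new boolean subscription option behind the "Retain dead tuples" column added to every \dRs+ listing above; as with the other boolean options, a non-boolean value is rejected at parse time. A minimal sketch (connection string and names are illustrative):

    CREATE SUBSCRIPTION demo_sub
      CONNECTION 'dbname=demo_src'
      PUBLICATION demo_pub
      WITH (connect = false, retain_dead_tuples = true);
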
diff --git a/src/test/regress/expected/subselect.out b/src/test/regress/expected/subselect.out
index 40d8056fcea..18fed63e738 100644
--- a/src/test/regress/expected/subselect.out
+++ b/src/test/regress/expected/subselect.out
@@ -2127,30 +2127,30 @@ explain (verbose, costs off)
select ss2.* from
int8_tbl t1 left join
(int8_tbl t2 left join
- (select coalesce(q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 inner join
+ (select coalesce(q1, q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 inner join
lateral (select ss1.x as y, * from int8_tbl t4) ss2 on t2.q2 = ss2.q1)
on t1.q2 = ss2.q1
order by 1, 2, 3;
- QUERY PLAN
-----------------------------------------------------------------
+ QUERY PLAN
+-----------------------------------------------------------------------
Sort
- Output: (COALESCE(t3.q1)), t4.q1, t4.q2
- Sort Key: (COALESCE(t3.q1)), t4.q1, t4.q2
+ Output: (COALESCE(t3.q1, t3.q1)), t4.q1, t4.q2
+ Sort Key: (COALESCE(t3.q1, t3.q1)), t4.q1, t4.q2
-> Hash Right Join
- Output: (COALESCE(t3.q1)), t4.q1, t4.q2
+ Output: (COALESCE(t3.q1, t3.q1)), t4.q1, t4.q2
Hash Cond: (t4.q1 = t1.q2)
-> Hash Join
- Output: (COALESCE(t3.q1)), t4.q1, t4.q2
+ Output: (COALESCE(t3.q1, t3.q1)), t4.q1, t4.q2
Hash Cond: (t2.q2 = t4.q1)
-> Hash Left Join
- Output: t2.q2, (COALESCE(t3.q1))
+ Output: t2.q2, (COALESCE(t3.q1, t3.q1))
Hash Cond: (t2.q1 = t3.q2)
-> Seq Scan on public.int8_tbl t2
Output: t2.q1, t2.q2
-> Hash
- Output: t3.q2, (COALESCE(t3.q1))
+ Output: t3.q2, (COALESCE(t3.q1, t3.q1))
-> Seq Scan on public.int8_tbl t3
- Output: t3.q2, COALESCE(t3.q1)
+ Output: t3.q2, COALESCE(t3.q1, t3.q1)
-> Hash
Output: t4.q1, t4.q2
-> Seq Scan on public.int8_tbl t4
@@ -2164,7 +2164,7 @@ order by 1, 2, 3;
select ss2.* from
int8_tbl t1 left join
(int8_tbl t2 left join
- (select coalesce(q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 inner join
+ (select coalesce(q1, q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 inner join
lateral (select ss1.x as y, * from int8_tbl t4) ss2 on t2.q2 = ss2.q1)
on t1.q2 = ss2.q1
order by 1, 2, 3;
@@ -2201,32 +2201,32 @@ explain (verbose, costs off)
select ss2.* from
int8_tbl t1 left join
(int8_tbl t2 left join
- (select coalesce(q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 left join
+ (select coalesce(q1, q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 left join
lateral (select ss1.x as y, * from int8_tbl t4) ss2 on t2.q2 = ss2.q1)
on t1.q2 = ss2.q1
order by 1, 2, 3;
- QUERY PLAN
-----------------------------------------------------------------
+ QUERY PLAN
+-----------------------------------------------------------------------
Sort
- Output: ((COALESCE(t3.q1))), t4.q1, t4.q2
- Sort Key: ((COALESCE(t3.q1))), t4.q1, t4.q2
+ Output: ((COALESCE(t3.q1, t3.q1))), t4.q1, t4.q2
+ Sort Key: ((COALESCE(t3.q1, t3.q1))), t4.q1, t4.q2
-> Hash Right Join
- Output: ((COALESCE(t3.q1))), t4.q1, t4.q2
+ Output: ((COALESCE(t3.q1, t3.q1))), t4.q1, t4.q2
Hash Cond: (t4.q1 = t1.q2)
-> Nested Loop
- Output: t4.q1, t4.q2, ((COALESCE(t3.q1)))
+ Output: t4.q1, t4.q2, ((COALESCE(t3.q1, t3.q1)))
Join Filter: (t2.q2 = t4.q1)
-> Hash Left Join
- Output: t2.q2, (COALESCE(t3.q1))
+ Output: t2.q2, (COALESCE(t3.q1, t3.q1))
Hash Cond: (t2.q1 = t3.q2)
-> Seq Scan on public.int8_tbl t2
Output: t2.q1, t2.q2
-> Hash
- Output: t3.q2, (COALESCE(t3.q1))
+ Output: t3.q2, (COALESCE(t3.q1, t3.q1))
-> Seq Scan on public.int8_tbl t3
- Output: t3.q2, COALESCE(t3.q1)
+ Output: t3.q2, COALESCE(t3.q1, t3.q1)
-> Seq Scan on public.int8_tbl t4
- Output: t4.q1, t4.q2, (COALESCE(t3.q1))
+ Output: t4.q1, t4.q2, (COALESCE(t3.q1, t3.q1))
-> Hash
Output: t1.q2
-> Seq Scan on public.int8_tbl t1
@@ -2236,7 +2236,7 @@ order by 1, 2, 3;
select ss2.* from
int8_tbl t1 left join
(int8_tbl t2 left join
- (select coalesce(q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 left join
+ (select coalesce(q1, q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 left join
lateral (select ss1.x as y, * from int8_tbl t4) ss2 on t2.q2 = ss2.q1)
on t1.q2 = ss2.q1
order by 1, 2, 3;
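
The switch from coalesce(q1) to coalesce(q1, q1) in these queries keeps an irreducible COALESCE in the subquery targetlist: a single-argument COALESCE is presumably now simplified to its argument, which would erase the expression these plans are meant to pin down. A sketch of the assumed difference:

    EXPLAIN (VERBOSE, COSTS OFF) SELECT coalesce(q1) FROM int8_tbl;      -- projects q1 directly
    EXPLAIN (VERBOSE, COSTS OFF) SELECT coalesce(q1, q1) FROM int8_tbl;  -- keeps COALESCE(q1, q1)
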
diff --git a/src/test/regress/expected/triggers.out b/src/test/regress/expected/triggers.out
index 2bf0e77d61e..872b9100e1a 100644
--- a/src/test/regress/expected/triggers.out
+++ b/src/test/regress/expected/triggers.out
@@ -2280,6 +2280,27 @@ select * from parted;
drop table parted;
drop function parted_trigfunc();
--
+-- Constraint triggers
+--
+create constraint trigger crtr
+ after insert on foo not valid
+ for each row execute procedure foo ();
+ERROR: constraint triggers cannot be marked NOT VALID
+LINE 2: after insert on foo not valid
+ ^
+create constraint trigger crtr
+ after insert on foo no inherit
+ for each row execute procedure foo ();
+ERROR: constraint triggers cannot be marked NO INHERIT
+LINE 2: after insert on foo no inherit
+ ^
+create constraint trigger crtr
+ after insert on foo not enforced
+ for each row execute procedure foo ();
+ERROR: constraint triggers cannot be marked NOT ENFORCED
+LINE 2: after insert on foo not enforced
+ ^
+--
-- Constraint triggers and partitioned tables
create table parted_constr_ancestor (a int, b text)
partition by range (b);
@@ -2294,7 +2315,7 @@ create constraint trigger parted_trig after insert on parted_constr_ancestor
deferrable
for each row execute procedure trigger_notice_ab();
create constraint trigger parted_trig_two after insert on parted_constr
- deferrable initially deferred
+ deferrable initially deferred enforced
for each row when (bark(new.b) AND new.a % 2 = 1)
execute procedure trigger_notice_ab();
-- The immediate constraint is fired immediately; the WHEN clause of the
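
The new cases above establish that a constraint trigger rejects NOT VALID, NO INHERIT, and NOT ENFORCED, while the parted_trig_two change confirms that an explicit ENFORCED is accepted alongside the deferrability clauses. A minimal accepted form (table and function names are illustrative):

    CREATE CONSTRAINT TRIGGER demo_ct
      AFTER INSERT ON demo_table
      DEFERRABLE INITIALLY DEFERRED ENFORCED
      FOR EACH ROW EXECUTE FUNCTION demo_trigfunc();
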
diff --git a/src/test/regress/expected/type_sanity.out b/src/test/regress/expected/type_sanity.out
index dd0c52ab08b..943e56506bf 100644
--- a/src/test/regress/expected/type_sanity.out
+++ b/src/test/regress/expected/type_sanity.out
@@ -711,6 +711,7 @@ CREATE TABLE tab_core_types AS SELECT
'regtype'::regtype type,
'pg_monitor'::regrole,
'pg_class'::regclass::oid,
+ 'template1'::regdatabase,
'(1,1)'::tid, '2'::xid, '3'::cid,
'10:20:10,14,15'::txid_snapshot,
'10:20:10,14,15'::pg_snapshot,
diff --git a/src/test/regress/expected/without_overlaps.out b/src/test/regress/expected/without_overlaps.out
index ea607bed0a4..f3144bdc39c 100644
--- a/src/test/regress/expected/without_overlaps.out
+++ b/src/test/regress/expected/without_overlaps.out
@@ -1426,7 +1426,7 @@ CREATE TABLE temporal_fk_rng2rng (
CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, valid_at)
REFERENCES temporal_rng (id, valid_at)
);
-ERROR: foreign key must use PERIOD when referencing a primary using WITHOUT OVERLAPS
+ERROR: foreign key must use PERIOD when referencing a primary key using WITHOUT OVERLAPS
-- (parent_id, valid_at) REFERENCES (id, PERIOD valid_at)
-- FOREIGN KEY part should specify PERIOD
CREATE TABLE temporal_fk_rng2rng (
@@ -1900,7 +1900,7 @@ CREATE TABLE temporal_fk_mltrng2mltrng (
CONSTRAINT temporal_fk_mltrng2mltrng_fk FOREIGN KEY (parent_id, valid_at)
REFERENCES temporal_mltrng (id, valid_at)
);
-ERROR: foreign key must use PERIOD when referencing a primary using WITHOUT OVERLAPS
+ERROR: foreign key must use PERIOD when referencing a primary key using WITHOUT OVERLAPS
-- (parent_id, valid_at) REFERENCES (id, PERIOD valid_at)
-- FOREIGN KEY part should specify PERIOD
CREATE TABLE temporal_fk_mltrng2mltrng (
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index a424be2a6bf..fbffc67ae60 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -123,7 +123,7 @@ test: plancache limit plpgsql copy2 temp domain rangefuncs prepare conversion tr
# The stats test resets stats, so nothing else needing stats access can be in
# this group.
# ----------
-test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression memoize stats predicate numa
+test: partition_join partition_prune reloptions hash_part indexing partition_aggregate partition_info tuplesort explain compression compression_lz4 memoize stats predicate numa
# event_trigger depends on create_am and cannot run concurrently with
# any test that runs DDL
diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql
index 5ce9d1e429f..fc6e36d0e78 100644
--- a/src/test/regress/sql/alter_table.sql
+++ b/src/test/regress/sql/alter_table.sql
@@ -3069,6 +3069,23 @@ drop table attbl, atref;
/* End test case for bug #17409 */
+/* Test case for bug #18970 */
+
+create table attbl(a int);
+create table atref(b attbl check ((b).a is not null));
+alter table attbl alter column a type numeric; -- someday this should work
+alter table atref drop constraint atref_b_check;
+
+create statistics atref_stat on ((b).a is not null) from atref;
+alter table attbl alter column a type numeric; -- someday this should work
+drop statistics atref_stat;
+
+create index atref_idx on atref (((b).a));
+alter table attbl alter column a type numeric; -- someday this should work
+drop table attbl, atref;
+
+/* End test case for bug #18970 */
+
-- Test that ALTER TABLE rewrite preserves a clustered index
-- for normal indexes and indexes on constraints.
create table alttype_cluster (a int);
diff --git a/src/test/regress/sql/btree_index.sql b/src/test/regress/sql/btree_index.sql
index 68c61dbc7d1..6aaaa386abc 100644
--- a/src/test/regress/sql/btree_index.sql
+++ b/src/test/regress/sql/btree_index.sql
@@ -143,38 +143,83 @@ SELECT proname, proargtypes, pronamespace
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC LIMIT 1;
--
--- Add coverage for RowCompare quals whose rhs row has a NULL that ends scan
+-- Forwards scan RowCompare qual whose row arg has a NULL that affects our
+-- initial positioning strategy
--
explain (costs off)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
- WHERE proname = 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ WHERE (proname, proargtypes) >= ('abs', NULL) AND proname <= 'abs'
ORDER BY proname, proargtypes, pronamespace;
SELECT proname, proargtypes, pronamespace
FROM pg_proc
- WHERE proname = 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ WHERE (proname, proargtypes) >= ('abs', NULL) AND proname <= 'abs'
ORDER BY proname, proargtypes, pronamespace;
--
--- Add coverage for backwards scan RowCompare quals whose rhs row has a NULL
--- that ends scan
+-- Forwards scan RowCompare quals whose row arg has a NULL that ends scan
--
explain (costs off)
SELECT proname, proargtypes, pronamespace
FROM pg_proc
- WHERE proname = 'abs' AND (proname, proargtypes) > ('abs', NULL)
+ WHERE proname >= 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ORDER BY proname, proargtypes, pronamespace;
+
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname >= 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ORDER BY proname, proargtypes, pronamespace;
+
+--
+-- Backwards scan RowCompare qual whose row arg has a NULL that affects our
+-- initial positioning strategy
+--
+explain (costs off)
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname >= 'abs' AND (proname, proargtypes) <= ('abs', NULL)
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
SELECT proname, proargtypes, pronamespace
FROM pg_proc
- WHERE proname = 'abs' AND (proname, proargtypes) > ('abs', NULL)
+ WHERE proname >= 'abs' AND (proname, proargtypes) <= ('abs', NULL)
ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
--
--- Add coverage for recheck of > key following array advancement on previous
--- (left sibling) page that used a high key whose attribute value corresponding
--- to the > key was -inf (due to being truncated when the high key was created).
+-- Backwards scan RowCompare qual whose row arg has a NULL that ends scan
+--
+explain (costs off)
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE (proname, proargtypes) > ('abs', NULL) AND proname <= 'abs'
+ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
+
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE (proname, proargtypes) > ('abs', NULL) AND proname <= 'abs'
+ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
+
+-- Makes B-Tree preprocessing deal with unmarking redundant keys that were
+-- initially marked required (test case relies on current row compare
+-- preprocessing limitations)
+explain (costs off)
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname = 'zzzzzz' AND (proname, proargtypes) > ('abs', NULL)
+ AND pronamespace IN (1, 2, 3) AND proargtypes IN ('26 23', '5077')
+ORDER BY proname, proargtypes, pronamespace;
+
+SELECT proname, proargtypes, pronamespace
+ FROM pg_proc
+ WHERE proname = 'zzzzzz' AND (proname, proargtypes) > ('abs', NULL)
+ AND pronamespace IN (1, 2, 3) AND proargtypes IN ('26 23', '5077')
+ORDER BY proname, proargtypes, pronamespace;
+
+--
+-- Performs a recheck of > key following array advancement on previous (left
+-- sibling) page that used a high key whose attribute value corresponding to
+-- the > key was -inf (due to being truncated when the high key was created).
--
-- XXX This relies on the assumption that tenk1_thous_tenthous has a truncated
-- high key "(183, -inf)" on the first page that we'll scan. The test will only
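The positioning cases above hinge on SQL row-comparison semantics: a row comparison is resolved column by column, and a NULL in a trailing column only matters once every leading column compares equal. A minimal sketch of that behavior (illustration only, not part of the patch):

SELECT (0, 2) < (1, NULL);  -- true: decided by the first column alone
SELECT (1, 2) < (1, NULL);  -- NULL: first columns tie, the NULL decides nothing

In a WHERE clause that NULL result filters the row, which is why a NULL in the row argument can either merely change the scan's initial positioning or end the scan outright.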
diff --git a/src/test/regress/sql/compression.sql b/src/test/regress/sql/compression.sql
index 490595fcfb2..ce5ea37a660 100644
--- a/src/test/regress/sql/compression.sql
+++ b/src/test/regress/sql/compression.sql
@@ -1,3 +1,8 @@
+-- Default set of tests for TOAST compression, independent of the
+-- compression methods supported by the build.
+
+CREATE SCHEMA pglz;
+SET search_path TO pglz, public;
\set HIDE_TOAST_COMPRESSION false
-- ensure we get stable results regardless of installation's default
@@ -8,53 +13,27 @@ CREATE TABLE cmdata(f1 text COMPRESSION pglz);
CREATE INDEX idx ON cmdata(f1);
INSERT INTO cmdata VALUES(repeat('1234567890', 1000));
\d+ cmdata
-CREATE TABLE cmdata1(f1 TEXT COMPRESSION lz4);
-INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004));
-\d+ cmdata1
-- verify stored compression method in the data
SELECT pg_column_compression(f1) FROM cmdata;
-SELECT pg_column_compression(f1) FROM cmdata1;
-- decompress data slice
SELECT SUBSTR(f1, 200, 5) FROM cmdata;
-SELECT SUBSTR(f1, 2000, 50) FROM cmdata1;
-- copy with table creation
SELECT * INTO cmmove1 FROM cmdata;
\d+ cmmove1
SELECT pg_column_compression(f1) FROM cmmove1;
--- copy to existing table
-CREATE TABLE cmmove3(f1 text COMPRESSION pglz);
-INSERT INTO cmmove3 SELECT * FROM cmdata;
-INSERT INTO cmmove3 SELECT * FROM cmdata1;
-SELECT pg_column_compression(f1) FROM cmmove3;
-
--- test LIKE INCLUDING COMPRESSION
-CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION);
-\d+ cmdata2
-DROP TABLE cmdata2;
-
-- try setting compression for incompressible data type
CREATE TABLE cmdata2 (f1 int COMPRESSION pglz);
--- update using datum from different table
-CREATE TABLE cmmove2(f1 text COMPRESSION pglz);
-INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004));
-SELECT pg_column_compression(f1) FROM cmmove2;
-UPDATE cmmove2 SET f1 = cmdata1.f1 FROM cmdata1;
-SELECT pg_column_compression(f1) FROM cmmove2;
-
-- test externally stored compressed data
CREATE OR REPLACE FUNCTION large_val() RETURNS TEXT LANGUAGE SQL AS
'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g';
CREATE TABLE cmdata2 (f1 text COMPRESSION pglz);
INSERT INTO cmdata2 SELECT large_val() || repeat('a', 4000);
SELECT pg_column_compression(f1) FROM cmdata2;
-INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000);
-SELECT pg_column_compression(f1) FROM cmdata1;
-SELECT SUBSTR(f1, 200, 5) FROM cmdata1;
SELECT SUBSTR(f1, 200, 5) FROM cmdata2;
DROP TABLE cmdata2;
@@ -76,76 +55,31 @@ ALTER TABLE cmdata2 ALTER COLUMN f1 SET STORAGE plain;
INSERT INTO cmdata2 VALUES (repeat('123456789', 800));
SELECT pg_column_compression(f1) FROM cmdata2;
--- test compression with materialized view
-CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1;
-\d+ compressmv
-SELECT pg_column_compression(f1) FROM cmdata1;
-SELECT pg_column_compression(x) FROM compressmv;
-
--- test compression with partition
-CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1);
-CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0);
-CREATE TABLE cmpart2(f1 text COMPRESSION pglz);
-
-ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-SELECT pg_column_compression(f1) FROM cmpart1;
-SELECT pg_column_compression(f1) FROM cmpart2;
-
-- test compression with inheritance
-CREATE TABLE cminh() INHERITS(cmdata, cmdata1); -- error
-CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata); -- error
CREATE TABLE cmdata3(f1 text);
CREATE TABLE cminh() INHERITS (cmdata, cmdata3);
-- test default_toast_compression GUC
+-- suppress machine-dependent details
+\set VERBOSITY terse
SET default_toast_compression = '';
SET default_toast_compression = 'I do not exist compression';
-SET default_toast_compression = 'lz4';
SET default_toast_compression = 'pglz';
-
--- test alter compression method
-ALTER TABLE cmdata ALTER COLUMN f1 SET COMPRESSION lz4;
-INSERT INTO cmdata VALUES (repeat('123456789', 4004));
-\d+ cmdata
-SELECT pg_column_compression(f1) FROM cmdata;
+\set VERBOSITY default
ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION default;
\d+ cmdata2
--- test alter compression method for materialized views
-ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4;
-\d+ compressmv
-
--- test alter compression method for partitioned tables
-ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz;
-ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4;
-
--- new data should be compressed with the current compression method
-INSERT INTO cmpart VALUES (repeat('123456789', 1004));
-INSERT INTO cmpart VALUES (repeat('123456789', 4004));
-SELECT pg_column_compression(f1) FROM cmpart1;
-SELECT pg_column_compression(f1) FROM cmpart2;
+DROP TABLE cmdata2;
-- VACUUM FULL does not recompress
SELECT pg_column_compression(f1) FROM cmdata;
VACUUM FULL cmdata;
SELECT pg_column_compression(f1) FROM cmdata;
--- test expression index
-DROP TABLE cmdata2;
-CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4);
-CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2));
-INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM
-generate_series(1, 50) g), VERSION());
-
-- check data is ok
SELECT length(f1) FROM cmdata;
-SELECT length(f1) FROM cmdata1;
SELECT length(f1) FROM cmmove1;
-SELECT length(f1) FROM cmmove2;
-SELECT length(f1) FROM cmmove3;
CREATE TABLE badcompresstbl (a text COMPRESSION I_Do_Not_Exist_Compression); -- fails
CREATE TABLE badcompresstbl (a text);
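For context, pg_column_compression() is how these scripts observe which method actually compressed a stored value; it reports on the datum itself, not the column's current setting. A small sketch under the same pglz default the script sets (table name is illustrative):

SET default_toast_compression = 'pglz';
CREATE TABLE cmp_demo (f1 text);                  -- picks up the GUC default
INSERT INTO cmp_demo VALUES (repeat('x', 10000)); -- long enough to be compressed
SELECT pg_column_compression(f1) FROM cmp_demo;   -- reports pglz
ALTER TABLE cmp_demo ALTER COLUMN f1 SET COMPRESSION default;
SELECT pg_column_compression(f1) FROM cmp_demo;   -- still pglz: no rewrite occurs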
diff --git a/src/test/regress/sql/compression_lz4.sql b/src/test/regress/sql/compression_lz4.sql
new file mode 100644
index 00000000000..3849f8618de
--- /dev/null
+++ b/src/test/regress/sql/compression_lz4.sql
@@ -0,0 +1,129 @@
+-- Tests for TOAST compression with lz4
+
+SELECT NOT(enumvals @> '{lz4}') AS skip_test FROM pg_settings WHERE
+ name = 'default_toast_compression' \gset
+\if :skip_test
+ \echo '*** skipping TOAST tests with lz4 (not supported) ***'
+ \quit
+\endif
+
+CREATE SCHEMA lz4;
+SET search_path TO lz4, public;
+
+\set HIDE_TOAST_COMPRESSION false
+
+-- Ensure we get stable results regardless of the installation's default.
+-- We rely on this GUC value for a few tests.
+SET default_toast_compression = 'pglz';
+
+-- test creating table with compression method
+CREATE TABLE cmdata_pglz(f1 text COMPRESSION pglz);
+CREATE INDEX idx ON cmdata_pglz(f1);
+INSERT INTO cmdata_pglz VALUES(repeat('1234567890', 1000));
+\d+ cmdata_pglz
+CREATE TABLE cmdata_lz4(f1 TEXT COMPRESSION lz4);
+INSERT INTO cmdata_lz4 VALUES(repeat('1234567890', 1004));
+\d+ cmdata_lz4
+
+-- verify stored compression method in the data
+SELECT pg_column_compression(f1) FROM cmdata_lz4;
+
+-- decompress data slice
+SELECT SUBSTR(f1, 200, 5) FROM cmdata_pglz;
+SELECT SUBSTR(f1, 2000, 50) FROM cmdata_lz4;
+
+-- copy with table creation
+SELECT * INTO cmmove1 FROM cmdata_lz4;
+\d+ cmmove1
+SELECT pg_column_compression(f1) FROM cmmove1;
+
+-- test LIKE INCLUDING COMPRESSION. The GUC default_toast_compression
+-- has no effect; the compression method comes from the table being copied.
+CREATE TABLE cmdata2 (LIKE cmdata_lz4 INCLUDING COMPRESSION);
+\d+ cmdata2
+DROP TABLE cmdata2;
+
+-- copy to existing table
+CREATE TABLE cmmove3(f1 text COMPRESSION pglz);
+INSERT INTO cmmove3 SELECT * FROM cmdata_pglz;
+INSERT INTO cmmove3 SELECT * FROM cmdata_lz4;
+SELECT pg_column_compression(f1) FROM cmmove3;
+
+-- update using a datum from a different table with LZ4 data.
+CREATE TABLE cmmove2(f1 text COMPRESSION pglz);
+INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004));
+SELECT pg_column_compression(f1) FROM cmmove2;
+UPDATE cmmove2 SET f1 = cmdata_lz4.f1 FROM cmdata_lz4;
+SELECT pg_column_compression(f1) FROM cmmove2;
+
+-- test externally stored compressed data
+CREATE OR REPLACE FUNCTION large_val_lz4() RETURNS TEXT LANGUAGE SQL AS
+'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g';
+CREATE TABLE cmdata2 (f1 text COMPRESSION lz4);
+INSERT INTO cmdata2 SELECT large_val_lz4() || repeat('a', 4000);
+SELECT pg_column_compression(f1) FROM cmdata2;
+SELECT SUBSTR(f1, 200, 5) FROM cmdata2;
+DROP TABLE cmdata2;
+DROP FUNCTION large_val_lz4;
+
+-- test compression with materialized view
+CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata_lz4;
+\d+ compressmv
+SELECT pg_column_compression(f1) FROM cmdata_lz4;
+SELECT pg_column_compression(x) FROM compressmv;
+
+-- test compression with partition
+CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1);
+CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0);
+CREATE TABLE cmpart2(f1 text COMPRESSION pglz);
+
+ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
+INSERT INTO cmpart VALUES (repeat('123456789', 1004));
+INSERT INTO cmpart VALUES (repeat('123456789', 4004));
+SELECT pg_column_compression(f1) FROM cmpart1;
+SELECT pg_column_compression(f1) FROM cmpart2;
+
+-- test compression with inheritance
+CREATE TABLE cminh() INHERITS(cmdata_pglz, cmdata_lz4); -- error
+CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata_pglz); -- error
+CREATE TABLE cmdata3(f1 text);
+CREATE TABLE cminh() INHERITS (cmdata_pglz, cmdata3);
+
+-- test default_toast_compression GUC
+SET default_toast_compression = 'lz4';
+
+-- test alter compression method
+ALTER TABLE cmdata_pglz ALTER COLUMN f1 SET COMPRESSION lz4;
+INSERT INTO cmdata_pglz VALUES (repeat('123456789', 4004));
+\d+ cmdata_pglz
+SELECT pg_column_compression(f1) FROM cmdata_pglz;
+ALTER TABLE cmdata_pglz ALTER COLUMN f1 SET COMPRESSION pglz;
+
+-- test alter compression method for materialized views
+ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4;
+\d+ compressmv
+
+-- test alter compression method for partitioned tables
+ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz;
+ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4;
+
+-- new data should be compressed with the current compression method
+INSERT INTO cmpart VALUES (repeat('123456789', 1004));
+INSERT INTO cmpart VALUES (repeat('123456789', 4004));
+SELECT pg_column_compression(f1) FROM cmpart1;
+SELECT pg_column_compression(f1) FROM cmpart2;
+
+-- test expression index
+CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4);
+CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2));
+INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM
+generate_series(1, 50) g), VERSION());
+
+-- check data is ok
+SELECT length(f1) FROM cmdata_pglz;
+SELECT length(f1) FROM cmdata_lz4;
+SELECT length(f1) FROM cmmove1;
+SELECT length(f1) FROM cmmove2;
+SELECT length(f1) FROM cmmove3;
+
+\set HIDE_TOAST_COMPRESSION true
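The \gset/\if guard at the top of the new file is the usual psql pattern for skipping a script on builds that lack a feature: \gset turns each output column of the preceding query into a psql variable named after the column, which \if can then test. A generic sketch of the same pattern (variable and message are illustrative):

SELECT current_setting('server_version_num')::int >= 180000 AS is_v18 \gset
\if :is_v18
  \echo 'server is v18 or newer'
\endif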
diff --git a/src/test/regress/sql/constraints.sql b/src/test/regress/sql/constraints.sql
index 337baab7ced..1f6dc8fd69f 100644
--- a/src/test/regress/sql/constraints.sql
+++ b/src/test/regress/sql/constraints.sql
@@ -537,6 +537,9 @@ CREATE TABLE UNIQUE_NOTEN_TBL(i int UNIQUE NOT ENFORCED);
ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key ENFORCED;
ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT ENFORCED;
+-- can't make an existing constraint NOT VALID
+ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT VALID;
+
DROP TABLE unique_tbl;
--
@@ -997,6 +1000,9 @@ create table constr_parent3 (a int not null);
create table constr_child3 () inherits (constr_parent2, constr_parent3);
EXECUTE get_nnconstraint_info('{constr_parent3, constr_child3}');
+COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_parent2 IS 'this constraint is invalid';
+COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_child2 IS 'this constraint is valid';
+
DEALLOCATE get_nnconstraint_info;
-- end NOT NULL NOT VALID
@@ -1037,3 +1043,9 @@ DROP DOMAIN constraint_comments_dom;
DROP ROLE regress_constraint_comments;
DROP ROLE regress_constraint_comments_noaccess;
+
+-- Leave some constraints for the pg_upgrade test to pick up
+CREATE DOMAIN constraint_comments_dom AS int;
+
+ALTER DOMAIN constraint_comments_dom ADD CONSTRAINT inv_ck CHECK (value > 0) NOT VALID;
+COMMENT ON CONSTRAINT inv_ck ON DOMAIN constraint_comments_dom IS 'comment on invalid constraint';
diff --git a/src/test/regress/sql/copy.sql b/src/test/regress/sql/copy.sql
index f0b88a23db8..a1316c73bac 100644
--- a/src/test/regress/sql/copy.sql
+++ b/src/test/regress/sql/copy.sql
@@ -94,6 +94,36 @@ this is just a line full of junk that would error out if parsed
copy copytest4 to stdout (header);
+-- test multi-line header line feature
+
+create temp table copytest5 (c1 int);
+
+copy copytest5 from stdin (format csv, header 2);
+this is a first header line.
+this is a second header line.
+1
+2
+\.
+copy copytest5 to stdout (header);
+
+truncate copytest5;
+copy copytest5 from stdin (format csv, header 4);
+this is a first header line.
+this is a second header line.
+1
+2
+\.
+select count(*) from copytest5;
+
+truncate copytest5;
+copy copytest5 from stdin (format csv, header 5);
+this is a first header line.
+this is a second header line.
+1
+2
+\.
+select count(*) from copytest5;
+
-- test copy from with a partitioned table
create table parted_copytest (
a int,
diff --git a/src/test/regress/sql/copy2.sql b/src/test/regress/sql/copy2.sql
index 45273557ce0..cef45868db5 100644
--- a/src/test/regress/sql/copy2.sql
+++ b/src/test/regress/sql/copy2.sql
@@ -90,6 +90,9 @@ COPY x to stdout (format BINARY, on_error unsupported);
COPY x from stdin (log_verbosity unsupported);
COPY x from stdin with (reject_limit 1);
COPY x from stdin with (on_error ignore, reject_limit 0);
+COPY x from stdin with (header -1);
+COPY x from stdin with (header 2.5);
+COPY x to stdout with (header 2);
-- too many columns in column list: should fail
COPY x (a, b, c, d, e, d, c) from stdin;
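Taken together, the two scripts pin down the new integer form of the HEADER option: COPY FROM skips that many input lines (consuming the whole input when the count exceeds it), COPY TO still accepts only the boolean form, and negative or fractional counts are rejected. A standalone sketch (table name illustrative):

create temp table hdr_demo (c1 int);
copy hdr_demo from stdin (format csv, header 2);
first header line, skipped
second header line, skipped
1
\.
select * from hdr_demo;             -- just the row containing 1
copy hdr_demo to stdout (header 2); -- fails: multi-line header is FROM-only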
diff --git a/src/test/regress/sql/create_table_like.sql b/src/test/regress/sql/create_table_like.sql
index 6e21722aaeb..bf8702116a7 100644
--- a/src/test/regress/sql/create_table_like.sql
+++ b/src/test/regress/sql/create_table_like.sql
@@ -143,9 +143,10 @@ COMMENT ON INDEX ctlt1_pkey IS 'index pkey';
COMMENT ON INDEX ctlt1_b_key IS 'index b_key';
ALTER TABLE ctlt1 ALTER COLUMN a SET STORAGE MAIN;
-CREATE TABLE ctlt2 (c text);
+CREATE TABLE ctlt2 (c text NOT NULL);
ALTER TABLE ctlt2 ALTER COLUMN c SET STORAGE EXTERNAL;
COMMENT ON COLUMN ctlt2.c IS 'C';
+COMMENT ON CONSTRAINT ctlt2_c_not_null ON ctlt2 IS 't2_c_not_null';
CREATE TABLE ctlt3 (a text CHECK (length(a) < 5), c text CHECK (length(c) < 7));
ALTER TABLE ctlt3 ALTER COLUMN c SET STORAGE EXTERNAL;
@@ -162,6 +163,7 @@ CREATE TABLE ctlt12_storage (LIKE ctlt1 INCLUDING STORAGE, LIKE ctlt2 INCLUDING
\d+ ctlt12_storage
CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDING COMMENTS);
\d+ ctlt12_comments
+SELECT conname, description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt12_comments'::regclass;
CREATE TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (ctlt1);
\d+ ctlt1_inh
SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt1_inh'::regclass;
@@ -197,9 +199,19 @@ DROP TABLE ctlt1, ctlt2, ctlt3, ctlt4, ctlt12_storage, ctlt12_comments, ctlt1_in
-- LIKE must respect NO INHERIT property of constraints
CREATE TABLE noinh_con_copy (a int CHECK (a > 0) NO INHERIT, b int not null,
c int not null no inherit);
-CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS);
+
+COMMENT ON CONSTRAINT noinh_con_copy_b_not_null ON noinh_con_copy IS 'not null b';
+COMMENT ON CONSTRAINT noinh_con_copy_c_not_null ON noinh_con_copy IS 'not null c no inherit';
+
+CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS INCLUDING COMMENTS);
\d+ noinh_con_copy1
+SELECT conname, description
+FROM pg_description, pg_constraint c
+WHERE classoid = 'pg_constraint'::regclass
+AND objoid = c.oid AND c.conrelid = 'noinh_con_copy1'::regclass
+ORDER BY conname COLLATE "C";
+
-- fail, as partitioned tables don't allow NO INHERIT constraints
CREATE TABLE noinh_con_copy1_parted (LIKE noinh_con_copy INCLUDING ALL)
PARTITION BY LIST (a);
diff --git a/src/test/regress/sql/domain.sql b/src/test/regress/sql/domain.sql
index b752a63ab5f..b8f5a639712 100644
--- a/src/test/regress/sql/domain.sql
+++ b/src/test/regress/sql/domain.sql
@@ -602,6 +602,9 @@ insert into domain_test values (1, 2);
-- should fail
alter table domain_test add column c str_domain;
+-- disallow duplicated not-null constraints
+create domain int_domain1 as int constraint nn1 not null constraint nn2 not null;
+
create domain str_domain2 as text check (value <> 'foo') default 'foo';
-- should fail
diff --git a/src/test/regress/sql/foreign_key.sql b/src/test/regress/sql/foreign_key.sql
index cfcecb4e911..39174ad1eb9 100644
--- a/src/test/regress/sql/foreign_key.sql
+++ b/src/test/regress/sql/foreign_key.sql
@@ -1296,7 +1296,7 @@ UPDATE fk_notpartitioned_pk SET b = 2504 WHERE a = 2500;
-- check psql behavior
\d fk_notpartitioned_pk
--- Check the exsting FK trigger
+-- Check the existing FK trigger
SELECT conname, tgrelid::regclass as tgrel, regexp_replace(tgname, '[0-9]+', 'N') as tgname, tgtype
FROM pg_trigger t JOIN pg_constraint c ON (t.tgconstraint = c.oid)
WHERE tgrelid IN (SELECT relid FROM pg_partition_tree('fk_partitioned_fk'::regclass)
diff --git a/src/test/regress/sql/generated_stored.sql b/src/test/regress/sql/generated_stored.sql
index 4ec155f2da9..f56fde8d4e5 100644
--- a/src/test/regress/sql/generated_stored.sql
+++ b/src/test/regress/sql/generated_stored.sql
@@ -595,6 +595,19 @@ ALTER TABLE gtest30_1 ALTER COLUMN b DROP EXPRESSION; -- error
CREATE TABLE gtest31_1 (a int, b text GENERATED ALWAYS AS ('hello') STORED, c text);
CREATE TABLE gtest31_2 (x int, y gtest31_1);
ALTER TABLE gtest31_1 ALTER COLUMN b TYPE varchar; -- fails
+
+-- bug #18970: these cases are unsupported, but make sure they fail cleanly
+ALTER TABLE gtest31_2 ADD CONSTRAINT cc CHECK ((y).b IS NOT NULL);
+ALTER TABLE gtest31_1 ALTER COLUMN b SET EXPRESSION AS ('hello1');
+ALTER TABLE gtest31_2 DROP CONSTRAINT cc;
+
+CREATE STATISTICS gtest31_2_stat ON ((y).b is not null) FROM gtest31_2;
+ALTER TABLE gtest31_1 ALTER COLUMN b SET EXPRESSION AS ('hello2');
+DROP STATISTICS gtest31_2_stat;
+
+CREATE INDEX gtest31_2_y_idx ON gtest31_2(((y).b));
+ALTER TABLE gtest31_1 ALTER COLUMN b SET EXPRESSION AS ('hello3');
+
DROP TABLE gtest31_1, gtest31_2;
-- Check it for a partitioned table, too
diff --git a/src/test/regress/sql/generated_virtual.sql b/src/test/regress/sql/generated_virtual.sql
index b4eedeee2fb..ba19bc4c701 100644
--- a/src/test/regress/sql/generated_virtual.sql
+++ b/src/test/regress/sql/generated_virtual.sql
@@ -253,10 +253,10 @@ CREATE TABLE gtest4 (
a int,
b double_int GENERATED ALWAYS AS ((a * 2, a * 3)) VIRTUAL
);
-INSERT INTO gtest4 VALUES (1), (6);
-SELECT * FROM gtest4;
+--INSERT INTO gtest4 VALUES (1), (6);
+--SELECT * FROM gtest4;
-DROP TABLE gtest4;
+--DROP TABLE gtest4;
DROP TYPE double_int;
-- using tableoid is allowed
@@ -290,20 +290,21 @@ GRANT SELECT (a, c) ON gtest11 TO regress_user11;
CREATE FUNCTION gf1(a int) RETURNS int AS $$ SELECT a * 3 $$ IMMUTABLE LANGUAGE SQL;
REVOKE ALL ON FUNCTION gf1(int) FROM PUBLIC;
-CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL);
-INSERT INTO gtest12 VALUES (1, 10), (2, 20);
-GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11;
+CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL); -- fails, user-defined function
+--INSERT INTO gtest12 VALUES (1, 10), (2, 20);
+--GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11;
SET ROLE regress_user11;
SELECT a, b FROM gtest11; -- not allowed
SELECT a, c FROM gtest11; -- allowed
SELECT gf1(10); -- not allowed
-INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function)
-SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed
+--INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function)
+--SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed
RESET ROLE;
-DROP FUNCTION gf1(int); -- fail
-DROP TABLE gtest11, gtest12;
+--DROP FUNCTION gf1(int); -- fail
+DROP TABLE gtest11;
+--DROP TABLE gtest12;
DROP FUNCTION gf1(int);
DROP USER regress_user11;
@@ -453,11 +454,19 @@ CREATE TABLE gtest24r (a int PRIMARY KEY, b gtestdomain1range GENERATED ALWAYS A
--INSERT INTO gtest24r (a) VALUES (4); -- ok
--INSERT INTO gtest24r (a) VALUES (6); -- error
+CREATE TABLE gtest24at (a int PRIMARY KEY);
+ALTER TABLE gtest24at ADD COLUMN b gtestdomain1 GENERATED ALWAYS AS (a * 2) VIRTUAL; -- error
+CREATE TABLE gtest24ata (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) VIRTUAL);
+ALTER TABLE gtest24ata ALTER COLUMN b TYPE gtestdomain1; -- error
+
CREATE DOMAIN gtestdomainnn AS int CHECK (VALUE IS NOT NULL);
CREATE TABLE gtest24nn (a int, b gtestdomainnn GENERATED ALWAYS AS (a * 2) VIRTUAL);
--INSERT INTO gtest24nn (a) VALUES (4); -- ok
--INSERT INTO gtest24nn (a) VALUES (NULL); -- error
+-- using user-defined type not yet supported
+CREATE TABLE gtest24xxx (a gtestdomain1, b gtestdomain1, c int GENERATED ALWAYS AS (greatest(a, b)) VIRTUAL); -- error
+
-- typed tables (currently not supported)
CREATE TYPE gtest_type AS (f1 integer, f2 text, f3 bigint);
CREATE TABLE gtest28 OF gtest_type (f1 WITH OPTIONS GENERATED ALWAYS AS (f2 *2) VIRTUAL);
@@ -637,6 +646,19 @@ ALTER TABLE gtest30_1 ALTER COLUMN b DROP EXPRESSION; -- error
CREATE TABLE gtest31_1 (a int, b text GENERATED ALWAYS AS ('hello') VIRTUAL, c text);
CREATE TABLE gtest31_2 (x int, y gtest31_1);
ALTER TABLE gtest31_1 ALTER COLUMN b TYPE varchar; -- fails
+
+-- bug #18970
+ALTER TABLE gtest31_2 ADD CONSTRAINT cc CHECK ((y).b IS NOT NULL);
+ALTER TABLE gtest31_1 ALTER COLUMN b SET EXPRESSION AS ('hello1');
+ALTER TABLE gtest31_2 DROP CONSTRAINT cc;
+
+CREATE STATISTICS gtest31_2_stat ON ((y).b is not null) FROM gtest31_2;
+ALTER TABLE gtest31_1 ALTER COLUMN b SET EXPRESSION AS ('hello2');
+DROP STATISTICS gtest31_2_stat;
+
+CREATE INDEX gtest31_2_y_idx ON gtest31_2(((y).b));
+ALTER TABLE gtest31_1 ALTER COLUMN b SET EXPRESSION AS ('hello3');
+
DROP TABLE gtest31_1, gtest31_2;
-- Check it for a partitioned table, too
@@ -788,7 +810,8 @@ create table gtest32 (
a int primary key,
b int generated always as (a * 2),
c int generated always as (10 + 10),
- d int generated always as (coalesce(a, 100))
+ d int generated always as (coalesce(a, 100)),
+ e int
);
insert into gtest32 values (1), (2);
@@ -829,7 +852,19 @@ select t2.* from gtest32 t1 left join gtest32 t2 on false;
select t2.* from gtest32 t1 left join gtest32 t2 on false;
explain (verbose, costs off)
-select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20;
-select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20;
+select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20;
+select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20;
+
+-- Ensure that virtual generated columns in an ALTER COLUMN TYPE USING expression are expanded
+alter table gtest32 alter column e type bigint using b;
+
+-- Ensure that virtual generated column references within SubLinks that should
+-- be transformed into joins can get expanded
+explain (costs off)
+select 1 from gtest32 t1 where exists
+ (select 1 from gtest32 t2 where t1.a > t2.a and t2.b = 2);
+
+select 1 from gtest32 t1 where exists
+ (select 1 from gtest32 t2 where t1.a > t2.a and t2.b = 2);
drop table gtest32;
diff --git a/src/test/regress/sql/incremental_sort.sql b/src/test/regress/sql/incremental_sort.sql
index f1f8fae5654..bbe658a7588 100644
--- a/src/test/regress/sql/incremental_sort.sql
+++ b/src/test/regress/sql/incremental_sort.sql
@@ -298,3 +298,27 @@ explain (costs off)
select * from
(select * from tenk1 order by four) t1 join tenk1 t2 on t1.four = t2.four and t1.two = t2.two
order by t1.four, t1.two limit 1;
+
+--
+-- Test incremental sort for Append/MergeAppend
+--
+create table prt_tbl (a int, b int) partition by range (a);
+create table prt_tbl_1 partition of prt_tbl for values from (0) to (100);
+create table prt_tbl_2 partition of prt_tbl for values from (100) to (200);
+insert into prt_tbl select i%200, i from generate_series(1,1000)i;
+create index on prt_tbl_1(a);
+create index on prt_tbl_2(a, b);
+analyze prt_tbl;
+
+set enable_seqscan to off;
+set enable_bitmapscan to off;
+
+-- Ensure we get an incremental sort for the subpath of Append
+explain (costs off) select * from prt_tbl order by a, b;
+
+-- Ensure we get an incremental sort for the subpath of MergeAppend
+explain (costs off) select * from prt_tbl_1 union all select * from prt_tbl_2 order by a, b;
+
+reset enable_bitmapscan;
+reset enable_seqscan;
+drop table prt_tbl;
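What the two EXPLAINs are expected to show is an ordered Append whose first subpath is an Incremental Sort: the index on prt_tbl_1(a) yields rows presorted on a, so only b still needs sorting, while prt_tbl_2's (a, b) index already satisfies the full ordering. Roughly (a sketch; exact shape depends on costing, and the index names assume the auto-generated ones):

-- Append
--   -> Incremental Sort
--        Sort Key: prt_tbl_1.a, prt_tbl_1.b
--        Presorted Key: prt_tbl_1.a
--        -> Index Scan using prt_tbl_1_a_idx on prt_tbl_1
--   -> Index Scan using prt_tbl_2_a_b_idx on prt_tbl_2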
diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql
index cc5128add4d..5f0a475894d 100644
--- a/src/test/regress/sql/join.sql
+++ b/src/test/regress/sql/join.sql
@@ -1277,6 +1277,23 @@ where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
-- variant that isn't quite a star-schema case
+explain (verbose, costs off)
+select ss1.d1 from
+ tenk1 as t1
+ inner join tenk1 as t2
+ on t1.tenthous = t2.ten
+ inner join
+ int8_tbl as i8
+ left join int4_tbl as i4
+ inner join (select 64::information_schema.cardinal_number as d1
+ from tenk1 t3,
+ lateral (select abs(t3.unique1) + random()) ss0(x)
+ where t3.fivethous < 0) as ss1
+ on i4.f1 = ss1.d1
+ on i8.q1 = i4.f1
+ on t1.tenthous = ss1.d1
+where t1.unique1 < i4.f1;
+
select ss1.d1 from
tenk1 as t1
inner join tenk1 as t2
@@ -1332,6 +1349,64 @@ select * from
(select 1 as x) ss1 left join (select 2 as y) ss2 on (true),
lateral (select ss2.y as z limit 1) ss3;
+-- This example demonstrates the folly of our old "have_dangerous_phv" logic
+begin;
+set local from_collapse_limit to 2;
+explain (verbose, costs off)
+select * from int8_tbl t1
+ left join
+ (select coalesce(t2.q1 + x, 0) from int8_tbl t2,
+ lateral (select t3.q1 as x from int8_tbl t3,
+ lateral (select t2.q1, t3.q1 offset 0) s))
+ on true;
+rollback;
+
+-- ... not that the initial replacement didn't have some bugs too
+begin;
+create temp table t(i int primary key);
+
+explain (verbose, costs off)
+select * from t t1
+ left join (select 1 as x, * from t t2(i2)) t2ss on t1.i = t2ss.i2
+ left join t t3(i3) on false
+ left join t t4(i4) on t4.i4 > t2ss.x;
+
+explain (verbose, costs off)
+select * from
+ (select k from
+ (select i, coalesce(i, j) as k from
+ (select i from t union all select 0)
+ join (select 1 as j limit 1) on i = j)
+ right join (select 2 as x) on true
+ join (select 3 as y) on i is not null
+ ),
+ lateral (select k as kl limit 1);
+
+rollback;
+
+-- PHVs containing SubLinks are quite tricky to get right
+explain (verbose, costs off)
+select *
+from int8_tbl i8
+ inner join
+ (select (select true) as x
+ from int4_tbl i4, lateral (select i4.f1 as y limit 1) ss1
+ where i4.f1 = 0) ss2 on true
+ right join (select false as z) ss3 on true,
+ lateral (select i8.q2 as q2l where x limit 1) ss4
+where i8.q2 = 123;
+
+explain (verbose, costs off)
+select *
+from int8_tbl i8
+ inner join
+ (select (select true) as x
+ from int4_tbl i4, lateral (select 1 as y limit 1) ss1
+ where i4.f1 = 0) ss2 on true
+ right join (select false as z) ss3 on true,
+ lateral (select i8.q2 as q2l where x limit 1) ss4
+where i8.q2 = 123;
+
-- Test proper handling of appendrel PHVs during useless-RTE removal
explain (costs off)
select * from
@@ -1902,13 +1977,13 @@ select * from
(select 1 as id) as xx
left join
(tenk1 as a1 full join (select 1 as id) as yy on (a1.unique1 = yy.id))
- on (xx.id = coalesce(yy.id));
+ on (xx.id = coalesce(yy.id, yy.id));
select * from
(select 1 as id) as xx
left join
(tenk1 as a1 full join (select 1 as id) as yy on (a1.unique1 = yy.id))
- on (xx.id = coalesce(yy.id));
+ on (xx.id = coalesce(yy.id, yy.id));
--
-- test ability to push constants through outer join clauses
@@ -3094,9 +3169,9 @@ select * from int4_tbl i left join
lateral (select * from int2_tbl j where i.f1 = j.f1) k on true;
explain (verbose, costs off)
select * from int4_tbl i left join
- lateral (select coalesce(i) from int2_tbl j where i.f1 = j.f1) k on true;
+ lateral (select coalesce(i, i) from int2_tbl j where i.f1 = j.f1) k on true;
select * from int4_tbl i left join
- lateral (select coalesce(i) from int2_tbl j where i.f1 = j.f1) k on true;
+ lateral (select coalesce(i, i) from int2_tbl j where i.f1 = j.f1) k on true;
explain (verbose, costs off)
select * from int4_tbl a,
lateral (
@@ -3562,7 +3637,7 @@ ANALYZE group_tbl;
EXPLAIN (COSTS OFF)
SELECT 1 FROM group_tbl t1
- LEFT JOIN (SELECT a c1, COALESCE(a) c2 FROM group_tbl t2) s ON TRUE
+ LEFT JOIN (SELECT a c1, COALESCE(a, a) c2 FROM group_tbl t2) s ON TRUE
GROUP BY s.c1, s.c2;
DROP TABLE group_tbl;
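The recurring coalesce(x) -> coalesce(x, x) substitutions in this file (and in subselect.sql below) compensate for the planner now simplifying a single-argument COALESCE to its bare argument during expression preprocessing; a two-argument call is the cheapest way to keep a genuine CoalesceExpr, and hence the placeholder behavior under test, in the plan. A sketch of the difference:

explain (verbose, costs off)
select coalesce(q1) from int8_tbl;     -- output list shows plain q1
explain (verbose, costs off)
select coalesce(q1, q1) from int8_tbl; -- COALESCE survives into the plan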
diff --git a/src/test/regress/sql/memoize.sql b/src/test/regress/sql/memoize.sql
index c0d47fa875a..8d1cdd6990c 100644
--- a/src/test/regress/sql/memoize.sql
+++ b/src/test/regress/sql/memoize.sql
@@ -26,6 +26,7 @@ begin
ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
ln := regexp_replace(ln, 'loops=\d+', 'loops=N');
ln := regexp_replace(ln, 'Index Searches: \d+', 'Index Searches: N');
+ ln := regexp_replace(ln, 'Memory: \d+kB', 'Memory: NkB');
return next ln;
end loop;
end;
@@ -244,3 +245,29 @@ RESET max_parallel_workers_per_gather;
RESET parallel_tuple_cost;
RESET parallel_setup_cost;
RESET min_parallel_table_scan_size;
+
+-- Ensure memoize works for ANTI joins
+CREATE TABLE tab_anti (a int, b boolean);
+INSERT INTO tab_anti SELECT i%3, false FROM generate_series(1,100)i;
+ANALYZE tab_anti;
+
+-- Ensure we get a Memoize plan for ANTI join
+SELECT explain_memoize('
+SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN
+LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2
+ON t1.a+1 = t2.a
+WHERE t2.a IS NULL;', false);
+
+-- And check we get the expected results.
+SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN
+LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2
+ON t1.a+1 = t2.a
+WHERE t2.a IS NULL;
+
+-- Ensure we do not add memoize node for SEMI join
+EXPLAIN (COSTS OFF)
+SELECT * FROM tab_anti t1 WHERE t1.a IN
+ (SELECT a FROM tab_anti t2 WHERE t2.b IN
+ (SELECT t1.b FROM tab_anti t3 WHERE t2.a > 1 OFFSET 0));
+
+DROP TABLE tab_anti;
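The anti-join query is the usual LEFT JOIN ... IS NULL idiom, so the planner reduces it to a Nested Loop Anti Join, and the change under test allows a Memoize node on its inner side keyed on the outer expression. The expected shape is roughly (a sketch; the exact nodes may vary with costing):

-- Aggregate
--   -> Nested Loop Anti Join
--        -> Seq Scan on tab_anti t1
--        -> Memoize
--             Cache Key: (t1.a + 1)
--             -> Subquery Scan on t2 ...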
diff --git a/src/test/regress/sql/numeric.sql b/src/test/regress/sql/numeric.sql
index b98ae27df56..640c6d92f4c 100644
--- a/src/test/regress/sql/numeric.sql
+++ b/src/test/regress/sql/numeric.sql
@@ -869,6 +869,8 @@ SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, 0);
SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, -5);
SELECT width_bucket(3.5::float8, 3.0::float8, 3.0::float8, 888);
SELECT width_bucket('NaN', 3.0, 4.0, 888);
+SELECT width_bucket('NaN'::float8, 3.0::float8, 4.0::float8, 888);
+SELECT width_bucket(0, 'NaN', 4.0, 888);
SELECT width_bucket(0::float8, 'NaN', 4.0::float8, 888);
SELECT width_bucket(2.0, 3.0, '-inf', 888);
SELECT width_bucket(0::float8, '-inf', 4.0::float8, 888);
diff --git a/src/test/regress/sql/predicate.sql b/src/test/regress/sql/predicate.sql
index 9dcb81b1bc5..d92277353a0 100644
--- a/src/test/regress/sql/predicate.sql
+++ b/src/test/regress/sql/predicate.sql
@@ -115,6 +115,24 @@ SELECT * FROM pred_tab t1
LEFT JOIN pred_tab t2 ON t1.a = 1
LEFT JOIN pred_tab t3 ON t2.a IS NULL OR t2.c IS NULL;
+--
+-- Tests for NullTest reduction in EXISTS sublink
+--
+
+-- Ensure the IS_NOT_NULL qual is ignored
+EXPLAIN (COSTS OFF)
+SELECT * FROM pred_tab t1
+ LEFT JOIN pred_tab t2 ON EXISTS
+ (SELECT 1 FROM pred_tab t3, pred_tab t4, pred_tab t5, pred_tab t6
+ WHERE t1.a = t3.a AND t6.a IS NOT NULL);
+
+-- Ensure the IS_NULL qual is reduced to constant-FALSE
+EXPLAIN (COSTS OFF)
+SELECT * FROM pred_tab t1
+ LEFT JOIN pred_tab t2 ON EXISTS
+ (SELECT 1 FROM pred_tab t3, pred_tab t4, pred_tab t5, pred_tab t6
+ WHERE t1.a = t3.a AND t6.a IS NULL);
+
DROP TABLE pred_tab;
-- Validate we handle IS NULL and IS NOT NULL quals correctly with inheritance
diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql
index f337aa67c13..3eacc1340aa 100644
--- a/src/test/regress/sql/privileges.sql
+++ b/src/test/regress/sql/privileges.sql
@@ -1544,6 +1544,14 @@ SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole,
SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole,
'SELECT, fake_privilege', FALSE); -- error
+-- Test quoting and dequoting of user names in ACLs
+CREATE ROLE "regress_""quoted";
+SELECT makeaclitem('regress_"quoted'::regrole, 'regress_"quoted'::regrole,
+ 'SELECT', TRUE);
+SELECT '"regress_""quoted"=r*/"regress_""quoted"'::aclitem;
+SELECT '""=r*/""'::aclitem; -- used to be misparsed as """"
+DROP ROLE "regress_""quoted";
+
-- Test non-throwing aclitem I/O
SELECT pg_input_is_valid('regress_priv_user1=r/regress_priv_user2', 'aclitem');
SELECT pg_input_is_valid('regress_priv_user1=r/', 'aclitem');
@@ -1948,7 +1956,8 @@ DROP TABLE lock_table;
DROP USER regress_locktable_user;
-- test to check privileges of system views pg_shmem_allocations,
--- pg_shmem_allocations_numa and pg_backend_memory_contexts.
+-- pg_shmem_allocations_numa, pg_dsm_registry_allocations, and
+-- pg_backend_memory_contexts.
-- switch to superuser
\c -
@@ -1959,6 +1968,7 @@ SELECT has_table_privilege('regress_readallstats','pg_aios','SELECT'); -- no
SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- no
SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations','SELECT'); -- no
SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations_numa','SELECT'); -- no
+SELECT has_table_privilege('regress_readallstats','pg_dsm_registry_allocations','SELECT'); -- no
GRANT pg_read_all_stats TO regress_readallstats;
@@ -1966,6 +1976,7 @@ SELECT has_table_privilege('regress_readallstats','pg_aios','SELECT'); -- yes
SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- yes
SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations','SELECT'); -- yes
SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations_numa','SELECT'); -- yes
+SELECT has_table_privilege('regress_readallstats','pg_dsm_registry_allocations','SELECT'); -- yes
-- run query to ensure that functions within views can be executed
SET ROLE regress_readallstats;
diff --git a/src/test/regress/sql/psql.sql b/src/test/regress/sql/psql.sql
index 1a8a83462f0..e2e31245439 100644
--- a/src/test/regress/sql/psql.sql
+++ b/src/test/regress/sql/psql.sql
@@ -68,11 +68,11 @@ SELECT $1, $2 \parse stmt3
-- Multiple \g calls mean multiple executions
\bind_named stmt2 'foo3' \g \bind_named stmt3 'foo4' 'bar4' \g
--- \close (extended query protocol)
-\close
-\close ''
-\close stmt2
-\close stmt2
+-- \close_prepared (extended query protocol)
+\close_prepared
+\close_prepared ''
+\close_prepared stmt2
+\close_prepared stmt2
SELECT name, statement FROM pg_prepared_statements ORDER BY name;
-- \bind (extended query protocol)
@@ -1035,7 +1035,7 @@ select \if false \\ (bogus \else \\ 42 \endif \\ forty_two;
\C arg1
\c arg1 arg2 arg3 arg4
\cd arg1
- \close stmt1
+ \close_prepared stmt1
\conninfo
\copy arg1 arg2 arg3 arg4 arg5 arg6
\copyright
diff --git a/src/test/regress/sql/psql_pipeline.sql b/src/test/regress/sql/psql_pipeline.sql
index 16e1e1e84cd..6788dceee2e 100644
--- a/src/test/regress/sql/psql_pipeline.sql
+++ b/src/test/regress/sql/psql_pipeline.sql
@@ -105,106 +105,6 @@ INSERT INTO psql_pipeline VALUES ($1) \bind 1 \sendpipeline
COMMIT \bind \sendpipeline
\endpipeline
--- COPY FROM STDIN
--- with \sendpipeline and \bind
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\endpipeline
-2 test2
-\.
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\endpipeline
-20 test2
-\.
-
--- COPY FROM STDIN with \flushrequest + \getresults
--- with \sendpipeline and \bind
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\flushrequest
-\getresults
-3 test3
-\.
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\flushrequest
-\getresults
-30 test3
-\.
-\endpipeline
-
--- COPY FROM STDIN with \syncpipeline + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\syncpipeline
-\getresults
-4 test4
-\.
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\syncpipeline
-\getresults
-40 test4
-\.
-\endpipeline
-
--- COPY TO STDOUT
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\endpipeline
-
--- COPY TO STDOUT with \flushrequest + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\flushrequest
-\getresults
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\flushrequest
-\getresults
-\endpipeline
-
--- COPY TO STDOUT with \syncpipeline + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\syncpipeline
-\getresults
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\syncpipeline
-\getresults
-\endpipeline
-
-- Use \parse and \bind_named
\startpipeline
SELECT $1 \parse ''
@@ -406,21 +306,21 @@ SELECT $1 \bind \sendpipeline
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
\flushrequest
\getresults
-- Pipeline is aborted.
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
-- Sync allows pipeline to recover.
\syncpipeline
\getresults
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
\flushrequest
\getresults
\endpipeline
diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/sql/publication.sql
index 68001de4000..2585f083181 100644
--- a/src/test/regress/sql/publication.sql
+++ b/src/test/regress/sql/publication.sql
@@ -262,6 +262,9 @@ ALTER PUBLICATION testpub6 SET TABLES IN SCHEMA testpub_rf_schema2, TABLE testpu
RESET client_min_messages;
\dRp+ testpub6
-- fail - virtual generated column uses user-defined function
+-- (Actually, this already fails at CREATE TABLE rather than at CREATE
+-- PUBLICATION, but let's keep the test in case the former gets
+-- relaxed sometime.)
CREATE TABLE testpub_rf_tbl6 (id int PRIMARY KEY, x int, y int GENERATED ALWAYS AS (x * testpub_rf_func2()) VIRTUAL);
CREATE PUBLICATION testpub7 FOR TABLE testpub_rf_tbl6 WHERE (y > 100);
-- test that SET EXPRESSION is rejected, because it could affect a row filter
@@ -276,7 +279,7 @@ DROP TABLE testpub_rf_tbl2;
DROP TABLE testpub_rf_tbl3;
DROP TABLE testpub_rf_tbl4;
DROP TABLE testpub_rf_tbl5;
-DROP TABLE testpub_rf_tbl6;
+--DROP TABLE testpub_rf_tbl6;
DROP TABLE testpub_rf_schema1.testpub_rf_tbl5;
DROP TABLE testpub_rf_schema2.testpub_rf_tbl6;
DROP SCHEMA testpub_rf_schema1;
@@ -1226,3 +1229,25 @@ RESET client_min_messages;
RESET SESSION AUTHORIZATION;
DROP ROLE regress_publication_user, regress_publication_user2;
DROP ROLE regress_publication_user_dummy;
+
+-- stage objects for pg_dump tests
+CREATE SCHEMA pubme CREATE TABLE t0 (c int, d int) CREATE TABLE t1 (c int);
+CREATE SCHEMA pubme2 CREATE TABLE t0 (c int, d int);
+SET client_min_messages = 'ERROR';
+CREATE PUBLICATION dump_pub_qual_1ct FOR
+ TABLE ONLY pubme.t0 (c, d) WHERE (c > 0);
+CREATE PUBLICATION dump_pub_qual_2ct FOR
+ TABLE ONLY pubme.t0 (c) WHERE (c > 0),
+ TABLE ONLY pubme.t1 (c);
+CREATE PUBLICATION dump_pub_nsp_1ct FOR
+ TABLES IN SCHEMA pubme;
+CREATE PUBLICATION dump_pub_nsp_2ct FOR
+ TABLES IN SCHEMA pubme,
+ TABLES IN SCHEMA pubme2;
+CREATE PUBLICATION dump_pub_all FOR
+ TABLE ONLY pubme.t0,
+ TABLE ONLY pubme.t1 WHERE (c < 0),
+ TABLES IN SCHEMA pubme,
+ TABLES IN SCHEMA pubme2
+ WITH (publish_via_partition_root = true);
+RESET client_min_messages;
diff --git a/src/test/regress/sql/regproc.sql b/src/test/regress/sql/regproc.sql
index 232289ac398..cfec8f8c754 100644
--- a/src/test/regress/sql/regproc.sql
+++ b/src/test/regress/sql/regproc.sql
@@ -47,11 +47,42 @@ SELECT regrole('regress_regrole_test');
SELECT regrole('"regress_regrole_test"');
SELECT regnamespace('pg_catalog');
SELECT regnamespace('"pg_catalog"');
+SELECT regdatabase('template1');
+SELECT regdatabase('"template1"');
SELECT to_regrole('regress_regrole_test');
SELECT to_regrole('"regress_regrole_test"');
SELECT to_regnamespace('pg_catalog');
SELECT to_regnamespace('"pg_catalog"');
+SELECT to_regdatabase('template1');
+SELECT to_regdatabase('"template1"');
+
+-- special "single dash" case
+
+SELECT regproc('-')::oid;
+SELECT regprocedure('-')::oid;
+SELECT regclass('-')::oid;
+SELECT regcollation('-')::oid;
+SELECT regtype('-')::oid;
+SELECT regconfig('-')::oid;
+SELECT regdictionary('-')::oid;
+SELECT regrole('-')::oid;
+SELECT regnamespace('-')::oid;
+SELECT regdatabase('-')::oid;
+
+SELECT to_regproc('-')::oid;
+SELECT to_regprocedure('-')::oid;
+SELECT to_regclass('-')::oid;
+SELECT to_regcollation('-')::oid;
+SELECT to_regtype('-')::oid;
+SELECT to_regrole('-')::oid;
+SELECT to_regnamespace('-')::oid;
+SELECT to_regdatabase('-')::oid;
+
+-- constant cannot be used here
+
+CREATE TABLE regrole_test (rolid OID DEFAULT 'regress_regrole_test'::regrole);
+CREATE TABLE regdatabase_test (datid OID DEFAULT 'template1'::regdatabase);
/* If objects don't exist, raise errors. */
@@ -88,6 +119,9 @@ SELECT regrole('foo.bar');
SELECT regnamespace('Nonexistent');
SELECT regnamespace('"Nonexistent"');
SELECT regnamespace('foo.bar');
+SELECT regdatabase('Nonexistent');
+SELECT regdatabase('"Nonexistent"');
+SELECT regdatabase('foo.bar');
/* If objects don't exist, return NULL with no error. */
@@ -122,6 +156,9 @@ SELECT to_regrole('foo.bar');
SELECT to_regnamespace('Nonexistent');
SELECT to_regnamespace('"Nonexistent"');
SELECT to_regnamespace('foo.bar');
+SELECT to_regdatabase('Nonexistent');
+SELECT to_regdatabase('"Nonexistent"');
+SELECT to_regdatabase('foo.bar');
-- Test to_regtypemod
SELECT to_regtypemod('text');
@@ -147,6 +184,7 @@ SELECT * FROM pg_input_error_info('ng_catalog.abs(numeric)', 'regprocedure');
SELECT * FROM pg_input_error_info('ng_catalog.abs(numeric', 'regprocedure');
SELECT * FROM pg_input_error_info('regress_regrole_test', 'regrole');
SELECT * FROM pg_input_error_info('no_such_type', 'regtype');
+SELECT * FROM pg_input_error_info('Nonexistent', 'regdatabase');
-- Some cases that should be soft errors, but are not yet
SELECT * FROM pg_input_error_info('incorrect type name syntax', 'regtype');
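regdatabase follows the same contract as the other reg* alias types: it is an OID underneath, the constructor-style cast raises an error for unknown names while to_regdatabase() returns NULL, and '-' maps to OID 0. A quick sketch:

SELECT 'template1'::regdatabase::oid;       -- the database's OID
SELECT (SELECT oid FROM pg_database WHERE datname = 'template1')::regdatabase;
SELECT to_regdatabase('no_such_database');  -- NULL, no error
SELECT regdatabase('-')::oid;               -- 0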
diff --git a/src/test/regress/sql/stats.sql b/src/test/regress/sql/stats.sql
index 232ab8db8fa..54e72866344 100644
--- a/src/test/regress/sql/stats.sql
+++ b/src/test/regress/sql/stats.sql
@@ -439,8 +439,13 @@ DROP TABLE test_stats_temp;
-- Checkpoint twice: The checkpointer reports stats after reporting completion
-- of the checkpoint. But after a second checkpoint we'll see at least the
-- results of the first.
-CHECKPOINT;
-CHECKPOINT;
+--
+-- While at it, test checkpoint options. Note that we don't test MODE SPREAD
+-- because it would prolong the test.
+CHECKPOINT (WRONG);
+CHECKPOINT (MODE WRONG);
+CHECKPOINT (MODE FAST, FLUSH_UNLOGGED FALSE);
+CHECKPOINT (FLUSH_UNLOGGED);
SELECT num_requested > :rqst_ckpts_before FROM pg_stat_checkpointer;
SELECT wal_bytes > :wal_bytes_before FROM pg_stat_wal;
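The new syntax exercised here is the parenthesized option list for CHECKPOINT: MODE FAST requests an immediate checkpoint, MODE SPREAD throttles it like an automatic one, and FLUSH_UNLOGGED controls whether unlogged relations are flushed; unknown options or values error out, as the first two statements verify. In sketch form:

CHECKPOINT (MODE FAST);                        -- immediate checkpoint
CHECKPOINT (MODE FAST, FLUSH_UNLOGGED FALSE);  -- skip flushing unlogged relations
-- CHECKPOINT (MODE SPREAD);  -- also valid, but slow, so the test omits it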
diff --git a/src/test/regress/sql/strings.sql b/src/test/regress/sql/strings.sql
index 2577a42987d..92c445c2439 100644
--- a/src/test/regress/sql/strings.sql
+++ b/src/test/regress/sql/strings.sql
@@ -76,6 +76,7 @@ SELECT E'De\\000dBeEf'::bytea;
SELECT E'De\123dBeEf'::bytea;
SELECT E'De\\123dBeEf'::bytea;
SELECT E'De\\678dBeEf'::bytea;
+SELECT E'DeAd\\\\BeEf'::bytea;
SELECT reverse(''::bytea);
SELECT reverse('\xaa'::bytea);
@@ -88,6 +89,7 @@ SELECT E'\\xDe00BeEf'::bytea;
SELECT E'DeAdBeEf'::bytea;
SELECT E'De\\000dBeEf'::bytea;
SELECT E'De\\123dBeEf'::bytea;
+SELECT E'DeAd\\\\BeEf'::bytea;
-- Test non-error-throwing API too
SELECT pg_input_is_valid(E'\\xDeAdBeE', 'bytea');
diff --git a/src/test/regress/sql/subscription.sql b/src/test/regress/sql/subscription.sql
index 007c9e70374..f0f714fe747 100644
--- a/src/test/regress/sql/subscription.sql
+++ b/src/test/regress/sql/subscription.sql
@@ -287,6 +287,17 @@ ALTER SUBSCRIPTION regress_testsub SET (disable_on_error = true);
ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
DROP SUBSCRIPTION regress_testsub;
+-- fail - retain_dead_tuples must be boolean
+CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, retain_dead_tuples = foo);
+
+-- ok
+CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, retain_dead_tuples = false);
+
+\dRs+
+
+ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE);
+DROP SUBSCRIPTION regress_testsub;
+
-- let's do some tests with pg_create_subscription rather than superuser
SET SESSION AUTHORIZATION regress_subscription_user3;
diff --git a/src/test/regress/sql/subselect.sql b/src/test/regress/sql/subselect.sql
index fec38ef85a6..d9a841fbc9f 100644
--- a/src/test/regress/sql/subselect.sql
+++ b/src/test/regress/sql/subselect.sql
@@ -1041,7 +1041,7 @@ explain (verbose, costs off)
select ss2.* from
int8_tbl t1 left join
(int8_tbl t2 left join
- (select coalesce(q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 inner join
+ (select coalesce(q1, q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 inner join
lateral (select ss1.x as y, * from int8_tbl t4) ss2 on t2.q2 = ss2.q1)
on t1.q2 = ss2.q1
order by 1, 2, 3;
@@ -1049,7 +1049,7 @@ order by 1, 2, 3;
select ss2.* from
int8_tbl t1 left join
(int8_tbl t2 left join
- (select coalesce(q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 inner join
+ (select coalesce(q1, q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 inner join
lateral (select ss1.x as y, * from int8_tbl t4) ss2 on t2.q2 = ss2.q1)
on t1.q2 = ss2.q1
order by 1, 2, 3;
@@ -1059,7 +1059,7 @@ explain (verbose, costs off)
select ss2.* from
int8_tbl t1 left join
(int8_tbl t2 left join
- (select coalesce(q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 left join
+ (select coalesce(q1, q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 left join
lateral (select ss1.x as y, * from int8_tbl t4) ss2 on t2.q2 = ss2.q1)
on t1.q2 = ss2.q1
order by 1, 2, 3;
@@ -1067,7 +1067,7 @@ order by 1, 2, 3;
select ss2.* from
int8_tbl t1 left join
(int8_tbl t2 left join
- (select coalesce(q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 left join
+ (select coalesce(q1, q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 left join
lateral (select ss1.x as y, * from int8_tbl t4) ss2 on t2.q2 = ss2.q1)
on t1.q2 = ss2.q1
order by 1, 2, 3;
diff --git a/src/test/regress/sql/triggers.sql b/src/test/regress/sql/triggers.sql
index 9ffd318385f..d674b25c83b 100644
--- a/src/test/regress/sql/triggers.sql
+++ b/src/test/regress/sql/triggers.sql
@@ -1577,6 +1577,19 @@ drop table parted;
drop function parted_trigfunc();
--
+-- Constraint triggers
+--
+create constraint trigger crtr
+ after insert on foo not valid
+ for each row execute procedure foo ();
+create constraint trigger crtr
+ after insert on foo no inherit
+ for each row execute procedure foo ();
+create constraint trigger crtr
+ after insert on foo not enforced
+ for each row execute procedure foo ();
+
+--
-- Constraint triggers and partitioned tables
create table parted_constr_ancestor (a int, b text)
partition by range (b);
@@ -1591,7 +1604,7 @@ create constraint trigger parted_trig after insert on parted_constr_ancestor
deferrable
for each row execute procedure trigger_notice_ab();
create constraint trigger parted_trig_two after insert on parted_constr
- deferrable initially deferred
+ deferrable initially deferred enforced
for each row when (bark(new.b) AND new.a % 2 = 1)
execute procedure trigger_notice_ab();
diff --git a/src/test/regress/sql/type_sanity.sql b/src/test/regress/sql/type_sanity.sql
index c94dd83d306..df795759bb4 100644
--- a/src/test/regress/sql/type_sanity.sql
+++ b/src/test/regress/sql/type_sanity.sql
@@ -539,6 +539,7 @@ CREATE TABLE tab_core_types AS SELECT
'regtype'::regtype type,
'pg_monitor'::regrole,
'pg_class'::regclass::oid,
+ 'template1'::regdatabase,
'(1,1)'::tid, '2'::xid, '3'::cid,
'10:20:10,14,15'::txid_snapshot,
'10:20:10,14,15'::pg_snapshot,
diff --git a/src/test/ssl/meson.build b/src/test/ssl/meson.build
index cf8b2b9303a..d8e0fb518e0 100644
--- a/src/test/ssl/meson.build
+++ b/src/test/ssl/meson.build
@@ -7,7 +7,7 @@ tests += {
'tap': {
'env': {
'with_ssl': ssl_library,
- 'OPENSSL': openssl.found() ? openssl.path() : '',
+ 'OPENSSL': openssl.found() ? openssl.full_path() : '',
},
'tests': [
't/001_ssltests.pl',
diff --git a/src/test/ssl/t/001_ssltests.pl b/src/test/ssl/t/001_ssltests.pl
index 2cb4d0ffd41..b2eb18d3e81 100644
--- a/src/test/ssl/t/001_ssltests.pl
+++ b/src/test/ssl/t/001_ssltests.pl
@@ -173,6 +173,13 @@ SKIP:
ok( (@status = stat("$tempdir/key.txt")),
"keylog file exists and returned status");
ok(@status && !($status[2] & 0006), "keylog file is not world readable");
+
+ # Connect should work with an incorrect sslkeylogfile; the failure to
+ # open the logfile is reported on stderr
+ $node->connect_ok(
+ "$common_connstr sslrootcert=ssl/root+server_ca.crt sslkeylogfile=$tempdir/invalid/key.txt sslmode=require",
+ "connect with server root cert and incorrect sslkeylogfile path",
+ expected_stderr => qr/could not open/);
}
# The server should not accept non-SSL connections.
diff --git a/src/test/ssl/t/SSL/Server.pm b/src/test/ssl/t/SSL/Server.pm
index 96f0f201e9c..efbd0dafaf6 100644
--- a/src/test/ssl/t/SSL/Server.pm
+++ b/src/test/ssl/t/SSL/Server.pm
@@ -318,7 +318,8 @@ sub switch_server_cert
$node->append_conf('sslconfig.conf', "ssl=on");
$node->append_conf('sslconfig.conf', $backend->set_server_cert(\%params));
# use lists of ECDH curves and cipher suites for syntax testing
- $node->append_conf('sslconfig.conf', 'ssl_groups=X25519:prime256v1:secp521r1');
+ $node->append_conf('sslconfig.conf',
+ 'ssl_groups=X25519:prime256v1:secp521r1');
$node->append_conf('sslconfig.conf',
'ssl_tls13_ciphers=TLS_AES_256_GCM_SHA384:TLS_AES_128_GCM_SHA256');
diff --git a/src/test/subscription/t/007_ddl.pl b/src/test/subscription/t/007_ddl.pl
index 7d12bcbddb6..2a45fb13739 100644
--- a/src/test/subscription/t/007_ddl.pl
+++ b/src/test/subscription/t/007_ddl.pl
@@ -70,7 +70,8 @@ ok( $stderr =~
);
# Cleanup
-$node_publisher->safe_psql('postgres', qq[
+$node_publisher->safe_psql(
+ 'postgres', qq[
DROP PUBLICATION mypub;
SELECT pg_drop_replication_slot('mysub');
]);
@@ -86,32 +87,38 @@ sub test_swap
my ($table_name, $pubname, $appname) = @_;
# Confirms tuples can be replicated
- $node_publisher->safe_psql('postgres', "INSERT INTO $table_name VALUES (1);");
+ $node_publisher->safe_psql('postgres',
+ "INSERT INTO $table_name VALUES (1);");
$node_publisher->wait_for_catchup($appname);
my $result =
- $node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name");
- is($result, qq(1), 'check replication worked well before renaming a publication');
+ $node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name");
+ is($result, qq(1),
+ 'check replication worked well before renaming a publication');
# Swap the names of the publications; $pubname <-> pub_empty
- $node_publisher->safe_psql('postgres', qq[
+ $node_publisher->safe_psql(
+ 'postgres', qq[
ALTER PUBLICATION $pubname RENAME TO tap_pub_tmp;
ALTER PUBLICATION pub_empty RENAME TO $pubname;
ALTER PUBLICATION tap_pub_tmp RENAME TO pub_empty;
]);
# Insert the data again
- $node_publisher->safe_psql('postgres', "INSERT INTO $table_name VALUES (2);");
+ $node_publisher->safe_psql('postgres',
+ "INSERT INTO $table_name VALUES (2);");
$node_publisher->wait_for_catchup($appname);
# Confirms the second tuple won't be replicated because $pubname no longer
# contains relations.
$result =
- $node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name ORDER BY a");
+ $node_subscriber->safe_psql('postgres',
+ "SELECT a FROM $table_name ORDER BY a");
is($result, qq(1),
'check the tuple inserted after the RENAME was not replicated');
# Restore the publication names because this routine can be called several times
- $node_publisher->safe_psql('postgres', qq[
+ $node_publisher->safe_psql(
+ 'postgres', qq[
ALTER PUBLICATION $pubname RENAME TO tap_pub_tmp;
ALTER PUBLICATION pub_empty RENAME TO $pubname;
ALTER PUBLICATION tap_pub_tmp RENAME TO pub_empty;
@@ -124,7 +131,8 @@ $node_publisher->safe_psql('postgres', $ddl);
$node_subscriber->safe_psql('postgres', $ddl);
# Create publications and a subscription
-$node_publisher->safe_psql('postgres', qq[
+$node_publisher->safe_psql(
+ 'postgres', qq[
CREATE PUBLICATION pub_empty;
CREATE PUBLICATION pub_for_tab FOR TABLE test1;
CREATE PUBLICATION pub_for_all_tables FOR ALL TABLES;
@@ -139,19 +147,20 @@ test_swap('test1', 'pub_for_tab', 'tap_sub');
# Switches a publication which includes all tables
$node_subscriber->safe_psql('postgres',
- "ALTER SUBSCRIPTION tap_sub SET PUBLICATION pub_for_all_tables;"
-);
+ "ALTER SUBSCRIPTION tap_sub SET PUBLICATION pub_for_all_tables;");
$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub');
# Confirms RENAME command works well for ALL TABLES publication
test_swap('test2', 'pub_for_all_tables', 'tap_sub');
# Cleanup
-$node_publisher->safe_psql('postgres', qq[
+$node_publisher->safe_psql(
+ 'postgres', qq[
DROP PUBLICATION pub_empty, pub_for_tab, pub_for_all_tables;
DROP TABLE test1, test2;
]);
-$node_subscriber->safe_psql('postgres', qq[
+$node_subscriber->safe_psql(
+ 'postgres', qq[
DROP SUBSCRIPTION tap_sub;
DROP TABLE test1, test2;
]);
diff --git a/src/test/subscription/t/013_partition.pl b/src/test/subscription/t/013_partition.pl
index 61b0cb4aa1a..4f78dd48815 100644
--- a/src/test/subscription/t/013_partition.pl
+++ b/src/test/subscription/t/013_partition.pl
@@ -51,8 +51,7 @@ $node_subscriber1->safe_psql('postgres',
);
# make a BRIN index to test the aminsertcleanup logic in the subscriber
$node_subscriber1->safe_psql('postgres',
- "CREATE INDEX tab1_c_brin_idx ON tab1 USING brin (c)"
-);
+ "CREATE INDEX tab1_c_brin_idx ON tab1 USING brin (c)");
$node_subscriber1->safe_psql('postgres',
"CREATE TABLE tab1_1 (b text, c text DEFAULT 'sub1_tab1', a int NOT NULL)"
);
diff --git a/src/test/subscription/t/024_add_drop_pub.pl b/src/test/subscription/t/024_add_drop_pub.pl
index e995d8b3839..b396abe5599 100644
--- a/src/test/subscription/t/024_add_drop_pub.pl
+++ b/src/test/subscription/t/024_add_drop_pub.pl
@@ -108,11 +108,12 @@ $node_publisher->poll_query_until('postgres',
my $offset = -s $node_publisher->logfile;
-$node_publisher->safe_psql('postgres',"INSERT INTO tab_3 values(1)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab_3 values(1)");
# Verify that a warning is logged.
$node_publisher->wait_for_log(
- qr/WARNING: ( [A-Z0-9]+:)? skipped loading publication: tap_pub_3/, $offset);
+ qr/WARNING: ( [A-Z0-9]+:)? skipped loading publication "tap_pub_3"/,
+ $offset);
$node_publisher->safe_psql('postgres',
"CREATE PUBLICATION tap_pub_3 FOR TABLE tab_3");
@@ -128,10 +129,11 @@ $node_publisher->wait_for_catchup('tap_sub');
# Verify that the insert operation gets replicated to the subscriber after
# the publication is created.
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT * FROM tab_3");
-is($result, qq(1
-2), 'check that the incremental data is replicated after the publication is created');
+$result = $node_subscriber->safe_psql('postgres', "SELECT * FROM tab_3");
+is( $result, qq(1
+2),
+ 'check that the incremental data is replicated after the publication is created'
+);
# shutdown
$node_subscriber->stop('fast');
diff --git a/src/test/subscription/t/035_conflicts.pl b/src/test/subscription/t/035_conflicts.pl
index 2a7a8239a29..36aeb14c563 100644
--- a/src/test/subscription/t/035_conflicts.pl
+++ b/src/test/subscription/t/035_conflicts.pl
@@ -1,6 +1,6 @@
# Copyright (c) 2025, PostgreSQL Global Development Group
-# Test the conflict detection of conflict type 'multiple_unique_conflicts'.
+# Test conflicts in logical replication
use strict;
use warnings FATAL => 'all';
use PostgreSQL::Test::Cluster;
@@ -18,7 +18,7 @@ $node_publisher->start;
# Create a subscriber node
my $node_subscriber = PostgreSQL::Test::Cluster->new('subscriber');
-$node_subscriber->init;
+$node_subscriber->init(allows_streaming => 'logical');
$node_subscriber->start;
# Create a table on publisher
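Initializing the subscriber with allows_streaming => 'logical' sets wal_level = logical, which this node now needs because it also acts as a publisher in the bidirectional conflict tests added below. A quick sanity check, as a sketch:

	# The bidirectional setup below creates a publication on this node too,
	# so logical decoding must be available here.
	is($node_subscriber->safe_psql('postgres', 'SHOW wal_level;'),
		'logical', 'subscriber runs with wal_level = logical');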
@@ -26,7 +26,8 @@ $node_publisher->safe_psql('postgres',
"CREATE TABLE conf_tab (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);");
$node_publisher->safe_psql('postgres',
- "CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);");
+ "CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);"
+);
# Create same table on subscriber
$node_subscriber->safe_psql('postgres',
@@ -145,4 +146,267 @@ $node_subscriber->wait_for_log(
pass('multiple_unique_conflicts detected on a leaf partition during insert');
+###############################################################################
+# Set up bidirectional logical replication between node_A & node_B
+###############################################################################
+
+# Initialize nodes. Enable track_commit_timestamp on both nodes so that the
+# conflict is detected when attempting to update a row that was previously
+# modified by a different origin.
+
+# node_A. Increase the log_min_messages setting to DEBUG2 to help debug test
+# failures. Disable autovacuum to avoid generating xids that could affect the
+# replication slot's xmin value.
+my $node_A = $node_publisher;
+$node_A->append_conf(
+ 'postgresql.conf',
+ qq{track_commit_timestamp = on
+ autovacuum = off
+ log_min_messages = 'debug2'});
+$node_A->restart;
+
+# node_B
+my $node_B = $node_subscriber;
+$node_B->append_conf('postgresql.conf', "track_commit_timestamp = on");
+$node_B->restart;
+
+# Create table on node_A
+$node_A->safe_psql('postgres', "CREATE TABLE tab (a int PRIMARY KEY, b int)");
+
+# Create the same table on node_B
+$node_B->safe_psql('postgres', "CREATE TABLE tab (a int PRIMARY KEY, b int)");
+
+my $subname_AB = 'tap_sub_a_b';
+my $subname_BA = 'tap_sub_b_a';
+
+# Set up logical replication
+# node_A (pub) -> node_B (sub)
+my $node_A_connstr = $node_A->connstr . ' dbname=postgres';
+$node_A->safe_psql('postgres', "CREATE PUBLICATION tap_pub_A FOR TABLE tab");
+$node_B->safe_psql(
+ 'postgres', "
+ CREATE SUBSCRIPTION $subname_BA
+ CONNECTION '$node_A_connstr application_name=$subname_BA'
+ PUBLICATION tap_pub_A
+ WITH (origin = none, retain_dead_tuples = true)");
+
+# node_B (pub) -> node_A (sub)
+my $node_B_connstr = $node_B->connstr . ' dbname=postgres';
+$node_B->safe_psql('postgres', "CREATE PUBLICATION tap_pub_B FOR TABLE tab");
+$node_A->safe_psql(
+ 'postgres', "
+ CREATE SUBSCRIPTION $subname_AB
+ CONNECTION '$node_B_connstr application_name=$subname_AB'
+ PUBLICATION tap_pub_B
+ WITH (origin = none, copy_data = off)");
+
+# Wait for initial table sync to finish
+$node_A->wait_for_subscription_sync($node_B, $subname_AB);
+$node_B->wait_for_subscription_sync($node_A, $subname_BA);
+
+pass('Bidirectional replication setup is complete');
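With both subscriptions synced, a round-trip check of the bidirectional setup would look like the following sketch (the key 100 is hypothetical and not part of the test's data set):

	# Insert on node_B and confirm the row replicates to node_A via $subname_AB.
	$node_B->safe_psql('postgres', "INSERT INTO tab VALUES (100, 100);");
	$node_B->wait_for_catchup($subname_AB);
	is($node_A->safe_psql('postgres', "SELECT b FROM tab WHERE a = 100;"),
		'100', 'row inserted on node_B replicates to node_A');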
+
+# Confirm that the conflict detection slot is created on Node B and the xmin
+# value is valid.
+ok( $node_B->poll_query_until(
+ 'postgres',
+ "SELECT xmin IS NOT NULL from pg_replication_slots WHERE slot_name = 'pg_conflict_detection'"
+ ),
+ "the xmin value of slot 'pg_conflict_detection' is valid on Node B");
+
+##################################################
+# Check that the retain_dead_tuples option can be enabled only for disabled
+# subscriptions. Validate the NOTICE message during the subscription DDL, and
+# ensure the conflict detection slot is created upon enabling the
+# retain_dead_tuples option.
+##################################################
+
+# Alter retain_dead_tuples for enabled subscription
+my ($cmdret, $stdout, $stderr) = $node_A->psql('postgres',
+ "ALTER SUBSCRIPTION $subname_AB SET (retain_dead_tuples = true)");
+ok( $stderr =~
+ /ERROR: cannot set option \"retain_dead_tuples\" for enabled subscription/,
+ "altering retain_dead_tuples is not allowed for enabled subscription");
+
+# Disable the subscription
+$node_A->psql('postgres', "ALTER SUBSCRIPTION $subname_AB DISABLE;");
+
+# Wait for the apply worker to stop
+$node_A->poll_query_until('postgres',
+ "SELECT count(*) = 0 FROM pg_stat_activity WHERE backend_type = 'logical replication apply worker'"
+);
+
+# Enable retain_dead_tuples for disabled subscription
+($cmdret, $stdout, $stderr) = $node_A->psql('postgres',
+ "ALTER SUBSCRIPTION $subname_AB SET (retain_dead_tuples = true);");
+ok( $stderr =~
+ /NOTICE: deleted rows to detect conflicts would not be removed until the subscription is enabled/,
+ "altering retain_dead_tuples is allowed for disabled subscription");
+
+# Re-enable the subscription
+$node_A->safe_psql('postgres', "ALTER SUBSCRIPTION $subname_AB ENABLE;");
+
+# Confirm that the conflict detection slot is created on Node A and the xmin
+# value is valid.
+ok( $node_A->poll_query_until(
+ 'postgres',
+ "SELECT xmin IS NOT NULL from pg_replication_slots WHERE slot_name = 'pg_conflict_detection'"
+ ),
+ "the xmin value of slot 'pg_conflict_detection' is valid on Node A");
+
+##################################################
+# Check the WARNING issued when changing the origin to ANY while
+# retain_dead_tuples is enabled: the subscription might then receive changes
+# from origins other than the publisher, for which conflicts might not be
+# reliably detected.
+##################################################
+
+($cmdret, $stdout, $stderr) = $node_A->psql('postgres',
+ "ALTER SUBSCRIPTION $subname_AB SET (origin = any);");
+ok( $stderr =~
+ /WARNING: subscription "tap_sub_a_b" enabled retain_dead_tuples but might not reliably detect conflicts for changes from different origins/,
+ "warn of the possibility of receiving changes from origins other than the publisher");
+
+# Reset the origin to none
+$node_A->psql('postgres',
+ "ALTER SUBSCRIPTION $subname_AB SET (origin = none);");
+
+###############################################################################
+# Check that dead tuples on Node A cannot be removed by VACUUM until the
+# concurrent transactions on Node B have been applied and flushed on Node A.
+# Also, check that an update_deleted conflict is detected when updating a row
+# that was deleted by a different origin.
+###############################################################################
+
+# Insert two records
+$node_A->safe_psql('postgres', "INSERT INTO tab VALUES (1, 1), (2, 2);");
+$node_A->wait_for_catchup($subname_BA);
+
+my $result = $node_B->safe_psql('postgres', "SELECT * FROM tab;");
+is($result, qq(1|1
+2|2), 'check replicated insert on node B');
+
+# Disable the logical replication from node B to node A
+$node_A->safe_psql('postgres', "ALTER SUBSCRIPTION $subname_AB DISABLE");
+
+# Wait for the apply worker to stop
+$node_A->poll_query_until('postgres',
+ "SELECT count(*) = 0 FROM pg_stat_activity WHERE backend_type = 'logical replication apply worker'"
+);
+
+my $log_location = -s $node_B->logfile;
+
+$node_B->safe_psql('postgres', "UPDATE tab SET b = 3 WHERE a = 1;");
+$node_A->safe_psql('postgres', "DELETE FROM tab WHERE a = 1;");
+
+($cmdret, $stdout, $stderr) = $node_A->psql(
+ 'postgres', qq(VACUUM (verbose) public.tab;)
+);
+
+ok( $stderr =~
+ qr/1 are dead but not yet removable/,
+	'the deleted row is non-removable');
+
+# Ensure the DELETE is replayed on Node B
+$node_A->wait_for_catchup($subname_BA);
+
+# Check the conflict detected on Node B
+my $logfile = slurp_file($node_B->logfile(), $log_location);
+ok( $logfile =~
+ qr/conflict detected on relation "public.tab": conflict=delete_origin_differs.*
+.*DETAIL:.* Deleting the row that was modified locally in transaction [0-9]+ at .*
+.*Existing local tuple \(1, 3\); replica identity \(a\)=\(1\)/,
+ 'delete target row was modified in tab');
+
+$log_location = -s $node_A->logfile;
+
+$node_A->safe_psql(
+ 'postgres', "ALTER SUBSCRIPTION $subname_AB ENABLE;");
+$node_B->wait_for_catchup($subname_AB);
+
+$logfile = slurp_file($node_A->logfile(), $log_location);
+ok( $logfile =~
+ qr/conflict detected on relation "public.tab": conflict=update_deleted.*
+.*DETAIL:.* The row to be updated was deleted locally in transaction [0-9]+ at .*
+.*Remote tuple \(1, 3\); replica identity \(a\)=\(1\)/,
+ 'update target row was deleted in tab');
+
+# Remember the next transaction ID to be assigned
+my $next_xid = $node_A->safe_psql('postgres', "SELECT txid_current() + 1;");
+
+# Confirm that the xmin value is advanced to the latest nextXid. If no
+# transactions are running, the apply worker selects nextXid as the candidate
+# for the non-removable xid. See GetOldestActiveTransactionId().
+ok( $node_A->poll_query_until(
+ 'postgres',
+ "SELECT xmin = $next_xid from pg_replication_slots WHERE slot_name = 'pg_conflict_detection'"
+ ),
+ "the xmin value of slot 'pg_conflict_detection' is updated on Node A");
+
+# Confirm that the dead tuple can be removed now
+($cmdret, $stdout, $stderr) = $node_A->psql(
+ 'postgres', qq(VACUUM (verbose) public.tab;)
+);
+
+ok( $stderr =~
+ qr/1 removed, 1 remain, 0 are dead but not yet removable/,
+	'the deleted row is removed');
+
+###############################################################################
+# Ensure that the deleted tuple needed to detect an update_deleted conflict is
+# accessible via a sequential table scan.
+###############################################################################
+
+# Drop the primary key from tab on node A and set REPLICA IDENTITY to FULL to
+# force sequential scanning of the table.
+$node_A->safe_psql('postgres', "ALTER TABLE tab REPLICA IDENTITY FULL");
+$node_B->safe_psql('postgres', "ALTER TABLE tab REPLICA IDENTITY FULL");
+$node_A->safe_psql('postgres', "ALTER TABLE tab DROP CONSTRAINT tab_pkey;");
+
+# Disable the logical replication from node B to node A
+$node_A->safe_psql('postgres', "ALTER SUBSCRIPTION $subname_AB DISABLE");
+
+# Wait for the apply worker to stop
+$node_A->poll_query_until('postgres',
+ "SELECT count(*) = 0 FROM pg_stat_activity WHERE backend_type = 'logical replication apply worker'"
+);
+
+$node_B->safe_psql('postgres', "UPDATE tab SET b = 4 WHERE a = 2;");
+$node_A->safe_psql('postgres', "DELETE FROM tab WHERE a = 2;");
+
+$log_location = -s $node_A->logfile;
+
+$node_A->safe_psql(
+ 'postgres', "ALTER SUBSCRIPTION $subname_AB ENABLE;");
+$node_B->wait_for_catchup($subname_AB);
+
+$logfile = slurp_file($node_A->logfile(), $log_location);
+ok( $logfile =~
+ qr/conflict detected on relation "public.tab": conflict=update_deleted.*
+.*DETAIL:.* The row to be updated was deleted locally in transaction [0-9]+ at .*
+.*Remote tuple \(2, 4\); replica identity full \(2, 2\)/,
+ 'update target row was deleted in tab');
+
+###############################################################################
+# Check that the replication slot pg_conflict_detection is dropped after
+# removing all the subscriptions.
+###############################################################################
+
+$node_B->safe_psql(
+ 'postgres', "DROP SUBSCRIPTION $subname_BA");
+
+ok( $node_B->poll_query_until(
+ 'postgres',
+ "SELECT count(*) = 0 FROM pg_replication_slots WHERE slot_name = 'pg_conflict_detection'"
+ ),
+ "the slot 'pg_conflict_detection' has been dropped on Node B");
+
+$node_A->safe_psql(
+ 'postgres', "DROP SUBSCRIPTION $subname_AB");
+
+ok( $node_A->poll_query_until(
+ 'postgres',
+ "SELECT count(*) = 0 FROM pg_replication_slots WHERE slot_name = 'pg_conflict_detection'"
+ ),
+ "the slot 'pg_conflict_detection' has been dropped on Node A");
+
done_testing();