Diffstat (limited to 'src/bin')
-rw-r--r--  src/bin/initdb/Makefile | 2
-rw-r--r--  src/bin/initdb/t/001_initdb.pl | 3
-rw-r--r--  src/bin/pg_amcheck/t/004_verify_heapam.pl | 4
-rw-r--r--  src/bin/pg_basebackup/meson.build | 6
-rw-r--r--  src/bin/pg_basebackup/pg_basebackup.c | 6
-rw-r--r--  src/bin/pg_basebackup/pg_createsubscriber.c | 57
-rw-r--r--  src/bin/pg_basebackup/pg_receivewal.c | 10
-rw-r--r--  src/bin/pg_basebackup/pg_recvlogical.c | 34
-rw-r--r--  src/bin/pg_basebackup/receivelog.c | 6
-rw-r--r--  src/bin/pg_basebackup/streamutil.c | 4
-rw-r--r--  src/bin/pg_basebackup/t/030_pg_recvlogical.pl | 7
-rw-r--r--  src/bin/pg_basebackup/t/040_pg_createsubscriber.pl | 10
-rw-r--r--  src/bin/pg_combinebackup/backup_label.c | 2
-rw-r--r--  src/bin/pg_combinebackup/pg_combinebackup.c | 2
-rw-r--r--  src/bin/pg_combinebackup/t/010_hardlink.pl | 92
-rw-r--r--  src/bin/pg_combinebackup/write_manifest.c | 2
-rw-r--r--  src/bin/pg_controldata/pg_controldata.c | 12
-rw-r--r--  src/bin/pg_dump/common.c | 19
-rw-r--r--  src/bin/pg_dump/meson.build | 7
-rw-r--r--  src/bin/pg_dump/parallel.c | 10
-rw-r--r--  src/bin/pg_dump/pg_backup.h | 2
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.c | 39
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.h | 1
-rw-r--r--  src/bin/pg_dump/pg_backup_directory.c | 11
-rw-r--r--  src/bin/pg_dump/pg_backup_tar.c | 2
-rw-r--r--  src/bin/pg_dump/pg_dump.c | 492
-rw-r--r--  src/bin/pg_dump/pg_dump.h | 12
-rw-r--r--  src/bin/pg_dump/pg_dump_sort.c | 255
-rw-r--r--  src/bin/pg_dump/pg_dumpall.c | 240
-rw-r--r--  src/bin/pg_dump/pg_restore.c | 790
-rw-r--r--  src/bin/pg_dump/t/001_basic.pl | 19
-rw-r--r--  src/bin/pg_dump/t/002_pg_dump.pl | 146
-rw-r--r--  src/bin/pg_dump/t/006_pg_dumpall.pl | 391
-rw-r--r--  src/bin/pg_rewind/libpq_source.c | 2
-rw-r--r--  src/bin/pg_rewind/parsexlog.c | 18
-rw-r--r--  src/bin/pg_rewind/pg_rewind.c | 10
-rw-r--r--  src/bin/pg_rewind/t/RewindTest.pm | 2
-rw-r--r--  src/bin/pg_rewind/timeline.c | 2
-rw-r--r--  src/bin/pg_test_timing/pg_test_timing.c | 186
-rw-r--r--  src/bin/pg_test_timing/t/001_basic.pl | 17
-rw-r--r--  src/bin/pg_upgrade/check.c | 219
-rw-r--r--  src/bin/pg_upgrade/dump.c | 2
-rw-r--r--  src/bin/pg_upgrade/info.c | 63
-rw-r--r--  src/bin/pg_upgrade/parallel.c | 11
-rw-r--r--  src/bin/pg_upgrade/pg_upgrade.c | 60
-rw-r--r--  src/bin/pg_upgrade/pg_upgrade.h | 12
-rw-r--r--  src/bin/pg_upgrade/relfilenumber.c | 69
-rw-r--r--  src/bin/pg_upgrade/server.c | 18
-rw-r--r--  src/bin/pg_upgrade/t/004_subscription.pl | 91
-rw-r--r--  src/bin/pg_upgrade/t/005_char_signedness.pl | 2
-rw-r--r--  src/bin/pg_upgrade/t/006_transfer_modes.pl | 63
-rw-r--r--  src/bin/pg_upgrade/tablespace.c | 65
-rw-r--r--  src/bin/pg_upgrade/task.c | 5
-rw-r--r--  src/bin/pg_verifybackup/meson.build | 8
-rw-r--r--  src/bin/pg_verifybackup/pg_verifybackup.c | 2
-rw-r--r--  src/bin/pg_verifybackup/t/008_untar.pl | 22
-rw-r--r--  src/bin/pg_verifybackup/t/010_client_untar.pl | 22
-rw-r--r--  src/bin/pg_waldump/pg_waldump.c | 18
-rw-r--r--  src/bin/pg_walsummary/t/002_blocks.pl | 9
-rw-r--r--  src/bin/pgbench/t/002_pgbench_no_server.pl | 18
-rw-r--r--  src/bin/psql/command.c | 53
-rw-r--r--  src/bin/psql/common.c | 64
-rw-r--r--  src/bin/psql/common.h | 1
-rw-r--r--  src/bin/psql/describe.c | 15
-rw-r--r--  src/bin/psql/help.c | 16
-rw-r--r--  src/bin/psql/prompt.c | 8
-rw-r--r--  src/bin/psql/t/001_basic.pl | 38
-rw-r--r--  src/bin/psql/tab-complete.in.c | 138
-rw-r--r--  src/bin/psql/variables.c | 10
-rw-r--r--  src/bin/scripts/t/100_vacuumdb.pl | 79
70 files changed, 2145 insertions, 1988 deletions
diff --git a/src/bin/initdb/Makefile b/src/bin/initdb/Makefile
index 997e0a013e9..c0470efda92 100644
--- a/src/bin/initdb/Makefile
+++ b/src/bin/initdb/Makefile
@@ -20,7 +20,7 @@ include $(top_builddir)/src/Makefile.global
# from libpq, else we have risks of version skew if we run with a libpq
# shared library from a different PG version. Define
# USE_PRIVATE_ENCODING_FUNCS to ensure that that happens.
-override CPPFLAGS := -DUSE_PRIVATE_ENCODING_FUNCS -I$(libpq_srcdir) -I$(top_srcdir)/src/timezone $(ICU_CFLAGS) $(CPPFLAGS)
+override CPPFLAGS := -DUSE_PRIVATE_ENCODING_FUNCS -I$(libpq_srcdir) -I$(top_srcdir)/src/timezone $(CPPFLAGS) $(ICU_CFLAGS)
# We need libpq only because fe_utils does.
LDFLAGS_INTERNAL += -L$(top_builddir)/src/fe_utils -lpgfeutils $(libpq_pgport) $(ICU_LIBS)
diff --git a/src/bin/initdb/t/001_initdb.pl b/src/bin/initdb/t/001_initdb.pl
index 15dd10ce40a..b7ef7ed8d06 100644
--- a/src/bin/initdb/t/001_initdb.pl
+++ b/src/bin/initdb/t/001_initdb.pl
@@ -76,7 +76,8 @@ command_like(
'checksums are enabled in control file');
command_ok([ 'initdb', '--sync-only', $datadir ], 'sync only');
-command_ok([ 'initdb', '--sync-only', '--no-sync-data-files', $datadir ], '--no-sync-data-files');
+command_ok([ 'initdb', '--sync-only', '--no-sync-data-files', $datadir ],
+ '--no-sync-data-files');
command_fails([ 'initdb', $datadir ], 'existing data directory');
if ($supports_syncfs)
diff --git a/src/bin/pg_amcheck/t/004_verify_heapam.pl b/src/bin/pg_amcheck/t/004_verify_heapam.pl
index 2a3af2666f5..72693660fb6 100644
--- a/src/bin/pg_amcheck/t/004_verify_heapam.pl
+++ b/src/bin/pg_amcheck/t/004_verify_heapam.pl
@@ -529,7 +529,7 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
$tup->{t_infomask2} |= HEAP_NATTS_MASK;
push @expected,
- qr/${$header}number of attributes 2047 exceeds maximum expected for table 3/;
+ qr/${$header}number of attributes 2047 exceeds maximum 3 expected for table/;
}
elsif ($offnum == 10)
{
@@ -552,7 +552,7 @@ for (my $tupidx = 0; $tupidx < $ROWCOUNT; $tupidx++)
$tup->{t_hoff} = 32;
push @expected,
- qr/${$header}number of attributes 67 exceeds maximum expected for table 3/;
+ qr/${$header}number of attributes 67 exceeds maximum 3 expected for table/;
}
elsif ($offnum == 12)
{
diff --git a/src/bin/pg_basebackup/meson.build b/src/bin/pg_basebackup/meson.build
index 8a1c96b4f5c..3a7fc10eab0 100644
--- a/src/bin/pg_basebackup/meson.build
+++ b/src/bin/pg_basebackup/meson.build
@@ -93,9 +93,9 @@ tests += {
'sd': meson.current_source_dir(),
'bd': meson.current_build_dir(),
'tap': {
- 'env': {'GZIP_PROGRAM': gzip.found() ? gzip.path() : '',
- 'TAR': tar.found() ? tar.path() : '',
- 'LZ4': program_lz4.found() ? program_lz4.path() : '',
+ 'env': {'GZIP_PROGRAM': gzip.found() ? gzip.full_path() : '',
+ 'TAR': tar.found() ? tar.full_path() : '',
+ 'LZ4': program_lz4.found() ? program_lz4.full_path() : '',
},
'tests': [
't/010_pg_basebackup.pl',
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index eb7354200bc..55621f35fb6 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -487,7 +487,7 @@ reached_end_position(XLogRecPtr segendpos, uint32 timeline,
if (r < 0)
pg_fatal("could not read from ready pipe: %m");
- if (sscanf(xlogend, "%X/%X", &hi, &lo) != 2)
+ if (sscanf(xlogend, "%X/%08X", &hi, &lo) != 2)
pg_fatal("could not parse write-ahead log location \"%s\"",
xlogend);
xlogendptr = ((uint64) hi) << 32 | lo;
@@ -629,7 +629,7 @@ StartLogStreamer(char *startpos, uint32 timeline, char *sysidentifier,
param->wal_compress_level = wal_compress_level;
/* Convert the starting position */
- if (sscanf(startpos, "%X/%X", &hi, &lo) != 2)
+ if (sscanf(startpos, "%X/%08X", &hi, &lo) != 2)
pg_fatal("could not parse write-ahead log location \"%s\"",
startpos);
param->startptr = ((uint64) hi) << 32 | lo;
@@ -2255,7 +2255,7 @@ BaseBackup(char *compression_algorithm, char *compression_detail,
* value directly in the variable, and then set the flag that says
* it's there.
*/
- if (sscanf(xlogend, "%X/%X", &hi, &lo) != 2)
+ if (sscanf(xlogend, "%X/%08X", &hi, &lo) != 2)
pg_fatal("could not parse write-ahead log location \"%s\"",
xlogend);
xlogendptr = ((uint64) hi) << 32 | lo;
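
[Note: the hunks above change every WAL-location format from "%X/%X" to "%X/%08X", so the low half is always printed as 8 zero-padded hex digits, while in sscanf the 8 acts only as a maximum field width; one string serves both directions. A minimal standalone sketch of the round trip (ordinary C, not PostgreSQL source):]

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint64_t	lsn = ((uint64_t) 0x1 << 32) | 0x2AB0;	/* "1/00002AB0" */
	unsigned int hi;
	unsigned int lo;
	char		buf[32];

	/* printf side: zero-pad the low half to 8 hex digits */
	snprintf(buf, sizeof(buf), "%X/%08X",
			 (unsigned int) (lsn >> 32), (unsigned int) lsn);

	/* sscanf side: the width caps the field at 8 hex digits */
	if (sscanf(buf, "%X/%08X", &hi, &lo) != 2)
		return 1;

	return (((uint64_t) hi << 32) | lo) == lsn ? 0 : 1;
}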
diff --git a/src/bin/pg_basebackup/pg_createsubscriber.c b/src/bin/pg_basebackup/pg_createsubscriber.c
index f65acc7cb11..3986882f042 100644
--- a/src/bin/pg_basebackup/pg_createsubscriber.c
+++ b/src/bin/pg_basebackup/pg_createsubscriber.c
@@ -46,7 +46,7 @@ struct CreateSubscriberOptions
SimpleStringList replslot_names; /* list of replication slot names */
int recovery_timeout; /* stop recovery after this time */
bool all_dbs; /* all option */
- SimpleStringList objecttypes_to_remove; /* list of object types to remove */
+ SimpleStringList objecttypes_to_clean; /* list of object types to cleanup */
};
/* per-database publication/subscription info */
@@ -71,8 +71,8 @@ struct LogicalRepInfos
{
struct LogicalRepInfo *dbinfo;
bool two_phase; /* enable-two-phase option */
- bits32 objecttypes_to_remove; /* flags indicating which object types
- * to remove on subscriber */
+ bits32 objecttypes_to_clean; /* flags indicating which object types
+ * to clean up on subscriber */
};
static void cleanup_objects_atexit(void);
@@ -247,19 +247,19 @@ usage(void)
printf(_(" %s [OPTION]...\n"), progname);
printf(_("\nOptions:\n"));
printf(_(" -a, --all create subscriptions for all databases except template\n"
- " databases or databases that don't allow connections\n"));
+ " databases and databases that don't allow connections\n"));
printf(_(" -d, --database=DBNAME database in which to create a subscription\n"));
printf(_(" -D, --pgdata=DATADIR location for the subscriber data directory\n"));
printf(_(" -n, --dry-run dry run, just show what would be done\n"));
printf(_(" -p, --subscriber-port=PORT subscriber port number (default %s)\n"), DEFAULT_SUB_PORT);
printf(_(" -P, --publisher-server=CONNSTR publisher connection string\n"));
- printf(_(" -R, --remove=OBJECTTYPE remove all objects of the specified type from specified\n"
- " databases on the subscriber; accepts: publications\n"));
printf(_(" -s, --socketdir=DIR socket directory to use (default current dir.)\n"));
printf(_(" -t, --recovery-timeout=SECS seconds to wait for recovery to end\n"));
printf(_(" -T, --enable-two-phase enable two-phase commit for all subscriptions\n"));
printf(_(" -U, --subscriber-username=NAME user name for subscriber connection\n"));
printf(_(" -v, --verbose output verbose messages\n"));
+ printf(_(" --clean=OBJECTTYPE drop all objects of the specified type from specified\n"
+ " databases on the subscriber; accepts: \"%s\"\n"), "publications");
printf(_(" --config-file=FILENAME use specified main server configuration\n"
" file when running target cluster\n"));
printf(_(" --publication=NAME publication name\n"));
@@ -973,7 +973,7 @@ check_publisher(const struct LogicalRepInfo *dbinfo)
pg_log_warning("two_phase option will not be enabled for replication slots");
pg_log_warning_detail("Subscriptions will be created with the two_phase option disabled. "
"Prepared transactions will be replicated at COMMIT PREPARED.");
- pg_log_warning_hint("You can use --enable-two-phase switch to enable two_phase.");
+ pg_log_warning_hint("You can use the command-line option --enable-two-phase to enable two_phase.");
}
/*
@@ -1250,8 +1250,17 @@ setup_recovery(const struct LogicalRepInfo *dbinfo, const char *datadir, const c
appendPQExpBufferStr(recoveryconfcontents, "recovery_target = ''\n");
appendPQExpBufferStr(recoveryconfcontents,
"recovery_target_timeline = 'latest'\n");
+
+ /*
+ * Set recovery_target_inclusive = false to avoid reapplying the
+ * transaction committed at 'lsn' after subscription is enabled. This is
+ * because the provided 'lsn' is also used as the replication start point
+ * for the subscription. So, the server can send the transaction committed
+ * at that 'lsn' after replication is started which can lead to applying
+ * the same transaction twice if we keep recovery_target_inclusive = true.
+ */
appendPQExpBufferStr(recoveryconfcontents,
- "recovery_target_inclusive = true\n");
+ "recovery_target_inclusive = false\n");
appendPQExpBufferStr(recoveryconfcontents,
"recovery_target_action = promote\n");
appendPQExpBufferStr(recoveryconfcontents, "recovery_target_name = ''\n");
@@ -1262,7 +1271,7 @@ setup_recovery(const struct LogicalRepInfo *dbinfo, const char *datadir, const c
{
appendPQExpBufferStr(recoveryconfcontents, "# dry run mode");
appendPQExpBuffer(recoveryconfcontents,
- "recovery_target_lsn = '%X/%X'\n",
+ "recovery_target_lsn = '%X/%08X'\n",
LSN_FORMAT_ARGS((XLogRecPtr) InvalidXLogRecPtr));
}
else
@@ -1730,7 +1739,7 @@ static void
check_and_drop_publications(PGconn *conn, struct LogicalRepInfo *dbinfo)
{
PGresult *res;
- bool drop_all_pubs = dbinfos.objecttypes_to_remove & OBJECTTYPE_PUBLICATIONS;
+ bool drop_all_pubs = dbinfos.objecttypes_to_clean & OBJECTTYPE_PUBLICATIONS;
Assert(conn != NULL);
@@ -1876,7 +1885,7 @@ set_replication_progress(PGconn *conn, const struct LogicalRepInfo *dbinfo, cons
if (dry_run)
{
suboid = InvalidOid;
- lsnstr = psprintf("%X/%X", LSN_FORMAT_ARGS((XLogRecPtr) InvalidXLogRecPtr));
+ lsnstr = psprintf("%X/%08X", LSN_FORMAT_ARGS((XLogRecPtr) InvalidXLogRecPtr));
}
else
{
@@ -2026,7 +2035,6 @@ main(int argc, char **argv)
{"dry-run", no_argument, NULL, 'n'},
{"subscriber-port", required_argument, NULL, 'p'},
{"publisher-server", required_argument, NULL, 'P'},
- {"remove", required_argument, NULL, 'R'},
{"socketdir", required_argument, NULL, 's'},
{"recovery-timeout", required_argument, NULL, 't'},
{"enable-two-phase", no_argument, NULL, 'T'},
@@ -2038,6 +2046,7 @@ main(int argc, char **argv)
{"publication", required_argument, NULL, 2},
{"replication-slot", required_argument, NULL, 3},
{"subscription", required_argument, NULL, 4},
+ {"clean", required_argument, NULL, 5},
{NULL, 0, NULL, 0}
};
@@ -2109,7 +2118,7 @@ main(int argc, char **argv)
get_restricted_token();
- while ((c = getopt_long(argc, argv, "ad:D:np:P:R:s:t:TU:v",
+ while ((c = getopt_long(argc, argv, "ad:D:np:P:s:t:TU:v",
long_options, &option_index)) != -1)
{
switch (c)
@@ -2139,12 +2148,6 @@ main(int argc, char **argv)
case 'P':
opt.pub_conninfo_str = pg_strdup(optarg);
break;
- case 'R':
- if (!simple_string_list_member(&opt.objecttypes_to_remove, optarg))
- simple_string_list_append(&opt.objecttypes_to_remove, optarg);
- else
- pg_fatal("object type \"%s\" is specified more than once for -R/--remove", optarg);
- break;
case 's':
opt.socket_dir = pg_strdup(optarg);
canonicalize_path(opt.socket_dir);
@@ -2191,6 +2194,12 @@ main(int argc, char **argv)
else
pg_fatal("subscription \"%s\" specified more than once for --subscription", optarg);
break;
+ case 5:
+ if (!simple_string_list_member(&opt.objecttypes_to_clean, optarg))
+ simple_string_list_append(&opt.objecttypes_to_clean, optarg);
+ else
+ pg_fatal("object type \"%s\" specified more than once for --clean", optarg);
+ break;
default:
/* getopt_long already emitted a complaint */
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
@@ -2214,7 +2223,7 @@ main(int argc, char **argv)
if (bad_switch)
{
- pg_log_error("%s cannot be used with -a/--all", bad_switch);
+ pg_log_error("options %s and -a/--all cannot be used together", bad_switch);
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
@@ -2334,14 +2343,14 @@ main(int argc, char **argv)
}
/* Verify the object types specified for removal from the subscriber */
- for (SimpleStringListCell *cell = opt.objecttypes_to_remove.head; cell; cell = cell->next)
+ for (SimpleStringListCell *cell = opt.objecttypes_to_clean.head; cell; cell = cell->next)
{
if (pg_strcasecmp(cell->val, "publications") == 0)
- dbinfos.objecttypes_to_remove |= OBJECTTYPE_PUBLICATIONS;
+ dbinfos.objecttypes_to_clean |= OBJECTTYPE_PUBLICATIONS;
else
{
- pg_log_error("invalid object type \"%s\" specified for -R/--remove", cell->val);
- pg_log_error_hint("The valid option is: \"publications\"");
+ pg_log_error("invalid object type \"%s\" specified for --clean", cell->val);
+ pg_log_error_hint("The valid value is: \"%s\"", "publications");
exit(1);
}
}
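
[Note: the new --clean option above has no single-letter form; its struct option entry carries the small integer tag 5, which getopt_long() returns in place of a character. A self-contained sketch of that long-only-option pattern, with illustrative names rather than pg_createsubscriber code:]

#include <getopt.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
	static struct option long_options[] = {
		{"clean", required_argument, NULL, 5},	/* long-only: val = 5 */
		{NULL, 0, NULL, 0}
	};
	int			c;

	while ((c = getopt_long(argc, argv, "", long_options, NULL)) != -1)
	{
		if (c == 5)				/* --clean=OBJECTTYPE */
			printf("clean: %s\n", optarg);
		else
			return 1;			/* getopt_long already complained */
	}
	return 0;
}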
diff --git a/src/bin/pg_basebackup/pg_receivewal.c b/src/bin/pg_basebackup/pg_receivewal.c
index e816cf58101..289ca14dcfe 100644
--- a/src/bin/pg_basebackup/pg_receivewal.c
+++ b/src/bin/pg_basebackup/pg_receivewal.c
@@ -188,14 +188,14 @@ stop_streaming(XLogRecPtr xlogpos, uint32 timeline, bool segment_finished)
/* we assume that we get called once at the end of each segment */
if (verbose && segment_finished)
- pg_log_info("finished segment at %X/%X (timeline %u)",
+ pg_log_info("finished segment at %X/%08X (timeline %u)",
LSN_FORMAT_ARGS(xlogpos),
timeline);
if (!XLogRecPtrIsInvalid(endpos) && endpos < xlogpos)
{
if (verbose)
- pg_log_info("stopped log streaming at %X/%X (timeline %u)",
+ pg_log_info("stopped log streaming at %X/%08X (timeline %u)",
LSN_FORMAT_ARGS(xlogpos),
timeline);
time_to_stop = true;
@@ -211,7 +211,7 @@ stop_streaming(XLogRecPtr xlogpos, uint32 timeline, bool segment_finished)
* timeline, but it's close enough for reporting purposes.
*/
if (verbose && prevtimeline != 0 && prevtimeline != timeline)
- pg_log_info("switched to timeline %u at %X/%X",
+ pg_log_info("switched to timeline %u at %X/%08X",
timeline,
LSN_FORMAT_ARGS(prevpos));
@@ -575,7 +575,7 @@ StreamLog(void)
* Start the replication
*/
if (verbose)
- pg_log_info("starting log streaming at %X/%X (timeline %u)",
+ pg_log_info("starting log streaming at %X/%08X (timeline %u)",
LSN_FORMAT_ARGS(stream.startpos),
stream.timeline);
@@ -689,7 +689,7 @@ main(int argc, char **argv)
basedir = pg_strdup(optarg);
break;
case 'E':
- if (sscanf(optarg, "%X/%X", &hi, &lo) != 2)
+ if (sscanf(optarg, "%X/%08X", &hi, &lo) != 2)
pg_fatal("could not parse end position \"%s\"", optarg);
endpos = ((uint64) hi) << 32 | lo;
break;
diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c
index e6810efe5f0..8a5dd24e6c9 100644
--- a/src/bin/pg_basebackup/pg_recvlogical.c
+++ b/src/bin/pg_basebackup/pg_recvlogical.c
@@ -41,8 +41,8 @@ typedef enum
/* Global Options */
static char *outfile = NULL;
static int verbose = 0;
-static bool two_phase = false;
-static bool failover = false;
+static bool two_phase = false; /* enable-two-phase option */
+static bool failover = false; /* enable-failover option */
static int noloop = 0;
static int standby_message_timeout = 10 * 1000; /* 10 sec = default */
static int fsync_interval = 10 * 1000; /* 10 sec = default */
@@ -89,9 +89,9 @@ usage(void)
printf(_(" --drop-slot drop the replication slot (for the slot's name see --slot)\n"));
printf(_(" --start start streaming in a replication slot (for the slot's name see --slot)\n"));
printf(_("\nOptions:\n"));
+ printf(_(" --enable-failover enable replication slot synchronization to standby servers when\n"
+ " creating a replication slot\n"));
printf(_(" -E, --endpos=LSN exit after receiving the specified LSN\n"));
- printf(_(" --failover enable replication slot synchronization to standby servers when\n"
- " creating a slot\n"));
printf(_(" -f, --file=FILE receive log into this file, - for stdout\n"));
printf(_(" -F --fsync-interval=SECS\n"
" time between fsyncs to the output file (default: %d)\n"), (fsync_interval / 1000));
@@ -105,7 +105,8 @@ usage(void)
printf(_(" -s, --status-interval=SECS\n"
" time between status packets sent to server (default: %d)\n"), (standby_message_timeout / 1000));
printf(_(" -S, --slot=SLOTNAME name of the logical replication slot\n"));
- printf(_(" -t, --two-phase enable decoding of prepared transactions when creating a slot\n"));
+ printf(_(" -t, --enable-two-phase enable decoding of prepared transactions when creating a slot\n"));
+ printf(_(" --two-phase (same as --enable-two-phase, deprecated)\n"));
printf(_(" -v, --verbose output verbose messages\n"));
printf(_(" -V, --version output version information, then exit\n"));
printf(_(" -?, --help show this help, then exit\n"));
@@ -143,7 +144,7 @@ sendFeedback(PGconn *conn, TimestampTz now, bool force, bool replyRequested)
return true;
if (verbose)
- pg_log_info("confirming write up to %X/%X, flush to %X/%X (slot %s)",
+ pg_log_info("confirming write up to %X/%08X, flush to %X/%08X (slot %s)",
LSN_FORMAT_ARGS(output_written_lsn),
LSN_FORMAT_ARGS(output_fsync_lsn),
replication_slot);
@@ -237,13 +238,13 @@ StreamLogicalLog(void)
* Start the replication
*/
if (verbose)
- pg_log_info("starting log streaming at %X/%X (slot %s)",
+ pg_log_info("starting log streaming at %X/%08X (slot %s)",
LSN_FORMAT_ARGS(startpos),
replication_slot);
/* Initiate the replication stream at specified location */
query = createPQExpBuffer();
- appendPQExpBuffer(query, "START_REPLICATION SLOT \"%s\" LOGICAL %X/%X",
+ appendPQExpBuffer(query, "START_REPLICATION SLOT \"%s\" LOGICAL %X/%08X",
replication_slot, LSN_FORMAT_ARGS(startpos));
/* print options if there are any */
@@ -698,9 +699,10 @@ main(int argc, char **argv)
{"file", required_argument, NULL, 'f'},
{"fsync-interval", required_argument, NULL, 'F'},
{"no-loop", no_argument, NULL, 'n'},
- {"failover", no_argument, NULL, 5},
+ {"enable-failover", no_argument, NULL, 5},
+ {"enable-two-phase", no_argument, NULL, 't'},
+ {"two-phase", no_argument, NULL, 't'}, /* deprecated */
{"verbose", no_argument, NULL, 'v'},
- {"two-phase", no_argument, NULL, 't'},
{"version", no_argument, NULL, 'V'},
{"help", no_argument, NULL, '?'},
/* connection options */
@@ -798,12 +800,12 @@ main(int argc, char **argv)
break;
/* replication options */
case 'I':
- if (sscanf(optarg, "%X/%X", &hi, &lo) != 2)
+ if (sscanf(optarg, "%X/%08X", &hi, &lo) != 2)
pg_fatal("could not parse start position \"%s\"", optarg);
startpos = ((uint64) hi) << 32 | lo;
break;
case 'E':
- if (sscanf(optarg, "%X/%X", &hi, &lo) != 2)
+ if (sscanf(optarg, "%X/%08X", &hi, &lo) != 2)
pg_fatal("could not parse end position \"%s\"", optarg);
endpos = ((uint64) hi) << 32 | lo;
break;
@@ -928,14 +930,14 @@ main(int argc, char **argv)
{
if (two_phase)
{
- pg_log_error("--two-phase may only be specified with --create-slot");
+ pg_log_error("%s may only be specified with --create-slot", "--enable-two-phase");
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (failover)
{
- pg_log_error("--failover may only be specified with --create-slot");
+ pg_log_error("%s may only be specified with --create-slot", "--enable-failover");
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
@@ -1073,12 +1075,12 @@ prepareToTerminate(PGconn *conn, XLogRecPtr endpos, StreamStopReason reason,
pg_log_info("received interrupt signal, exiting");
break;
case STREAM_STOP_KEEPALIVE:
- pg_log_info("end position %X/%X reached by keepalive",
+ pg_log_info("end position %X/%08X reached by keepalive",
LSN_FORMAT_ARGS(endpos));
break;
case STREAM_STOP_END_OF_WAL:
Assert(!XLogRecPtrIsInvalid(lsn));
- pg_log_info("end position %X/%X reached by WAL record at %X/%X",
+ pg_log_info("end position %X/%08X reached by WAL record at %X/%08X",
LSN_FORMAT_ARGS(endpos), LSN_FORMAT_ARGS(lsn));
break;
case STREAM_STOP_NONE:
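
[Note: the renamed switches above stay backward compatible because both "--enable-two-phase" and the deprecated "--two-phase" map to the same 't' value, so one switch case serves both spellings. A hedged sketch of that alias pattern, not the actual pg_recvlogical option table:]

#include <getopt.h>
#include <stdio.h>

int
main(int argc, char **argv)
{
	static struct option long_options[] = {
		{"enable-two-phase", no_argument, NULL, 't'},
		{"two-phase", no_argument, NULL, 't'},	/* deprecated alias */
		{NULL, 0, NULL, 0}
	};
	int			c;
	int			two_phase = 0;

	while ((c = getopt_long(argc, argv, "t", long_options, NULL)) != -1)
	{
		if (c == 't')			/* either spelling lands here */
			two_phase = 1;
		else
			return 1;
	}
	printf("two_phase = %d\n", two_phase);
	return 0;
}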
diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c
index 6b6e32dfbdf..d6b7f117fa3 100644
--- a/src/bin/pg_basebackup/receivelog.c
+++ b/src/bin/pg_basebackup/receivelog.c
@@ -571,7 +571,7 @@ ReceiveXlogStream(PGconn *conn, StreamCtl *stream)
return true;
/* Initiate the replication stream at specified location */
- snprintf(query, sizeof(query), "START_REPLICATION %s%X/%X TIMELINE %u",
+ snprintf(query, sizeof(query), "START_REPLICATION %s%X/%08X TIMELINE %u",
slotcmd,
LSN_FORMAT_ARGS(stream->startpos),
stream->timeline);
@@ -628,7 +628,7 @@ ReceiveXlogStream(PGconn *conn, StreamCtl *stream)
}
if (stream->startpos > stoppos)
{
- pg_log_error("server stopped streaming timeline %u at %X/%X, but reported next timeline %u to begin at %X/%X",
+ pg_log_error("server stopped streaming timeline %u at %X/%08X, but reported next timeline %u to begin at %X/%08X",
stream->timeline, LSN_FORMAT_ARGS(stoppos),
newtimeline, LSN_FORMAT_ARGS(stream->startpos));
goto error;
@@ -720,7 +720,7 @@ ReadEndOfStreamingResult(PGresult *res, XLogRecPtr *startpos, uint32 *timeline)
}
*timeline = atoi(PQgetvalue(res, 0, 0));
- if (sscanf(PQgetvalue(res, 0, 1), "%X/%X", &startpos_xlogid,
+ if (sscanf(PQgetvalue(res, 0, 1), "%X/%08X", &startpos_xlogid,
&startpos_xrecoff) != 2)
{
pg_log_error("could not parse next timeline's starting point \"%s\"",
diff --git a/src/bin/pg_basebackup/streamutil.c b/src/bin/pg_basebackup/streamutil.c
index c7b8a4c3a4b..e5a7cb6e5b1 100644
--- a/src/bin/pg_basebackup/streamutil.c
+++ b/src/bin/pg_basebackup/streamutil.c
@@ -445,7 +445,7 @@ RunIdentifySystem(PGconn *conn, char **sysid, TimeLineID *starttli,
/* Get LSN start position if necessary */
if (startpos != NULL)
{
- if (sscanf(PQgetvalue(res, 0, 2), "%X/%X", &hi, &lo) != 2)
+ if (sscanf(PQgetvalue(res, 0, 2), "%X/%08X", &hi, &lo) != 2)
{
pg_log_error("could not parse write-ahead log location \"%s\"",
PQgetvalue(res, 0, 2));
@@ -551,7 +551,7 @@ GetSlotInformation(PGconn *conn, const char *slot_name,
uint32 hi,
lo;
- if (sscanf(PQgetvalue(res, 0, 1), "%X/%X", &hi, &lo) != 2)
+ if (sscanf(PQgetvalue(res, 0, 1), "%X/%08X", &hi, &lo) != 2)
{
pg_log_error("could not parse restart_lsn \"%s\" for replication slot \"%s\"",
PQgetvalue(res, 0, 1), slot_name);
diff --git a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
index c82e78847b3..1b7a6f6f43f 100644
--- a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
+++ b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
@@ -110,7 +110,7 @@ $node->command_fails(
'--dbname' => $node->connstr('postgres'),
'--start',
'--endpos' => $nextlsn,
- '--two-phase', '--no-loop',
+ '--enable-two-phase', '--no-loop',
'--file' => '-',
],
'incorrect usage');
@@ -142,12 +142,13 @@ $node->command_ok(
'--slot' => 'test',
'--dbname' => $node->connstr('postgres'),
'--create-slot',
- '--failover',
+ '--enable-failover',
],
'slot with failover created');
my $result = $node->safe_psql('postgres',
- "SELECT failover FROM pg_catalog.pg_replication_slots WHERE slot_name = 'test'");
+ "SELECT failover FROM pg_catalog.pg_replication_slots WHERE slot_name = 'test'"
+);
is($result, 't', "failover is enabled for the new slot");
done_testing();
diff --git a/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl b/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl
index 2d532fee567..229fef5b3b5 100644
--- a/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl
+++ b/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl
@@ -331,7 +331,7 @@ $node_p->safe_psql($db1,
$node_p->wait_for_replay_catchup($node_s);
# Create user-defined publications, wait for streaming replication to sync them
-# to the standby, then verify that '--remove'
+# to the standby, then verify that '--clean'
# removes them.
$node_p->safe_psql(
$db1, qq(
@@ -399,7 +399,7 @@ command_fails_like(
'--database' => $db1,
'--all',
],
- qr/--database cannot be used with -a\/--all/,
+ qr/options --database and -a\/--all cannot be used together/,
'fail if --database is used with --all');
# run pg_createsubscriber with '--publication' and '--all' and verify
@@ -416,7 +416,7 @@ command_fails_like(
'--all',
'--publication' => 'pub1',
],
- qr/--publication cannot be used with -a\/--all/,
+ qr/options --publication and -a\/--all cannot be used together/,
'fail if --publication is used with --all');
# run pg_createsubscriber with '--all' option
@@ -446,7 +446,7 @@ is(scalar(() = $stderr =~ /creating subscription/g),
# Run pg_createsubscriber on node S. --verbose is used twice
# to show more information.
# In passing, also test the --enable-two-phase option and
-# --remove option
+# --clean option
command_ok(
[
'pg_createsubscriber',
@@ -463,7 +463,7 @@ command_ok(
'--database' => $db1,
'--database' => $db2,
'--enable-two-phase',
- '--remove' => 'publications',
+ '--clean' => 'publications',
],
'run pg_createsubscriber on node S');
diff --git a/src/bin/pg_combinebackup/backup_label.c b/src/bin/pg_combinebackup/backup_label.c
index e89d4603f09..e774bc78a62 100644
--- a/src/bin/pg_combinebackup/backup_label.c
+++ b/src/bin/pg_combinebackup/backup_label.c
@@ -247,7 +247,7 @@ parse_lsn(char *s, char *e, XLogRecPtr *lsn, char **c)
unsigned lo;
*e = '\0';
- success = (sscanf(s, "%X/%X%n", &hi, &lo, &nchars) == 2);
+ success = (sscanf(s, "%X/%08X%n", &hi, &lo, &nchars) == 2);
*e = save;
if (success)
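
[Note: the parse_lsn() hunk keeps the trailing "%n", which is worth spelling out: sscanf stores the number of characters consumed into &nchars without counting "%n" toward its return value, so the == 2 test remains correct. A small standalone illustration with an assumed input string, not pg_combinebackup code:]

#include <stdio.h>

int
main(void)
{
	const char *s = "0/15D68C8 some trailing text";
	unsigned int hi;
	unsigned int lo;
	int			nchars = 0;

	/* "%n" records how far the scan got; it is not a conversion */
	if (sscanf(s, "%X/%08X%n", &hi, &lo, &nchars) == 2)
		printf("consumed %d chars, tail: \"%s\"\n", nchars, s + nchars);
	return 0;
}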
diff --git a/src/bin/pg_combinebackup/pg_combinebackup.c b/src/bin/pg_combinebackup/pg_combinebackup.c
index 28e58cd8ef4..f5cef99f627 100644
--- a/src/bin/pg_combinebackup/pg_combinebackup.c
+++ b/src/bin/pg_combinebackup/pg_combinebackup.c
@@ -569,7 +569,7 @@ check_backup_label_files(int n_backups, char **backup_dirs)
pg_fatal("backup at \"%s\" starts on timeline %u, but expected %u",
backup_dirs[i], start_tli, check_tli);
if (i < n_backups - 1 && start_lsn != check_lsn)
- pg_fatal("backup at \"%s\" starts at LSN %X/%X, but expected %X/%X",
+ pg_fatal("backup at \"%s\" starts at LSN %X/%08X, but expected %X/%08X",
backup_dirs[i],
LSN_FORMAT_ARGS(start_lsn),
LSN_FORMAT_ARGS(check_lsn));
diff --git a/src/bin/pg_combinebackup/t/010_hardlink.pl b/src/bin/pg_combinebackup/t/010_hardlink.pl
index a0ee419090c..4f92d6676bd 100644
--- a/src/bin/pg_combinebackup/t/010_hardlink.pl
+++ b/src/bin/pg_combinebackup/t/010_hardlink.pl
@@ -56,7 +56,7 @@ $primary->command_ok(
'--pgdata' => $backup1path,
'--no-sync',
'--checkpoint' => 'fast',
- '--wal-method' => 'none'
+ '--wal-method' => 'none'
],
"full backup");
@@ -74,7 +74,7 @@ $primary->command_ok(
'--pgdata' => $backup2path,
'--no-sync',
'--checkpoint' => 'fast',
- '--wal-method' => 'none',
+ '--wal-method' => 'none',
'--incremental' => $backup1path . '/backup_manifest'
],
"incremental backup");
@@ -112,45 +112,45 @@ done_testing();
# of the given data file.
sub check_data_file
{
- my ($data_file, $last_segment_nlinks) = @_;
-
- my @data_file_segments = ($data_file);
-
- # Start checking for additional segments
- my $segment_number = 1;
-
- while (1)
- {
- my $next_segment = $data_file . '.' . $segment_number;
-
- # If the file exists and is a regular file, add it to the list
- if (-f $next_segment)
- {
- push @data_file_segments, $next_segment;
- $segment_number++;
- }
- # Stop the loop if the file doesn't exist
- else
- {
- last;
- }
- }
-
- # All segments of the given data file should contain 2 hard links, except
- # for the last one, which should match the given number of links.
- my $last_segment = pop @data_file_segments;
-
- for my $segment (@data_file_segments)
- {
- # Get the file's stat information of each segment
- my $nlink_count = get_hard_link_count($segment);
- ok($nlink_count == 2, "File '$segment' has 2 hard links");
- }
-
- # Get the file's stat information of the last segment
- my $nlink_count = get_hard_link_count($last_segment);
- ok($nlink_count == $last_segment_nlinks,
- "File '$last_segment' has $last_segment_nlinks hard link(s)");
+ my ($data_file, $last_segment_nlinks) = @_;
+
+ my @data_file_segments = ($data_file);
+
+ # Start checking for additional segments
+ my $segment_number = 1;
+
+ while (1)
+ {
+ my $next_segment = $data_file . '.' . $segment_number;
+
+ # If the file exists and is a regular file, add it to the list
+ if (-f $next_segment)
+ {
+ push @data_file_segments, $next_segment;
+ $segment_number++;
+ }
+ # Stop the loop if the file doesn't exist
+ else
+ {
+ last;
+ }
+ }
+
+ # All segments of the given data file should contain 2 hard links, except
+ # for the last one, which should match the given number of links.
+ my $last_segment = pop @data_file_segments;
+
+ for my $segment (@data_file_segments)
+ {
+ # Get the file's stat information of each segment
+ my $nlink_count = get_hard_link_count($segment);
+ ok($nlink_count == 2, "File '$segment' has 2 hard links");
+ }
+
+ # Get the file's stat information of the last segment
+ my $nlink_count = get_hard_link_count($last_segment);
+ ok($nlink_count == $last_segment_nlinks,
+ "File '$last_segment' has $last_segment_nlinks hard link(s)");
}
@@ -159,11 +159,11 @@ sub check_data_file
# that file.
sub get_hard_link_count
{
- my ($file) = @_;
+ my ($file) = @_;
- # Get file stats
- my @stats = stat($file);
- my $nlink = $stats[3]; # Number of hard links
+ # Get file stats
+ my @stats = stat($file);
+ my $nlink = $stats[3]; # Number of hard links
- return $nlink;
+ return $nlink;
}
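
[Note: the Perl helper above reads field 3 of stat(), the hard-link count. For readers tracing what the test asserts, a generic C equivalent (a sketch, not part of the patch):]

#include <sys/stat.h>
#include <stdio.h>
#include <stdint.h>

int
main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2 || stat(argv[1], &st) != 0)
		return 1;

	/* st_nlink mirrors $stats[3] in the Perl helper */
	printf("%s: %ju hard link(s)\n", argv[1], (uintmax_t) st.st_nlink);
	return 0;
}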
diff --git a/src/bin/pg_combinebackup/write_manifest.c b/src/bin/pg_combinebackup/write_manifest.c
index 313f8929df5..819a3fd0b7a 100644
--- a/src/bin/pg_combinebackup/write_manifest.c
+++ b/src/bin/pg_combinebackup/write_manifest.c
@@ -155,7 +155,7 @@ finalize_manifest(manifest_writer *mwriter,
for (wal_range = first_wal_range; wal_range != NULL;
wal_range = wal_range->next)
appendStringInfo(&mwriter->buf,
- "%s{ \"Timeline\": %u, \"Start-LSN\": \"%X/%X\", \"End-LSN\": \"%X/%X\" }",
+ "%s{ \"Timeline\": %u, \"Start-LSN\": \"%X/%08X\", \"End-LSN\": \"%X/%08X\" }",
wal_range == first_wal_range ? "" : ",\n",
wal_range->tli,
LSN_FORMAT_ARGS(wal_range->start_lsn),
diff --git a/src/bin/pg_controldata/pg_controldata.c b/src/bin/pg_controldata/pg_controldata.c
index 7bb801bb886..10de058ce91 100644
--- a/src/bin/pg_controldata/pg_controldata.c
+++ b/src/bin/pg_controldata/pg_controldata.c
@@ -245,9 +245,9 @@ main(int argc, char *argv[])
dbState(ControlFile->state));
printf(_("pg_control last modified: %s\n"),
pgctime_str);
- printf(_("Latest checkpoint location: %X/%X\n"),
+ printf(_("Latest checkpoint location: %X/%08X\n"),
LSN_FORMAT_ARGS(ControlFile->checkPoint));
- printf(_("Latest checkpoint's REDO location: %X/%X\n"),
+ printf(_("Latest checkpoint's REDO location: %X/%08X\n"),
LSN_FORMAT_ARGS(ControlFile->checkPointCopy.redo));
printf(_("Latest checkpoint's REDO WAL file: %s\n"),
xlogfilename);
@@ -282,15 +282,15 @@ main(int argc, char *argv[])
ControlFile->checkPointCopy.newestCommitTsXid);
printf(_("Time of latest checkpoint: %s\n"),
ckpttime_str);
- printf(_("Fake LSN counter for unlogged rels: %X/%X\n"),
+ printf(_("Fake LSN counter for unlogged rels: %X/%08X\n"),
LSN_FORMAT_ARGS(ControlFile->unloggedLSN));
- printf(_("Minimum recovery ending location: %X/%X\n"),
+ printf(_("Minimum recovery ending location: %X/%08X\n"),
LSN_FORMAT_ARGS(ControlFile->minRecoveryPoint));
printf(_("Min recovery ending loc's timeline: %u\n"),
ControlFile->minRecoveryPointTLI);
- printf(_("Backup start location: %X/%X\n"),
+ printf(_("Backup start location: %X/%08X\n"),
LSN_FORMAT_ARGS(ControlFile->backupStartPoint));
- printf(_("Backup end location: %X/%X\n"),
+ printf(_("Backup end location: %X/%08X\n"),
LSN_FORMAT_ARGS(ControlFile->backupEndPoint));
printf(_("End-of-backup record required: %s\n"),
ControlFile->backupEndRequired ? _("yes") : _("no"));
diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c
index aa1589e3331..a1976fae607 100644
--- a/src/bin/pg_dump/common.c
+++ b/src/bin/pg_dump/common.c
@@ -17,6 +17,7 @@
#include <ctype.h>
+#include "catalog/pg_am_d.h"
#include "catalog/pg_class_d.h"
#include "catalog/pg_collation_d.h"
#include "catalog/pg_extension_d.h"
@@ -945,6 +946,24 @@ findOprByOid(Oid oid)
}
/*
+ * findAccessMethodByOid
+ * finds the DumpableObject for the access method with the given oid
+ * returns NULL if not found
+ */
+AccessMethodInfo *
+findAccessMethodByOid(Oid oid)
+{
+ CatalogId catId;
+ DumpableObject *dobj;
+
+ catId.tableoid = AccessMethodRelationId;
+ catId.oid = oid;
+ dobj = findObjectByCatalogId(catId);
+ Assert(dobj == NULL || dobj->objType == DO_ACCESS_METHOD);
+ return (AccessMethodInfo *) dobj;
+}
+
+/*
* findCollationByOid
* finds the DumpableObject for the collation with the given oid
* returns NULL if not found
diff --git a/src/bin/pg_dump/meson.build b/src/bin/pg_dump/meson.build
index d8e9e101254..a2233b0a1b4 100644
--- a/src/bin/pg_dump/meson.build
+++ b/src/bin/pg_dump/meson.build
@@ -91,9 +91,9 @@ tests += {
'bd': meson.current_build_dir(),
'tap': {
'env': {
- 'GZIP_PROGRAM': gzip.found() ? gzip.path() : '',
- 'LZ4': program_lz4.found() ? program_lz4.path() : '',
- 'ZSTD': program_zstd.found() ? program_zstd.path() : '',
+ 'GZIP_PROGRAM': gzip.found() ? gzip.full_path() : '',
+ 'LZ4': program_lz4.found() ? program_lz4.full_path() : '',
+ 'ZSTD': program_zstd.found() ? program_zstd.full_path() : '',
'with_icu': icu.found() ? 'yes' : 'no',
},
'tests': [
@@ -102,7 +102,6 @@ tests += {
't/003_pg_dump_with_server.pl',
't/004_pg_dump_parallel.pl',
't/005_pg_dump_filterfile.pl',
- 't/006_pg_dumpall.pl',
't/010_dump_connstr.pl',
],
},
diff --git a/src/bin/pg_dump/parallel.c b/src/bin/pg_dump/parallel.c
index 5974d6706fd..086adcdc502 100644
--- a/src/bin/pg_dump/parallel.c
+++ b/src/bin/pg_dump/parallel.c
@@ -334,16 +334,6 @@ on_exit_close_archive(Archive *AHX)
}
/*
- * When pg_restore restores multiple databases, then update already added entry
- * into array for cleanup.
- */
-void
-replace_on_exit_close_archive(Archive *AHX)
-{
- shutdown_info.AHX = AHX;
-}
-
-/*
* on_exit_nicely handler for shutting down database connections and
* worker processes cleanly.
*/
diff --git a/src/bin/pg_dump/pg_backup.h b/src/bin/pg_dump/pg_backup.h
index af0007fb6d2..4ebef1e8644 100644
--- a/src/bin/pg_dump/pg_backup.h
+++ b/src/bin/pg_dump/pg_backup.h
@@ -308,7 +308,7 @@ extern void SetArchiveOptions(Archive *AH, DumpOptions *dopt, RestoreOptions *ro
extern void ProcessArchiveRestoreOptions(Archive *AHX);
-extern void RestoreArchive(Archive *AHX, bool append_data);
+extern void RestoreArchive(Archive *AHX);
/* Open an existing archive */
extern Archive *OpenArchive(const char *FileSpec, const ArchiveFormat fmt);
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index afa42337b11..dce88f040ac 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -31,6 +31,8 @@
#endif
#include "catalog/pg_class_d.h"
+#include "catalog/pg_largeobject_metadata_d.h"
+#include "catalog/pg_shdepend_d.h"
#include "common/string.h"
#include "compress_io.h"
#include "dumputils.h"
@@ -85,7 +87,7 @@ static int RestoringToDB(ArchiveHandle *AH);
static void dump_lo_buf(ArchiveHandle *AH);
static void dumpTimestamp(ArchiveHandle *AH, const char *msg, time_t tim);
static void SetOutput(ArchiveHandle *AH, const char *filename,
- const pg_compress_specification compression_spec, bool append_data);
+ const pg_compress_specification compression_spec);
static CompressFileHandle *SaveOutput(ArchiveHandle *AH);
static void RestoreOutput(ArchiveHandle *AH, CompressFileHandle *savedOutput);
@@ -152,7 +154,7 @@ InitDumpOptions(DumpOptions *opts)
opts->dumpSections = DUMP_UNSECTIONED;
opts->dumpSchema = true;
opts->dumpData = true;
- opts->dumpStatistics = true;
+ opts->dumpStatistics = false;
}
/*
@@ -337,14 +339,9 @@ ProcessArchiveRestoreOptions(Archive *AHX)
StrictNamesCheck(ropt);
}
-/*
- * RestoreArchive
- *
- * If append_data is set, then append data into file as we are restoring dump
- * of multiple databases which was taken by pg_dumpall.
- */
+/* Public */
void
-RestoreArchive(Archive *AHX, bool append_data)
+RestoreArchive(Archive *AHX)
{
ArchiveHandle *AH = (ArchiveHandle *) AHX;
RestoreOptions *ropt = AH->public.ropt;
@@ -461,7 +458,7 @@ RestoreArchive(Archive *AHX, bool append_data)
*/
sav = SaveOutput(AH);
if (ropt->filename || ropt->compression_spec.algorithm != PG_COMPRESSION_NONE)
- SetOutput(AH, ropt->filename, ropt->compression_spec, append_data);
+ SetOutput(AH, ropt->filename, ropt->compression_spec);
ahprintf(AH, "--\n-- PostgreSQL database dump\n--\n\n");
@@ -1300,7 +1297,7 @@ PrintTOCSummary(Archive *AHX)
sav = SaveOutput(AH);
if (ropt->filename)
- SetOutput(AH, ropt->filename, out_compression_spec, false);
+ SetOutput(AH, ropt->filename, out_compression_spec);
if (strftime(stamp_str, sizeof(stamp_str), PGDUMP_STRFTIME_FMT,
localtime(&AH->createDate)) == 0)
@@ -1679,8 +1676,7 @@ archprintf(Archive *AH, const char *fmt,...)
static void
SetOutput(ArchiveHandle *AH, const char *filename,
- const pg_compress_specification compression_spec,
- bool append_data)
+ const pg_compress_specification compression_spec)
{
CompressFileHandle *CFH;
const char *mode;
@@ -1700,7 +1696,7 @@ SetOutput(ArchiveHandle *AH, const char *filename,
else
fn = fileno(stdout);
- if (append_data || AH->mode == archModeAppend)
+ if (AH->mode == archModeAppend)
mode = PG_BINARY_A;
else
mode = PG_BINARY_W;
@@ -2655,7 +2651,7 @@ WriteToc(ArchiveHandle *AH)
pg_fatal("unexpected TOC entry in WriteToc(): %d %s %s",
te->dumpId, te->desc, te->tag);
- if (fseeko(AH->FH, te->defnLen, SEEK_CUR != 0))
+ if (fseeko(AH->FH, te->defnLen, SEEK_CUR) != 0)
pg_fatal("error during file seek: %m");
}
else if (te->defnDumper)
@@ -2974,6 +2970,19 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
int res = REQ_SCHEMA | REQ_DATA;
RestoreOptions *ropt = AH->public.ropt;
+ /*
+ * For binary upgrade mode, dump pg_largeobject_metadata and the
+ * associated pg_shdepend rows. This is faster to restore than the
+ * equivalent set of large object commands. We can only do this for
+ * upgrades from v12 and newer; in older versions, pg_largeobject_metadata
+ * was created WITH OIDS, so the OID column is hidden and won't be dumped.
+ */
+ if (ropt->binary_upgrade && AH->public.remoteVersion >= 120000 &&
+ strcmp(te->desc, "TABLE DATA") == 0 &&
+ (te->catalogId.oid == LargeObjectMetadataRelationId ||
+ te->catalogId.oid == SharedDependRelationId))
+ return REQ_DATA;
+
/* These items are treated specially */
if (strcmp(te->desc, "ENCODING") == 0 ||
strcmp(te->desc, "STDSTRINGS") == 0 ||
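
[Note: one hunk above is a pure parenthesis fix. The old code passed the comparison SEEK_CUR != 0, i.e. the int 1, as fseeko()'s whence argument and never tested the result; since SEEK_CUR is 1 on typical implementations the seek still worked by accident, but the error check was silently lost. A condensed sketch of the fixed form, using plain stdio rather than pg_dump code:]

#include <stdio.h>

/*
 * Buggy form:  fseek(fh, len, SEEK_CUR != 0);   -- comparison result
 * used as "whence", return value ignored.
 * Fixed form below: seek relative to the current position and check
 * fseek's return value.
 */
static int
skip_bytes(FILE *fh, long len)
{
	if (fseek(fh, len, SEEK_CUR) != 0)
		return -1;
	return 0;
}

int
main(void)
{
	FILE	   *fh = tmpfile();

	if (fh == NULL)
		return 1;
	fputs("0123456789", fh);
	rewind(fh);
	return skip_bytes(fh, 5) == 0 ? 0 : 1;
}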
diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h
index 365073b3eae..325b53fc9bd 100644
--- a/src/bin/pg_dump/pg_backup_archiver.h
+++ b/src/bin/pg_dump/pg_backup_archiver.h
@@ -394,7 +394,6 @@ struct _tocEntry
extern int parallel_restore(ArchiveHandle *AH, TocEntry *te);
extern void on_exit_close_archive(Archive *AHX);
-extern void replace_on_exit_close_archive(Archive *AHX);
extern void warn_or_exit_horribly(ArchiveHandle *AH, const char *fmt,...) pg_attribute_printf(2, 3);
diff --git a/src/bin/pg_dump/pg_backup_directory.c b/src/bin/pg_dump/pg_backup_directory.c
index 21b00792a8a..bc2a2fb4797 100644
--- a/src/bin/pg_dump/pg_backup_directory.c
+++ b/src/bin/pg_dump/pg_backup_directory.c
@@ -412,10 +412,15 @@ _LoadLOs(ArchiveHandle *AH, TocEntry *te)
/*
* Note: before archive v16, there was always only one BLOBS TOC entry,
- * now there can be multiple. We don't need to worry what version we are
- * reading though, because tctx->filename should be correct either way.
+ * now there can be multiple. Furthermore, although the actual filename
+ * was always "blobs.toc" before v16, the value of tctx->filename did not
+ * match that before commit 548e50976 fixed it. For simplicity we assume
+ * it must be "blobs.toc" in all archives before v16.
*/
- setFilePath(AH, tocfname, tctx->filename);
+ if (AH->version < K_VERS_1_16)
+ setFilePath(AH, tocfname, "blobs.toc");
+ else
+ setFilePath(AH, tocfname, tctx->filename);
CFH = ctx->LOsTocFH = InitDiscoverCompressFileHandle(tocfname, PG_BINARY_R);
diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c
index d94d0de2a5d..b5ba3b46dd9 100644
--- a/src/bin/pg_dump/pg_backup_tar.c
+++ b/src/bin/pg_dump/pg_backup_tar.c
@@ -826,7 +826,7 @@ _CloseArchive(ArchiveHandle *AH)
savVerbose = AH->public.verbose;
AH->public.verbose = 0;
- RestoreArchive((Archive *) AH, false);
+ RestoreArchive((Archive *) AH);
SetArchiveOptions((Archive *) AH, savDopt, savRopt);
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index c73e73a87d1..b1ac8d7b509 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -47,10 +47,13 @@
#include "catalog/pg_authid_d.h"
#include "catalog/pg_cast_d.h"
#include "catalog/pg_class_d.h"
+#include "catalog/pg_constraint_d.h"
#include "catalog/pg_default_acl_d.h"
#include "catalog/pg_largeobject_d.h"
+#include "catalog/pg_largeobject_metadata_d.h"
#include "catalog/pg_proc_d.h"
#include "catalog/pg_publication_d.h"
+#include "catalog/pg_shdepend_d.h"
#include "catalog/pg_subscription_d.h"
#include "catalog/pg_type_d.h"
#include "common/connect.h"
@@ -209,6 +212,12 @@ static int nbinaryUpgradeClassOids = 0;
static SequenceItem *sequences = NULL;
static int nsequences = 0;
+/*
+ * For binary upgrade, the dump ID of pg_largeobject_metadata is saved for use
+ * as a dependency for pg_shdepend and any large object comments/seclabels.
+ */
+static DumpId lo_metadata_dumpId;
+
/* Maximum number of relations to fetch in a fetchAttributeStats() call. */
#define MAX_ATTR_STATS_RELS 64
@@ -350,7 +359,9 @@ static void buildMatViewRefreshDependencies(Archive *fout);
static void getTableDataFKConstraints(void);
static void determineNotNullFlags(Archive *fout, PGresult *res, int r,
TableInfo *tbinfo, int j,
- int i_notnull_name, int i_notnull_invalidoid,
+ int i_notnull_name,
+ int i_notnull_comment,
+ int i_notnull_invalidoid,
int i_notnull_noinherit,
int i_notnull_islocal,
PQExpBuffer *invalidnotnulloids);
@@ -849,6 +860,17 @@ main(int argc, char **argv)
if (with_statistics && no_statistics)
pg_fatal("options --with-statistics and --no-statistics cannot be used together");
+ /* reject conflicting "-only" and "with-" options */
+ if (data_only && (with_schema || with_statistics))
+ pg_fatal("options %s and %s cannot be used together",
+ "-a/--data-only", with_schema ? "--with-schema" : "--with-statistics");
+ if (schema_only && (with_data || with_statistics))
+ pg_fatal("options %s and %s cannot be used together",
+ "-s/--schema-only", with_data ? "--with-data" : "--with-statistics");
+ if (statistics_only && (with_data || with_schema))
+ pg_fatal("options %s and %s cannot be used together",
+ "--statistics-only", with_data ? "--with-data" : "--with-schema");
+
if (schema_only && foreign_servers_include_patterns.head != NULL)
pg_fatal("options -s/--schema-only and --include-foreign-data cannot be used together");
@@ -862,11 +884,9 @@ main(int argc, char **argv)
pg_fatal("option --if-exists requires option -c/--clean");
/*
- * Set derivative flags. An "-only" option may be overridden by an
- * explicit "with-" option; e.g. "--schema-only --with-statistics" will
- * include schema and statistics. Other ambiguous or nonsensical
- * combinations, e.g. "--schema-only --no-schema", will have already
- * caused an error in one of the checks above.
+ * Set derivative flags. Ambiguous or nonsensical combinations, e.g.
+ * "--schema-only --no-schema", will have already caused an error in one
+ * of the checks above.
*/
dopt.dumpData = ((dopt.dumpData && !schema_only && !statistics_only) ||
(data_only || with_data)) && !no_data;
@@ -1084,6 +1104,36 @@ main(int argc, char **argv)
getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
/*
+ * For binary upgrade mode, dump pg_largeobject_metadata and the
+ * associated pg_shdepend rows. This is faster to restore than the
+ * equivalent set of large object commands. We can only do this for
+ * upgrades from v12 and newer; in older versions, pg_largeobject_metadata
+ * was created WITH OIDS, so the OID column is hidden and won't be dumped.
+ */
+ if (dopt.binary_upgrade && fout->remoteVersion >= 120000)
+ {
+ TableInfo *lo_metadata = findTableByOid(LargeObjectMetadataRelationId);
+ TableInfo *shdepend = findTableByOid(SharedDependRelationId);
+
+ makeTableDataInfo(&dopt, lo_metadata);
+ makeTableDataInfo(&dopt, shdepend);
+
+ /*
+ * Save pg_largeobject_metadata's dump ID for use as a dependency for
+ * pg_shdepend and any large object comments/seclabels.
+ */
+ lo_metadata_dumpId = lo_metadata->dataObj->dobj.dumpId;
+ addObjectDependency(&shdepend->dataObj->dobj, lo_metadata_dumpId);
+
+ /*
+ * Only dump large object shdepend rows for this database.
+ */
+ shdepend->dataObj->filtercond = "WHERE classid = 'pg_largeobject'::regclass "
+ "AND dbid = (SELECT oid FROM pg_database "
+ " WHERE datname = current_database())";
+ }
+
+ /*
* In binary-upgrade mode, we do not have to worry about the actual LO
* data or the associated metadata that resides in the pg_largeobject and
* pg_largeobject_metadata tables, respectively.
@@ -1224,7 +1274,7 @@ main(int argc, char **argv)
* right now.
*/
if (plainText)
- RestoreArchive(fout, false);
+ RestoreArchive(fout);
CloseArchive(fout);
@@ -1235,7 +1285,7 @@ main(int argc, char **argv)
static void
help(const char *progname)
{
- printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
+ printf(_("%s exports a PostgreSQL database as an SQL script or to other formats.\n\n"), progname);
printf(_("Usage:\n"));
printf(_(" %s [OPTION]... [DBNAME]\n"), progname);
@@ -2166,6 +2216,13 @@ selectDumpableProcLang(ProcLangInfo *plang, Archive *fout)
static void
selectDumpableAccessMethod(AccessMethodInfo *method, Archive *fout)
{
+ /* see getAccessMethods() comment about v9.6. */
+ if (fout->remoteVersion < 90600)
+ {
+ method->dobj.dump = DUMP_COMPONENT_NONE;
+ return;
+ }
+
if (checkExtensionMembership(&method->dobj, fout))
return; /* extension membership overrides all else */
@@ -3922,10 +3979,37 @@ getLOs(Archive *fout)
* as it will be copied by pg_upgrade, which simply copies the
* pg_largeobject table. We *do* however dump out anything but the
* data, as pg_upgrade copies just pg_largeobject, but not
- * pg_largeobject_metadata, after the dump is restored.
+ * pg_largeobject_metadata, after the dump is restored. In versions
+ * before v12, this is done via proper large object commands. In
+ * newer versions, we dump the content of pg_largeobject_metadata and
+ * any associated pg_shdepend rows, which is faster to restore. (On
+ * <v12, pg_largeobject_metadata was created WITH OIDS, so the OID
+ * column is hidden and won't be dumped.)
*/
if (dopt->binary_upgrade)
- loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA;
+ {
+ if (fout->remoteVersion >= 120000)
+ {
+ /*
+ * We should've saved pg_largeobject_metadata's dump ID before
+ * this point.
+ */
+ Assert(lo_metadata_dumpId);
+
+ loinfo->dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL | DUMP_COMPONENT_DEFINITION);
+
+ /*
+ * Mark the large object as dependent on
+ * pg_largeobject_metadata so that any large object
+ * comments/seclabels are dumped after it.
+ */
+ loinfo->dobj.dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
+ loinfo->dobj.dependencies[0] = lo_metadata_dumpId;
+ loinfo->dobj.nDeps = loinfo->dobj.allocDeps = 1;
+ }
+ else
+ loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA;
+ }
/*
* Create a "BLOBS" data item for the group, too. This is just a
@@ -4960,6 +5044,7 @@ getSubscriptions(Archive *fout)
int i_suboriginremotelsn;
int i_subenabled;
int i_subfailover;
+ int i_subretaindeadtuples;
int i,
ntups;
@@ -5032,10 +5117,17 @@ getSubscriptions(Archive *fout)
if (fout->remoteVersion >= 170000)
appendPQExpBufferStr(query,
- " s.subfailover\n");
+ " s.subfailover,\n");
+ else
+ appendPQExpBufferStr(query,
+ " false AS subfailover,\n");
+
+ if (fout->remoteVersion >= 190000)
+ appendPQExpBufferStr(query,
+ " s.subretaindeadtuples\n");
else
appendPQExpBufferStr(query,
- " false AS subfailover\n");
+ " false AS subretaindeadtuples\n");
appendPQExpBufferStr(query,
"FROM pg_subscription s\n");
@@ -5069,6 +5161,7 @@ getSubscriptions(Archive *fout)
i_subpasswordrequired = PQfnumber(res, "subpasswordrequired");
i_subrunasowner = PQfnumber(res, "subrunasowner");
i_subfailover = PQfnumber(res, "subfailover");
+ i_subretaindeadtuples = PQfnumber(res, "subretaindeadtuples");
i_subconninfo = PQfnumber(res, "subconninfo");
i_subslotname = PQfnumber(res, "subslotname");
i_subsynccommit = PQfnumber(res, "subsynccommit");
@@ -5102,6 +5195,8 @@ getSubscriptions(Archive *fout)
(strcmp(PQgetvalue(res, i, i_subrunasowner), "t") == 0);
subinfo[i].subfailover =
(strcmp(PQgetvalue(res, i, i_subfailover), "t") == 0);
+ subinfo[i].subretaindeadtuples =
+ (strcmp(PQgetvalue(res, i, i_subretaindeadtuples), "t") == 0);
subinfo[i].subconninfo =
pg_strdup(PQgetvalue(res, i, i_subconninfo));
if (PQgetisnull(res, i, i_subslotname))
@@ -5360,6 +5455,9 @@ dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo)
if (subinfo->subfailover)
appendPQExpBufferStr(query, ", failover = true");
+ if (subinfo->subretaindeadtuples)
+ appendPQExpBufferStr(query, ", retain_dead_tuples = true");
+
if (strcmp(subinfo->subsynccommit, "off") != 0)
appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
@@ -6120,6 +6218,7 @@ getTypes(Archive *fout)
*/
tyinfo[i].nDomChecks = 0;
tyinfo[i].domChecks = NULL;
+ tyinfo[i].notnull = NULL;
if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
tyinfo[i].typtype == TYPTYPE_DOMAIN)
getDomainConstraints(fout, &(tyinfo[i]));
@@ -6179,6 +6278,8 @@ getOperators(Archive *fout)
int i_oprnamespace;
int i_oprowner;
int i_oprkind;
+ int i_oprleft;
+ int i_oprright;
int i_oprcode;
/*
@@ -6190,6 +6291,8 @@ getOperators(Archive *fout)
"oprnamespace, "
"oprowner, "
"oprkind, "
+ "oprleft, "
+ "oprright, "
"oprcode::oid AS oprcode "
"FROM pg_operator");
@@ -6205,6 +6308,8 @@ getOperators(Archive *fout)
i_oprnamespace = PQfnumber(res, "oprnamespace");
i_oprowner = PQfnumber(res, "oprowner");
i_oprkind = PQfnumber(res, "oprkind");
+ i_oprleft = PQfnumber(res, "oprleft");
+ i_oprright = PQfnumber(res, "oprright");
i_oprcode = PQfnumber(res, "oprcode");
for (i = 0; i < ntups; i++)
@@ -6218,6 +6323,8 @@ getOperators(Archive *fout)
findNamespace(atooid(PQgetvalue(res, i, i_oprnamespace)));
oprinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_oprowner));
oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
+ oprinfo[i].oprleft = atooid(PQgetvalue(res, i, i_oprleft));
+ oprinfo[i].oprright = atooid(PQgetvalue(res, i, i_oprright));
oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
/* Decide whether we want to dump it */
@@ -6246,6 +6353,7 @@ getCollations(Archive *fout)
int i_collname;
int i_collnamespace;
int i_collowner;
+ int i_collencoding;
query = createPQExpBuffer();
@@ -6256,7 +6364,8 @@ getCollations(Archive *fout)
appendPQExpBufferStr(query, "SELECT tableoid, oid, collname, "
"collnamespace, "
- "collowner "
+ "collowner, "
+ "collencoding "
"FROM pg_collation");
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
@@ -6270,6 +6379,7 @@ getCollations(Archive *fout)
i_collname = PQfnumber(res, "collname");
i_collnamespace = PQfnumber(res, "collnamespace");
i_collowner = PQfnumber(res, "collowner");
+ i_collencoding = PQfnumber(res, "collencoding");
for (i = 0; i < ntups; i++)
{
@@ -6281,6 +6391,7 @@ getCollations(Archive *fout)
collinfo[i].dobj.namespace =
findNamespace(atooid(PQgetvalue(res, i, i_collnamespace)));
collinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_collowner));
+ collinfo[i].collencoding = atoi(PQgetvalue(res, i, i_collencoding));
/* Decide whether we want to dump it */
selectDumpableObject(&(collinfo[i].dobj), fout);
@@ -6371,16 +6482,28 @@ getAccessMethods(Archive *fout)
int i_amhandler;
int i_amtype;
- /* Before 9.6, there are no user-defined access methods */
- if (fout->remoteVersion < 90600)
- return;
-
query = createPQExpBuffer();
- /* Select all access methods from pg_am table */
- appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, amtype, "
- "amhandler::pg_catalog.regproc AS amhandler "
- "FROM pg_am");
+ /*
+ * Select all access methods from pg_am table. v9.6 introduced CREATE
+ * ACCESS METHOD, so earlier versions usually have only built-in access
+ * methods. v9.6 also changed the access method API, replacing dozens of
+ * pg_am columns with amhandler. Even if a user created an access method
+ * by "INSERT INTO pg_am", we have no way to translate pre-v9.6 pg_am
+ * columns to a v9.6+ CREATE ACCESS METHOD. Hence, before v9.6, read
+ * pg_am just to facilitate findAccessMethodByOid() providing the
+ * OID-to-name mapping.
+ */
+ appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, ");
+ if (fout->remoteVersion >= 90600)
+ appendPQExpBufferStr(query,
+ "amtype, "
+ "amhandler::pg_catalog.regproc AS amhandler ");
+ else
+ appendPQExpBufferStr(query,
+ "'i'::pg_catalog.\"char\" AS amtype, "
+ "'-'::pg_catalog.regproc AS amhandler ");
+ appendPQExpBufferStr(query, "FROM pg_am");
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
@@ -6429,6 +6552,7 @@ getOpclasses(Archive *fout)
OpclassInfo *opcinfo;
int i_tableoid;
int i_oid;
+ int i_opcmethod;
int i_opcname;
int i_opcnamespace;
int i_opcowner;
@@ -6438,7 +6562,7 @@ getOpclasses(Archive *fout)
* system-defined opclasses at dump-out time.
*/
- appendPQExpBufferStr(query, "SELECT tableoid, oid, opcname, "
+ appendPQExpBufferStr(query, "SELECT tableoid, oid, opcmethod, opcname, "
"opcnamespace, "
"opcowner "
"FROM pg_opclass");
@@ -6451,6 +6575,7 @@ getOpclasses(Archive *fout)
i_tableoid = PQfnumber(res, "tableoid");
i_oid = PQfnumber(res, "oid");
+ i_opcmethod = PQfnumber(res, "opcmethod");
i_opcname = PQfnumber(res, "opcname");
i_opcnamespace = PQfnumber(res, "opcnamespace");
i_opcowner = PQfnumber(res, "opcowner");
@@ -6464,6 +6589,7 @@ getOpclasses(Archive *fout)
opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
opcinfo[i].dobj.namespace =
findNamespace(atooid(PQgetvalue(res, i, i_opcnamespace)));
+ opcinfo[i].opcmethod = atooid(PQgetvalue(res, i, i_opcmethod));
opcinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opcowner));
/* Decide whether we want to dump it */
@@ -6489,6 +6615,7 @@ getOpfamilies(Archive *fout)
OpfamilyInfo *opfinfo;
int i_tableoid;
int i_oid;
+ int i_opfmethod;
int i_opfname;
int i_opfnamespace;
int i_opfowner;
@@ -6500,7 +6627,7 @@ getOpfamilies(Archive *fout)
* system-defined opfamilies at dump-out time.
*/
- appendPQExpBufferStr(query, "SELECT tableoid, oid, opfname, "
+ appendPQExpBufferStr(query, "SELECT tableoid, oid, opfmethod, opfname, "
"opfnamespace, "
"opfowner "
"FROM pg_opfamily");
@@ -6514,6 +6641,7 @@ getOpfamilies(Archive *fout)
i_tableoid = PQfnumber(res, "tableoid");
i_oid = PQfnumber(res, "oid");
i_opfname = PQfnumber(res, "opfname");
+ i_opfmethod = PQfnumber(res, "opfmethod");
i_opfnamespace = PQfnumber(res, "opfnamespace");
i_opfowner = PQfnumber(res, "opfowner");
@@ -6526,6 +6654,7 @@ getOpfamilies(Archive *fout)
opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname));
opfinfo[i].dobj.namespace =
findNamespace(atooid(PQgetvalue(res, i, i_opfnamespace)));
+ opfinfo[i].opfmethod = atooid(PQgetvalue(res, i, i_opfmethod));
opfinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opfowner));
/* Decide whether we want to dump it */
@@ -6890,7 +7019,8 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages,
(relkind == RELKIND_PARTITIONED_TABLE) ||
(relkind == RELKIND_INDEX) ||
(relkind == RELKIND_PARTITIONED_INDEX) ||
- (relkind == RELKIND_MATVIEW))
+ (relkind == RELKIND_MATVIEW) ||
+ (relkind == RELKIND_FOREIGN_TABLE))
{
RelStatsInfo *info = pg_malloc0(sizeof(RelStatsInfo));
DumpableObject *dobj = &info->dobj;
@@ -6929,6 +7059,7 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages,
case RELKIND_RELATION:
case RELKIND_PARTITIONED_TABLE:
case RELKIND_MATVIEW:
+ case RELKIND_FOREIGN_TABLE:
info->section = SECTION_DATA;
break;
case RELKIND_INDEX:
@@ -6936,7 +7067,7 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages,
info->section = SECTION_POST_DATA;
break;
default:
- pg_fatal("cannot dump statistics for relation kind '%c'",
+ pg_fatal("cannot dump statistics for relation kind \"%c\"",
info->relkind);
}
@@ -8243,27 +8374,33 @@ addConstrChildIdxDeps(DumpableObject *dobj, const IndxInfo *refidx)
static void
getDomainConstraints(Archive *fout, TypeInfo *tyinfo)
{
- int i;
ConstraintInfo *constrinfo;
PQExpBuffer query = createPQExpBuffer();
PGresult *res;
int i_tableoid,
i_oid,
i_conname,
- i_consrc;
+ i_consrc,
+ i_convalidated,
+ i_contype;
int ntups;
if (!fout->is_prepared[PREPQUERY_GETDOMAINCONSTRAINTS])
{
- /* Set up query for constraint-specific details */
- appendPQExpBufferStr(query,
- "PREPARE getDomainConstraints(pg_catalog.oid) AS\n"
- "SELECT tableoid, oid, conname, "
- "pg_catalog.pg_get_constraintdef(oid) AS consrc, "
- "convalidated "
- "FROM pg_catalog.pg_constraint "
- "WHERE contypid = $1 AND contype = 'c' "
- "ORDER BY conname");
+ /*
+ * Set up query for constraint-specific details. For servers 17 and
+ * up, domains have constraints of type 'n' as well as 'c', otherwise
+ * just the latter.
+ */
+ appendPQExpBuffer(query,
+ "PREPARE getDomainConstraints(pg_catalog.oid) AS\n"
+ "SELECT tableoid, oid, conname, "
+ "pg_catalog.pg_get_constraintdef(oid) AS consrc, "
+ "convalidated, contype "
+ "FROM pg_catalog.pg_constraint "
+ "WHERE contypid = $1 AND contype IN (%s) "
+ "ORDER BY conname",
+ fout->remoteVersion < 170000 ? "'c'" : "'c', 'n'");
ExecuteSqlStatement(fout, query->data);
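Assembled for a v17-or-newer server, the prepared statement reads as follows (reconstructed from the fragments above):

    PREPARE getDomainConstraints(pg_catalog.oid) AS
    SELECT tableoid, oid, conname,
           pg_catalog.pg_get_constraintdef(oid) AS consrc,
           convalidated, contype
    FROM pg_catalog.pg_constraint
    WHERE contypid = $1 AND contype IN ('c', 'n')
    ORDER BY conname;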
@@ -8282,33 +8419,50 @@ getDomainConstraints(Archive *fout, TypeInfo *tyinfo)
i_oid = PQfnumber(res, "oid");
i_conname = PQfnumber(res, "conname");
i_consrc = PQfnumber(res, "consrc");
+ i_convalidated = PQfnumber(res, "convalidated");
+ i_contype = PQfnumber(res, "contype");
constrinfo = (ConstraintInfo *) pg_malloc(ntups * sizeof(ConstraintInfo));
-
- tyinfo->nDomChecks = ntups;
tyinfo->domChecks = constrinfo;
- for (i = 0; i < ntups; i++)
+ /* 'i' tracks result rows; 'j' counts CHECK constraints */
+ for (int i = 0, j = 0; i < ntups; i++)
{
- bool validated = PQgetvalue(res, i, 4)[0] == 't';
-
- constrinfo[i].dobj.objType = DO_CONSTRAINT;
- constrinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
- constrinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
- AssignDumpId(&constrinfo[i].dobj);
- constrinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
- constrinfo[i].dobj.namespace = tyinfo->dobj.namespace;
- constrinfo[i].contable = NULL;
- constrinfo[i].condomain = tyinfo;
- constrinfo[i].contype = 'c';
- constrinfo[i].condef = pg_strdup(PQgetvalue(res, i, i_consrc));
- constrinfo[i].confrelid = InvalidOid;
- constrinfo[i].conindex = 0;
- constrinfo[i].condeferrable = false;
- constrinfo[i].condeferred = false;
- constrinfo[i].conislocal = true;
-
- constrinfo[i].separate = !validated;
+ bool validated = PQgetvalue(res, i, i_convalidated)[0] == 't';
+ char contype = (PQgetvalue(res, i, i_contype))[0];
+ ConstraintInfo *constraint;
+
+ if (contype == CONSTRAINT_CHECK)
+ {
+ constraint = &constrinfo[j++];
+ tyinfo->nDomChecks++;
+ }
+ else
+ {
+ Assert(contype == CONSTRAINT_NOTNULL);
+ Assert(tyinfo->notnull == NULL);
+ /* use last item in array for the not-null constraint */
+ tyinfo->notnull = &(constrinfo[ntups - 1]);
+ constraint = tyinfo->notnull;
+ }
+
+ constraint->dobj.objType = DO_CONSTRAINT;
+ constraint->dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
+ constraint->dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
+ AssignDumpId(&(constraint->dobj));
+ constraint->dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
+ constraint->dobj.namespace = tyinfo->dobj.namespace;
+ constraint->contable = NULL;
+ constraint->condomain = tyinfo;
+ constraint->contype = contype;
+ constraint->condef = pg_strdup(PQgetvalue(res, i, i_consrc));
+ constraint->confrelid = InvalidOid;
+ constraint->conindex = 0;
+ constraint->condeferrable = false;
+ constraint->condeferred = false;
+ constraint->conislocal = true;
+
+ constraint->separate = !validated;
/*
* Make the domain depend on the constraint, ensuring it won't be
@@ -8317,8 +8471,7 @@ getDomainConstraints(Archive *fout, TypeInfo *tyinfo)
* anyway, so this doesn't matter.
*/
if (validated)
- addObjectDependency(&tyinfo->dobj,
- constrinfo[i].dobj.dumpId);
+ addObjectDependency(&tyinfo->dobj, constraint->dobj.dumpId);
}
PQclear(res);
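To illustrate what this loop now copes with, a domain like the following (hypothetical names) yields one contype = 'c' row and one contype = 'n' row on v17 and newer, the latter carrying the generated name per the %s_not_null convention seen later in dumpDomain():

    CREATE DOMAIN posint AS integer
        CONSTRAINT posint_check CHECK (VALUE > 0)
        NOT NULL;

    SELECT conname, contype
    FROM pg_catalog.pg_constraint
    WHERE contypid = 'posint'::regtype
    ORDER BY conname;
    -- posint_check       c
    -- posint_not_null    n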
@@ -9004,6 +9157,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
int i_attalign;
int i_attislocal;
int i_notnull_name;
+ int i_notnull_comment;
int i_notnull_noinherit;
int i_notnull_islocal;
int i_notnull_invalidoid;
@@ -9034,8 +9188,20 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
if (tbinfo->relkind == RELKIND_SEQUENCE)
continue;
- /* Don't bother with uninteresting tables, either */
- if (!tbinfo->interesting)
+ /*
+ * Don't bother with uninteresting tables, either. For binary
+ * upgrades, this is bypassed for pg_largeobject_metadata and
+ * pg_shdepend so that the column names are collected for the
+ * corresponding COPY commands. Restoring the data for those catalogs
+ * is faster than restoring the equivalent set of large object
+ * commands. We can only do this for upgrades from v12 and newer; in
+ * older versions, pg_largeobject_metadata was created WITH OIDS, so
+ * the OID column is hidden and won't be dumped.
+ */
+ if (!tbinfo->interesting &&
+ !(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
+ (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
+ tbinfo->dobj.catId.oid == SharedDependRelationId)))
continue;
/* OK, we need info for this table */
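A sketch of the payoff (assuming the v12+ column layout of pg_largeobject_metadata): with the column list collected, the binary-upgrade dump can emit a plain COPY for these catalogs,

    COPY pg_catalog.pg_largeobject_metadata (oid, lomowner, lomacl) FROM stdin;

instead of one lo_create()/ALTER/GRANT command set per large object.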
@@ -9087,7 +9253,8 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
/*
* Find out any NOT NULL markings for each column. In 18 and up we read
- * pg_constraint to obtain the constraint name. notnull_noinherit is set
+ * pg_constraint to obtain the constraint name, and for valid constraints
+ * also pg_description to obtain their comments. notnull_noinherit is set
* according to the NO INHERIT property. For versions prior to 18, we
* store an empty string as the name when a constraint is marked as
* attnotnull (this cues dumpTableSchema to print the NOT NULL clause
@@ -9095,7 +9262,8 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
*
* For invalid constraints, we need to store their OIDs for processing
* elsewhere, so we bring the pg_constraint.oid value when the constraint
- * is invalid, and NULL otherwise.
+ * is invalid, and NULL otherwise. Comments on invalid constraints are
+ * handled by collectComments instead, since each is its own dumpable object.
*
* We track in notnull_islocal whether the constraint was defined directly
* in this table or via an ancestor, for binary upgrade. flagInhAttrs
@@ -9105,6 +9273,8 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
if (fout->remoteVersion >= 180000)
appendPQExpBufferStr(q,
"co.conname AS notnull_name,\n"
+ "CASE WHEN co.convalidated THEN pt.description"
+ " ELSE NULL END AS notnull_comment,\n"
"CASE WHEN NOT co.convalidated THEN co.oid "
"ELSE NULL END AS notnull_invalidoid,\n"
"co.connoinherit AS notnull_noinherit,\n"
@@ -9112,6 +9282,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
else
appendPQExpBufferStr(q,
"CASE WHEN a.attnotnull THEN '' ELSE NULL END AS notnull_name,\n"
+ "NULL AS notnull_comment,\n"
"NULL AS notnull_invalidoid,\n"
"false AS notnull_noinherit,\n"
"a.attislocal AS notnull_islocal,\n");
@@ -9155,15 +9326,16 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
/*
* In versions 18 and up, we need pg_constraint for explicit NOT NULL
- * entries. Also, we need to know if the NOT NULL for each column is
- * backing a primary key.
+ * entries and pg_description to get their comments.
*/
if (fout->remoteVersion >= 180000)
appendPQExpBufferStr(q,
" LEFT JOIN pg_catalog.pg_constraint co ON "
"(a.attrelid = co.conrelid\n"
" AND co.contype = 'n' AND "
- "co.conkey = array[a.attnum])\n");
+ "co.conkey = array[a.attnum])\n"
+ " LEFT JOIN pg_catalog.pg_description pt ON "
+ "(pt.classoid = co.tableoid AND pt.objoid = co.oid)\n");
appendPQExpBufferStr(q,
"WHERE a.attnum > 0::pg_catalog.int2\n"
@@ -9187,6 +9359,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
i_attalign = PQfnumber(res, "attalign");
i_attislocal = PQfnumber(res, "attislocal");
i_notnull_name = PQfnumber(res, "notnull_name");
+ i_notnull_comment = PQfnumber(res, "notnull_comment");
i_notnull_invalidoid = PQfnumber(res, "notnull_invalidoid");
i_notnull_noinherit = PQfnumber(res, "notnull_noinherit");
i_notnull_islocal = PQfnumber(res, "notnull_islocal");
@@ -9232,7 +9405,10 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
pg_fatal("unrecognized table OID %u", attrelid);
/* cross-check that we only got requested tables */
if (tbinfo->relkind == RELKIND_SEQUENCE ||
- !tbinfo->interesting)
+ (!tbinfo->interesting &&
+ !(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
+ (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
+ tbinfo->dobj.catId.oid == SharedDependRelationId))))
pg_fatal("unexpected column data for table \"%s\"",
tbinfo->dobj.name);
@@ -9255,6 +9431,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
tbinfo->attfdwoptions = (char **) pg_malloc(numatts * sizeof(char *));
tbinfo->attmissingval = (char **) pg_malloc(numatts * sizeof(char *));
tbinfo->notnull_constrs = (char **) pg_malloc(numatts * sizeof(char *));
+ tbinfo->notnull_comment = (char **) pg_malloc(numatts * sizeof(char *));
tbinfo->notnull_invalid = (bool *) pg_malloc(numatts * sizeof(bool));
tbinfo->notnull_noinh = (bool *) pg_malloc(numatts * sizeof(bool));
tbinfo->notnull_islocal = (bool *) pg_malloc(numatts * sizeof(bool));
@@ -9286,11 +9463,14 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
determineNotNullFlags(fout, res, r,
tbinfo, j,
i_notnull_name,
+ i_notnull_comment,
i_notnull_invalidoid,
i_notnull_noinherit,
i_notnull_islocal,
&invalidnotnulloids);
+ tbinfo->notnull_comment[j] = PQgetisnull(res, r, i_notnull_comment) ?
+ NULL : pg_strdup(PQgetvalue(res, r, i_notnull_comment));
tbinfo->attoptions[j] = pg_strdup(PQgetvalue(res, r, i_attoptions));
tbinfo->attcollation[j] = atooid(PQgetvalue(res, r, i_attcollation));
tbinfo->attcompression[j] = *(PQgetvalue(res, r, i_attcompression));
@@ -9461,7 +9641,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
int i_consrc;
int i_conislocal;
- pg_log_info("finding invalid not null constraints");
+ pg_log_info("finding invalid not-null constraints");
resetPQExpBuffer(q);
appendPQExpBuffer(q,
@@ -9702,8 +9882,9 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
* 4) The column has a constraint with a known name; in that case
* notnull_constrs carries that name and dumpTableSchema will print
* "CONSTRAINT the_name NOT NULL". However, if the name is the default
- * (table_column_not_null), there's no need to print that name in the dump,
- * so notnull_constrs is set to the empty string and it behaves as case 2.
+ * (table_column_not_null) and there's no comment on the constraint,
+ * there's no need to print that name in the dump, so notnull_constrs
+ * is set to the empty string and it behaves as case 2.
*
* In a child table that inherits from a parent already containing NOT NULL
* constraints and the columns in the child don't have their own NOT NULL
@@ -9730,6 +9911,7 @@ static void
determineNotNullFlags(Archive *fout, PGresult *res, int r,
TableInfo *tbinfo, int j,
int i_notnull_name,
+ int i_notnull_comment,
int i_notnull_invalidoid,
int i_notnull_noinherit,
int i_notnull_islocal,
@@ -9803,11 +9985,13 @@ determineNotNullFlags(Archive *fout, PGresult *res, int r,
{
/*
* In binary upgrade of inheritance child tables, must have a
- * constraint name that we can UPDATE later.
+ * constraint name that we can UPDATE later; same if there's a
+ * comment on the constraint.
*/
- if (dopt->binary_upgrade &&
- !tbinfo->ispartition &&
- !tbinfo->notnull_islocal)
+ if ((dopt->binary_upgrade &&
+ !tbinfo->ispartition &&
+ !tbinfo->notnull_islocal) ||
+ !PQgetisnull(res, r, i_notnull_comment))
{
tbinfo->notnull_constrs[j] =
pstrdup(PQgetvalue(res, r, i_notnull_name));
@@ -10855,7 +11039,7 @@ dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te)
expected_te = expected_te->next;
if (te != expected_te)
- pg_fatal("stats dumped out of order (current: %d %s %s) (expected: %d %s %s)",
+ pg_fatal("statistics dumped out of order (current: %d %s %s, expected: %d %s %s)",
te->dumpId, te->desc, te->tag,
expected_te->dumpId, expected_te->desc, expected_te->tag);
@@ -10929,7 +11113,20 @@ dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te)
appendStringLiteralAH(out, rsinfo->dobj.name, fout);
appendPQExpBufferStr(out, ",\n");
appendPQExpBuffer(out, "\t'relpages', '%d'::integer,\n", rsinfo->relpages);
- appendPQExpBuffer(out, "\t'reltuples', '%s'::real,\n", rsinfo->reltuples);
+
+ /*
+ * Before v14, a reltuples value of 0 was ambiguous: it could either mean
+ * the relation is empty, or it could mean that it hadn't yet been
+ * vacuumed or analyzed. (Newer versions use -1 for the latter case.)
+ * This ambiguity allegedly can cause the planner to choose inefficient
+ * plans after restoring to v18 or newer. To deal with this, let's just
+ * set reltuples to -1 in that case.
+ */
+ if (fout->remoteVersion < 140000 && strcmp("0", rsinfo->reltuples) == 0)
+ appendPQExpBufferStr(out, "\t'reltuples', '-1'::real,\n");
+ else
+ appendPQExpBuffer(out, "\t'reltuples', '%s'::real,\n", rsinfo->reltuples);
+
appendPQExpBuffer(out, "\t'relallvisible', '%d'::integer",
rsinfo->relallvisible);
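With this change, a relation dumped from a pre-v14 server whose stored reltuples is 0 produces a fragment along these lines inside the generated pg_restore_relation_stats() call (values illustrative):

    'relpages', '0'::integer,
    'reltuples', '-1'::real,
    'relallvisible', '0'::integer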
@@ -10983,7 +11180,7 @@ dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te)
appendStringLiteralAH(out, rsinfo->dobj.name, fout);
if (PQgetisnull(res, rownum, i_attname))
- pg_fatal("attname cannot be NULL");
+ pg_fatal("unexpected null attname");
attname = PQgetvalue(res, rownum, i_attname);
/*
@@ -12484,8 +12681,36 @@ dumpDomain(Archive *fout, const TypeInfo *tyinfo)
appendPQExpBuffer(q, " COLLATE %s", fmtQualifiedDumpable(coll));
}
+ /*
+ * Print a not-null constraint if there's one. In servers older than 17
+ * these don't have names, so just print it unadorned; in newer ones they
+ * do, but most of the time it's going to be the standard generated one,
+ * so omit the name in that case also.
+ */
if (typnotnull[0] == 't')
- appendPQExpBufferStr(q, " NOT NULL");
+ {
+ if (fout->remoteVersion < 170000 || tyinfo->notnull == NULL)
+ appendPQExpBufferStr(q, " NOT NULL");
+ else
+ {
+ ConstraintInfo *notnull = tyinfo->notnull;
+
+ if (!notnull->separate)
+ {
+ char *default_name;
+
+ /* XXX should match ChooseConstraintName better */
+ default_name = psprintf("%s_not_null", tyinfo->dobj.name);
+
+ if (strcmp(default_name, notnull->dobj.name) == 0)
+ appendPQExpBufferStr(q, " NOT NULL");
+ else
+ appendPQExpBuffer(q, " CONSTRAINT %s %s",
+ fmtId(notnull->dobj.name), notnull->condef);
+ free(default_name);
+ }
+ }
+ }
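The effect, sketched with hypothetical domains on a v17+ server:

    CREATE DOMAIN posint AS integer NOT NULL;
    -- default constraint name posint_not_null, so the dump prints just
    --   CREATE DOMAIN posint AS integer NOT NULL;

    CREATE DOMAIN posint2 AS integer CONSTRAINT keep_me NOT NULL;
    -- non-default name, so the dump prints
    --   CREATE DOMAIN posint2 AS integer CONSTRAINT keep_me NOT NULL;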
if (typdefault != NULL)
{
@@ -12505,7 +12730,7 @@ dumpDomain(Archive *fout, const TypeInfo *tyinfo)
{
ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
- if (!domcheck->separate)
+ if (!domcheck->separate && domcheck->contype == 'c')
appendPQExpBuffer(q, "\n\tCONSTRAINT %s %s",
fmtId(domcheck->dobj.name), domcheck->condef);
}
@@ -12550,8 +12775,13 @@ dumpDomain(Archive *fout, const TypeInfo *tyinfo)
for (i = 0; i < tyinfo->nDomChecks; i++)
{
ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
- PQExpBuffer conprefix = createPQExpBuffer();
+ PQExpBuffer conprefix;
+
+ /* but only if the constraint itself was dumped here */
+ if (domcheck->separate)
+ continue;
+ conprefix = createPQExpBuffer();
appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
fmtId(domcheck->dobj.name));
@@ -12564,6 +12794,25 @@ dumpDomain(Archive *fout, const TypeInfo *tyinfo)
destroyPQExpBuffer(conprefix);
}
+ /*
+ * And a comment on the not-null constraint, if there's one -- but only if
+ * the constraint itself was dumped here
+ */
+ if (tyinfo->notnull != NULL && !tyinfo->notnull->separate)
+ {
+ PQExpBuffer conprefix = createPQExpBuffer();
+
+ appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
+ fmtId(tyinfo->notnull->dobj.name));
+
+ if (tyinfo->notnull->dobj.dump & DUMP_COMPONENT_COMMENT)
+ dumpComment(fout, conprefix->data, qtypname,
+ tyinfo->dobj.namespace->dobj.name,
+ tyinfo->rolname,
+ tyinfo->notnull->dobj.catId, 0, tyinfo->dobj.dumpId);
+ destroyPQExpBuffer(conprefix);
+ }
+
destroyPQExpBuffer(q);
destroyPQExpBuffer(delq);
destroyPQExpBuffer(query);
@@ -17671,6 +17920,56 @@ dumpTableSchema(Archive *fout, const TableInfo *tbinfo)
if (tbinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
dumpTableSecLabel(fout, tbinfo, reltypename);
+ /*
+ * Dump comments for not-null constraints that aren't to be dumped
+ * separately (those are processed by collectComments/dumpComment).
+ */
+ if (!fout->dopt->no_comments && dopt->dumpSchema &&
+ fout->remoteVersion >= 180000)
+ {
+ PQExpBuffer comment = NULL;
+ PQExpBuffer tag = NULL;
+
+ for (j = 0; j < tbinfo->numatts; j++)
+ {
+ if (tbinfo->notnull_constrs[j] != NULL &&
+ tbinfo->notnull_comment[j] != NULL)
+ {
+ if (comment == NULL)
+ {
+ comment = createPQExpBuffer();
+ tag = createPQExpBuffer();
+ }
+ else
+ {
+ resetPQExpBuffer(comment);
+ resetPQExpBuffer(tag);
+ }
+
+ appendPQExpBuffer(comment, "COMMENT ON CONSTRAINT %s ON %s IS ",
+ fmtId(tbinfo->notnull_constrs[j]), qualrelname);
+ appendStringLiteralAH(comment, tbinfo->notnull_comment[j], fout);
+ appendPQExpBufferStr(comment, ";\n");
+
+ appendPQExpBuffer(tag, "CONSTRAINT %s ON %s",
+ fmtId(tbinfo->notnull_constrs[j]), qrelname);
+
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
+ ARCHIVE_OPTS(.tag = tag->data,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "COMMENT",
+ .section = SECTION_NONE,
+ .createStmt = comment->data,
+ .deps = &(tbinfo->dobj.dumpId),
+ .nDeps = 1));
+ }
+ }
+
+ destroyPQExpBuffer(comment);
+ destroyPQExpBuffer(tag);
+ }
+
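Each archive entry built here restores one comment; with hypothetical names, the generated statement looks like:

    COMMENT ON CONSTRAINT t_a_not_null ON public.t IS 'application requires this';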
/* Dump comments on inlined table constraints */
for (j = 0; j < tbinfo->ncheck; j++)
{
@@ -18375,14 +18674,23 @@ dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
.dropStmt = delq->data));
}
}
- else if (coninfo->contype == 'c' && tbinfo == NULL)
+ else if (tbinfo == NULL)
{
- /* CHECK constraint on a domain */
+ /* CHECK or NOT NULL constraint on a domain */
TypeInfo *tyinfo = coninfo->condomain;
+ Assert(coninfo->contype == 'c' || coninfo->contype == 'n');
+
/* Ignore if not to be dumped separately */
if (coninfo->separate)
{
+ const char *keyword;
+
+ if (coninfo->contype == 'c')
+ keyword = "CHECK CONSTRAINT";
+ else
+ keyword = "CONSTRAINT";
+
appendPQExpBuffer(q, "ALTER DOMAIN %s\n",
fmtQualifiedDumpable(tyinfo));
appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n",
@@ -18401,10 +18709,26 @@ dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
ARCHIVE_OPTS(.tag = tag,
.namespace = tyinfo->dobj.namespace->dobj.name,
.owner = tyinfo->rolname,
- .description = "CHECK CONSTRAINT",
+ .description = keyword,
.section = SECTION_POST_DATA,
.createStmt = q->data,
.dropStmt = delq->data));
+
+ if (coninfo->dobj.dump & DUMP_COMPONENT_COMMENT)
+ {
+ PQExpBuffer conprefix = createPQExpBuffer();
+ char *qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
+
+ appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
+ fmtId(coninfo->dobj.name));
+
+ dumpComment(fout, conprefix->data, qtypname,
+ tyinfo->dobj.namespace->dobj.name,
+ tyinfo->rolname,
+ coninfo->dobj.catId, 0, tyinfo->dobj.dumpId);
+ destroyPQExpBuffer(conprefix);
+ free(qtypname);
+ }
}
}
else
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index 7417eab6aef..dde85ed156c 100644
--- a/src/bin/pg_dump/pg_dump.h
+++ b/src/bin/pg_dump/pg_dump.h
@@ -222,7 +222,9 @@ typedef struct _typeInfo
bool isDefined; /* true if typisdefined */
/* If needed, we'll create a "shell type" entry for it; link that here: */
struct _shellTypeInfo *shellType; /* shell-type entry, or NULL */
- /* If it's a domain, we store links to its constraints here: */
+ /* If it's a domain, its not-null constraint is here: */
+ struct _constraintInfo *notnull;
+ /* If it's a domain, we store links to its CHECK constraints here: */
int nDomChecks;
struct _constraintInfo *domChecks;
} TypeInfo;
@@ -258,6 +260,8 @@ typedef struct _oprInfo
DumpableObject dobj;
const char *rolname;
char oprkind;
+ Oid oprleft;
+ Oid oprright;
Oid oprcode;
} OprInfo;
@@ -271,12 +275,14 @@ typedef struct _accessMethodInfo
typedef struct _opclassInfo
{
DumpableObject dobj;
+ Oid opcmethod;
const char *rolname;
} OpclassInfo;
typedef struct _opfamilyInfo
{
DumpableObject dobj;
+ Oid opfmethod;
const char *rolname;
} OpfamilyInfo;
@@ -284,6 +290,7 @@ typedef struct _collInfo
{
DumpableObject dobj;
const char *rolname;
+ int collencoding;
} CollInfo;
typedef struct _convInfo
@@ -365,6 +372,7 @@ typedef struct _tableInfo
* there isn't one on this column. If
* empty string, unnamed constraint
* (pre-v17) */
+ char **notnull_comment; /* comment thereof */
bool *notnull_invalid; /* true for NOT NULL NOT VALID */
bool *notnull_noinh; /* NOT NULL is NO INHERIT */
bool *notnull_islocal; /* true if NOT NULL has local definition */
@@ -708,6 +716,7 @@ typedef struct _SubscriptionInfo
bool subpasswordrequired;
bool subrunasowner;
bool subfailover;
+ bool subretaindeadtuples;
char *subconninfo;
char *subslotname;
char *subsynccommit;
@@ -756,6 +765,7 @@ extern TableInfo *findTableByOid(Oid oid);
extern TypeInfo *findTypeByOid(Oid oid);
extern FuncInfo *findFuncByOid(Oid oid);
extern OprInfo *findOprByOid(Oid oid);
+extern AccessMethodInfo *findAccessMethodByOid(Oid oid);
extern CollInfo *findCollationByOid(Oid oid);
extern NamespaceInfo *findNamespaceByOid(Oid oid);
extern ExtensionInfo *findExtensionByOid(Oid oid);
diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c
index 0b0977788f1..a02da3e9652 100644
--- a/src/bin/pg_dump/pg_dump_sort.c
+++ b/src/bin/pg_dump/pg_dump_sort.c
@@ -76,10 +76,10 @@ enum dbObjectTypePriorities
PRIO_TABLE_ATTACH,
PRIO_DUMMY_TYPE,
PRIO_ATTRDEF,
- PRIO_LARGE_OBJECT,
PRIO_PRE_DATA_BOUNDARY, /* boundary! */
PRIO_TABLE_DATA,
PRIO_SEQUENCE_SET,
+ PRIO_LARGE_OBJECT,
PRIO_LARGE_OBJECT_DATA,
PRIO_STATISTICS_DATA_DATA,
PRIO_POST_DATA_BOUNDARY, /* boundary! */
@@ -162,6 +162,8 @@ static DumpId postDataBoundId;
static int DOTypeNameCompare(const void *p1, const void *p2);
+static int pgTypeNameCompare(Oid typid1, Oid typid2);
+static int accessMethodNameCompare(Oid am1, Oid am2);
static bool TopoSort(DumpableObject **objs,
int numObjs,
DumpableObject **ordering,
@@ -228,12 +230,39 @@ DOTypeNameCompare(const void *p1, const void *p2)
else if (obj2->namespace)
return 1;
- /* Sort by name */
+ /*
+ * Sort by name. With a few exceptions, names here are single catalog
+ * columns. To get a fuller picture, grep pg_dump.c for "dobj.name = ".
+ * Names here don't match "Name:" in plain format output, which is a
+ * _tocEntry.tag. For example, DumpableObject.name of a constraint is
+ * pg_constraint.conname, but _tocEntry.tag of a constraint is relname and
+ * conname joined with a space.
+ */
cmpval = strcmp(obj1->name, obj2->name);
if (cmpval != 0)
return cmpval;
- /* To have a stable sort order, break ties for some object types */
+ /*
+ * Sort by type. This helps types that share a type priority without
+ * sharing a unique name constraint, e.g. opclass and opfamily.
+ */
+ cmpval = obj1->objType - obj2->objType;
+ if (cmpval != 0)
+ return cmpval;
+
+ /*
+ * To have a stable sort order, break ties for some object types. Most
+ * catalogs have a natural key, e.g. pg_proc_proname_args_nsp_index. Where
+ * the above "namespace" and "name" comparisons don't cover all natural
+ * key columns, compare the rest here.
+ *
+ * The natural key usually refers to other catalogs by surrogate keys.
+ * Hence, this translates each of those references to the natural key of
+ * the referenced catalog. That may descend through multiple levels of
+ * catalog references. For example, to sort by pg_proc.proargtypes,
+ * descend to each pg_type and then further to its pg_namespace, for an
+ * overall sort by (nspname, typname).
+ */
if (obj1->objType == DO_FUNC || obj1->objType == DO_AGG)
{
FuncInfo *fobj1 = *(FuncInfo *const *) p1;
@@ -246,22 +275,10 @@ DOTypeNameCompare(const void *p1, const void *p2)
return cmpval;
for (i = 0; i < fobj1->nargs; i++)
{
- TypeInfo *argtype1 = findTypeByOid(fobj1->argtypes[i]);
- TypeInfo *argtype2 = findTypeByOid(fobj2->argtypes[i]);
-
- if (argtype1 && argtype2)
- {
- if (argtype1->dobj.namespace && argtype2->dobj.namespace)
- {
- cmpval = strcmp(argtype1->dobj.namespace->dobj.name,
- argtype2->dobj.namespace->dobj.name);
- if (cmpval != 0)
- return cmpval;
- }
- cmpval = strcmp(argtype1->dobj.name, argtype2->dobj.name);
- if (cmpval != 0)
- return cmpval;
- }
+ cmpval = pgTypeNameCompare(fobj1->argtypes[i],
+ fobj2->argtypes[i]);
+ if (cmpval != 0)
+ return cmpval;
}
}
else if (obj1->objType == DO_OPERATOR)
@@ -273,6 +290,57 @@ DOTypeNameCompare(const void *p1, const void *p2)
cmpval = (oobj2->oprkind - oobj1->oprkind);
if (cmpval != 0)
return cmpval;
+ /* Within an oprkind, sort by argument type names */
+ cmpval = pgTypeNameCompare(oobj1->oprleft, oobj2->oprleft);
+ if (cmpval != 0)
+ return cmpval;
+ cmpval = pgTypeNameCompare(oobj1->oprright, oobj2->oprright);
+ if (cmpval != 0)
+ return cmpval;
+ }
+ else if (obj1->objType == DO_OPCLASS)
+ {
+ OpclassInfo *opcobj1 = *(OpclassInfo *const *) p1;
+ OpclassInfo *opcobj2 = *(OpclassInfo *const *) p2;
+
+ /* Sort by access method name, per pg_opclass_am_name_nsp_index */
+ cmpval = accessMethodNameCompare(opcobj1->opcmethod,
+ opcobj2->opcmethod);
+ if (cmpval != 0)
+ return cmpval;
+ }
+ else if (obj1->objType == DO_OPFAMILY)
+ {
+ OpfamilyInfo *opfobj1 = *(OpfamilyInfo *const *) p1;
+ OpfamilyInfo *opfobj2 = *(OpfamilyInfo *const *) p2;
+
+ /* Sort by access method name, per pg_opfamily_am_name_nsp_index */
+ cmpval = accessMethodNameCompare(opfobj1->opfmethod,
+ opfobj2->opfmethod);
+ if (cmpval != 0)
+ return cmpval;
+ }
+ else if (obj1->objType == DO_COLLATION)
+ {
+ CollInfo *cobj1 = *(CollInfo *const *) p1;
+ CollInfo *cobj2 = *(CollInfo *const *) p2;
+
+ /*
+ * Sort by encoding, per pg_collation_name_enc_nsp_index. Technically,
+ * this is not necessary, because wherever this changes dump order,
+ * restoring the dump fails anyway. CREATE COLLATION can't create a
+ * tie for this to break, because it imposes restrictions to make
+ * (nspname, collname) uniquely identify a collation within a given
+ * DatabaseEncoding. While pg_import_system_collations() can create a
+ * tie, pg_dump+restore fails after
+ * pg_import_system_collations('my_schema') does so. However, there's
+ * little to gain by ignoring one natural key column on the basis of
+ * those limitations elsewhere, so respect the full natural key like
+ * we do for other object types.
+ */
+ cmpval = cobj1->collencoding - cobj2->collencoding;
+ if (cmpval != 0)
+ return cmpval;
}
else if (obj1->objType == DO_ATTRDEF)
{
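Regarding the DO_COLLATION branch just above: the kind of tie collencoding breaks can be observed after pg_import_system_collations(), which may create same-named collations differing only in encoding (illustrative query; the rows returned depend on the platform's locales):

    SELECT collnamespace::regnamespace, collname, collencoding
    FROM pg_catalog.pg_collation
    WHERE collname = 'en_US';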
@@ -317,11 +385,143 @@ DOTypeNameCompare(const void *p1, const void *p2)
if (cmpval != 0)
return cmpval;
}
+ else if (obj1->objType == DO_CONSTRAINT)
+ {
+ ConstraintInfo *robj1 = *(ConstraintInfo *const *) p1;
+ ConstraintInfo *robj2 = *(ConstraintInfo *const *) p2;
+
+ /*
+ * Sort domain constraints before table constraints, for consistency
+ * with our decision to sort CREATE DOMAIN before CREATE TABLE.
+ */
+ if (robj1->condomain)
+ {
+ if (robj2->condomain)
+ {
+ /* Sort by domain name (domain namespace was considered) */
+ cmpval = strcmp(robj1->condomain->dobj.name,
+ robj2->condomain->dobj.name);
+ if (cmpval != 0)
+ return cmpval;
+ }
+ else
+ return PRIO_TYPE - PRIO_TABLE;
+ }
+ else if (robj2->condomain)
+ return PRIO_TABLE - PRIO_TYPE;
+ else
+ {
+ /* Sort by table name (table namespace was considered already) */
+ cmpval = strcmp(robj1->contable->dobj.name,
+ robj2->contable->dobj.name);
+ if (cmpval != 0)
+ return cmpval;
+ }
+ }
+ else if (obj1->objType == DO_PUBLICATION_REL)
+ {
+ PublicationRelInfo *probj1 = *(PublicationRelInfo *const *) p1;
+ PublicationRelInfo *probj2 = *(PublicationRelInfo *const *) p2;
+
+ /* Sort by publication name, since (namespace, name) match the rel */
+ cmpval = strcmp(probj1->publication->dobj.name,
+ probj2->publication->dobj.name);
+ if (cmpval != 0)
+ return cmpval;
+ }
+ else if (obj1->objType == DO_PUBLICATION_TABLE_IN_SCHEMA)
+ {
+ PublicationSchemaInfo *psobj1 = *(PublicationSchemaInfo *const *) p1;
+ PublicationSchemaInfo *psobj2 = *(PublicationSchemaInfo *const *) p2;
+
+ /* Sort by publication name, since ->name is just nspname */
+ cmpval = strcmp(psobj1->publication->dobj.name,
+ psobj2->publication->dobj.name);
+ if (cmpval != 0)
+ return cmpval;
+ }
- /* Usually shouldn't get here, but if we do, sort by OID */
+ /*
+ * Shouldn't get here except after catalog corruption, but if we do, sort
+ * by OID. This may make logically-identical databases differ in the
+ * order of objects in dump output. Users will get spurious schema diffs.
+ * Expect flaky failures of 002_pg_upgrade.pl test 'dump outputs from
+ * original and restored regression databases match' if the regression
+ * database contains objects allowing that test to reach here. That's a
+ * consequence of the test using "pg_restore -j", which doesn't fully
+ * constrain OID assignment order.
+ */
+ Assert(false);
return oidcmp(obj1->catId.oid, obj2->catId.oid);
}
+/* Compare two OID-identified pg_type values by nspname, then by typname. */
+static int
+pgTypeNameCompare(Oid typid1, Oid typid2)
+{
+ TypeInfo *typobj1;
+ TypeInfo *typobj2;
+ int cmpval;
+
+ if (typid1 == typid2)
+ return 0;
+
+ typobj1 = findTypeByOid(typid1);
+ typobj2 = findTypeByOid(typid2);
+
+ if (!typobj1 || !typobj2)
+ {
+ /*
+ * getTypes() didn't find some OID. Assume catalog corruption, e.g.
+ * an oprright value without the corresponding OID in a pg_type row.
+ * Report as "equal", so the caller uses the next available basis for
+ * comparison, e.g. the next function argument.
+ *
+ * Unary operators have InvalidOid in oprleft (if oprkind='l') or in
+ * oprright (if oprkind='r'). Caller already sorted by oprkind,
+ * calling us only for like-kind operators. Hence, "typid1 == typid2"
+ * took care of InvalidOid. (v14 removed postfix operator support.
+ * Hence, when dumping from v14+, only oprleft can be InvalidOid.)
+ */
+ Assert(false);
+ return 0;
+ }
+
+ if (!typobj1->dobj.namespace || !typobj2->dobj.namespace)
+ Assert(false); /* catalog corruption */
+ else
+ {
+ cmpval = strcmp(typobj1->dobj.namespace->dobj.name,
+ typobj2->dobj.namespace->dobj.name);
+ if (cmpval != 0)
+ return cmpval;
+ }
+ return strcmp(typobj1->dobj.name, typobj2->dobj.name);
+}
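For example (hypothetical overloads), two functions equal on namespace, name, and argument count are ordered by their argument types' (nspname, typname):

    CREATE FUNCTION public.f(int4) RETURNS integer LANGUAGE sql AS 'SELECT 1';
    CREATE FUNCTION public.f(text) RETURNS integer LANGUAGE sql AS 'SELECT 2';
    -- pgTypeNameCompare() orders f(int4) before f(text): both argument
    -- types live in pg_catalog, and strcmp("int4", "text") < 0.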
+
+/* Compare two OID-identified pg_am values by amname. */
+static int
+accessMethodNameCompare(Oid am1, Oid am2)
+{
+ AccessMethodInfo *amobj1;
+ AccessMethodInfo *amobj2;
+
+ if (am1 == am2)
+ return 0;
+
+ amobj1 = findAccessMethodByOid(am1);
+ amobj2 = findAccessMethodByOid(am2);
+
+ if (!amobj1 || !amobj2)
+ {
+ /* catalog corruption: handle like pgTypeNameCompare() does */
+ Assert(false);
+ return 0;
+ }
+
+ return strcmp(amobj1->dobj.name, amobj2->dobj.name);
+}
+
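Both comparators follow the catalogs' natural keys; for opclasses the resulting order can be previewed with a query like this (illustrative):

    -- pg_opclass_am_name_nsp_index is (opcmethod, opcname, opcnamespace)
    SELECT am.amname, opc.opcname, opc.opcnamespace::regnamespace
    FROM pg_catalog.pg_opclass opc
    JOIN pg_catalog.pg_am am ON am.oid = opc.opcmethod
    ORDER BY am.amname, opc.opcname;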
/*
* Sort the given objects into a safe dump order using dependency
@@ -907,7 +1107,7 @@ repairTableAttrDefMultiLoop(DumpableObject *tableobj,
}
/*
- * CHECK constraints on domains work just like those on tables ...
+ * CHECK and NOT NULL constraints on domains work just like those on tables ...
*/
static void
repairDomainConstraintLoop(DumpableObject *domainobj,
@@ -1173,11 +1373,12 @@ repairDependencyLoop(DumpableObject **loop,
}
}
- /* Domain and CHECK constraint */
+ /* Domain and CHECK or NOT NULL constraint */
if (nLoop == 2 &&
loop[0]->objType == DO_TYPE &&
loop[1]->objType == DO_CONSTRAINT &&
- ((ConstraintInfo *) loop[1])->contype == 'c' &&
+ (((ConstraintInfo *) loop[1])->contype == 'c' ||
+ ((ConstraintInfo *) loop[1])->contype == 'n') &&
((ConstraintInfo *) loop[1])->condomain == (TypeInfo *) loop[0])
{
repairDomainConstraintLoop(loop[0], loop[1]);
@@ -1186,14 +1387,15 @@ repairDependencyLoop(DumpableObject **loop,
if (nLoop == 2 &&
loop[1]->objType == DO_TYPE &&
loop[0]->objType == DO_CONSTRAINT &&
- ((ConstraintInfo *) loop[0])->contype == 'c' &&
+ (((ConstraintInfo *) loop[0])->contype == 'c' ||
+ ((ConstraintInfo *) loop[0])->contype == 'n') &&
((ConstraintInfo *) loop[0])->condomain == (TypeInfo *) loop[1])
{
repairDomainConstraintLoop(loop[1], loop[0]);
return;
}
- /* Indirect loop involving domain and CHECK constraint */
+ /* Indirect loop involving domain and CHECK or NOT NULL constraint */
if (nLoop > 2)
{
for (i = 0; i < nLoop; i++)
@@ -1203,7 +1405,8 @@ repairDependencyLoop(DumpableObject **loop,
for (j = 0; j < nLoop; j++)
{
if (loop[j]->objType == DO_CONSTRAINT &&
- ((ConstraintInfo *) loop[j])->contype == 'c' &&
+ (((ConstraintInfo *) loop[j])->contype == 'c' ||
+ ((ConstraintInfo *) loop[j])->contype == 'n') &&
((ConstraintInfo *) loop[j])->condomain == (TypeInfo *) loop[i])
{
repairDomainConstraintMultiLoop(loop[i], loop[j]);
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 7f9c302b719..87d10df07c4 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -65,10 +65,9 @@ static void dropTablespaces(PGconn *conn);
static void dumpTablespaces(PGconn *conn);
static void dropDBs(PGconn *conn);
static void dumpUserConfig(PGconn *conn, const char *username);
-static void dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat);
+static void dumpDatabases(PGconn *conn);
static void dumpTimestamp(const char *msg);
-static int runPgDump(const char *dbname, const char *create_opts,
- char *dbfile, ArchiveFormat archDumpFormat);
+static int runPgDump(const char *dbname, const char *create_opts);
static void buildShSecLabels(PGconn *conn,
const char *catalog_name, Oid objectId,
const char *objtype, const char *objname,
@@ -77,7 +76,6 @@ static void executeCommand(PGconn *conn, const char *query);
static void expand_dbname_patterns(PGconn *conn, SimpleStringList *patterns,
SimpleStringList *names);
static void read_dumpall_filters(const char *filename, SimpleStringList *pattern);
-static ArchiveFormat parseDumpFormat(const char *format);
static char pg_dump_bin[MAXPGPATH];
static PQExpBuffer pgdumpopts;
@@ -150,7 +148,6 @@ main(int argc, char *argv[])
{"password", no_argument, NULL, 'W'},
{"no-privileges", no_argument, NULL, 'x'},
{"no-acl", no_argument, NULL, 'x'},
- {"format", required_argument, NULL, 'F'},
/*
* the following options don't have an equivalent short option letter
@@ -201,8 +198,6 @@ main(int argc, char *argv[])
char *pgdb = NULL;
char *use_role = NULL;
const char *dumpencoding = NULL;
- ArchiveFormat archDumpFormat = archNull;
- const char *formatName = "p";
trivalue prompt_password = TRI_DEFAULT;
bool data_only = false;
bool globals_only = false;
@@ -252,7 +247,7 @@ main(int argc, char *argv[])
pgdumpopts = createPQExpBuffer();
- while ((c = getopt_long(argc, argv, "acd:E:f:F:gh:l:Op:rsS:tU:vwWx", long_options, &optindex)) != -1)
+ while ((c = getopt_long(argc, argv, "acd:E:f:gh:l:Op:rsS:tU:vwWx", long_options, &optindex)) != -1)
{
switch (c)
{
@@ -280,9 +275,7 @@ main(int argc, char *argv[])
appendPQExpBufferStr(pgdumpopts, " -f ");
appendShellString(pgdumpopts, filename);
break;
- case 'F':
- formatName = pg_strdup(optarg);
- break;
+
case 'g':
globals_only = true;
break;
@@ -431,21 +424,6 @@ main(int argc, char *argv[])
exit_nicely(1);
}
- /* Get format for dump. */
- archDumpFormat = parseDumpFormat(formatName);
-
- /*
- * If a non-plain format is specified, a file name is also required as the
- * path to the main directory.
- */
- if (archDumpFormat != archNull &&
- (!filename || strcmp(filename, "") == 0))
- {
- pg_log_error("option -F/--format=d|c|t requires option -f/--file");
- pg_log_error_hint("Try \"%s --help\" for more information.", progname);
- exit_nicely(1);
- }
-
/*
* If password values are not required in the dump, switch to using
* pg_roles which is equally useful, just more likely to have unrestricted
@@ -511,33 +489,6 @@ main(int argc, char *argv[])
appendPQExpBufferStr(pgdumpopts, " --sequence-data");
/*
- * Open the output file if required, otherwise use stdout. If required,
- * then create new directory and global.dat file.
- */
- if (archDumpFormat != archNull)
- {
- char global_path[MAXPGPATH];
-
- /* Create new directory or accept the empty existing directory. */
- create_or_open_dir(filename);
-
- snprintf(global_path, MAXPGPATH, "%s/global.dat", filename);
-
- OPF = fopen(global_path, PG_BINARY_W);
- if (!OPF)
- pg_fatal("could not open \"%s\": %m", global_path);
- }
- else if (filename)
- {
- OPF = fopen(filename, PG_BINARY_W);
- if (!OPF)
- pg_fatal("could not open output file \"%s\": %m",
- filename);
- }
- else
- OPF = stdout;
-
- /*
* If there was a database specified on the command line, use that,
* otherwise try to connect to database "postgres", and failing that
* "template1".
@@ -577,6 +528,19 @@ main(int argc, char *argv[])
&database_exclude_names);
/*
+ * Open the output file if required, otherwise use stdout
+ */
+ if (filename)
+ {
+ OPF = fopen(filename, PG_BINARY_W);
+ if (!OPF)
+ pg_fatal("could not open output file \"%s\": %m",
+ filename);
+ }
+ else
+ OPF = stdout;
+
+ /*
* Set the client encoding if requested.
*/
if (dumpencoding)
@@ -632,7 +596,7 @@ main(int argc, char *argv[])
fprintf(OPF, "SET escape_string_warning = off;\n");
fprintf(OPF, "\n");
- if (!data_only)
+ if (!data_only && !statistics_only && !no_schema)
{
/*
* If asked to --clean, do that first. We can avoid detailed
@@ -675,7 +639,7 @@ main(int argc, char *argv[])
}
if (!globals_only && !roles_only && !tablespaces_only)
- dumpDatabases(conn, archDumpFormat);
+ dumpDatabases(conn);
PQfinish(conn);
@@ -688,7 +652,7 @@ main(int argc, char *argv[])
fclose(OPF);
/* sync the resulting file, errors are not fatal */
- if (dosync && (archDumpFormat == archNull))
+ if (dosync)
(void) fsync_fname(filename, false);
}
@@ -699,14 +663,12 @@ main(int argc, char *argv[])
static void
help(void)
{
- printf(_("%s extracts a PostgreSQL database cluster based on specified dump format.\n\n"), progname);
+ printf(_("%s exports a PostgreSQL database cluster as an SQL script.\n\n"), progname);
printf(_("Usage:\n"));
printf(_(" %s [OPTION]...\n"), progname);
printf(_("\nGeneral options:\n"));
printf(_(" -f, --file=FILENAME output file name\n"));
- printf(_(" -F, --format=c|d|t|p output file format (custom, directory, tar,\n"
- " plain text (default))\n"));
printf(_(" -v, --verbose verbose mode\n"));
printf(_(" -V, --version output version information, then exit\n"));
printf(_(" --lock-wait-timeout=TIMEOUT fail after waiting TIMEOUT for a table lock\n"));
@@ -1013,6 +975,9 @@ dumpRoles(PGconn *conn)
* We do it this way because config settings for roles could mention the
* names of other roles.
*/
+ if (PQntuples(res) > 0)
+ fprintf(OPF, "\n--\n-- User Configurations\n--\n");
+
for (i = 0; i < PQntuples(res); i++)
dumpUserConfig(conn, PQgetvalue(res, i, i_rolname));
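With the section header hoisted to dumpRoles(), the output for a cluster with one configured role looks roughly like this (hypothetical role and setting):

    --
    -- User Configurations
    --

    --
    -- User Config "alice"
    --

    ALTER ROLE alice SET work_mem TO '64MB';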
@@ -1526,7 +1491,6 @@ dumpUserConfig(PGconn *conn, const char *username)
{
PQExpBuffer buf = createPQExpBuffer();
PGresult *res;
- static bool header_done = false;
printfPQExpBuffer(buf, "SELECT unnest(setconfig) FROM pg_db_role_setting "
"WHERE setdatabase = 0 AND setrole = "
@@ -1538,13 +1502,7 @@ dumpUserConfig(PGconn *conn, const char *username)
res = executeQuery(conn, buf->data);
if (PQntuples(res) > 0)
- {
- if (!header_done)
- fprintf(OPF, "\n--\n-- User Configurations\n--\n");
- header_done = true;
-
fprintf(OPF, "\n--\n-- User Config \"%s\"\n--\n\n", username);
- }
for (int i = 0; i < PQntuples(res); i++)
{
@@ -1618,13 +1576,10 @@ expand_dbname_patterns(PGconn *conn,
* Dump contents of databases.
*/
static void
-dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat)
+dumpDatabases(PGconn *conn)
{
PGresult *res;
int i;
- char db_subdir[MAXPGPATH];
- char dbfilepath[MAXPGPATH];
- FILE *map_file = NULL;
/*
* Skip databases marked not datallowconn, since we'd be unable to connect
@@ -1638,42 +1593,18 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat)
* doesn't have some failure mode with --clean.
*/
res = executeQuery(conn,
- "SELECT datname, oid "
+ "SELECT datname "
"FROM pg_database d "
"WHERE datallowconn AND datconnlimit != -2 "
"ORDER BY (datname <> 'template1'), datname");
- if (archDumpFormat == archNull && PQntuples(res) > 0)
+ if (PQntuples(res) > 0)
fprintf(OPF, "--\n-- Databases\n--\n\n");
- /*
- * If directory/tar/custom format is specified, create a subdirectory
- * under the main directory and each database dump file or subdirectory
- * will be created in that subdirectory by pg_dump.
- */
- if (archDumpFormat != archNull)
- {
- char map_file_path[MAXPGPATH];
-
- snprintf(db_subdir, MAXPGPATH, "%s/databases", filename);
-
- /* Create a subdirectory with 'databases' name under main directory. */
- if (mkdir(db_subdir, pg_dir_create_mode) != 0)
- pg_fatal("could not create subdirectory \"%s\": %m", db_subdir);
-
- snprintf(map_file_path, MAXPGPATH, "%s/map.dat", filename);
-
- /* Create a map file (to store dboid and dbname) */
- map_file = fopen(map_file_path, PG_BINARY_W);
- if (!map_file)
- pg_fatal("could not open map file: %s", strerror(errno));
- }
-
for (i = 0; i < PQntuples(res); i++)
{
char *dbname = PQgetvalue(res, i, 0);
- char *oid = PQgetvalue(res, i, 1);
- const char *create_opts = "";
+ const char *create_opts;
int ret;
/* Skip template0, even if it's not marked !datallowconn. */
@@ -1687,27 +1618,9 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat)
continue;
}
- /*
- * If this is not a plain format dump, then append dboid and dbname to
- * the map.dat file.
- */
- if (archDumpFormat != archNull)
- {
- if (archDumpFormat == archCustom)
- snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\".dmp", db_subdir, oid);
- else if (archDumpFormat == archTar)
- snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\".tar", db_subdir, oid);
- else
- snprintf(dbfilepath, MAXPGPATH, "\"%s\"/\"%s\"", db_subdir, oid);
-
- /* Put one line entry for dboid and dbname in map file. */
- fprintf(map_file, "%s %s\n", oid, dbname);
- }
-
pg_log_info("dumping database \"%s\"", dbname);
- if (archDumpFormat == archNull)
- fprintf(OPF, "--\n-- Database \"%s\" dump\n--\n\n", dbname);
+ fprintf(OPF, "--\n-- Database \"%s\" dump\n--\n\n", dbname);
/*
* We assume that "template1" and "postgres" already exist in the
@@ -1721,9 +1634,12 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat)
{
if (output_clean)
create_opts = "--clean --create";
- /* Since pg_dump won't emit a \connect command, we must */
- else if (archDumpFormat == archNull)
+ else
+ {
+ create_opts = "";
+ /* Since pg_dump won't emit a \connect command, we must */
fprintf(OPF, "\\connect %s\n\n", dbname);
+ }
}
else
create_opts = "--create";
@@ -1731,30 +1647,19 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat)
if (filename)
fclose(OPF);
- ret = runPgDump(dbname, create_opts, dbfilepath, archDumpFormat);
+ ret = runPgDump(dbname, create_opts);
if (ret != 0)
pg_fatal("pg_dump failed on database \"%s\", exiting", dbname);
if (filename)
{
- char global_path[MAXPGPATH];
-
- if (archDumpFormat != archNull)
- snprintf(global_path, MAXPGPATH, "%s/global.dat", filename);
- else
- snprintf(global_path, MAXPGPATH, "%s", filename);
-
- OPF = fopen(global_path, PG_BINARY_A);
+ OPF = fopen(filename, PG_BINARY_A);
if (!OPF)
pg_fatal("could not re-open the output file \"%s\": %m",
- global_path);
+ filename);
}
}
- /* Close map file */
- if (archDumpFormat != archNull)
- fclose(map_file);
-
PQclear(res);
}
@@ -1764,8 +1669,7 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat)
* Run pg_dump on dbname, with specified options.
*/
static int
-runPgDump(const char *dbname, const char *create_opts, char *dbfile,
- ArchiveFormat archDumpFormat)
+runPgDump(const char *dbname, const char *create_opts)
{
PQExpBufferData connstrbuf;
PQExpBufferData cmd;
@@ -1774,36 +1678,17 @@ runPgDump(const char *dbname, const char *create_opts, char *dbfile,
initPQExpBuffer(&connstrbuf);
initPQExpBuffer(&cmd);
+ printfPQExpBuffer(&cmd, "\"%s\" %s %s", pg_dump_bin,
+ pgdumpopts->data, create_opts);
+
/*
- * If this is not a plain format dump, then append file name and dump
- * format to the pg_dump command to get archive dump.
+ * If we have a filename, use the undocumented plain-append pg_dump
+ * format.
*/
- if (archDumpFormat != archNull)
- {
- printfPQExpBuffer(&cmd, "\"%s\" -f %s %s", pg_dump_bin,
- dbfile, create_opts);
-
- if (archDumpFormat == archDirectory)
- appendPQExpBufferStr(&cmd, " --format=directory ");
- else if (archDumpFormat == archCustom)
- appendPQExpBufferStr(&cmd, " --format=custom ");
- else if (archDumpFormat == archTar)
- appendPQExpBufferStr(&cmd, " --format=tar ");
- }
+ if (filename)
+ appendPQExpBufferStr(&cmd, " -Fa ");
else
- {
- printfPQExpBuffer(&cmd, "\"%s\" %s %s", pg_dump_bin,
- pgdumpopts->data, create_opts);
-
- /*
- * If we have a filename, use the undocumented plain-append pg_dump
- * format.
- */
- if (filename)
- appendPQExpBufferStr(&cmd, " -Fa ");
- else
- appendPQExpBufferStr(&cmd, " -Fp ");
- }
+ appendPQExpBufferStr(&cmd, " -Fp ");
/*
* Append the database name to the already-constructed stem of connection
@@ -1948,36 +1833,3 @@ read_dumpall_filters(const char *filename, SimpleStringList *pattern)
filter_free(&fstate);
}
-
-/*
- * parseDumpFormat
- *
- * This will validate dump formats.
- */
-static ArchiveFormat
-parseDumpFormat(const char *format)
-{
- ArchiveFormat archDumpFormat;
-
- if (pg_strcasecmp(format, "c") == 0)
- archDumpFormat = archCustom;
- else if (pg_strcasecmp(format, "custom") == 0)
- archDumpFormat = archCustom;
- else if (pg_strcasecmp(format, "d") == 0)
- archDumpFormat = archDirectory;
- else if (pg_strcasecmp(format, "directory") == 0)
- archDumpFormat = archDirectory;
- else if (pg_strcasecmp(format, "p") == 0)
- archDumpFormat = archNull;
- else if (pg_strcasecmp(format, "plain") == 0)
- archDumpFormat = archNull;
- else if (pg_strcasecmp(format, "t") == 0)
- archDumpFormat = archTar;
- else if (pg_strcasecmp(format, "tar") == 0)
- archDumpFormat = archTar;
- else
- pg_fatal("unrecognized archive format \"%s\"; please specify \"c\", \"d\", \"p\", or \"t\"",
- format);
-
- return archDumpFormat;
-}
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c
index f2182e91825..2c727b9f156 100644
--- a/src/bin/pg_dump/pg_restore.c
+++ b/src/bin/pg_dump/pg_restore.c
@@ -2,7 +2,7 @@
*
* pg_restore.c
* pg_restore is a utility extracting postgres database definitions
- * from a backup archive created by pg_dump/pg_dumpall using the archiver
+ * from a backup archive created by pg_dump using the archiver
* interface.
*
* pg_restore will read the backup archive and
@@ -41,15 +41,11 @@
#include "postgres_fe.h"
#include <ctype.h>
-#include <sys/stat.h>
#ifdef HAVE_TERMIOS_H
#include <termios.h>
#endif
-#include "common/string.h"
-#include "connectdb.h"
#include "fe_utils/option_utils.h"
-#include "fe_utils/string_utils.h"
#include "filter.h"
#include "getopt_long.h"
#include "parallel.h"
@@ -57,43 +53,18 @@
static void usage(const char *progname);
static void read_restore_filters(const char *filename, RestoreOptions *opts);
-static bool file_exists_in_directory(const char *dir, const char *filename);
-static int restore_one_database(const char *inputFileSpec, RestoreOptions *opts,
- int numWorkers, bool append_data, int num);
-static int read_one_statement(StringInfo inBuf, FILE *pfile);
-static int restore_all_databases(PGconn *conn, const char *dumpdirpath,
- SimpleStringList db_exclude_patterns, RestoreOptions *opts, int numWorkers);
-static int process_global_sql_commands(PGconn *conn, const char *dumpdirpath,
- const char *outfile);
-static void copy_or_print_global_file(const char *outfile, FILE *pfile);
-static int get_dbnames_list_to_restore(PGconn *conn,
- SimplePtrList *dbname_oid_list,
- SimpleStringList db_exclude_patterns);
-static int get_dbname_oid_list_from_mfile(const char *dumpdirpath,
- SimplePtrList *dbname_oid_list);
-
-/*
- * Stores a database OID and the corresponding name.
- */
-typedef struct DbOidName
-{
- Oid oid;
- char str[FLEXIBLE_ARRAY_MEMBER]; /* null-terminated string here */
-} DbOidName;
-
int
main(int argc, char **argv)
{
RestoreOptions *opts;
int c;
+ int exit_code;
int numWorkers = 1;
+ Archive *AH;
char *inputFileSpec;
bool data_only = false;
bool schema_only = false;
- int n_errors = 0;
- bool globals_only = false;
- SimpleStringList db_exclude_patterns = {NULL, NULL};
static int disable_triggers = 0;
static int enable_row_security = 0;
static int if_exists = 0;
@@ -119,7 +90,6 @@ main(int argc, char **argv)
{"clean", 0, NULL, 'c'},
{"create", 0, NULL, 'C'},
{"data-only", 0, NULL, 'a'},
- {"globals-only", 0, NULL, 'g'},
{"dbname", 1, NULL, 'd'},
{"exit-on-error", 0, NULL, 'e'},
{"exclude-schema", 1, NULL, 'N'},
@@ -174,7 +144,6 @@ main(int argc, char **argv)
{"with-statistics", no_argument, &with_statistics, 1},
{"statistics-only", no_argument, &statistics_only, 1},
{"filter", required_argument, NULL, 4},
- {"exclude-database", required_argument, NULL, 6},
{NULL, 0, NULL, 0}
};
@@ -203,7 +172,7 @@ main(int argc, char **argv)
}
}
- while ((c = getopt_long(argc, argv, "acCd:ef:F:gh:I:j:lL:n:N:Op:P:RsS:t:T:U:vwWx1",
+ while ((c = getopt_long(argc, argv, "acCd:ef:F:h:I:j:lL:n:N:Op:P:RsS:t:T:U:vwWx1",
cmdopts, NULL)) != -1)
{
switch (c)
@@ -230,14 +199,11 @@ main(int argc, char **argv)
if (strlen(optarg) != 0)
opts->formatName = pg_strdup(optarg);
break;
- case 'g':
- /* restore only global.dat file from directory */
- globals_only = true;
- break;
case 'h':
if (strlen(optarg) != 0)
opts->cparams.pghost = pg_strdup(optarg);
break;
+
case 'j': /* number of restore jobs */
if (!option_parse_int(optarg, "-j/--jobs", 1,
PG_MAX_JOBS,
@@ -352,9 +318,6 @@ main(int argc, char **argv)
exit(1);
opts->exit_on_error = true;
break;
- case 6: /* database patterns to skip */
- simple_string_list_append(&db_exclude_patterns, optarg);
- break;
default:
/* getopt_long already emitted a complaint */
@@ -382,13 +345,6 @@ main(int argc, char **argv)
if (!opts->cparams.dbname && !opts->filename && !opts->tocSummary)
pg_fatal("one of -d/--dbname and -f/--file must be specified");
- if (db_exclude_patterns.head != NULL && globals_only)
- {
- pg_log_error("option --exclude-database cannot be used together with -g/--globals-only");
- pg_log_error_hint("Try \"%s --help\" for more information.", progname);
- exit_nicely(1);
- }
-
/* Should get at most one of -d and -f, else user is confused */
if (opts->cparams.dbname)
{
@@ -425,6 +381,17 @@ main(int argc, char **argv)
if (with_statistics && no_statistics)
pg_fatal("options --with-statistics and --no-statistics cannot be used together");
+ /* reject conflicting "only-" and "with-" options */
+ if (data_only && (with_schema || with_statistics))
+ pg_fatal("options %s and %s cannot be used together",
+ "-a/--data-only", with_schema ? "--with-schema" : "--with-statistics");
+ if (schema_only && (with_data || with_statistics))
+ pg_fatal("options %s and %s cannot be used together",
+ "-s/--schema-only", with_data ? "--with-data" : "--with-statistics");
+ if (statistics_only && (with_data || with_schema))
+ pg_fatal("options %s and %s cannot be used together",
+ "--statistics-only", with_data ? "--with-data" : "--with-schema");
+
if (data_only && opts->dropSchema)
pg_fatal("options -c/--clean and -a/--data-only cannot be used together");
@@ -443,11 +410,9 @@ main(int argc, char **argv)
pg_fatal("cannot specify both --single-transaction and multiple jobs");
/*
- * Set derivative flags. An "-only" option may be overridden by an
- * explicit "with-" option; e.g. "--schema-only --with-statistics" will
- * include schema and statistics. Other ambiguous or nonsensical
- * combinations, e.g. "--schema-only --no-schema", will have already
- * caused an error in one of the checks above.
+ * Set derivative flags. Ambiguous or nonsensical combinations, e.g.
+ * "--schema-only --no-schema", will have already caused an error in one
+ * of the checks above.
*/
opts->dumpData = ((opts->dumpData && !schema_only && !statistics_only) ||
(data_only || with_data)) && !no_data;
@@ -496,114 +461,6 @@ main(int argc, char **argv)
opts->formatName);
}
- /*
- * If toc.dat file is not present in the current path, then check for
- * global.dat. If global.dat file is present, then restore all the
- * databases from map.dat (if it exists), but skip restoring those
- * matching --exclude-database patterns.
- */
- if (inputFileSpec != NULL && !file_exists_in_directory(inputFileSpec, "toc.dat") &&
- file_exists_in_directory(inputFileSpec, "global.dat"))
- {
- PGconn *conn = NULL; /* Connection to restore global sql
- * commands. */
-
- /*
- * Can only use --list or --use-list options with a single database
- * dump.
- */
- if (opts->tocSummary)
- pg_fatal("option -l/--list cannot be used when restoring an archive created by pg_dumpall");
- else if (opts->tocFile)
- pg_fatal("option -L/--use-list cannot be used when restoring an archive created by pg_dumpall");
-
- /*
- * To restore from a pg_dumpall archive, -C (create database) option
- * must be specified unless we are only restoring globals.
- */
- if (!globals_only && opts->createDB != 1)
- {
- pg_log_error("-C/--create option should be specified when restoring an archive created by pg_dumpall");
- pg_log_error_hint("Try \"%s --help\" for more information.", progname);
- pg_log_error_hint("Individual databases can be restored using their specific archives.");
- exit_nicely(1);
- }
-
- /*
- * Connect to the database to execute global sql commands from
- * global.dat file.
- */
- if (opts->cparams.dbname)
- {
- conn = ConnectDatabase(opts->cparams.dbname, NULL, opts->cparams.pghost,
- opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
- false, progname, NULL, NULL, NULL, NULL);
-
-
- if (!conn)
- pg_fatal("could not connect to database \"%s\"", opts->cparams.dbname);
- }
-
- /* If globals-only, then return from here. */
- if (globals_only)
- {
- /*
- * Open global.dat file and execute/append all the global sql
- * commands.
- */
- n_errors = process_global_sql_commands(conn, inputFileSpec,
- opts->filename);
-
- if (conn)
- PQfinish(conn);
-
- pg_log_info("database restoring skipped as -g/--globals-only option was specified");
- }
- else
- {
- /* Now restore all the databases from map.dat */
- n_errors = restore_all_databases(conn, inputFileSpec, db_exclude_patterns,
- opts, numWorkers);
- }
-
- /* Free db pattern list. */
- simple_string_list_destroy(&db_exclude_patterns);
- }
- else /* process if global.dat file does not exist. */
- {
- if (db_exclude_patterns.head != NULL)
- pg_fatal("option --exclude-database can be used only when restoring an archive created by pg_dumpall");
-
- if (globals_only)
- pg_fatal("option -g/--globals-only can be used only when restoring an archive created by pg_dumpall");
-
- n_errors = restore_one_database(inputFileSpec, opts, numWorkers, false, 0);
- }
-
- /* Done, print a summary of ignored errors during restore. */
- if (n_errors)
- {
- pg_log_warning("errors ignored on restore: %d", n_errors);
- return 1;
- }
-
- return 0;
-}
-
-/*
- * restore_one_database
- *
- * Restore one database from an archive, using its toc.dat file.
- *
- * Returns the number of errors encountered during the restore.
- */
-static int
-restore_one_database(const char *inputFileSpec, RestoreOptions *opts,
- int numWorkers, bool append_data, int num)
-{
- Archive *AH;
- int n_errors;
-
AH = OpenArchive(inputFileSpec, opts->format);
SetArchiveOptions(AH, NULL, opts);
@@ -611,15 +468,9 @@ restore_one_database(const char *inputFileSpec, RestoreOptions *opts,
/*
* We don't have a connection yet but that doesn't matter. The connection
* is initialized to NULL and if we terminate through exit_nicely() while
- * it's still NULL, the cleanup function will just be a no-op. If we are
- * restoring multiple databases, then only update AX handle for cleanup as
- * the previous entry was already in the array and we had closed previous
- * connection, so we can use the same array slot.
+ * it's still NULL, the cleanup function will just be a no-op.
*/
- if (!append_data || num == 0)
- on_exit_close_archive(AH);
- else
- replace_on_exit_close_archive(AH);
+ on_exit_close_archive(AH);
/* Let the archiver know how noisy to be */
AH->verbose = opts->verbose;
@@ -639,21 +490,25 @@ restore_one_database(const char *inputFileSpec, RestoreOptions *opts,
else
{
ProcessArchiveRestoreOptions(AH);
- RestoreArchive(AH, append_data);
+ RestoreArchive(AH);
}
- n_errors = AH->n_errors;
+ /* done, print a summary of ignored errors */
+ if (AH->n_errors)
+ pg_log_warning("errors ignored on restore: %d", AH->n_errors);
/* AH may be freed in CloseArchive? */
+ exit_code = AH->n_errors ? 1 : 0;
+
CloseArchive(AH);
- return n_errors;
+ return exit_code;
}
static void
usage(const char *progname)
{
- printf(_("%s restores PostgreSQL databases from archives created by pg_dump or pg_dumpall.\n\n"), progname);
+ printf(_("%s restores a PostgreSQL database from an archive created by pg_dump.\n\n"), progname);
printf(_("Usage:\n"));
printf(_(" %s [OPTION]... [FILE]\n"), progname);
@@ -671,7 +526,6 @@ usage(const char *progname)
printf(_(" -c, --clean clean (drop) database objects before recreating\n"));
printf(_(" -C, --create create the target database\n"));
printf(_(" -e, --exit-on-error exit on error, default is to continue\n"));
- printf(_(" -g, --globals-only restore only global objects, no databases\n"));
printf(_(" -I, --index=NAME restore named index\n"));
printf(_(" -j, --jobs=NUM use this many parallel jobs to restore\n"));
printf(_(" -L, --use-list=FILENAME use table of contents from this file for\n"
@@ -688,7 +542,6 @@ usage(const char *progname)
printf(_(" -1, --single-transaction restore as a single transaction\n"));
printf(_(" --disable-triggers disable triggers during data-only restore\n"));
printf(_(" --enable-row-security enable row security\n"));
- printf(_(" --exclude-database=PATTERN do not restore the specified database(s)\n"));
printf(_(" --filter=FILENAME restore or skip objects based on expressions\n"
" in FILENAME\n"));
printf(_(" --if-exists use IF EXISTS when dropping objects\n"));
@@ -712,9 +565,9 @@ usage(const char *progname)
printf(_(" --use-set-session-authorization\n"
" use SET SESSION AUTHORIZATION commands instead of\n"
" ALTER OWNER commands to set ownership\n"));
- printf(_(" --with-data dump the data\n"));
- printf(_(" --with-schema dump the schema\n"));
- printf(_(" --with-statistics dump the statistics\n"));
+ printf(_(" --with-data restore the data\n"));
+ printf(_(" --with-schema restore the schema\n"));
+ printf(_(" --with-statistics restore the statistics\n"));
printf(_("\nConnection options:\n"));
printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
@@ -725,8 +578,8 @@ usage(const char *progname)
printf(_(" --role=ROLENAME do SET ROLE before restore\n"));
printf(_("\n"
- "The options -I, -n, -N, -P, -t, -T, --section, and --exclude-database can be combined\n"
- "and specified multiple times to select multiple objects.\n"));
+ "The options -I, -n, -N, -P, -t, -T, and --section can be combined and specified\n"
+ "multiple times to select multiple objects.\n"));
printf(_("\nIf no input file name is supplied, then standard input is used.\n\n"));
printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT);
printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL);
@@ -831,578 +684,3 @@ read_restore_filters(const char *filename, RestoreOptions *opts)
filter_free(&fstate);
}
-
-/*
- * file_exists_in_directory
- *
- * Returns true if the file exists in the given directory.
- */
-static bool
-file_exists_in_directory(const char *dir, const char *filename)
-{
- struct stat st;
- char buf[MAXPGPATH];
-
- if (snprintf(buf, MAXPGPATH, "%s/%s", dir, filename) >= MAXPGPATH)
- pg_fatal("directory name too long: \"%s\"", dir);
-
- return (stat(buf, &st) == 0 && S_ISREG(st.st_mode));
-}
-
-/*
- * read_one_statement
- *
- * Read from the given file pointer using fgetc() until a semicolon (the
- * SQL statement terminator used in global.dat) is seen.
- *
- * EOF is returned if end-of-file input is seen; time to shut down.
- */
-
-static int
-read_one_statement(StringInfo inBuf, FILE *pfile)
-{
- int c; /* character read from fgetc() */
- int m;
-
- StringInfoData q;
-
- initStringInfo(&q);
-
- resetStringInfo(inBuf);
-
- /*
- * Read characters until EOF or the appropriate delimiter is seen.
- */
- while ((c = fgetc(pfile)) != EOF)
- {
- if (c != '\'' && c != '"' && c != '\n' && c != ';')
- {
- appendStringInfoChar(inBuf, (char) c);
- while ((c = fgetc(pfile)) != EOF)
- {
- if (c != '\'' && c != '"' && c != ';' && c != '\n')
- appendStringInfoChar(inBuf, (char) c);
- else
- break;
- }
- }
-
- if (c == '\'' || c == '"')
- {
- appendStringInfoChar(&q, (char) c);
- m = c;
-
- while ((c = fgetc(pfile)) != EOF)
- {
- appendStringInfoChar(&q, (char) c);
-
- if (c == m)
- {
- appendStringInfoString(inBuf, q.data);
- resetStringInfo(&q);
- break;
- }
- }
- }
-
- if (c == ';')
- {
- appendStringInfoChar(inBuf, (char) ';');
- break;
- }
-
- if (c == '\n')
- appendStringInfoChar(inBuf, (char) '\n');
- }
-
- pg_free(q.data);
-
- /* No input before EOF signal means time to quit. */
- if (c == EOF && inBuf->len == 0)
- return EOF;
-
- /* return something that's not EOF */
- return 'Q';
-}
-
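
The quote tracking above is what keeps semicolons inside quoted strings or identifiers from terminating a statement early. A standalone sketch of the same splitting idea (simplified; not the removed code itself):

    #include <stdio.h>

    int
    main(void)
    {
        const char *input = "CREATE ROLE \"a;b\"; ALTER ROLE \"a;b\" LOGIN;";
        char        quote = 0;

        for (const char *p = input; *p; p++)
        {
            putchar(*p);
            if (quote)
            {
                if (*p == quote)
                    quote = 0;          /* closing quote: back to bare SQL */
            }
            else if (*p == '\'' || *p == '"')
                quote = *p;             /* opening quote: copy verbatim */
            else if (*p == ';')
                putchar('\n');          /* unquoted semicolon ends a statement */
        }
        return 0;
    }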
-/*
- * get_dbnames_list_to_restore
- *
- * Mark for skipping any entry in dbname_oid_list that matches an entry
- * in the db_exclude_patterns list.
- *
- * Returns the number of databases to be restored.
- */
-static int
-get_dbnames_list_to_restore(PGconn *conn,
- SimplePtrList *dbname_oid_list,
- SimpleStringList db_exclude_patterns)
-{
- int count_db = 0;
- PQExpBuffer query;
- PGresult *res;
-
- query = createPQExpBuffer();
-
- if (!conn)
- pg_log_info("considering PATTERN as NAME for --exclude-database option as no db connection while doing pg_restore.");
-
- /*
- * Check each dbname in turn; any database selected for exclusion is
- * marked in the list rather than restored.
- */
- for (SimplePtrListCell *db_cell = dbname_oid_list->head;
- db_cell; db_cell = db_cell->next)
- {
- DbOidName *dbidname = (DbOidName *) db_cell->ptr;
- bool skip_db_restore = false;
- PQExpBuffer db_lit = createPQExpBuffer();
-
- appendStringLiteralConn(db_lit, dbidname->str, conn);
-
- for (SimpleStringListCell *pat_cell = db_exclude_patterns.head; pat_cell; pat_cell = pat_cell->next)
- {
- /*
- * If there is an exact match then we don't need to try a pattern
- * match
- */
- if (pg_strcasecmp(dbidname->str, pat_cell->val) == 0)
- skip_db_restore = true;
- /* Otherwise, try a pattern match if there is a connection */
- else if (conn)
- {
- int dotcnt;
-
- appendPQExpBufferStr(query, "SELECT 1 ");
- processSQLNamePattern(conn, query, pat_cell->val, false,
- false, NULL, db_lit->data,
- NULL, NULL, NULL, &dotcnt);
-
- if (dotcnt > 0)
- {
- pg_log_error("improper qualified name (too many dotted names): %s",
- dbidname->str);
- PQfinish(conn);
- exit_nicely(1);
- }
-
- res = executeQuery(conn, query->data);
-
- if ((PQresultStatus(res) == PGRES_TUPLES_OK) && PQntuples(res))
- {
- skip_db_restore = true;
- pg_log_info("database \"%s\" matches exclude pattern: \"%s\"", dbidname->str, pat_cell->val);
- }
-
- PQclear(res);
- resetPQExpBuffer(query);
- }
-
- if (skip_db_restore)
- break;
- }
-
- destroyPQExpBuffer(db_lit);
-
- /*
- * Mark db to be skipped or increment the counter of dbs to be
- * restored
- */
- if (skip_db_restore)
- {
- pg_log_info("excluding database \"%s\"", dbidname->str);
- dbidname->oid = InvalidOid;
- }
- else
- {
- count_db++;
- }
- }
-
- destroyPQExpBuffer(query);
-
- return count_db;
-}
-
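
A simplified standalone sketch of the two-tier exclusion test above (inputs assumed for illustration; the real code uses pg_strcasecmp and a server-side processSQLNamePattern query):

    #include <stdio.h>
    #include <strings.h>        /* strcasecmp; pg_strcasecmp in the real code */

    int
    main(void)
    {
        const char *dbname = "SalesDB";
        const char *pattern = "salesdb";
        int         have_conn = 0;  /* no connection: only exact matching */

        if (strcasecmp(dbname, pattern) == 0)
            printf("excluded by exact match\n");
        else if (have_conn)
            printf("would try a server-side pattern match\n");
        else
            printf("kept\n");
        return 0;
    }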
-/*
- * get_dbname_oid_list_from_mfile
- *
- * Open the map.dat file and read it line by line to build a list of
- * database names and their corresponding OIDs.
- *
- * Returns the total number of database names found in map.dat.
- */
-static int
-get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oid_list)
-{
- StringInfoData linebuf;
- FILE *pfile;
- char map_file_path[MAXPGPATH];
- int count = 0;
-
- /*
- * If the dump contains only the global.dat file, return here: there
- * are no databases to restore.
- */
- if (!file_exists_in_directory(dumpdirpath, "map.dat"))
- {
- pg_log_info("database restoring is skipped as \"map.dat\" is not present in \"%s\"", dumpdirpath);
- return 0;
- }
-
- snprintf(map_file_path, MAXPGPATH, "%s/map.dat", dumpdirpath);
-
- /* Open map.dat file. */
- pfile = fopen(map_file_path, PG_BINARY_R);
-
- if (pfile == NULL)
- pg_fatal("could not open \"%s\": %m", map_file_path);
-
- initStringInfo(&linebuf);
-
- /* Append all the dbname/db_oid combinations to the list. */
- while (pg_get_line_buf(pfile, &linebuf))
- {
- Oid db_oid = InvalidOid;
- char *dbname;
- DbOidName *dbidname;
- int namelen;
- char *p = linebuf.data;
-
- /* Extract dboid. */
- while (isdigit((unsigned char) *p))
- p++;
- if (p > linebuf.data && *p == ' ')
- {
- sscanf(linebuf.data, "%u", &db_oid);
- p++;
- }
-
- /* dbname is the rest of the line; strlcpy below drops its trailing newline */
- dbname = p;
- namelen = strlen(dbname);
-
- /* Report error and exit if the file has any corrupted data. */
- if (!OidIsValid(db_oid) || namelen <= 1)
- pg_fatal("invalid entry in \"%s\" at line: %d", map_file_path,
- count + 1);
-
- pg_log_info("found database \"%s\" (OID: %u) in \"%s\"",
- dbname, db_oid, map_file_path);
-
- dbidname = pg_malloc(offsetof(DbOidName, str) + namelen + 1);
- dbidname->oid = db_oid;
- strlcpy(dbidname->str, dbname, namelen);
-
- simple_ptr_list_append(dbname_oid_list, dbidname);
- count++;
- }
-
- /* Close map.dat file. */
- fclose(pfile);
-
- return count;
-}
-
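
A minimal sketch of the line format this parser expects (values are illustrative; the real code takes the rest of the line as the name, so names may contain spaces):

    #include <stdio.h>

    int
    main(void)
    {
        const char *line = "16384 mydb\n";  /* "<oid><space><dbname>" */
        unsigned    oid;
        char        name[64];

        /* %63[^\n] mirrors "the name is the rest of the line" */
        if (sscanf(line, "%u %63[^\n]", &oid, name) == 2)
            printf("OID %u -> \"%s\"\n", oid, name);
        return 0;
    }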
-/*
- * restore_all_databases
- *
- * Restore all databases whose dumps are present in the directory,
- * following the map.dat file mapping.
- *
- * Databases matching an --exclude-database pattern are skipped.
- *
- * Returns the number of errors encountered during the restore.
- */
-static int
-restore_all_databases(PGconn *conn, const char *dumpdirpath,
- SimpleStringList db_exclude_patterns, RestoreOptions *opts,
- int numWorkers)
-{
- SimplePtrList dbname_oid_list = {NULL, NULL};
- int num_db_restore = 0;
- int num_total_db;
- int n_errors_total;
- int count = 0;
- char *connected_db = NULL;
- bool dumpData = opts->dumpData;
- bool dumpSchema = opts->dumpSchema;
- bool dumpStatistics = opts->dumpStatistics;
-
- /* Save the db name so it can be reused for all the databases. */
- if (opts->cparams.dbname)
- connected_db = opts->cparams.dbname;
-
- num_total_db = get_dbname_oid_list_from_mfile(dumpdirpath, &dbname_oid_list);
-
- /* If map.dat has no entries, return after processing global.dat */
- if (dbname_oid_list.head == NULL)
- return process_global_sql_commands(conn, dumpdirpath, opts->filename);
-
- pg_log_info("found %d database names in \"map.dat\"", num_total_db);
-
- if (!conn)
- {
- pg_log_info("trying to connect database \"postgres\"");
-
- conn = ConnectDatabase("postgres", NULL, opts->cparams.pghost,
- opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
- false, progname, NULL, NULL, NULL, NULL);
-
- /* Try with template1. */
- if (!conn)
- {
- pg_log_info("trying to connect database \"template1\"");
-
- conn = ConnectDatabase("template1", NULL, opts->cparams.pghost,
- opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
- false, progname, NULL, NULL, NULL, NULL);
- }
- }
-
- /*
- * filter the db list according to the exclude patterns
- */
- num_db_restore = get_dbnames_list_to_restore(conn, &dbname_oid_list,
- db_exclude_patterns);
-
- /* Open global.dat file and execute/append all the global sql commands. */
- n_errors_total = process_global_sql_commands(conn, dumpdirpath, opts->filename);
-
- /* Close the db connection as we are done with globals and patterns. */
- if (conn)
- PQfinish(conn);
-
- /* Exit if no db needs to be restored. */
- if (dbname_oid_list.head == NULL || num_db_restore == 0)
- {
- pg_log_info("no database needs to restore out of %d databases", num_total_db);
- return n_errors_total;
- }
-
- pg_log_info("need to restore %d databases out of %d databases", num_db_restore, num_total_db);
-
- /*
- * We have a list of databases to restore after processing the
- * exclude-database switch(es). Now we can restore them one by one.
- */
- for (SimplePtrListCell *db_cell = dbname_oid_list.head;
- db_cell; db_cell = db_cell->next)
- {
- DbOidName *dbidname = (DbOidName *) db_cell->ptr;
- char subdirpath[MAXPGPATH];
- char subdirdbpath[MAXPGPATH];
- char dbfilename[MAXPGPATH];
- int n_errors;
-
- /* ignore dbs marked for skipping */
- if (dbidname->oid == InvalidOid)
- continue;
-
- /*
- * We need to reset override_dbname so that objects can be restored
- * into an already created database. (used with -d/--dbname option)
- */
- if (opts->cparams.override_dbname)
- {
- pfree(opts->cparams.override_dbname);
- opts->cparams.override_dbname = NULL;
- }
-
- snprintf(subdirdbpath, MAXPGPATH, "%s/databases", dumpdirpath);
-
- /*
- * Look for the database dump file/dir. If there is an {oid}.tar or
- * {oid}.dmp file, use it. Otherwise try to use a directory called
- * {oid}
- */
- snprintf(dbfilename, MAXPGPATH, "%u.tar", dbidname->oid);
- if (file_exists_in_directory(subdirdbpath, dbfilename))
- snprintf(subdirpath, MAXPGPATH, "%s/databases/%u.tar", dumpdirpath, dbidname->oid);
- else
- {
- snprintf(dbfilename, MAXPGPATH, "%u.dmp", dbidname->oid);
-
- if (file_exists_in_directory(subdirdbpath, dbfilename))
- snprintf(subdirpath, MAXPGPATH, "%s/databases/%u.dmp", dumpdirpath, dbidname->oid);
- else
- snprintf(subdirpath, MAXPGPATH, "%s/databases/%u", dumpdirpath, dbidname->oid);
- }
-
- pg_log_info("restoring database \"%s\"", dbidname->str);
-
- /* If the database already exists, don't set the createDB flag. */
- if (opts->cparams.dbname)
- {
- PGconn *test_conn;
-
- test_conn = ConnectDatabase(dbidname->str, NULL, opts->cparams.pghost,
- opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
- false, progname, NULL, NULL, NULL, NULL);
- if (test_conn)
- {
- PQfinish(test_conn);
-
- /* Use already created database for connection. */
- opts->createDB = 0;
- opts->cparams.dbname = dbidname->str;
- }
- else
- {
- /* we'll have to create it */
- opts->createDB = 1;
- opts->cparams.dbname = connected_db;
- }
- }
-
- /*
- * Reset flags - might have been reset in pg_backup_archiver.c by the
- * previous restore.
- */
- opts->dumpData = dumpData;
- opts->dumpSchema = dumpSchema;
- opts->dumpStatistics = dumpStatistics;
-
- /* Restore the single database. */
- n_errors = restore_one_database(subdirpath, opts, numWorkers, true, count);
-
- /* Print a summary of ignored errors during single database restore. */
- if (n_errors)
- {
- n_errors_total += n_errors;
- pg_log_warning("errors ignored on database \"%s\" restore: %d", dbidname->str, n_errors);
- }
-
- count++;
- }
-
- /* Log number of processed databases. */
- pg_log_info("number of restored databases is %d", num_db_restore);
-
- /* Free dbname and dboid list. */
- simple_ptr_list_destroy(&dbname_oid_list);
-
- return n_errors_total;
-}
-
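
For reference, the on-disk layout this lookup order implies for a pg_dumpall archive (the entries under databases/ are illustrative; each database appears in exactly one of the three forms):

    dumpdir/
        global.dat        -- globals as semicolon-terminated SQL commands
        map.dat           -- one "<oid> <dbname>" line per database
        databases/
            16384.tar     -- tar-format dump
            16385.dmp     -- custom-format dump
            16386/        -- directory-format dump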
-/*
- * process_global_sql_commands
- *
- * Open global.dat and execute or copy the SQL commands one by one.
- *
- * If outfile is not NULL, copy all SQL commands into outfile rather than
- * executing them.
- *
- * Returns the number of errors encountered while processing global.dat.
- */
-static int
-process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *outfile)
-{
- char global_file_path[MAXPGPATH];
- PGresult *result;
- StringInfoData sqlstatement,
- user_create;
- FILE *pfile;
- int n_errors = 0;
-
- snprintf(global_file_path, MAXPGPATH, "%s/global.dat", dumpdirpath);
-
- /* Open global.dat file. */
- pfile = fopen(global_file_path, PG_BINARY_R);
-
- if (pfile == NULL)
- pg_fatal("could not open \"%s\": %m", global_file_path);
-
- /*
- * If outfile is given, then just copy all global.dat file data into
- * outfile.
- */
- if (outfile)
- {
- copy_or_print_global_file(outfile, pfile);
- return 0;
- }
-
- /* Init sqlstatement to append commands. */
- initStringInfo(&sqlstatement);
-
- /* creation statement for our current role */
- initStringInfo(&user_create);
- appendStringInfoString(&user_create, "CREATE ROLE ");
- /* should use fmtId here, but we don't know the encoding */
- appendStringInfoString(&user_create, PQuser(conn));
- appendStringInfoChar(&user_create, ';');
-
- /* Process the file until EOF, executing SQL statements. */
- while (read_one_statement(&sqlstatement, pfile) != EOF)
- {
- /* don't try to create the role we are connected as */
- if (strstr(sqlstatement.data, user_create.data))
- continue;
-
- pg_log_info("executing query: %s", sqlstatement.data);
- result = PQexec(conn, sqlstatement.data);
-
- switch (PQresultStatus(result))
- {
- case PGRES_COMMAND_OK:
- case PGRES_TUPLES_OK:
- case PGRES_EMPTY_QUERY:
- break;
- default:
- n_errors++;
- pg_log_error("could not execute query: \"%s\" \nCommand was: \"%s\"", PQerrorMessage(conn), sqlstatement.data);
- }
- PQclear(result);
- }
-
- /* Print a summary of errors ignored while processing global.dat. */
- if (n_errors)
- pg_log_warning("ignored %d errors in \"%s\"", n_errors, global_file_path);
-
- fclose(pfile);
-
- return n_errors;
-}
-
-/*
- * copy_or_print_global_file
- *
- * Copy global.dat into the output file. If "-" is used as outfile,
- * then print commands to stdout.
- */
-static void
-copy_or_print_global_file(const char *outfile, FILE *pfile)
-{
- char out_file_path[MAXPGPATH];
- FILE *OPF;
- int c;
-
- /* "-" is used for stdout. */
- if (strcmp(outfile, "-") == 0)
- OPF = stdout;
- else
- {
- snprintf(out_file_path, MAXPGPATH, "%s", outfile);
- OPF = fopen(out_file_path, PG_BINARY_W);
-
- if (OPF == NULL)
- {
- fclose(pfile);
- pg_fatal("could not open file: \"%s\"", outfile);
- }
- }
-
- /* Append global.dat into output file or print to stdout. */
- while ((c = fgetc(pfile)) != EOF)
- fputc(c, OPF);
-
- fclose(pfile);
-
- /* Close output file. */
- if (strcmp(outfile, "-") != 0)
- fclose(OPF);
-}
diff --git a/src/bin/pg_dump/t/001_basic.pl b/src/bin/pg_dump/t/001_basic.pl
index 84ca25e17d6..37d893d5e6a 100644
--- a/src/bin/pg_dump/t/001_basic.pl
+++ b/src/bin/pg_dump/t/001_basic.pl
@@ -237,21 +237,6 @@ command_fails_like(
'pg_restore: options -C\/--create and -1\/--single-transaction cannot be used together'
);
-command_fails_like(
- [ 'pg_restore', '--exclude-database=foo', '--globals-only', '-d', 'xxx' ],
- qr/\Qpg_restore: error: option --exclude-database cannot be used together with -g\/--globals-only\E/,
- 'pg_restore: option --exclude-database cannot be used together with -g/--globals-only');
-
-command_fails_like(
- [ 'pg_restore', '--exclude-database=foo', '-d', 'xxx', 'dumpdir' ],
- qr/\Qpg_restore: error: option --exclude-database can be used only when restoring an archive created by pg_dumpall\E/,
- 'When option --exclude-database is used in pg_restore with dump of pg_dump');
-
-command_fails_like(
- [ 'pg_restore', '--globals-only', '-d', 'xxx', 'dumpdir' ],
- qr/\Qpg_restore: error: option -g\/--globals-only can be used only when restoring an archive created by pg_dumpall\E/,
- 'When option --globals-only is not used in pg_restore with dump of pg_dump');
-
# also fails for -r and -t, but it seems pointless to add more tests for those.
command_fails_like(
[ 'pg_dumpall', '--exclude-database=foo', '--globals-only' ],
@@ -259,8 +244,4 @@ command_fails_like(
'pg_dumpall: option --exclude-database cannot be used together with -g/--globals-only'
);
-command_fails_like(
- [ 'pg_dumpall', '--format', 'x' ],
- qr/\Qpg_dumpall: error: unrecognized archive format "x";\E/,
- 'pg_dumpall: unrecognized archive format');
done_testing();
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index cf34f71ea11..d597842908e 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -68,6 +68,7 @@ my %pgdump_runs = (
'--no-data',
'--sequence-data',
'--binary-upgrade',
+ '--with-statistics',
'--dbname' => 'postgres', # alternative way to specify database
],
restore_cmd => [
@@ -75,6 +76,7 @@ my %pgdump_runs = (
'--format' => 'custom',
'--verbose',
'--file' => "$tempdir/binary_upgrade.sql",
+ '--with-statistics',
"$tempdir/binary_upgrade.dump",
],
},
@@ -88,11 +90,13 @@ my %pgdump_runs = (
'--format' => 'custom',
'--compress' => '1',
'--file' => "$tempdir/compression_gzip_custom.dump",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/compression_gzip_custom.sql",
+ '--with-statistics',
"$tempdir/compression_gzip_custom.dump",
],
command_like => {
@@ -115,6 +119,7 @@ my %pgdump_runs = (
'--format' => 'directory',
'--compress' => 'gzip:1',
'--file' => "$tempdir/compression_gzip_dir",
+ '--with-statistics',
'postgres',
],
# Give coverage for manually compressed blobs.toc files during
@@ -132,6 +137,7 @@ my %pgdump_runs = (
'pg_restore',
'--jobs' => '2',
'--file' => "$tempdir/compression_gzip_dir.sql",
+ '--with-statistics',
"$tempdir/compression_gzip_dir",
],
},
@@ -144,6 +150,7 @@ my %pgdump_runs = (
'--format' => 'plain',
'--compress' => '1',
'--file' => "$tempdir/compression_gzip_plain.sql.gz",
+ '--with-statistics',
'postgres',
],
# Decompress the generated file to run through the tests.
@@ -162,11 +169,13 @@ my %pgdump_runs = (
'--format' => 'custom',
'--compress' => 'lz4',
'--file' => "$tempdir/compression_lz4_custom.dump",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/compression_lz4_custom.sql",
+ '--with-statistics',
"$tempdir/compression_lz4_custom.dump",
],
command_like => {
@@ -189,6 +198,7 @@ my %pgdump_runs = (
'--format' => 'directory',
'--compress' => 'lz4:1',
'--file' => "$tempdir/compression_lz4_dir",
+ '--with-statistics',
'postgres',
],
# Verify that data files were compressed
@@ -200,6 +210,7 @@ my %pgdump_runs = (
'pg_restore',
'--jobs' => '2',
'--file' => "$tempdir/compression_lz4_dir.sql",
+ '--with-statistics',
"$tempdir/compression_lz4_dir",
],
},
@@ -212,6 +223,7 @@ my %pgdump_runs = (
'--format' => 'plain',
'--compress' => 'lz4',
'--file' => "$tempdir/compression_lz4_plain.sql.lz4",
+ '--with-statistics',
'postgres',
],
# Decompress the generated file to run through the tests.
@@ -233,11 +245,13 @@ my %pgdump_runs = (
'--format' => 'custom',
'--compress' => 'zstd',
'--file' => "$tempdir/compression_zstd_custom.dump",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/compression_zstd_custom.sql",
+ '--with-statistics',
"$tempdir/compression_zstd_custom.dump",
],
command_like => {
@@ -259,6 +273,7 @@ my %pgdump_runs = (
'--format' => 'directory',
'--compress' => 'zstd:1',
'--file' => "$tempdir/compression_zstd_dir",
+ '--with-statistics',
'postgres',
],
# Give coverage for manually compressed blobs.toc files during
@@ -279,6 +294,7 @@ my %pgdump_runs = (
'pg_restore',
'--jobs' => '2',
'--file' => "$tempdir/compression_zstd_dir.sql",
+ '--with-statistics',
"$tempdir/compression_zstd_dir",
],
},
@@ -292,6 +308,7 @@ my %pgdump_runs = (
'--format' => 'plain',
'--compress' => 'zstd:long',
'--file' => "$tempdir/compression_zstd_plain.sql.zst",
+ '--with-statistics',
'postgres',
],
# Decompress the generated file to run through the tests.
@@ -310,6 +327,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/clean.sql",
'--clean',
+ '--with-statistics',
'--dbname' => 'postgres', # alternative way to specify database
],
},
@@ -320,6 +338,7 @@ my %pgdump_runs = (
'--clean',
'--if-exists',
'--encoding' => 'UTF8', # no-op, just for testing
+ '--with-statistics',
'postgres',
],
},
@@ -338,6 +357,7 @@ my %pgdump_runs = (
'--create',
'--no-reconnect', # no-op, just for testing
'--verbose',
+ '--with-statistics',
'postgres',
],
},
@@ -348,7 +368,7 @@ my %pgdump_runs = (
'--data-only',
'--superuser' => 'test_superuser',
'--disable-triggers',
- '--verbose', # no-op, just make sure it works
+ '--verbose', # no-op, just make sure it works
'postgres',
],
},
@@ -356,6 +376,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
'--file' => "$tempdir/defaults.sql",
+ '--with-statistics',
'postgres',
],
},
@@ -364,6 +385,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
'--file' => "$tempdir/defaults_no_public.sql",
+ '--with-statistics',
'regress_pg_dump_test',
],
},
@@ -373,6 +395,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--clean',
'--file' => "$tempdir/defaults_no_public_clean.sql",
+ '--with-statistics',
'regress_pg_dump_test',
],
},
@@ -381,6 +404,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
'--file' => "$tempdir/defaults_public_owner.sql",
+ '--with-statistics',
'regress_public_owner',
],
},
@@ -395,12 +419,14 @@ my %pgdump_runs = (
'pg_dump',
'--format' => 'custom',
'--file' => "$tempdir/defaults_custom_format.dump",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--format' => 'custom',
'--file' => "$tempdir/defaults_custom_format.sql",
+ '--with-statistics',
"$tempdir/defaults_custom_format.dump",
],
command_like => {
@@ -425,12 +451,14 @@ my %pgdump_runs = (
'pg_dump',
'--format' => 'directory',
'--file' => "$tempdir/defaults_dir_format",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--format' => 'directory',
'--file' => "$tempdir/defaults_dir_format.sql",
+ '--with-statistics',
"$tempdir/defaults_dir_format",
],
command_like => {
@@ -456,11 +484,13 @@ my %pgdump_runs = (
'--format' => 'directory',
'--jobs' => 2,
'--file' => "$tempdir/defaults_parallel",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/defaults_parallel.sql",
+ '--with-statistics',
"$tempdir/defaults_parallel",
],
},
@@ -472,12 +502,14 @@ my %pgdump_runs = (
'pg_dump',
'--format' => 'tar',
'--file' => "$tempdir/defaults_tar_format.tar",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--format' => 'tar',
'--file' => "$tempdir/defaults_tar_format.sql",
+ '--with-statistics',
"$tempdir/defaults_tar_format.tar",
],
},
@@ -486,6 +518,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/exclude_dump_test_schema.sql",
'--exclude-schema' => 'dump_test',
+ '--with-statistics',
'postgres',
],
},
@@ -494,6 +527,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/exclude_test_table.sql",
'--exclude-table' => 'dump_test.test_table',
+ '--with-statistics',
'postgres',
],
},
@@ -502,6 +536,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/exclude_measurement.sql",
'--exclude-table-and-children' => 'dump_test.measurement',
+ '--with-statistics',
'postgres',
],
},
@@ -511,6 +546,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/exclude_measurement_data.sql",
'--exclude-table-data-and-children' => 'dump_test.measurement',
'--no-unlogged-table-data',
+ '--with-statistics',
'postgres',
],
},
@@ -520,6 +556,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/exclude_test_table_data.sql",
'--exclude-table-data' => 'dump_test.test_table',
'--no-unlogged-table-data',
+ '--with-statistics',
'postgres',
],
},
@@ -538,6 +575,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/pg_dumpall_globals.sql",
'--globals-only',
'--no-sync',
+ '--with-statistics',
],
},
pg_dumpall_globals_clean => {
@@ -547,12 +585,14 @@ my %pgdump_runs = (
'--globals-only',
'--clean',
'--no-sync',
+ '--with-statistics',
],
},
pg_dumpall_dbprivs => {
dump_cmd => [
'pg_dumpall', '--no-sync',
'--file' => "$tempdir/pg_dumpall_dbprivs.sql",
+ '--with-statistics',
],
},
pg_dumpall_exclude => {
@@ -562,6 +602,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/pg_dumpall_exclude.sql",
'--exclude-database' => '*dump_test*',
'--no-sync',
+ '--with-statistics',
],
},
no_toast_compression => {
@@ -569,6 +610,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_toast_compression.sql",
'--no-toast-compression',
+ '--with-statistics',
'postgres',
],
},
@@ -577,6 +619,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_large_objects.sql",
'--no-large-objects',
+ '--with-statistics',
'postgres',
],
},
@@ -585,6 +628,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_policies.sql",
'--no-policies',
+ '--with-statistics',
'postgres',
],
},
@@ -593,6 +637,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_privs.sql",
'--no-privileges',
+ '--with-statistics',
'postgres',
],
},
@@ -601,6 +646,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_owner.sql",
'--no-owner',
+ '--with-statistics',
'postgres',
],
},
@@ -609,6 +655,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_table_access_method.sql",
'--no-table-access-method',
+ '--with-statistics',
'postgres',
],
},
@@ -617,6 +664,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/only_dump_test_schema.sql",
'--schema' => 'dump_test',
+ '--with-statistics',
'postgres',
],
},
@@ -627,6 +675,7 @@ my %pgdump_runs = (
'--table' => 'dump_test.test_table',
'--lock-wait-timeout' =>
(1000 * $PostgreSQL::Test::Utils::timeout_default),
+ '--with-statistics',
'postgres',
],
},
@@ -637,6 +686,7 @@ my %pgdump_runs = (
'--table-and-children' => 'dump_test.measurement',
'--lock-wait-timeout' =>
(1000 * $PostgreSQL::Test::Utils::timeout_default),
+ '--with-statistics',
'postgres',
],
},
@@ -646,6 +696,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/role.sql",
'--role' => 'regress_dump_test_role',
'--schema' => 'dump_test_second_schema',
+ '--with-statistics',
'postgres',
],
},
@@ -658,11 +709,13 @@ my %pgdump_runs = (
'--file' => "$tempdir/role_parallel",
'--role' => 'regress_dump_test_role',
'--schema' => 'dump_test_second_schema',
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/role_parallel.sql",
+ '--with-statistics',
"$tempdir/role_parallel",
],
},
@@ -691,6 +744,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/section_pre_data.sql",
'--section' => 'pre-data',
+ '--with-statistics',
'postgres',
],
},
@@ -699,6 +753,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/section_data.sql",
'--section' => 'data',
+ '--with-statistics',
'postgres',
],
},
@@ -707,6 +762,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/section_post_data.sql",
'--section' => 'post-data',
+ '--with-statistics',
'postgres',
],
},
@@ -717,6 +773,7 @@ my %pgdump_runs = (
'--schema' => 'dump_test',
'--large-objects',
'--no-large-objects',
+ '--with-statistics',
'postgres',
],
},
@@ -732,6 +789,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
"--file=$tempdir/no_data_no_schema.sql", '--no-data',
'--no-schema', 'postgres',
+ '--with-statistics',
],
},
statistics_only => {
@@ -741,18 +799,11 @@ my %pgdump_runs = (
'postgres',
],
},
- schema_only_with_statistics => {
- dump_cmd => [
- 'pg_dump', '--no-sync',
- "--file=$tempdir/schema_only_with_statistics.sql",
- '--schema-only', '--with-statistics', 'postgres',
- ],
- },
no_schema => {
dump_cmd => [
'pg_dump', '--no-sync',
"--file=$tempdir/no_schema.sql", '--no-schema',
- 'postgres',
+ '--with-statistics', 'postgres',
],
},);
@@ -1029,6 +1080,7 @@ my %tests = (
test_schema_plus_large_objects => 1,
},
unlike => {
+ binary_upgrade => 1,
no_large_objects => 1,
no_owner => 1,
schema_only => 1,
@@ -1132,7 +1184,9 @@ my %tests = (
) INHERITS (dump_test.test_table_nn, dump_test.test_table_nn_2);
ALTER TABLE dump_test.test_table_nn ADD CONSTRAINT nn NOT NULL col1 NOT VALID;
ALTER TABLE dump_test.test_table_nn_chld1 VALIDATE CONSTRAINT nn;
- ALTER TABLE dump_test.test_table_nn_chld2 VALIDATE CONSTRAINT nn;',
+ ALTER TABLE dump_test.test_table_nn_chld2 VALIDATE CONSTRAINT nn;
+ COMMENT ON CONSTRAINT nn ON dump_test.test_table_nn IS \'nn comment is valid\';
+ COMMENT ON CONSTRAINT nn ON dump_test.test_table_nn_chld2 IS \'nn_chld2 comment is valid\';',
regexp => qr/^
\QALTER TABLE dump_test.test_table_nn\E \n^\s+
\QADD CONSTRAINT nn NOT NULL col1 NOT VALID;\E
@@ -1146,6 +1200,34 @@ my %tests = (
},
},
+ # This constraint is invalid, therefore it goes in SECTION_POST_DATA
+ 'COMMENT ON CONSTRAINT ON test_table_nn' => {
+ regexp => qr/^
+ \QCOMMENT ON CONSTRAINT nn ON dump_test.test_table_nn IS\E
+ /xm,
+ like => {
+ %full_runs, %dump_test_schema_runs, section_post_data => 1,
+ },
+ unlike => {
+ exclude_dump_test_schema => 1,
+ only_dump_measurement => 1,
+ },
+ },
+
+ # This constraint is valid, therefore it goes in SECTION_PRE_DATA
+ 'COMMENT ON CONSTRAINT ON test_table_chld2' => {
+ regexp => qr/^
+ \QCOMMENT ON CONSTRAINT nn ON dump_test.test_table_nn_chld2 IS\E
+ /xm,
+ like => {
+ %full_runs, %dump_test_schema_runs, section_pre_data => 1,
+ },
+ unlike => {
+ exclude_dump_test_schema => 1,
+ only_dump_measurement => 1,
+ },
+ },
+
'CONSTRAINT NOT NULL / NOT VALID (child1)' => {
regexp => qr/^
\QCREATE TABLE dump_test.test_table_nn_chld1 (\E\n
@@ -1517,6 +1599,7 @@ my %tests = (
test_schema_plus_large_objects => 1,
},
unlike => {
+ binary_upgrade => 1,
schema_only => 1,
schema_only_with_statistics => 1,
no_large_objects => 1,
@@ -2289,17 +2372,19 @@ my %tests = (
create_sql => 'CREATE DOMAIN dump_test.us_postal_code AS TEXT
COLLATE "C"
DEFAULT \'10014\'
+ CONSTRAINT nn NOT NULL
CHECK(VALUE ~ \'^\d{5}$\' OR
VALUE ~ \'^\d{5}-\d{4}$\');
+ COMMENT ON CONSTRAINT nn
+ ON DOMAIN dump_test.us_postal_code IS \'not null\';
COMMENT ON CONSTRAINT us_postal_code_check
ON DOMAIN dump_test.us_postal_code IS \'check it\';',
regexp => qr/^
- \QCREATE DOMAIN dump_test.us_postal_code AS text COLLATE pg_catalog."C" DEFAULT '10014'::text\E\n\s+
+ \QCREATE DOMAIN dump_test.us_postal_code AS text COLLATE pg_catalog."C" CONSTRAINT nn NOT NULL DEFAULT '10014'::text\E\n\s+
\QCONSTRAINT us_postal_code_check CHECK \E
\Q(((VALUE ~ '^\d{5}\E
\$\Q'::text) OR (VALUE ~ '^\d{5}-\d{4}\E\$
\Q'::text)));\E(.|\n)*
- \QCOMMENT ON CONSTRAINT us_postal_code_check ON DOMAIN dump_test.us_postal_code IS 'check it';\E
/xm,
like =>
{ %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
@@ -2309,6 +2394,30 @@ my %tests = (
},
},
+ 'COMMENT ON CONSTRAINT ON DOMAIN (1)' => {
+ regexp => qr/^
+ \QCOMMENT ON CONSTRAINT nn ON DOMAIN dump_test.us_postal_code IS 'not null';\E
+ /xm,
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => {
+ exclude_dump_test_schema => 1,
+ only_dump_measurement => 1,
+ },
+ },
+
+ 'COMMENT ON CONSTRAINT ON DOMAIN (2)' => {
+ regexp => qr/^
+ \QCOMMENT ON CONSTRAINT us_postal_code_check ON DOMAIN dump_test.us_postal_code IS 'check it';\E
+ /xm,
+ like =>
+ { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+ unlike => {
+ exclude_dump_test_schema => 1,
+ only_dump_measurement => 1,
+ },
+ },
+
'CREATE FUNCTION dump_test.pltestlang_call_handler' => {
create_order => 17,
create_sql => 'CREATE FUNCTION dump_test.pltestlang_call_handler()
@@ -4524,9 +4633,9 @@ my %tests = (
no_schema => 1,
section_data => 1,
test_schema_plus_large_objects => 1,
- binary_upgrade => 1,
},
unlike => {
+ binary_upgrade => 1,
no_large_objects => 1,
no_privs => 1,
schema_only => 1,
@@ -5096,6 +5205,17 @@ command_fails_like(
'pg_dump',
'--port' => $port,
'--strict-names',
+ '--schema-only',
+ '--with-statistics',
+ ],
+ qr/\Qpg_dump: error: options -s\/--schema-only and --with-statistics cannot be used together\E/,
+ 'cannot use --schema-only and --with-statistics together');
+
+command_fails_like(
+ [
+ 'pg_dump',
+ '--port' => $port,
+ '--strict-names',
'--table' => 'nonexistent*'
],
qr/\Qpg_dump: error: no matching tables were found for pattern\E/,
diff --git a/src/bin/pg_dump/t/006_pg_dumpall.pl b/src/bin/pg_dump/t/006_pg_dumpall.pl
deleted file mode 100644
index 5acd49f1559..00000000000
--- a/src/bin/pg_dump/t/006_pg_dumpall.pl
+++ /dev/null
@@ -1,391 +0,0 @@
-# Copyright (c) 2021-2025, PostgreSQL Global Development Group
-
-use strict;
-use warnings FATAL => 'all';
-
-use PostgreSQL::Test::Cluster;
-use PostgreSQL::Test::Utils;
-use Test::More;
-
-my $tempdir = PostgreSQL::Test::Utils::tempdir;
-my $run_db = 'postgres';
-my $sep = $windows_os ? "\\" : "/";
-
-# Tablespace locations used by "restore_tablespace" test case.
-my $tablespace1 = "${tempdir}${sep}tbl1";
-my $tablespace2 = "${tempdir}${sep}tbl2";
-mkdir($tablespace1) || die "mkdir $tablespace1 $!";
-mkdir($tablespace2) || die "mkdir $tablespace2 $!";
-
-# Escape tablespace locations on Windows.
-$tablespace1 = $windows_os ? ($tablespace1 =~ s/\\/\\\\/gr) : $tablespace1;
-$tablespace2 = $windows_os ? ($tablespace2 =~ s/\\/\\\\/gr) : $tablespace2;
-
-# Where pg_dumpall will be executed.
-my $node = PostgreSQL::Test::Cluster->new('node');
-$node->init;
-$node->start;
-
-
-###############################################################
-# Definition of the pg_dumpall test cases to run.
-#
-# Each of these test cases is named, and those names are used for failure
-# reporting and also to save the dump and restore information the test
-# needs to assert on.
-#
-# The "setup_sql" is a psql valid script that contains SQL commands to execute
-# before of actually execute the tests. The setups are all executed before of
-# any test execution.
-#
-# The "dump_cmd" and "restore_cmd" are the commands that will be executed. The
-# "restore_cmd" must have the --file flag to save the restore output so that we
-# can assert on it.
-#
-# The "like" and "unlike" is a regexp that is used to match the pg_restore
-# output. It must have at least one of then filled per test cases but it also
-# can have both. See "excluding_databases" test case for example.
-my %pgdumpall_runs = (
- restore_roles => {
- setup_sql => '
- CREATE ROLE dumpall WITH ENCRYPTED PASSWORD \'admin\' SUPERUSER;
- CREATE ROLE dumpall2 WITH REPLICATION CONNECTION LIMIT 10;',
- dump_cmd => [
- 'pg_dumpall',
- '--format' => 'directory',
- '--file' => "$tempdir/restore_roles",
- ],
- restore_cmd => [
- 'pg_restore', '-C',
- '--format' => 'directory',
- '--file' => "$tempdir/restore_roles.sql",
- "$tempdir/restore_roles",
- ],
- like => qr/
- ^\s*\QCREATE ROLE dumpall;\E\s*\n
- \s*\QALTER ROLE dumpall WITH SUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOLOGIN NOREPLICATION NOBYPASSRLS PASSWORD 'SCRAM-SHA-256\E
- [^']+';\s*\n
- \s*\QCREATE ROLE dumpall2;\E
- \s*\QALTER ROLE dumpall2 WITH NOSUPERUSER INHERIT NOCREATEROLE NOCREATEDB NOLOGIN REPLICATION NOBYPASSRLS CONNECTION LIMIT 10;\E
- /xm
- },
-
- restore_tablespace => {
- setup_sql => "
- CREATE ROLE tap;
- CREATE TABLESPACE tbl1 OWNER tap LOCATION '$tablespace1';
- CREATE TABLESPACE tbl2 OWNER tap LOCATION '$tablespace2' WITH (seq_page_cost=1.0);",
- dump_cmd => [
- 'pg_dumpall',
- '--format' => 'directory',
- '--file' => "$tempdir/restore_tablespace",
- ],
- restore_cmd => [
- 'pg_restore', '-C',
- '--format' => 'directory',
- '--file' => "$tempdir/restore_tablespace.sql",
- "$tempdir/restore_tablespace",
- ],
- # Match "E" as optional since it is added to LOCATION when running on
- # Windows.
- like => qr/^
- \n\QCREATE TABLESPACE tbl1 OWNER tap LOCATION \E(?:E)?\Q'$tablespace1';\E
- \n\QCREATE TABLESPACE tbl2 OWNER tap LOCATION \E(?:E)?\Q'$tablespace2';\E
- \n\QALTER TABLESPACE tbl2 SET (seq_page_cost=1.0);\E
- /xm,
- },
-
- restore_grants => {
- setup_sql => "
- CREATE DATABASE tapgrantsdb;
- CREATE SCHEMA private;
- CREATE SEQUENCE serial START 101;
- CREATE FUNCTION fn() RETURNS void AS \$\$
- BEGIN
- END;
- \$\$ LANGUAGE plpgsql;
- CREATE ROLE super;
- CREATE ROLE grant1;
- CREATE ROLE grant2;
- CREATE ROLE grant3;
- CREATE ROLE grant4;
- CREATE ROLE grant5;
- CREATE ROLE grant6;
- CREATE ROLE grant7;
- CREATE ROLE grant8;
-
- CREATE TABLE t (id int);
- INSERT INTO t VALUES (1), (2), (3), (4);
-
- GRANT SELECT ON TABLE t TO grant1;
- GRANT INSERT ON TABLE t TO grant2;
- GRANT ALL PRIVILEGES ON TABLE t to grant3;
- GRANT CONNECT, CREATE ON DATABASE tapgrantsdb TO grant4;
- GRANT USAGE, CREATE ON SCHEMA private TO grant5;
- GRANT USAGE, SELECT, UPDATE ON SEQUENCE serial TO grant6;
- GRANT super TO grant7;
- GRANT EXECUTE ON FUNCTION fn() TO grant8;
- ",
- dump_cmd => [
- 'pg_dumpall',
- '--format' => 'directory',
- '--file' => "$tempdir/restore_grants",
- ],
- restore_cmd => [
- 'pg_restore', '-C',
- '--format' => 'directory',
- '--file' => "$tempdir/restore_grants.sql",
- "$tempdir/restore_grants",
- ],
- like => qr/^
- \n\QGRANT super TO grant7 WITH INHERIT TRUE GRANTED BY\E
- (.*\n)*
- \n\QGRANT ALL ON SCHEMA private TO grant5;\E
- (.*\n)*
- \n\QGRANT ALL ON FUNCTION public.fn() TO grant8;\E
- (.*\n)*
- \n\QGRANT ALL ON SEQUENCE public.serial TO grant6;\E
- (.*\n)*
- \n\QGRANT SELECT ON TABLE public.t TO grant1;\E
- \n\QGRANT INSERT ON TABLE public.t TO grant2;\E
- \n\QGRANT ALL ON TABLE public.t TO grant3;\E
- (.*\n)*
- \n\QGRANT CREATE,CONNECT ON DATABASE tapgrantsdb TO grant4;\E
- /xm,
- },
-
- excluding_databases => {
- setup_sql => 'CREATE DATABASE db1;
- \c db1
- CREATE TABLE t1 (id int);
- INSERT INTO t1 VALUES (1), (2), (3), (4);
- CREATE TABLE t2 (id int);
- INSERT INTO t2 VALUES (1), (2), (3), (4);
-
- CREATE DATABASE db2;
- \c db2
- CREATE TABLE t3 (id int);
- INSERT INTO t3 VALUES (1), (2), (3), (4);
- CREATE TABLE t4 (id int);
- INSERT INTO t4 VALUES (1), (2), (3), (4);
-
- CREATE DATABASE dbex3;
- \c dbex3
- CREATE TABLE t5 (id int);
- INSERT INTO t5 VALUES (1), (2), (3), (4);
- CREATE TABLE t6 (id int);
- INSERT INTO t6 VALUES (1), (2), (3), (4);
-
- CREATE DATABASE dbex4;
- \c dbex4
- CREATE TABLE t7 (id int);
- INSERT INTO t7 VALUES (1), (2), (3), (4);
- CREATE TABLE t8 (id int);
- INSERT INTO t8 VALUES (1), (2), (3), (4);
-
- CREATE DATABASE db5;
- \c db5
- CREATE TABLE t9 (id int);
- INSERT INTO t9 VALUES (1), (2), (3), (4);
- CREATE TABLE t10 (id int);
- INSERT INTO t10 VALUES (1), (2), (3), (4);
- ',
- dump_cmd => [
- 'pg_dumpall',
- '--format' => 'directory',
- '--file' => "$tempdir/excluding_databases",
- '--exclude-database' => 'dbex*',
- ],
- restore_cmd => [
- 'pg_restore', '-C',
- '--format' => 'directory',
- '--file' => "$tempdir/excluding_databases.sql",
- '--exclude-database' => 'db5',
- "$tempdir/excluding_databases",
- ],
- like => qr/^
- \n\QCREATE DATABASE db1\E
- (.*\n)*
- \n\QCREATE TABLE public.t1 (\E
- (.*\n)*
- \n\QCREATE TABLE public.t2 (\E
- (.*\n)*
- \n\QCREATE DATABASE db2\E
- (.*\n)*
- \n\QCREATE TABLE public.t3 (\E
- (.*\n)*
- \n\QCREATE TABLE public.t4 (/xm,
- unlike => qr/^
- \n\QCREATE DATABASE dbex3\E
- (.*\n)*
- \n\QCREATE TABLE public.t5 (\E
- (.*\n)*
- \n\QCREATE TABLE public.t6 (\E
- (.*\n)*
- \n\QCREATE DATABASE dbex4\E
- (.*\n)*
- \n\QCREATE TABLE public.t7 (\E
- (.*\n)*
- \n\QCREATE TABLE public.t8 (\E
- \n\QCREATE DATABASE db5\E
- (.*\n)*
- \n\QCREATE TABLE public.t9 (\E
- (.*\n)*
- \n\QCREATE TABLE public.t10 (\E
- /xm,
- },
-
- format_directory => {
- setup_sql => "CREATE TABLE format_directory(a int, b boolean, c text);
- INSERT INTO format_directory VALUES (1, true, 'name1'), (2, false, 'name2');",
- dump_cmd => [
- 'pg_dumpall',
- '--format' => 'directory',
- '--file' => "$tempdir/format_directory",
- ],
- restore_cmd => [
- 'pg_restore', '-C',
- '--format' => 'directory',
- '--file' => "$tempdir/format_directory.sql",
- "$tempdir/format_directory",
- ],
- like => qr/^\n\QCOPY public.format_directory (a, b, c) FROM stdin;/xm
- },
-
- format_tar => {
- setup_sql => "CREATE TABLE format_tar(a int, b boolean, c text);
- INSERT INTO format_tar VALUES (1, false, 'name3'), (2, true, 'name4');",
- dump_cmd => [
- 'pg_dumpall',
- '--format' => 'tar',
- '--file' => "$tempdir/format_tar",
- ],
- restore_cmd => [
- 'pg_restore', '-C',
- '--format' => 'tar',
- '--file' => "$tempdir/format_tar.sql",
- "$tempdir/format_tar",
- ],
- like => qr/^\n\QCOPY public.format_tar (a, b, c) FROM stdin;/xm
- },
-
- format_custom => {
- setup_sql => "CREATE TABLE format_custom(a int, b boolean, c text);
- INSERT INTO format_custom VALUES (1, false, 'name5'), (2, true, 'name6');",
- dump_cmd => [
- 'pg_dumpall',
- '--format' => 'custom',
- '--file' => "$tempdir/format_custom",
- ],
- restore_cmd => [
- 'pg_restore', '-C',
- '--format' => 'custom',
- '--file' => "$tempdir/format_custom.sql",
- "$tempdir/format_custom",
- ],
- like => qr/^ \n\QCOPY public.format_custom (a, b, c) FROM stdin;/xm
- },
-
- dump_globals_only => {
- setup_sql => "CREATE TABLE format_dir(a int, b boolean, c text);
- INSERT INTO format_dir VALUES (1, false, 'name5'), (2, true, 'name6');",
- dump_cmd => [
- 'pg_dumpall',
- '--format' => 'directory',
- '--globals-only',
- '--file' => "$tempdir/dump_globals_only",
- ],
- restore_cmd => [
- 'pg_restore', '-C', '--globals-only',
- '--format' => 'directory',
- '--file' => "$tempdir/dump_globals_only.sql",
- "$tempdir/dump_globals_only",
- ],
- like => qr/
- ^\s*\QCREATE ROLE dumpall;\E\s*\n
- /xm
- }, );
-
-# First execute the setup_sql
-foreach my $run (sort keys %pgdumpall_runs)
-{
- if ($pgdumpall_runs{$run}->{setup_sql})
- {
- $node->safe_psql($run_db, $pgdumpall_runs{$run}->{setup_sql});
- }
-}
-
-# Execute the tests
-foreach my $run (sort keys %pgdumpall_runs)
-{
- # Create a new target cluster for each test case run so that we don't
- # need to clean up the target cluster after each run.
- my $target_node = PostgreSQL::Test::Cluster->new("target_$run");
- $target_node->init;
- $target_node->start;
-
- # Dumpall from node cluster.
- $node->command_ok(\@{ $pgdumpall_runs{$run}->{dump_cmd} },
- "$run: pg_dumpall runs");
-
- # Restore the dump on "target_node" cluster.
- my @restore_cmd = (
- @{ $pgdumpall_runs{$run}->{restore_cmd} },
- '--host', $target_node->host, '--port', $target_node->port);
-
- my ($stdout, $stderr) = run_command(\@restore_cmd);
-
- # pg_restore --file output file.
- my $output_file = slurp_file("$tempdir/${run}.sql");
-
- if (!($pgdumpall_runs{$run}->{like}) && !($pgdumpall_runs{$run}->{unlike}))
- {
- die "missing \"like\" or \"unlike\" in test \"$run\"";
- }
-
- if ($pgdumpall_runs{$run}->{like})
- {
- like($output_file, $pgdumpall_runs{$run}->{like}, "should dump $run");
- }
-
- if ($pgdumpall_runs{$run}->{unlike})
- {
- unlike(
- $output_file,
- $pgdumpall_runs{$run}->{unlike},
- "should not dump $run");
- }
-}
-
-# Some negative test cases restoring a dump of pg_dumpall with pg_restore
-# test case 1: -C is not used in pg_restore with a dump of pg_dumpall
-$node->command_fails_like(
- [ 'pg_restore',
- "$tempdir/format_custom",
- '--format' => 'custom',
- '--file' => "$tempdir/error_test.sql", ],
- qr/\Qpg_restore: error: -C\/--create option should be specified when restoring an archive created by pg_dumpall\E/,
- 'When -C is not used in pg_restore with dump of pg_dumpall');
-
-# test case 2: the --list option is used with a dump of pg_dumpall
-$node->command_fails_like(
- [ 'pg_restore',
- "$tempdir/format_custom", '-C',
- '--format' => 'custom', '--list',
- '--file' => "$tempdir/error_test.sql", ],
- qr/\Qpg_restore: error: option -l\/--list cannot be used when restoring an archive created by pg_dumpall\E/,
- 'When --list is used in pg_restore with dump of pg_dumpall');
-
-# test case 3: a non-existent database is given with the -d option
-$node->command_fails_like(
- [ 'pg_restore',
- "$tempdir/format_custom", '-C',
- '--format' => 'custom',
- '-d' => 'dbpq', ],
- qr/\Qpg_restore: error: could not connect to database "dbpq"\E/,
- 'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall');
-
-$node->stop('fast');
-
-done_testing();
diff --git a/src/bin/pg_rewind/libpq_source.c b/src/bin/pg_rewind/libpq_source.c
index 56c2ad55d4a..e80edb7077e 100644
--- a/src/bin/pg_rewind/libpq_source.c
+++ b/src/bin/pg_rewind/libpq_source.c
@@ -215,7 +215,7 @@ libpq_get_current_wal_insert_lsn(rewind_source *source)
val = run_simple_query(conn, "SELECT pg_current_wal_insert_lsn()");
- if (sscanf(val, "%X/%X", &hi, &lo) != 2)
+ if (sscanf(val, "%X/%08X", &hi, &lo) != 2)
pg_fatal("unrecognized result \"%s\" for current WAL insert location", val);
result = ((uint64) hi) << 32 | lo;
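
The switch to %X/%08X zero-pads the low half of an LSN to eight hex digits, giving the text form a fixed width. A minimal sketch of the printf side of the same convention (illustrative values, not code from this patch):

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint64_t    lsn = 0x4A1;    /* an arbitrary example LSN */
        unsigned int hi = (unsigned int) (lsn >> 32);
        unsigned int lo = (unsigned int) lsn;

        printf("%X/%08X\n", hi, lo);    /* prints 0/000004A1, not 0/4A1 */
        return 0;
    }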
diff --git a/src/bin/pg_rewind/parsexlog.c b/src/bin/pg_rewind/parsexlog.c
index 2cd44625ca3..8f4b282c6b1 100644
--- a/src/bin/pg_rewind/parsexlog.c
+++ b/src/bin/pg_rewind/parsexlog.c
@@ -89,11 +89,11 @@ extractPageMap(const char *datadir, XLogRecPtr startpoint, int tliIndex,
XLogRecPtr errptr = xlogreader->EndRecPtr;
if (errormsg)
- pg_fatal("could not read WAL record at %X/%X: %s",
+ pg_fatal("could not read WAL record at %X/%08X: %s",
LSN_FORMAT_ARGS(errptr),
errormsg);
else
- pg_fatal("could not read WAL record at %X/%X",
+ pg_fatal("could not read WAL record at %X/%08X",
LSN_FORMAT_ARGS(errptr));
}
@@ -105,7 +105,7 @@ extractPageMap(const char *datadir, XLogRecPtr startpoint, int tliIndex,
* messed up.
*/
if (xlogreader->EndRecPtr != endpoint)
- pg_fatal("end pointer %X/%X is not a valid end point; expected %X/%X",
+ pg_fatal("end pointer %X/%08X is not a valid end point; expected %X/%08X",
LSN_FORMAT_ARGS(endpoint), LSN_FORMAT_ARGS(xlogreader->EndRecPtr));
XLogReaderFree(xlogreader);
@@ -143,10 +143,10 @@ readOneRecord(const char *datadir, XLogRecPtr ptr, int tliIndex,
if (record == NULL)
{
if (errormsg)
- pg_fatal("could not read WAL record at %X/%X: %s",
+ pg_fatal("could not read WAL record at %X/%08X: %s",
LSN_FORMAT_ARGS(ptr), errormsg);
else
- pg_fatal("could not read WAL record at %X/%X",
+ pg_fatal("could not read WAL record at %X/%08X",
LSN_FORMAT_ARGS(ptr));
}
endptr = xlogreader->EndRecPtr;
@@ -211,11 +211,11 @@ findLastCheckpoint(const char *datadir, XLogRecPtr forkptr, int tliIndex,
if (record == NULL)
{
if (errormsg)
- pg_fatal("could not find previous WAL record at %X/%X: %s",
+ pg_fatal("could not find previous WAL record at %X/%08X: %s",
LSN_FORMAT_ARGS(searchptr),
errormsg);
else
- pg_fatal("could not find previous WAL record at %X/%X",
+ pg_fatal("could not find previous WAL record at %X/%08X",
LSN_FORMAT_ARGS(searchptr));
}
@@ -458,8 +458,8 @@ extractPageInfo(XLogReaderState *record)
* we don't recognize the type. That's bad - we don't know how to
* track that change.
*/
- pg_fatal("WAL record modifies a relation, but record type is not recognized: "
- "lsn: %X/%X, rmid: %d, rmgr: %s, info: %02X",
+ pg_fatal("WAL record modifies a relation, but record type is not recognized:\n"
+ "lsn: %X/%08X, rmid: %d, rmgr: %s, info: %02X",
LSN_FORMAT_ARGS(record->ReadRecPtr),
rmid, RmgrName(rmid), info);
}
diff --git a/src/bin/pg_rewind/pg_rewind.c b/src/bin/pg_rewind/pg_rewind.c
index 9d16c1e6b47..0c68dd4235e 100644
--- a/src/bin/pg_rewind/pg_rewind.c
+++ b/src/bin/pg_rewind/pg_rewind.c
@@ -393,7 +393,7 @@ main(int argc, char **argv)
targetHistory, targetNentries,
&divergerec, &lastcommontliIndex);
- pg_log_info("servers diverged at WAL location %X/%X on timeline %u",
+ pg_log_info("servers diverged at WAL location %X/%08X on timeline %u",
LSN_FORMAT_ARGS(divergerec),
targetHistory[lastcommontliIndex].tli);
@@ -461,7 +461,7 @@ main(int argc, char **argv)
findLastCheckpoint(datadir_target, divergerec, lastcommontliIndex,
&chkptrec, &chkpttli, &chkptredo, restore_command);
- pg_log_info("rewinding from last common checkpoint at %X/%X on timeline %u",
+ pg_log_info("rewinding from last common checkpoint at %X/%08X on timeline %u",
LSN_FORMAT_ARGS(chkptrec), chkpttli);
/* Initialize the hash table to track the status of each file */
@@ -902,7 +902,7 @@ getTimelineHistory(TimeLineID tli, bool is_source, int *nentries)
TimeLineHistoryEntry *entry;
entry = &history[i];
- pg_log_debug("%u: %X/%X - %X/%X", entry->tli,
+ pg_log_debug("%u: %X/%08X - %X/%08X", entry->tli,
LSN_FORMAT_ARGS(entry->begin),
LSN_FORMAT_ARGS(entry->end));
}
@@ -981,8 +981,8 @@ createBackupLabel(XLogRecPtr startpoint, TimeLineID starttli, XLogRecPtr checkpo
strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S %Z", tmp);
len = snprintf(buf, sizeof(buf),
- "START WAL LOCATION: %X/%X (file %s)\n"
- "CHECKPOINT LOCATION: %X/%X\n"
+ "START WAL LOCATION: %X/%08X (file %s)\n"
+ "CHECKPOINT LOCATION: %X/%08X\n"
"BACKUP METHOD: pg_rewind\n"
"BACKUP FROM: standby\n"
"START TIME: %s\n",
diff --git a/src/bin/pg_rewind/t/RewindTest.pm b/src/bin/pg_rewind/t/RewindTest.pm
index 3efab831797..b0234ebfaf2 100644
--- a/src/bin/pg_rewind/t/RewindTest.pm
+++ b/src/bin/pg_rewind/t/RewindTest.pm
@@ -285,7 +285,7 @@ sub run_pg_rewind
# Check that pg_rewind with dbname and --write-recovery-conf
# wrote the dbname in the generated primary_conninfo value.
like(slurp_file("$primary_pgdata/postgresql.auto.conf"),
- qr/dbname=postgres/m, 'recovery conf file sets dbname');
+ qr/dbname=postgres/m, 'recovery conf file sets dbname');
# Check that standby.signal is here as recovery configuration
# was requested.
diff --git a/src/bin/pg_rewind/timeline.c b/src/bin/pg_rewind/timeline.c
index 4d9f0d8301b..6784969951f 100644
--- a/src/bin/pg_rewind/timeline.c
+++ b/src/bin/pg_rewind/timeline.c
@@ -66,7 +66,7 @@ rewind_parseTimeLineHistory(char *buffer, TimeLineID targetTLI, int *nentries)
if (*ptr == '\0' || *ptr == '#')
continue;
- nfields = sscanf(fline, "%u\t%X/%X", &tli, &switchpoint_hi, &switchpoint_lo);
+ nfields = sscanf(fline, "%u\t%X/%08X", &tli, &switchpoint_hi, &switchpoint_lo);
if (nfields < 1)
{
diff --git a/src/bin/pg_test_timing/pg_test_timing.c b/src/bin/pg_test_timing/pg_test_timing.c
index ce7aad4b25a..a5621251afc 100644
--- a/src/bin/pg_test_timing/pg_test_timing.c
+++ b/src/bin/pg_test_timing/pg_test_timing.c
@@ -9,19 +9,30 @@
#include <limits.h>
#include "getopt_long.h"
+#include "port/pg_bitutils.h"
#include "portability/instr_time.h"
static const char *progname;
static unsigned int test_duration = 3;
+static double max_rprct = 99.99;
+
+/* record duration in powers of 2 nanoseconds */
+static long long int histogram[32];
+
+/* record counts of first 10K durations directly */
+#define NUM_DIRECT 10000
+static long long int direct_histogram[NUM_DIRECT];
+
+/* separately record highest observed duration */
+static int32 largest_diff;
+static long long int largest_diff_count;
+
static void handle_args(int argc, char *argv[]);
static uint64 test_timing(unsigned int duration);
static void output(uint64 loop_count);
-/* record duration in powers of 2 microseconds */
-static long long int histogram[32];
-
int
main(int argc, char *argv[])
{
@@ -44,6 +55,7 @@ handle_args(int argc, char *argv[])
{
static struct option long_options[] = {
{"duration", required_argument, NULL, 'd'},
+ {"cutoff", required_argument, NULL, 'c'},
{NULL, 0, NULL, 0}
};
@@ -56,7 +68,7 @@ handle_args(int argc, char *argv[])
{
if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
{
- printf(_("Usage: %s [-d DURATION]\n"), progname);
+ printf(_("Usage: %s [-d DURATION] [-c CUTOFF]\n"), progname);
exit(0);
}
if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
@@ -66,7 +78,7 @@ handle_args(int argc, char *argv[])
}
}
- while ((option = getopt_long(argc, argv, "d:",
+ while ((option = getopt_long(argc, argv, "d:c:",
long_options, &optindex)) != -1)
{
switch (option)
@@ -93,6 +105,26 @@ handle_args(int argc, char *argv[])
}
break;
+ case 'c':
+ errno = 0;
+ max_rprct = strtod(optarg, &endptr);
+
+ if (endptr == optarg || *endptr != '\0' || errno != 0)
+ {
+ fprintf(stderr, _("%s: invalid argument for option %s\n"),
+ progname, "--cutoff");
+ fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname);
+ exit(1);
+ }
+
+ if (max_rprct < 0 || max_rprct > 100)
+ {
+ fprintf(stderr, _("%s: %s must be in range %u..%u\n"),
+ progname, "--cutoff", 0, 100);
+ exit(1);
+ }
+ break;
+
default:
fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
progname);
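
The --cutoff parsing above follows the usual strtod validation pattern: reject empty input, trailing junk, conversion errors, and out-of-range values. A standalone sketch of the same checks (hypothetical driver, not the tool itself):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    int
    main(int argc, char **argv)
    {
        const char *arg = (argc > 1) ? argv[1] : "99.99";
        char       *endptr;
        double      cutoff;

        errno = 0;
        cutoff = strtod(arg, &endptr);

        /* empty input, trailing junk, errno, or out-of-range: all rejected */
        if (endptr == arg || *endptr != '\0' || errno != 0 ||
            cutoff < 0 || cutoff > 100)
        {
            fprintf(stderr, "invalid cutoff: \"%s\"\n", arg);
            return 1;
        }
        printf("cutoff = %g%%\n", cutoff);
        return 0;
    }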
@@ -111,7 +143,6 @@ handle_args(int argc, char *argv[])
exit(1);
}
-
printf(ngettext("Testing timing overhead for %u second.\n",
"Testing timing overhead for %u seconds.\n",
test_duration),
@@ -130,23 +161,33 @@ test_timing(unsigned int duration)
end_time,
temp;
- total_time = duration > 0 ? duration * INT64CONST(1000000) : 0;
+ /*
+ * Pre-zero the statistics data structures. They're already zero by
+ * default, but this helps bring them into processor cache and avoid
+ * possible timing glitches due to COW behavior.
+ */
+ memset(direct_histogram, 0, sizeof(direct_histogram));
+ memset(histogram, 0, sizeof(histogram));
+ largest_diff = 0;
+ largest_diff_count = 0;
+
+ total_time = duration > 0 ? duration * INT64CONST(1000000000) : 0;
INSTR_TIME_SET_CURRENT(start_time);
- cur = INSTR_TIME_GET_MICROSEC(start_time);
+ cur = INSTR_TIME_GET_NANOSEC(start_time);
while (time_elapsed < total_time)
{
int32 diff,
- bits = 0;
+ bits;
prev = cur;
INSTR_TIME_SET_CURRENT(temp);
- cur = INSTR_TIME_GET_MICROSEC(temp);
+ cur = INSTR_TIME_GET_NANOSEC(temp);
diff = cur - prev;
/* Did time go backwards? */
- if (diff < 0)
+ if (unlikely(diff < 0))
{
fprintf(stderr, _("Detected clock going backwards in time.\n"));
fprintf(stderr, _("Time warp: %d ms\n"), diff);
@@ -154,25 +195,37 @@ test_timing(unsigned int duration)
}
/* What is the highest bit in the time diff? */
- while (diff)
- {
- diff >>= 1;
- bits++;
- }
+ if (diff > 0)
+ bits = pg_leftmost_one_pos32(diff) + 1;
+ else
+ bits = 0;
/* Update appropriate duration bucket */
histogram[bits]++;
+ /* Update direct histogram of time diffs */
+ if (diff < NUM_DIRECT)
+ direct_histogram[diff]++;
+
+ /* Also track the largest observed duration, even if >= NUM_DIRECT */
+ if (diff > largest_diff)
+ {
+ largest_diff = diff;
+ largest_diff_count = 1;
+ }
+ else if (diff == largest_diff)
+ largest_diff_count++;
+
loop_count++;
INSTR_TIME_SUBTRACT(temp, start_time);
- time_elapsed = INSTR_TIME_GET_MICROSEC(temp);
+ time_elapsed = INSTR_TIME_GET_NANOSEC(temp);
}
INSTR_TIME_SET_CURRENT(end_time);
INSTR_TIME_SUBTRACT(end_time, start_time);
- printf(_("Per loop time including overhead: %0.2f ns\n"),
+ printf(_("Average loop time including overhead: %0.2f ns\n"),
INSTR_TIME_GET_DOUBLE(end_time) * 1e9 / loop_count);
return loop_count;
@@ -181,28 +234,95 @@ test_timing(unsigned int duration)
static void
output(uint64 loop_count)
{
- int64 max_bit = 31,
- i;
- char *header1 = _("< us");
- char *header2 = /* xgettext:no-c-format */ _("% of total");
- char *header3 = _("count");
+ int max_bit = 31;
+ const char *header1 = _("<= ns");
+ const char *header1b = _("ns");
+ const char *header2 = /* xgettext:no-c-format */ _("% of total");
+ const char *header3 = /* xgettext:no-c-format */ _("running %");
+ const char *header4 = _("count");
int len1 = strlen(header1);
int len2 = strlen(header2);
int len3 = strlen(header3);
+ int len4 = strlen(header4);
+ double rprct;
+ bool stopped = false;
/* find highest bit value */
while (max_bit > 0 && histogram[max_bit] == 0)
max_bit--;
+ /* set minimum column widths */
+ len1 = Max(8, len1);
+ len2 = Max(10, len2);
+ len3 = Max(10, len3);
+ len4 = Max(10, len4);
+
printf(_("Histogram of timing durations:\n"));
- printf("%*s %*s %*s\n",
- Max(6, len1), header1,
- Max(10, len2), header2,
- Max(10, len3), header3);
-
- for (i = 0; i <= max_bit; i++)
- printf("%*ld %*.5f %*lld\n",
- Max(6, len1), 1l << i,
- Max(10, len2) - 1, (double) histogram[i] * 100 / loop_count,
- Max(10, len3), histogram[i]);
+ printf("%*s %*s %*s %*s\n",
+ len1, header1,
+ len2, header2,
+ len3, header3,
+ len4, header4);
+
+ rprct = 0;
+ for (int i = 0; i <= max_bit; i++)
+ {
+ double prct = (double) histogram[i] * 100 / loop_count;
+
+ rprct += prct;
+ printf("%*ld %*.4f %*.4f %*lld\n",
+ len1, (1L << i) - 1,
+ len2, prct,
+ len3, rprct,
+ len4, histogram[i]);
+ }
+
+ printf(_("\nObserved timing durations up to %.4f%%:\n"), max_rprct);
+ printf("%*s %*s %*s %*s\n",
+ len1, header1b,
+ len2, header2,
+ len3, header3,
+ len4, header4);
+
+ rprct = 0;
+ for (int i = 0; i < NUM_DIRECT; i++)
+ {
+ if (direct_histogram[i])
+ {
+ double prct = (double) direct_histogram[i] * 100 / loop_count;
+ bool print_it = !stopped;
+
+ rprct += prct;
+
+ /* if largest diff is < NUM_DIRECT, be sure we print it */
+ if (i == largest_diff)
+ {
+ if (stopped)
+ printf("...\n");
+ print_it = true;
+ }
+
+ if (print_it)
+ printf("%*d %*.4f %*.4f %*lld\n",
+ len1, i,
+ len2, prct,
+ len3, rprct,
+ len4, direct_histogram[i]);
+ if (rprct >= max_rprct)
+ stopped = true;
+ }
+ }
+
+ /* print largest diff when it's outside the array range */
+ if (largest_diff >= NUM_DIRECT)
+ {
+ double prct = (double) largest_diff_count * 100 / loop_count;
+
+ printf("...\n");
+ printf("%*d %*.4f %*.4f %*lld\n",
+ len1, largest_diff,
+ len2, prct,
+ len3, 100.0,
+ len4, largest_diff_count);
+ }
}
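In the rewritten histogram code above, the bucket index for a nanosecond diff is the position of its highest set bit plus one, so bucket i collects durations of at most 2^i - 1 ns and bucket 0 holds zero-length diffs. A sketch of the equivalent computation with pg_leftmost_one_pos32() replaced by a portable loop, for illustration only:

    #include <stdint.h>

    /* Equivalent of the pg_leftmost_one_pos32()-based bucket selection. */
    static int
    duration_bucket(int32_t diff)
    {
        int     bits = 0;

        while (diff > 0)
        {
            diff >>= 1;
            bits++;
        }
        /* For diff > 0 this equals pg_leftmost_one_pos32(diff) + 1. */
        return bits;        /* diff == 0 lands in bucket 0 */
    }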
diff --git a/src/bin/pg_test_timing/t/001_basic.pl b/src/bin/pg_test_timing/t/001_basic.pl
index 6554cd981af..9912acc052a 100644
--- a/src/bin/pg_test_timing/t/001_basic.pl
+++ b/src/bin/pg_test_timing/t/001_basic.pl
@@ -25,5 +25,22 @@ command_fails_like(
[ 'pg_test_timing', '--duration' => '0' ],
qr/\Qpg_test_timing: --duration must be in range 1..4294967295\E/,
'pg_test_timing: --duration must be in range');
+command_fails_like(
+ [ 'pg_test_timing', '--cutoff' => '101' ],
+ qr/\Qpg_test_timing: --cutoff must be in range 0..100\E/,
+ 'pg_test_timing: --cutoff must be in range');
+
+#########################################
+# We obviously can't check for specific output, but we can
+# do a simple run and make sure it produces something.
+
+command_like(
+ [ 'pg_test_timing', '--duration' => '1' ],
+ qr/
+\QTesting timing overhead for 1 second.\E.*
+\QHistogram of timing durations:\E.*
+\QObserved timing durations up to 99.9900%:\E
+/sx,
+ 'pg_test_timing: sanity check');
done_testing();
diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c
index 940fc77fc2e..310f53c5577 100644
--- a/src/bin/pg_upgrade/check.c
+++ b/src/bin/pg_upgrade/check.c
@@ -23,11 +23,12 @@ static void check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster);
static void check_for_user_defined_postfix_ops(ClusterInfo *cluster);
static void check_for_incompatible_polymorphics(ClusterInfo *cluster);
static void check_for_tables_with_oids(ClusterInfo *cluster);
+static void check_for_not_null_inheritance(ClusterInfo *cluster);
static void check_for_pg_role_prefix(ClusterInfo *cluster);
static void check_for_new_tablespace_dir(void);
static void check_for_user_defined_encoding_conversions(ClusterInfo *cluster);
static void check_for_unicode_update(ClusterInfo *cluster);
-static void check_new_cluster_logical_replication_slots(void);
+static void check_new_cluster_replication_slots(void);
static void check_new_cluster_subscription_configuration(void);
static void check_old_cluster_for_valid_slots(void);
static void check_old_cluster_subscription_state(void);
@@ -168,6 +169,7 @@ static DataTypesUsageChecks data_types_usage_checks[] =
/* pg_class.oid is preserved, so 'regclass' is OK */
" 'regcollation', "
" 'regconfig', "
+ /* pg_database.oid is preserved, so 'regdatabase' is OK */
" 'regdictionary', "
" 'regnamespace', "
" 'regoper', "
@@ -419,7 +421,7 @@ process_data_type_check(DbInfo *dbinfo, PGresult *res, void *arg)
if (!state->result)
{
pg_log(PG_REPORT, "failed check: %s", _(state->check->status));
- appendPQExpBuffer(*state->report, "\n%s\n%s %s\n",
+ appendPQExpBuffer(*state->report, "\n%s\n%s\n %s\n",
_(state->check->report_text),
_("A list of the problem columns is in the file:"),
output_path);
@@ -629,7 +631,7 @@ check_and_dump_old_cluster(void)
* Before that the logical slots are not upgraded, so we will not be
* able to upgrade the logical replication clusters completely.
*/
- get_subscription_count(&old_cluster);
+ get_subscription_info(&old_cluster);
check_old_cluster_subscription_state();
}
@@ -672,6 +674,14 @@ check_and_dump_old_cluster(void)
check_for_tables_with_oids(&old_cluster);
/*
+ * Pre-PG 18 allowed child tables to omit not-null constraints that their
+ * parents' columns have, but schema restore fails for them. Verify there
+ * are none, if applicable.
+ */
+ if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1800)
+ check_for_not_null_inheritance(&old_cluster);
+
+ /*
* Pre-PG 10 allowed tables with 'unknown' type columns and non WAL logged
* hash indexes
*/
@@ -754,7 +764,7 @@ check_new_cluster(void)
check_for_new_tablespace_dir();
- check_new_cluster_logical_replication_slots();
+ check_new_cluster_replication_slots();
check_new_cluster_subscription_configuration();
}
@@ -885,7 +895,7 @@ check_cluster_versions(void)
*/
if (GET_MAJOR_VERSION(old_cluster.major_version) >= 1800 &&
user_opts.char_signedness != -1)
- pg_fatal("%s option cannot be used to upgrade from PostgreSQL %s and later.",
+ pg_fatal("The option %s cannot be used for upgrades from PostgreSQL %s and later.",
"--set-char-signedness", "18");
check_ok();
@@ -946,12 +956,12 @@ check_for_new_tablespace_dir(void)
prep_status("Checking for new cluster tablespace directories");
- for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+ for (tblnum = 0; tblnum < new_cluster.num_tablespaces; tblnum++)
{
struct stat statbuf;
snprintf(new_tablespace_dir, MAXPGPATH, "%s%s",
- os_info.old_tablespaces[tblnum],
+ new_cluster.tablespaces[tblnum],
new_cluster.tablespace_suffix);
if (stat(new_tablespace_dir, &statbuf) == 0 || errno != ENOENT)
@@ -1003,17 +1013,17 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
* directory. We can't create a proper old cluster delete script in that
* case.
*/
- for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+ for (tblnum = 0; tblnum < new_cluster.num_tablespaces; tblnum++)
{
- char old_tablespace_dir[MAXPGPATH];
+ char new_tablespace_dir[MAXPGPATH];
- strlcpy(old_tablespace_dir, os_info.old_tablespaces[tblnum], MAXPGPATH);
- canonicalize_path(old_tablespace_dir);
- if (path_is_prefix_of_path(old_cluster_pgdata, old_tablespace_dir))
+ strlcpy(new_tablespace_dir, new_cluster.tablespaces[tblnum], MAXPGPATH);
+ canonicalize_path(new_tablespace_dir);
+ if (path_is_prefix_of_path(old_cluster_pgdata, new_tablespace_dir))
{
/* reproduce warning from CREATE TABLESPACE that is in the log */
pg_log(PG_WARNING,
- "\nWARNING: user-defined tablespace locations should not be inside the data directory, i.e. %s", old_tablespace_dir);
+ "\nWARNING: user-defined tablespace locations should not be inside the data directory, i.e. %s", new_tablespace_dir);
/* Unlink file in case it is left over from a previous run. */
unlink(*deletion_script_file_name);
@@ -1041,9 +1051,9 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
/* delete old cluster's alternate tablespaces */
old_tblspc_suffix = pg_strdup(old_cluster.tablespace_suffix);
fix_path_separator(old_tblspc_suffix);
- for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+ for (tblnum = 0; tblnum < old_cluster.num_tablespaces; tblnum++)
fprintf(script, RMDIR_CMD " %c%s%s%c\n", PATH_QUOTE,
- fix_path_separator(os_info.old_tablespaces[tblnum]),
+ fix_path_separator(old_cluster.tablespaces[tblnum]),
old_tblspc_suffix, PATH_QUOTE);
pfree(old_tblspc_suffix);
@@ -1623,6 +1633,93 @@ check_for_tables_with_oids(ClusterInfo *cluster)
check_ok();
}
+/*
+ * Callback function for processing results of query for
+ * check_for_not_null_inheritance.
+ */
+static void
+process_inconsistent_notnull(DbInfo *dbinfo, PGresult *res, void *arg)
+{
+ UpgradeTaskReport *report = (UpgradeTaskReport *) arg;
+ int ntups = PQntuples(res);
+ int i_nspname = PQfnumber(res, "nspname");
+ int i_relname = PQfnumber(res, "relname");
+ int i_attname = PQfnumber(res, "attname");
+
+ AssertVariableIsOfType(&process_inconsistent_notnull,
+ UpgradeTaskProcessCB);
+
+ if (ntups == 0)
+ return;
+
+ if (report->file == NULL &&
+ (report->file = fopen_priv(report->path, "w")) == NULL)
+ pg_fatal("could not open file \"%s\": %m", report->path);
+
+ fprintf(report->file, "In database: %s\n", dbinfo->db_name);
+
+ for (int rowno = 0; rowno < ntups; rowno++)
+ {
+ fprintf(report->file, " %s.%s.%s\n",
+ PQgetvalue(res, rowno, i_nspname),
+ PQgetvalue(res, rowno, i_relname),
+ PQgetvalue(res, rowno, i_attname));
+ }
+}
+
+/*
+ * check_for_not_null_inheritance()
+ *
+ * An attempt to create a child table lacking a not-null constraint that is
+ * present in its parent errors out. This can no longer occur since PG 18,
+ * but previously there were various ways for it to happen. Check that the
+ * cluster to be upgraded doesn't have any of those problems.
+ */
+static void
+check_for_not_null_inheritance(ClusterInfo *cluster)
+{
+ UpgradeTaskReport report;
+ UpgradeTask *task;
+ const char *query;
+
+ prep_status("Checking for not-null constraint inconsistencies");
+
+ report.file = NULL;
+ snprintf(report.path, sizeof(report.path), "%s/%s",
+ log_opts.basedir,
+ "not_null_inconsistent_columns.txt");
+
+ query = "SELECT nspname, cc.relname, ac.attname "
+ "FROM pg_catalog.pg_inherits i, pg_catalog.pg_attribute ac, "
+ " pg_catalog.pg_attribute ap, pg_catalog.pg_class cc, "
+ " pg_catalog.pg_namespace nc "
+ "WHERE cc.oid = ac.attrelid AND i.inhrelid = ac.attrelid "
+ " AND i.inhparent = ap.attrelid AND ac.attname = ap.attname "
+ " AND cc.relnamespace = nc.oid "
+ " AND ap.attnum > 0 and ap.attnotnull AND NOT ac.attnotnull";
+
+ task = upgrade_task_create();
+ upgrade_task_add_step(task, query,
+ process_inconsistent_notnull,
+ true, &report);
+ upgrade_task_run(task, cluster);
+ upgrade_task_free(task);
+
+ if (report.file)
+ {
+ fclose(report.file);
+ pg_log(PG_REPORT, "fatal");
+ pg_fatal("Your installation contains inconsistent NOT NULL constraints.\n"
+ "If the parent column(s) are NOT NULL, then the child column must\n"
+ "also be marked NOT NULL, or the upgrade will fail.\n"
+ "You can fix this by running\n"
+ " ALTER TABLE tablename ALTER column SET NOT NULL;\n"
+ "on each column listed in the file:\n"
+ " %s", report.path);
+ }
+ else
+ check_ok();
+}
/*
* check_for_pg_role_prefix()
@@ -1934,7 +2031,7 @@ check_for_unicode_update(ClusterInfo *cluster)
{
fclose(report.file);
report_status(PG_WARNING, "warning");
- pg_log(PG_WARNING, "Your installation contains relations that may be affected by a new version of Unicode.\n"
+ pg_log(PG_WARNING, "Your installation contains relations that might be affected by a new version of Unicode.\n"
"A list of potentially-affected relations is in the file:\n"
" %s", report.path);
}
@@ -1943,48 +2040,80 @@ check_for_unicode_update(ClusterInfo *cluster)
}
/*
- * check_new_cluster_logical_replication_slots()
+ * check_new_cluster_replication_slots()
*
- * Verify that there are no logical replication slots on the new cluster and
- * that the parameter settings necessary for creating slots are sufficient.
+ * Validate the new cluster's readiness for migrating replication slots:
+ * - Ensure there are no existing logical replication slots on the new
+ *   cluster when migrating logical slots.
+ * - Ensure the conflict detection slot does not exist on the new cluster
+ *   when migrating subscriptions with retain_dead_tuples enabled.
+ * - Ensure that the parameter settings on the new cluster necessary for
+ * creating slots are sufficient.
*/
static void
-check_new_cluster_logical_replication_slots(void)
+check_new_cluster_replication_slots(void)
{
PGresult *res;
PGconn *conn;
int nslots_on_old;
int nslots_on_new;
+ int rdt_slot_on_new;
int max_replication_slots;
char *wal_level;
+ int i_nslots_on_new;
+ int i_rdt_slot_on_new;
- /* Logical slots can be migrated since PG17. */
+ /*
+ * Logical slots can be migrated since PG17, and the physical slot
+ * CONFLICT_DETECTION_SLOT can be migrated since PG19.
+ */
if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1600)
return;
nslots_on_old = count_old_cluster_logical_slots();
- /* Quick return if there are no logical slots to be migrated. */
- if (nslots_on_old == 0)
+ /*
+ * Quick return if there are no slots to be migrated and no subscriptions
+ * have the retain_dead_tuples option enabled.
+ */
+ if (nslots_on_old == 0 && !old_cluster.sub_retain_dead_tuples)
return;
conn = connectToServer(&new_cluster, "template1");
- prep_status("Checking for new cluster logical replication slots");
+ prep_status("Checking for new cluster replication slots");
- res = executeQueryOrDie(conn, "SELECT count(*) "
- "FROM pg_catalog.pg_replication_slots "
- "WHERE slot_type = 'logical' AND "
- "temporary IS FALSE;");
+ res = executeQueryOrDie(conn, "SELECT %s AS nslots_on_new, %s AS rdt_slot_on_new "
+ "FROM pg_catalog.pg_replication_slots",
+ nslots_on_old > 0
+ ? "COUNT(*) FILTER (WHERE slot_type = 'logical' AND temporary IS FALSE)"
+ : "0",
+ old_cluster.sub_retain_dead_tuples
+ ? "COUNT(*) FILTER (WHERE slot_name = 'pg_conflict_detection')"
+ : "0");
if (PQntuples(res) != 1)
- pg_fatal("could not count the number of logical replication slots");
+ pg_fatal("could not count the number of replication slots");
- nslots_on_new = atoi(PQgetvalue(res, 0, 0));
+ i_nslots_on_new = PQfnumber(res, "nslots_on_new");
+ i_rdt_slot_on_new = PQfnumber(res, "rdt_slot_on_new");
+
+ nslots_on_new = atoi(PQgetvalue(res, 0, i_nslots_on_new));
if (nslots_on_new)
+ {
+ Assert(nslots_on_old);
pg_fatal("expected 0 logical replication slots but found %d",
nslots_on_new);
+ }
+
+ rdt_slot_on_new = atoi(PQgetvalue(res, 0, i_rdt_slot_on_new));
+
+ if (rdt_slot_on_new)
+ {
+ Assert(old_cluster.sub_retain_dead_tuples);
+ pg_fatal("The replication slot \"pg_conflict_detection\" already exists on the new cluster");
+ }
PQclear(res);
@@ -1997,12 +2126,24 @@ check_new_cluster_logical_replication_slots(void)
wal_level = PQgetvalue(res, 0, 0);
- if (strcmp(wal_level, "logical") != 0)
+ if (nslots_on_old > 0 && strcmp(wal_level, "logical") != 0)
pg_fatal("\"wal_level\" must be \"logical\" but is set to \"%s\"",
wal_level);
+ if (old_cluster.sub_retain_dead_tuples &&
+ strcmp(wal_level, "minimal") == 0)
+ pg_fatal("\"wal_level\" must be \"replica\" or \"logical\" but is set to \"%s\"",
+ wal_level);
+
max_replication_slots = atoi(PQgetvalue(res, 1, 0));
+ if (old_cluster.sub_retain_dead_tuples &&
+ nslots_on_old + 1 > max_replication_slots)
+ pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to the number of "
+ "logical replication slots on the old cluster plus one additional slot required "
+ "for retaining conflict detection information (%d)",
+ max_replication_slots, nslots_on_old + 1);
+
if (nslots_on_old > max_replication_slots)
pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to the number of "
"logical replication slots (%d) on the old cluster",
@@ -2114,6 +2255,22 @@ check_old_cluster_for_valid_slots(void)
"The slot \"%s\" has not consumed the WAL yet\n",
slot->slotname);
}
+
+ /*
+ * The name "pg_conflict_detection" (defined as
+ * CONFLICT_DETECTION_SLOT) has been reserved for the logical
+ * replication conflict detection slot since PG19.
+ */
+ if (strcmp(slot->slotname, "pg_conflict_detection") == 0)
+ {
+ if (script == NULL &&
+ (script = fopen_priv(output_path, "w")) == NULL)
+ pg_fatal("could not open file \"%s\": %m", output_path);
+
+ fprintf(script,
+ "The slot name \"%s\" is reserved\n",
+ slot->slotname);
+ }
}
}
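The two pg_fatal() checks on max_replication_slots above combine into a single capacity rule; a condensed sketch for orientation (required_slots is an illustrative local, not a variable in the patch):

    /* One extra slot is reserved for pg_conflict_detection when needed. */
    int     required_slots = nslots_on_old +
        (old_cluster.sub_retain_dead_tuples ? 1 : 0);

    if (required_slots > max_replication_slots)
        pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to %d",
                 max_replication_slots, required_slots);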
diff --git a/src/bin/pg_upgrade/dump.c b/src/bin/pg_upgrade/dump.c
index 23cb08e8347..183f08ce1e8 100644
--- a/src/bin/pg_upgrade/dump.c
+++ b/src/bin/pg_upgrade/dump.c
@@ -58,7 +58,7 @@ generate_old_dump(void)
(user_opts.transfer_mode == TRANSFER_MODE_SWAP) ?
"" : "--sequence-data",
log_opts.verbose ? "--verbose" : "",
- user_opts.do_statistics ? "" : "--no-statistics",
+ user_opts.do_statistics ? "--with-statistics" : "--no-statistics",
log_opts.dumpdir,
sql_file_name, escaped_connstr.data);
diff --git a/src/bin/pg_upgrade/info.c b/src/bin/pg_upgrade/info.c
index 4b7a56f5b3b..c39eb077c2f 100644
--- a/src/bin/pg_upgrade/info.c
+++ b/src/bin/pg_upgrade/info.c
@@ -443,10 +443,26 @@ get_db_infos(ClusterInfo *cluster)
for (tupnum = 0; tupnum < ntups; tupnum++)
{
+ char *spcloc = PQgetvalue(res, tupnum, i_spclocation);
+ bool inplace = spcloc[0] && !is_absolute_path(spcloc);
+
dbinfos[tupnum].db_oid = atooid(PQgetvalue(res, tupnum, i_oid));
dbinfos[tupnum].db_name = pg_strdup(PQgetvalue(res, tupnum, i_datname));
- snprintf(dbinfos[tupnum].db_tablespace, sizeof(dbinfos[tupnum].db_tablespace), "%s",
- PQgetvalue(res, tupnum, i_spclocation));
+
+ /*
+ * The tablespace location might be "", meaning the cluster default
+ * location, i.e. pg_default or pg_global. For in-place tablespaces,
+ * pg_tablespace_location() returns a path relative to the data
+ * directory.
+ */
+ if (inplace)
+ snprintf(dbinfos[tupnum].db_tablespace,
+ sizeof(dbinfos[tupnum].db_tablespace),
+ "%s/%s", cluster->pgdata, spcloc);
+ else
+ snprintf(dbinfos[tupnum].db_tablespace,
+ sizeof(dbinfos[tupnum].db_tablespace),
+ "%s", spcloc);
}
PQclear(res);
@@ -616,11 +632,21 @@ process_rel_infos(DbInfo *dbinfo, PGresult *res, void *arg)
/* Is the tablespace oid non-default? */
if (atooid(PQgetvalue(res, relnum, i_reltablespace)) != 0)
{
+ char *spcloc = PQgetvalue(res, relnum, i_spclocation);
+ bool inplace = spcloc[0] && !is_absolute_path(spcloc);
+
/*
* The tablespace location might be "", meaning the cluster
- * default location, i.e. pg_default or pg_global.
+ * default location, i.e. pg_default or pg_global. For in-place
+ * tablespaces, pg_tablespace_location() returns a path relative
+ * to the data directory.
*/
- tablespace = PQgetvalue(res, relnum, i_spclocation);
+ if (inplace)
+ tablespace = psprintf("%s/%s",
+ os_info.running_cluster->pgdata,
+ spcloc);
+ else
+ tablespace = spcloc;
/* Can we reuse the previous string allocation? */
if (last_tablespace && strcmp(tablespace, last_tablespace) == 0)
@@ -630,6 +656,10 @@ process_rel_infos(DbInfo *dbinfo, PGresult *res, void *arg)
last_tablespace = curr->tablespace = pg_strdup(tablespace);
curr->tblsp_alloc = true;
}
+
+ /* Free palloc'd string for in-place tablespaces. */
+ if (inplace)
+ pfree(tablespace);
}
else
/* A zero reltablespace oid indicates the database tablespace. */
@@ -752,20 +782,33 @@ count_old_cluster_logical_slots(void)
}
/*
- * get_subscription_count()
+ * get_subscription_info()
*
- * Gets the number of subscriptions in the cluster.
+ * Gets the information of subscriptions in the cluster.
*/
void
-get_subscription_count(ClusterInfo *cluster)
+get_subscription_info(ClusterInfo *cluster)
{
PGconn *conn;
PGresult *res;
+ int i_nsub;
+ int i_retain_dead_tuples;
conn = connectToServer(cluster, "template1");
- res = executeQueryOrDie(conn, "SELECT count(*) "
- "FROM pg_catalog.pg_subscription");
- cluster->nsubs = atoi(PQgetvalue(res, 0, 0));
+ if (GET_MAJOR_VERSION(cluster->major_version) >= 1900)
+ res = executeQueryOrDie(conn, "SELECT count(*) AS nsub,"
+ "COUNT(CASE WHEN subretaindeadtuples THEN 1 END) > 0 AS retain_dead_tuples "
+ "FROM pg_catalog.pg_subscription");
+ else
+ res = executeQueryOrDie(conn, "SELECT count(*) AS nsub,"
+ "'f' AS retain_dead_tuples "
+ "FROM pg_catalog.pg_subscription");
+
+ i_nsub = PQfnumber(res, "nsub");
+ i_retain_dead_tuples = PQfnumber(res, "retain_dead_tuples");
+
+ cluster->nsubs = atoi(PQgetvalue(res, 0, i_nsub));
+ cluster->sub_retain_dead_tuples = (strcmp(PQgetvalue(res, 0, i_retain_dead_tuples), "t") == 0);
PQclear(res);
PQfinish(conn);
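Both hunks above apply the same resolution rule to the pg_tablespace_location() result; a sketch with a hypothetical helper name (the patch inlines this logic at each call site):

    /*
     * pg_tablespace_location() returns "" for the cluster default location,
     * an absolute path for regular tablespaces, and a path relative to the
     * data directory for in-place tablespaces.
     */
    static char *
    resolve_tablespace_path(const char *pgdata, const char *spcloc)
    {
        if (spcloc[0] == '\0' || is_absolute_path(spcloc))
            return pg_strdup(spcloc);
        return psprintf("%s/%s", pgdata, spcloc);   /* in-place */
    }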
diff --git a/src/bin/pg_upgrade/parallel.c b/src/bin/pg_upgrade/parallel.c
index 056aa2edaee..6d7941844a7 100644
--- a/src/bin/pg_upgrade/parallel.c
+++ b/src/bin/pg_upgrade/parallel.c
@@ -40,6 +40,7 @@ typedef struct
char *old_pgdata;
char *new_pgdata;
char *old_tablespace;
+ char *new_tablespace;
} transfer_thread_arg;
static exec_thread_arg **exec_thread_args;
@@ -171,7 +172,7 @@ win32_exec_prog(exec_thread_arg *args)
void
parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
char *old_pgdata, char *new_pgdata,
- char *old_tablespace)
+ char *old_tablespace, char *new_tablespace)
{
#ifndef WIN32
pid_t child;
@@ -181,7 +182,7 @@ parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
#endif
if (user_opts.jobs <= 1)
- transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, NULL);
+ transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, NULL, NULL);
else
{
/* parallel */
@@ -225,7 +226,7 @@ parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
if (child == 0)
{
transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata,
- old_tablespace);
+ old_tablespace, new_tablespace);
/* if we take another exit path, it will be non-zero */
/* use _exit to skip atexit() functions */
_exit(0);
@@ -246,6 +247,7 @@ parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
new_arg->new_pgdata = pg_strdup(new_pgdata);
pg_free(new_arg->old_tablespace);
new_arg->old_tablespace = old_tablespace ? pg_strdup(old_tablespace) : NULL;
+ new_arg->new_tablespace = new_tablespace ? pg_strdup(new_tablespace) : NULL;
child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_transfer_all_new_dbs,
new_arg, 0, NULL);
@@ -263,7 +265,8 @@ DWORD
win32_transfer_all_new_dbs(transfer_thread_arg *args)
{
transfer_all_new_dbs(args->old_db_arr, args->new_db_arr, args->old_pgdata,
- args->new_pgdata, args->old_tablespace);
+ args->new_pgdata, args->old_tablespace,
+ args->new_tablespace);
/* terminates thread */
return 0;
diff --git a/src/bin/pg_upgrade/pg_upgrade.c b/src/bin/pg_upgrade/pg_upgrade.c
index 536e49d2616..d5cd5bf0b3a 100644
--- a/src/bin/pg_upgrade/pg_upgrade.c
+++ b/src/bin/pg_upgrade/pg_upgrade.c
@@ -67,6 +67,7 @@ static void set_frozenxids(bool minmxid_only);
static void make_outputdirs(char *pgdata);
static void setup(char *argv0);
static void create_logical_replication_slots(void);
+static void create_conflict_detection_slot(void);
ClusterInfo old_cluster,
new_cluster;
@@ -88,6 +89,7 @@ int
main(int argc, char **argv)
{
char *deletion_script_file_name = NULL;
+ bool migrate_logical_slots;
/*
* pg_upgrade doesn't currently use common/logging.c, but initialize it
@@ -198,18 +200,39 @@ main(int argc, char **argv)
new_cluster.pgdata);
check_ok();
+ migrate_logical_slots = count_old_cluster_logical_slots();
+
/*
- * Migrate the logical slots to the new cluster. Note that we need to do
- * this after resetting WAL because otherwise the required WAL would be
- * removed and slots would become unusable. There is a possibility that
- * background processes might generate some WAL before we could create the
- * slots in the new cluster but we can ignore that WAL as that won't be
- * required downstream.
+ * Migrate replication slots to the new cluster.
+ *
+ * Note that we must migrate logical slots after resetting WAL because
+ * otherwise the required WAL would be removed and slots would become
+ * unusable. There is a possibility that background processes might
+ * generate some WAL before we could create the slots in the new cluster
+ * but we can ignore that WAL as that won't be required downstream.
+ *
+ * The conflict detection slot is not affected by these WAL concerns, as
+ * it only retains dead tuples. It is created here for consistency.
+ * Note that the new conflict detection slot uses the latest transaction
+ * ID as xmin, so it cannot protect dead tuples that existed before the
+ * upgrade. Additionally, commit timestamps and origin data are not
+ * preserved during the upgrade. So, even after creating the slot, the
+ * upgraded subscriber may be unable to detect conflicts or log relevant
+ * commit timestamps and origins when applying changes from the publisher
+ * occurred before the upgrade especially if those changes were not
+ * replicated. It can only protect tuples that might be deleted after the
+ * new cluster starts.
*/
- if (count_old_cluster_logical_slots())
+ if (migrate_logical_slots || old_cluster.sub_retain_dead_tuples)
{
start_postmaster(&new_cluster, true);
- create_logical_replication_slots();
+
+ if (migrate_logical_slots)
+ create_logical_replication_slots();
+
+ if (old_cluster.sub_retain_dead_tuples)
+ create_conflict_detection_slot();
+
stop_postmaster(false);
}
@@ -1025,3 +1048,24 @@ create_logical_replication_slots(void)
return;
}
+
+/*
+ * create_conflict_detection_slot()
+ *
+ * Create a replication slot to retain information necessary for conflict
+ * detection such as dead tuples, commit timestamps, and origins, for migrated
+ * subscriptions with retain_dead_tuples enabled.
+ */
+static void
+create_conflict_detection_slot(void)
+{
+ PGconn *conn_new_template1;
+
+ prep_status("Creating the replication conflict detection slot");
+
+ conn_new_template1 = connectToServer(&new_cluster, "template1");
+ PQclear(executeQueryOrDie(conn_new_template1, "SELECT pg_catalog.binary_upgrade_create_conflict_detection_slot()"));
+ PQfinish(conn_new_template1);
+
+ check_ok();
+}
diff --git a/src/bin/pg_upgrade/pg_upgrade.h b/src/bin/pg_upgrade/pg_upgrade.h
index 69c965bb7d0..0ef47be0dc1 100644
--- a/src/bin/pg_upgrade/pg_upgrade.h
+++ b/src/bin/pg_upgrade/pg_upgrade.h
@@ -300,8 +300,12 @@ typedef struct
uint32 major_version; /* PG_VERSION of cluster */
char major_version_str[64]; /* string PG_VERSION of cluster */
uint32 bin_version; /* version returned from pg_ctl */
+ char **tablespaces; /* tablespace directories */
+ int num_tablespaces;
const char *tablespace_suffix; /* directory specification */
int nsubs; /* number of subscriptions */
+ bool sub_retain_dead_tuples; /* whether a subscription enables
+ * retain_dead_tuples. */
} ClusterInfo;
@@ -354,8 +358,6 @@ typedef struct
const char *progname; /* complete pathname for this program */
char *user; /* username for clusters */
bool user_specified; /* user specified on command-line */
- char **old_tablespaces; /* tablespaces */
- int num_old_tablespaces;
LibraryInfo *libraries; /* loadable libraries */
int num_libraries;
ClusterInfo *running_cluster;
@@ -441,7 +443,7 @@ FileNameMap *gen_db_file_maps(DbInfo *old_db,
const char *new_pgdata);
void get_db_rel_and_slot_infos(ClusterInfo *cluster);
int count_old_cluster_logical_slots(void);
-void get_subscription_count(ClusterInfo *cluster);
+void get_subscription_info(ClusterInfo *cluster);
/* option.c */
@@ -455,7 +457,7 @@ void transfer_all_new_tablespaces(DbInfoArr *old_db_arr,
DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata);
void transfer_all_new_dbs(DbInfoArr *old_db_arr,
DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata,
- char *old_tablespace);
+ char *old_tablespace, char *new_tablespace);
/* tablespace.c */
@@ -503,7 +505,7 @@ void parallel_exec_prog(const char *log_file, const char *opt_log_file,
const char *fmt,...) pg_attribute_printf(3, 4);
void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
char *old_pgdata, char *new_pgdata,
- char *old_tablespace);
+ char *old_tablespace, char *new_tablespace);
bool reap_child(bool wait_for_child);
/* task.c */
diff --git a/src/bin/pg_upgrade/relfilenumber.c b/src/bin/pg_upgrade/relfilenumber.c
index 2959c07f0b8..38c17ceabf2 100644
--- a/src/bin/pg_upgrade/relfilenumber.c
+++ b/src/bin/pg_upgrade/relfilenumber.c
@@ -17,7 +17,7 @@
#include "common/logging.h"
#include "pg_upgrade.h"
-static void transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace);
+static void transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace, char *new_tablespace);
static void transfer_relfile(FileNameMap *map, const char *type_suffix, bool vm_must_add_frozenbit);
/*
@@ -136,21 +136,22 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
*/
if (user_opts.jobs <= 1)
parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
- new_pgdata, NULL);
+ new_pgdata, NULL, NULL);
else
{
int tblnum;
/* transfer default tablespace */
parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
- new_pgdata, old_pgdata);
+ new_pgdata, old_pgdata, new_pgdata);
- for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+ for (tblnum = 0; tblnum < old_cluster.num_tablespaces; tblnum++)
parallel_transfer_all_new_dbs(old_db_arr,
new_db_arr,
old_pgdata,
new_pgdata,
- os_info.old_tablespaces[tblnum]);
+ old_cluster.tablespaces[tblnum],
+ new_cluster.tablespaces[tblnum]);
/* reap all children */
while (reap_child(true) == true)
;
@@ -169,7 +170,8 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
*/
void
transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
- char *old_pgdata, char *new_pgdata, char *old_tablespace)
+ char *old_pgdata, char *new_pgdata,
+ char *old_tablespace, char *new_tablespace)
{
int old_dbnum,
new_dbnum;
@@ -204,7 +206,7 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
new_pgdata);
if (n_maps)
{
- transfer_single_new_db(mappings, n_maps, old_tablespace);
+ transfer_single_new_db(mappings, n_maps, old_tablespace, new_tablespace);
}
/* We allocate something even for n_maps == 0 */
pg_free(mappings);
@@ -234,10 +236,10 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
* moved_db_dir: Destination for the pg_restore-generated database directory.
*/
static bool
-prepare_for_swap(const char *old_tablespace, Oid db_oid,
- char *old_catalog_dir, char *new_db_dir, char *moved_db_dir)
+prepare_for_swap(const char *old_tablespace, const char *new_tablespace,
+ Oid db_oid, char *old_catalog_dir, char *new_db_dir,
+ char *moved_db_dir)
{
- const char *new_tablespace;
const char *old_tblspc_suffix;
const char *new_tblspc_suffix;
char old_tblspc[MAXPGPATH];
@@ -247,24 +249,14 @@ prepare_for_swap(const char *old_tablespace, Oid db_oid,
struct stat st;
if (strcmp(old_tablespace, old_cluster.pgdata) == 0)
- {
- new_tablespace = new_cluster.pgdata;
- new_tblspc_suffix = "/base";
old_tblspc_suffix = "/base";
- }
else
- {
- /*
- * XXX: The below line is a hack to deal with the fact that we
- * presently don't have an easy way to find the corresponding new
- * tablespace's path. This will need to be fixed if/when we add
- * pg_upgrade support for in-place tablespaces.
- */
- new_tablespace = old_tablespace;
+ old_tblspc_suffix = old_cluster.tablespace_suffix;
+ if (strcmp(new_tablespace, new_cluster.pgdata) == 0)
+ new_tblspc_suffix = "/base";
+ else
new_tblspc_suffix = new_cluster.tablespace_suffix;
- old_tblspc_suffix = old_cluster.tablespace_suffix;
- }
/* Old and new cluster paths. */
snprintf(old_tblspc, sizeof(old_tblspc), "%s%s", old_tablespace, old_tblspc_suffix);
@@ -290,19 +282,19 @@ prepare_for_swap(const char *old_tablespace, Oid db_oid,
/* Create directory for stuff that is moved aside. */
if (pg_mkdir_p(moved_tblspc, pg_dir_create_mode) != 0 && errno != EEXIST)
- pg_fatal("could not create directory \"%s\"", moved_tblspc);
+ pg_fatal("could not create directory \"%s\": %m", moved_tblspc);
/* Create directory for old catalog files. */
if (pg_mkdir_p(old_catalog_dir, pg_dir_create_mode) != 0)
- pg_fatal("could not create directory \"%s\"", old_catalog_dir);
+ pg_fatal("could not create directory \"%s\": %m", old_catalog_dir);
/* Move the new cluster's database directory aside. */
if (rename(new_db_dir, moved_db_dir) != 0)
- pg_fatal("could not rename \"%s\" to \"%s\"", new_db_dir, moved_db_dir);
+ pg_fatal("could not rename directory \"%s\" to \"%s\": %m", new_db_dir, moved_db_dir);
/* Move the old cluster's database directory into place. */
if (rename(old_db_dir, new_db_dir) != 0)
- pg_fatal("could not rename \"%s\" to \"%s\"", old_db_dir, new_db_dir);
+ pg_fatal("could not rename directory \"%s\" to \"%s\": %m", old_db_dir, new_db_dir);
return true;
}
@@ -390,7 +382,7 @@ swap_catalog_files(FileNameMap *maps, int size, const char *old_catalog_dir,
snprintf(dest, sizeof(dest), "%s/%s", old_catalog_dir, de->d_name);
if (rename(path, dest) != 0)
- pg_fatal("could not rename \"%s\" to \"%s\": %m", path, dest);
+ pg_fatal("could not rename file \"%s\" to \"%s\": %m", path, dest);
}
if (errno)
pg_fatal("could not read directory \"%s\": %m", new_db_dir);
@@ -417,7 +409,7 @@ swap_catalog_files(FileNameMap *maps, int size, const char *old_catalog_dir,
snprintf(dest, sizeof(dest), "%s/%s", new_db_dir, de->d_name);
if (rename(path, dest) != 0)
- pg_fatal("could not rename \"%s\" to \"%s\": %m", path, dest);
+ pg_fatal("could not rename file \"%s\" to \"%s\": %m", path, dest);
/*
* We don't fsync() the database files in the file synchronization
@@ -450,7 +442,7 @@ swap_catalog_files(FileNameMap *maps, int size, const char *old_catalog_dir,
* during pg_restore.
*/
static void
-do_swap(FileNameMap *maps, int size, char *old_tablespace)
+do_swap(FileNameMap *maps, int size, char *old_tablespace, char *new_tablespace)
{
char old_catalog_dir[MAXPGPATH];
char new_db_dir[MAXPGPATH];
@@ -470,21 +462,23 @@ do_swap(FileNameMap *maps, int size, char *old_tablespace)
*/
if (old_tablespace)
{
- if (prepare_for_swap(old_tablespace, maps[0].db_oid,
+ if (prepare_for_swap(old_tablespace, new_tablespace, maps[0].db_oid,
old_catalog_dir, new_db_dir, moved_db_dir))
swap_catalog_files(maps, size,
old_catalog_dir, new_db_dir, moved_db_dir);
}
else
{
- if (prepare_for_swap(old_cluster.pgdata, maps[0].db_oid,
+ if (prepare_for_swap(old_cluster.pgdata, new_cluster.pgdata, maps[0].db_oid,
old_catalog_dir, new_db_dir, moved_db_dir))
swap_catalog_files(maps, size,
old_catalog_dir, new_db_dir, moved_db_dir);
- for (int tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+ for (int tblnum = 0; tblnum < old_cluster.num_tablespaces; tblnum++)
{
- if (prepare_for_swap(os_info.old_tablespaces[tblnum], maps[0].db_oid,
+ if (prepare_for_swap(old_cluster.tablespaces[tblnum],
+ new_cluster.tablespaces[tblnum],
+ maps[0].db_oid,
old_catalog_dir, new_db_dir, moved_db_dir))
swap_catalog_files(maps, size,
old_catalog_dir, new_db_dir, moved_db_dir);
@@ -498,7 +492,8 @@ do_swap(FileNameMap *maps, int size, char *old_tablespace)
* create links for mappings stored in "maps" array.
*/
static void
-transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace)
+transfer_single_new_db(FileNameMap *maps, int size,
+ char *old_tablespace, char *new_tablespace)
{
int mapnum;
bool vm_must_add_frozenbit = false;
@@ -520,7 +515,7 @@ transfer_single_new_db(FileNameMap *maps, int size, char *old_tablespace)
*/
Assert(!vm_must_add_frozenbit);
- do_swap(maps, size, old_tablespace);
+ do_swap(maps, size, old_tablespace, new_tablespace);
return;
}
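For orientation, a sketch of the directory layout prepare_for_swap() pairs up after this change (variable names follow the patch; the db_dir lines are illustrative assumptions about the layout, not patch code):

    /*
     * Each cluster's database directory is <tablespace><suffix>/<db_oid>,
     * where the suffix is "/base" for the default tablespace and the
     * version-specific tablespace_suffix otherwise. Choosing the old and
     * new suffixes independently is what lets in-place tablespaces, whose
     * absolute paths differ between clusters, work here.
     */
    snprintf(old_tblspc, sizeof(old_tblspc), "%s%s", old_tablespace, old_tblspc_suffix);
    snprintf(new_tblspc, sizeof(new_tblspc), "%s%s", new_tablespace, new_tblspc_suffix);
    snprintf(old_db_dir, sizeof(old_db_dir), "%s/%u", old_tblspc, db_oid);
    snprintf(new_db_dir, sizeof(new_db_dir), "%s/%u", new_tblspc, db_oid);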
diff --git a/src/bin/pg_upgrade/server.c b/src/bin/pg_upgrade/server.c
index 873e5b5117b..7eb15bc7d5a 100644
--- a/src/bin/pg_upgrade/server.c
+++ b/src/bin/pg_upgrade/server.c
@@ -242,24 +242,6 @@ start_postmaster(ClusterInfo *cluster, bool report_and_exit_on_error)
appendPQExpBufferStr(&pgoptions, " -c synchronous_commit=off -c fsync=off -c full_page_writes=off");
/*
- * Use max_slot_wal_keep_size as -1 to prevent the WAL removal by the
- * checkpointer process. If WALs required by logical replication slots
- * are removed, the slots are unusable. This setting prevents the
- * invalidation of slots during the upgrade. We set this option when
- * cluster is PG17 or later because logical replication slots can only be
- * migrated since then. Besides, max_slot_wal_keep_size is added in PG13.
- */
- if (GET_MAJOR_VERSION(cluster->major_version) >= 1700)
- appendPQExpBufferStr(&pgoptions, " -c max_slot_wal_keep_size=-1");
-
- /*
- * Use idle_replication_slot_timeout=0 to prevent slot invalidation due to
- * idle_timeout by checkpointer process during upgrade.
- */
- if (GET_MAJOR_VERSION(cluster->major_version) >= 1800)
- appendPQExpBufferStr(&pgoptions, " -c idle_replication_slot_timeout=0");
-
- /*
* Use -b to disable autovacuum and logical replication launcher
* (effective in PG17 or later for the latter).
*/
diff --git a/src/bin/pg_upgrade/t/004_subscription.pl b/src/bin/pg_upgrade/t/004_subscription.pl
index c545abf6581..77387be0f9d 100644
--- a/src/bin/pg_upgrade/t/004_subscription.pl
+++ b/src/bin/pg_upgrade/t/004_subscription.pl
@@ -22,13 +22,13 @@ $publisher->start;
# Initialize the old subscriber node
my $old_sub = PostgreSQL::Test::Cluster->new('old_sub');
-$old_sub->init;
+$old_sub->init(allows_streaming => 'physical');
$old_sub->start;
my $oldbindir = $old_sub->config_data('--bindir');
# Initialize the new subscriber
my $new_sub = PostgreSQL::Test::Cluster->new('new_sub');
-$new_sub->init;
+$new_sub->init(allows_streaming => 'physical');
my $newbindir = $new_sub->config_data('--bindir');
# In a VPATH build, we'll be started in the source directory, but we want
@@ -53,7 +53,8 @@ $old_sub->safe_psql('postgres',
$old_sub->stop;
-$new_sub->append_conf('postgresql.conf', "max_active_replication_origins = 0");
+$new_sub->append_conf('postgresql.conf',
+ "max_active_replication_origins = 0");
# pg_upgrade will fail because the new cluster has insufficient
# max_active_replication_origins.
@@ -80,7 +81,56 @@ command_checks_all(
);
# Reset max_active_replication_origins
-$new_sub->append_conf('postgresql.conf', "max_active_replication_origins = 10");
+$new_sub->append_conf('postgresql.conf',
+ "max_active_replication_origins = 10");
+
+# Cleanup
+$publisher->safe_psql('postgres', "DROP PUBLICATION regress_pub1");
+$old_sub->start;
+$old_sub->safe_psql('postgres', "DROP SUBSCRIPTION regress_sub1;");
+
+# ------------------------------------------------------
+# Check that pg_upgrade fails when max_replication_slots configured in the new
+# cluster is less than the number of logical slots in the old cluster + 1 when
+# subscription's retain_dead_tuples option is enabled.
+# ------------------------------------------------------
+# It is sufficient to use a disabled subscription to test the upgrade failure.
+
+$publisher->safe_psql('postgres', "CREATE PUBLICATION regress_pub1");
+$old_sub->safe_psql('postgres',
+ "CREATE SUBSCRIPTION regress_sub1 CONNECTION '$connstr' PUBLICATION regress_pub1 WITH (enabled = false, retain_dead_tuples = true)"
+);
+
+$old_sub->stop;
+
+$new_sub->append_conf('postgresql.conf', 'max_replication_slots = 0');
+
+# pg_upgrade will fail because the new cluster has insufficient
+# max_replication_slots.
+command_checks_all(
+ [
+ 'pg_upgrade',
+ '--no-sync',
+ '--old-datadir' => $old_sub->data_dir,
+ '--new-datadir' => $new_sub->data_dir,
+ '--old-bindir' => $oldbindir,
+ '--new-bindir' => $newbindir,
+ '--socketdir' => $new_sub->host,
+ '--old-port' => $old_sub->port,
+ '--new-port' => $new_sub->port,
+ $mode,
+ '--check',
+ ],
+ 1,
+ [
+ qr/"max_replication_slots" \(0\) must be greater than or equal to the number of logical replication slots on the old cluster plus one additional slot required for retaining conflict detection information \(1\)/
+ ],
+ [qr//],
+ 'run of pg_upgrade where the new cluster has insufficient max_replication_slots'
+);
+
+# Reset max_replication_slots
+$new_sub->append_conf('postgresql.conf', 'max_replication_slots = 10');
# Cleanup
$publisher->safe_psql('postgres', "DROP PUBLICATION regress_pub1");
@@ -198,8 +248,9 @@ $old_sub->safe_psql(
rmtree($new_sub->data_dir . "/pg_upgrade_output.d");
# Verify that the upgrade should be successful with tables in 'ready'/'init'
-# state along with retaining the replication origin's remote lsn, subscription's
-# running status, and failover option.
+# state along with retaining the replication origin's remote lsn,
+# subscription's running status, failover option, and retain_dead_tuples
+# option.
$publisher->safe_psql(
'postgres', qq[
CREATE TABLE tab_upgraded1(id int);
@@ -209,7 +260,7 @@ $publisher->safe_psql(
$old_sub->safe_psql(
'postgres', qq[
CREATE TABLE tab_upgraded1(id int);
- CREATE SUBSCRIPTION regress_sub4 CONNECTION '$connstr' PUBLICATION regress_pub4 WITH (failover = true);
+ CREATE SUBSCRIPTION regress_sub4 CONNECTION '$connstr' PUBLICATION regress_pub4 WITH (failover = true, retain_dead_tuples = true);
]);
# Wait till the table tab_upgraded1 reaches 'ready' state
@@ -268,7 +319,8 @@ $new_sub->append_conf('postgresql.conf',
# Check that pg_upgrade is successful when all tables are in ready or in
# init state (tab_upgraded1 table is in ready state and tab_upgraded2 table is
# in init state) along with retaining the replication origin's remote lsn,
-# subscription's running status, and failover option.
+# subscription's running status, failover option, and retain_dead_tuples
+# option.
# ------------------------------------------------------
command_ok(
[
@@ -291,7 +343,8 @@ ok( !-d $new_sub->data_dir . "/pg_upgrade_output.d",
# ------------------------------------------------------
# Check that the data inserted to the publisher when the new subscriber is down
# will be replicated once it is started. Also check that the old subscription
-# states and relations origins are all preserved.
+# states and relations origins are all preserved, and that the conflict
+# detection slot is created.
# ------------------------------------------------------
$publisher->safe_psql(
'postgres', qq[
@@ -301,15 +354,16 @@ $publisher->safe_psql(
$new_sub->start;
-# The subscription's running status and failover option should be preserved
-# in the upgraded instance. So regress_sub4 should still have subenabled and
-# subfailover set to true, while regress_sub5 should have both set to false.
+# The subscription's running status, failover option, and retain_dead_tuples
+# option should be preserved in the upgraded instance. So regress_sub4 should
+# still have subenabled, subfailover, and subretaindeadtuples set to true,
+# while regress_sub5 should have all three set to false.
$result = $new_sub->safe_psql('postgres',
- "SELECT subname, subenabled, subfailover FROM pg_subscription ORDER BY subname"
+ "SELECT subname, subenabled, subfailover, subretaindeadtuples FROM pg_subscription ORDER BY subname"
);
-is( $result, qq(regress_sub4|t|t
-regress_sub5|f|f),
- "check that the subscription's running status and failover are preserved"
+is( $result, qq(regress_sub4|t|t|t
+regress_sub5|f|f|f),
+ "check that the subscription's running status, failover, and retain_dead_tuples are preserved"
);
# Subscription relations should be preserved
@@ -328,6 +382,11 @@ $result = $new_sub->safe_psql('postgres',
);
is($result, qq($remote_lsn), "remote_lsn should have been preserved");
+# The conflict detection slot should be created
+$result = $new_sub->safe_psql('postgres',
+ "SELECT xmin IS NOT NULL from pg_replication_slots WHERE slot_name = 'pg_conflict_detection'");
+is($result, qq(t), "conflict detection slot exists");
+
# Resume the initial sync and wait until all tables of subscription
# 'regress_sub5' are synchronized
$new_sub->append_conf('postgresql.conf',
diff --git a/src/bin/pg_upgrade/t/005_char_signedness.pl b/src/bin/pg_upgrade/t/005_char_signedness.pl
index 17fa0d48b15..cd8cff6f513 100644
--- a/src/bin/pg_upgrade/t/005_char_signedness.pl
+++ b/src/bin/pg_upgrade/t/005_char_signedness.pl
@@ -65,7 +65,7 @@ command_checks_all(
$mode
],
1,
- [qr/--set-char-signedness option cannot be used/],
+ [qr/option --set-char-signedness cannot be used/],
[],
'--set-char-signedness option cannot be used for upgrading from v18 or later'
);
diff --git a/src/bin/pg_upgrade/t/006_transfer_modes.pl b/src/bin/pg_upgrade/t/006_transfer_modes.pl
index 550a63fdf7d..348f4021462 100644
--- a/src/bin/pg_upgrade/t/006_transfer_modes.pl
+++ b/src/bin/pg_upgrade/t/006_transfer_modes.pl
@@ -13,7 +13,8 @@ sub test_mode
{
my ($mode) = @_;
- my $old = PostgreSQL::Test::Cluster->new('old', install_path => $ENV{oldinstall});
+ my $old =
+ PostgreSQL::Test::Cluster->new('old', install_path => $ENV{oldinstall});
my $new = PostgreSQL::Test::Cluster->new('new');
# --swap can't be used to upgrade from versions older than 10, so just skip
@@ -37,24 +38,50 @@ sub test_mode
}
$new->init();
+ # allow_in_place_tablespaces is available as far back as v10.
+ if ($old->pg_version >= 10)
+ {
+ $new->append_conf('postgresql.conf', "allow_in_place_tablespaces = true");
+ $old->append_conf('postgresql.conf', "allow_in_place_tablespaces = true");
+ }
+
# Create a small variety of simple test objects on the old cluster. We'll
# check that these reach the new version after upgrading.
$old->start;
- $old->safe_psql('postgres', "CREATE TABLE test1 AS SELECT generate_series(1, 100)");
+ $old->safe_psql('postgres',
+ "CREATE TABLE test1 AS SELECT generate_series(1, 100)");
$old->safe_psql('postgres', "CREATE DATABASE testdb1");
- $old->safe_psql('testdb1', "CREATE TABLE test2 AS SELECT generate_series(200, 300)");
+ $old->safe_psql('testdb1',
+ "CREATE TABLE test2 AS SELECT generate_series(200, 300)");
$old->safe_psql('testdb1', "VACUUM FULL test2");
$old->safe_psql('testdb1', "CREATE SEQUENCE testseq START 5432");
- # For cross-version tests, we can also check that pg_upgrade handles
- # tablespaces.
+ # If an old installation is provided, we can test non-in-place tablespaces.
if (defined($ENV{oldinstall}))
{
my $tblspc = PostgreSQL::Test::Utils::tempdir_short();
- $old->safe_psql('postgres', "CREATE TABLESPACE test_tblspc LOCATION '$tblspc'");
- $old->safe_psql('postgres', "CREATE DATABASE testdb2 TABLESPACE test_tblspc");
- $old->safe_psql('postgres', "CREATE TABLE test3 TABLESPACE test_tblspc AS SELECT generate_series(300, 401)");
- $old->safe_psql('testdb2', "CREATE TABLE test4 AS SELECT generate_series(400, 502)");
+ $old->safe_psql('postgres',
+ "CREATE TABLESPACE test_tblspc LOCATION '$tblspc'");
+ $old->safe_psql('postgres',
+ "CREATE DATABASE testdb2 TABLESPACE test_tblspc");
+ $old->safe_psql('postgres',
+ "CREATE TABLE test3 TABLESPACE test_tblspc AS SELECT generate_series(300, 401)"
+ );
+ $old->safe_psql('testdb2',
+ "CREATE TABLE test4 AS SELECT generate_series(400, 502)");
+ }
+
+ # If the old cluster is >= v10, we can test in-place tablespaces.
+ if ($old->pg_version >= 10)
+ {
+ $old->safe_psql('postgres',
+ "CREATE TABLESPACE inplc_tblspc LOCATION ''");
+ $old->safe_psql('postgres',
+ "CREATE DATABASE testdb3 TABLESPACE inplc_tblspc");
+ $old->safe_psql('postgres',
+ "CREATE TABLE test5 TABLESPACE inplc_tblspc AS SELECT generate_series(503, 606)");
+ $old->safe_psql('testdb3',
+ "CREATE TABLE test6 AS SELECT generate_series(607, 711)");
}
$old->stop;
@@ -86,15 +113,25 @@ sub test_mode
$result = $new->safe_psql('testdb1', "SELECT nextval('testseq')");
is($result, '5432', "sequence data after pg_upgrade $mode");
- # For cross-version tests, we should have some objects in a non-default
- # tablespace.
+ # Tests for non-in-place tablespaces.
if (defined($ENV{oldinstall}))
{
- $result = $new->safe_psql('postgres', "SELECT COUNT(*) FROM test3");
+ $result =
+ $new->safe_psql('postgres', "SELECT COUNT(*) FROM test3");
is($result, '102', "test3 data after pg_upgrade $mode");
- $result = $new->safe_psql('testdb2', "SELECT COUNT(*) FROM test4");
+ $result =
+ $new->safe_psql('testdb2', "SELECT COUNT(*) FROM test4");
is($result, '103', "test4 data after pg_upgrade $mode");
}
+
+ # Tests for in-place tablespaces.
+ if ($old->pg_version >= 10)
+ {
+ $result = $new->safe_psql('postgres', "SELECT COUNT(*) FROM test5");
+ is($result, '104', "test5 data after pg_upgrade $mode");
+ $result = $new->safe_psql('testdb3', "SELECT COUNT(*) FROM test6");
+ is($result, '105', "test6 data after pg_upgrade $mode");
+ }
$new->stop;
}
diff --git a/src/bin/pg_upgrade/tablespace.c b/src/bin/pg_upgrade/tablespace.c
index 3520a75ba31..151d74e1734 100644
--- a/src/bin/pg_upgrade/tablespace.c
+++ b/src/bin/pg_upgrade/tablespace.c
@@ -23,10 +23,20 @@ init_tablespaces(void)
set_tablespace_directory_suffix(&old_cluster);
set_tablespace_directory_suffix(&new_cluster);
- if (os_info.num_old_tablespaces > 0 &&
+ if (old_cluster.num_tablespaces > 0 &&
strcmp(old_cluster.tablespace_suffix, new_cluster.tablespace_suffix) == 0)
- pg_fatal("Cannot upgrade to/from the same system catalog version when\n"
- "using tablespaces.");
+ {
+ for (int i = 0; i < old_cluster.num_tablespaces; i++)
+ {
+ /*
+ * In-place tablespaces are okay for same-version upgrades because
+ * their paths will differ between clusters.
+ */
+ if (strcmp(old_cluster.tablespaces[i], new_cluster.tablespaces[i]) == 0)
+ pg_fatal("Cannot upgrade to/from the same system catalog version when\n"
+ "using tablespaces.");
+ }
+ }
}
@@ -53,19 +63,48 @@ get_tablespace_paths(void)
res = executeQueryOrDie(conn, "%s", query);
- if ((os_info.num_old_tablespaces = PQntuples(res)) != 0)
- os_info.old_tablespaces =
- (char **) pg_malloc(os_info.num_old_tablespaces * sizeof(char *));
+ old_cluster.num_tablespaces = PQntuples(res);
+ new_cluster.num_tablespaces = PQntuples(res);
+
+ if (PQntuples(res) != 0)
+ {
+ old_cluster.tablespaces =
+ (char **) pg_malloc(old_cluster.num_tablespaces * sizeof(char *));
+ new_cluster.tablespaces =
+ (char **) pg_malloc(new_cluster.num_tablespaces * sizeof(char *));
+ }
else
- os_info.old_tablespaces = NULL;
+ {
+ old_cluster.tablespaces = NULL;
+ new_cluster.tablespaces = NULL;
+ }
i_spclocation = PQfnumber(res, "spclocation");
- for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+ for (tblnum = 0; tblnum < old_cluster.num_tablespaces; tblnum++)
{
struct stat statBuf;
+ char *spcloc = PQgetvalue(res, tblnum, i_spclocation);
- os_info.old_tablespaces[tblnum] = pg_strdup(PQgetvalue(res, tblnum, i_spclocation));
+ /*
+ * For now, we do not expect non-in-place tablespaces to move during
+ * upgrade. If that changes, it will likely become necessary to run
+ * the above query on the new cluster, too.
+ *
+ * pg_tablespace_location() returns absolute paths for non-in-place
+ * tablespaces and relative paths for in-place ones, so we use
+ * is_absolute_path() to distinguish between them.
+ */
+ if (is_absolute_path(PQgetvalue(res, tblnum, i_spclocation)))
+ {
+ old_cluster.tablespaces[tblnum] = pg_strdup(spcloc);
+ new_cluster.tablespaces[tblnum] = old_cluster.tablespaces[tblnum];
+ }
+ else
+ {
+ old_cluster.tablespaces[tblnum] = psprintf("%s/%s", old_cluster.pgdata, spcloc);
+ new_cluster.tablespaces[tblnum] = psprintf("%s/%s", new_cluster.pgdata, spcloc);
+ }
/*
* Check that the tablespace path exists and is a directory.
@@ -76,21 +115,21 @@ get_tablespace_paths(void)
* that contains user tablespaces is moved as part of pg_upgrade
* preparation and the symbolic links are not updated.
*/
- if (stat(os_info.old_tablespaces[tblnum], &statBuf) != 0)
+ if (stat(old_cluster.tablespaces[tblnum], &statBuf) != 0)
{
if (errno == ENOENT)
report_status(PG_FATAL,
"tablespace directory \"%s\" does not exist",
- os_info.old_tablespaces[tblnum]);
+ old_cluster.tablespaces[tblnum]);
else
report_status(PG_FATAL,
"could not stat tablespace directory \"%s\": %m",
- os_info.old_tablespaces[tblnum]);
+ old_cluster.tablespaces[tblnum]);
}
if (!S_ISDIR(statBuf.st_mode))
report_status(PG_FATAL,
"tablespace path \"%s\" is not a directory",
- os_info.old_tablespaces[tblnum]);
+ old_cluster.tablespaces[tblnum]);
}
PQclear(res);
diff --git a/src/bin/pg_upgrade/task.c b/src/bin/pg_upgrade/task.c
index a48d5691390..ee0e2457152 100644
--- a/src/bin/pg_upgrade/task.c
+++ b/src/bin/pg_upgrade/task.c
@@ -192,8 +192,7 @@ start_conn(const ClusterInfo *cluster, UpgradeTaskSlot *slot)
slot->conn = PQconnectStart(conn_opts.data);
if (!slot->conn)
- pg_fatal("failed to create connection with connection string: \"%s\"",
- conn_opts.data);
+ pg_fatal("out of memory");
termPQExpBuffer(&conn_opts);
}
@@ -402,7 +401,7 @@ wait_on_slots(UpgradeTaskSlot *slots, int numslots)
* If we found socket(s) to wait on, wait.
*/
if (select_loop(maxFd, &input, &output) == -1)
- pg_fatal("select() failed: %m");
+ pg_fatal("%s() failed: %m", "select");
/*
* Mark which sockets appear to be ready.
diff --git a/src/bin/pg_verifybackup/meson.build b/src/bin/pg_verifybackup/meson.build
index 9567d55500d..f45ea790d8e 100644
--- a/src/bin/pg_verifybackup/meson.build
+++ b/src/bin/pg_verifybackup/meson.build
@@ -23,10 +23,10 @@ tests += {
'sd': meson.current_source_dir(),
'bd': meson.current_build_dir(),
'tap': {
- 'env': {'GZIP_PROGRAM': gzip.found() ? gzip.path() : '',
- 'TAR': tar.found() ? tar.path() : '',
- 'LZ4': program_lz4.found() ? program_lz4.path() : '',
- 'ZSTD': program_zstd.found() ? program_zstd.path() : ''},
+ 'env': {'GZIP_PROGRAM': gzip.found() ? gzip.full_path() : '',
+ 'TAR': tar.found() ? tar.full_path() : '',
+ 'LZ4': program_lz4.found() ? program_lz4.full_path() : '',
+ 'ZSTD': program_zstd.found() ? program_zstd.full_path() : ''},
'tests': [
't/001_basic.pl',
't/002_algorithm.pl',
diff --git a/src/bin/pg_verifybackup/pg_verifybackup.c b/src/bin/pg_verifybackup/pg_verifybackup.c
index 48994ef9bc6..5e6c13bb921 100644
--- a/src/bin/pg_verifybackup/pg_verifybackup.c
+++ b/src/bin/pg_verifybackup/pg_verifybackup.c
@@ -1207,7 +1207,7 @@ parse_required_wal(verifier_context *context, char *pg_waldump_path,
{
char *pg_waldump_cmd;
- pg_waldump_cmd = psprintf("\"%s\" --quiet --path=\"%s\" --timeline=%u --start=%X/%X --end=%X/%X\n",
+ pg_waldump_cmd = psprintf("\"%s\" --quiet --path=\"%s\" --timeline=%u --start=%X/%08X --end=%X/%08X\n",
pg_waldump_path, wal_directory, this_wal_range->tli,
LSN_FORMAT_ARGS(this_wal_range->start_lsn),
LSN_FORMAT_ARGS(this_wal_range->end_lsn));
diff --git a/src/bin/pg_verifybackup/t/008_untar.pl b/src/bin/pg_verifybackup/t/008_untar.pl
index deed3ec247d..bc3d6b352ad 100644
--- a/src/bin/pg_verifybackup/t/008_untar.pl
+++ b/src/bin/pg_verifybackup/t/008_untar.pl
@@ -16,6 +16,22 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1);
$primary->start;
+# Create a file with some random data and an arbitrary size, useful to check
+# the robustness of the compression and decompression logic. The size of the
+# file is chosen to be around 640kB (10240 rows of 64 hex characters each).
+# This has proven to be large enough to detect some issues related to LZ4,
+# and small enough not to impact the runtime of the test significantly.
+my $junk_data = $primary->safe_psql(
+ 'postgres', qq(
+ SELECT string_agg(encode(sha256(i::bytea), 'hex'), '')
+ FROM generate_series(1, 10240) s(i);));
+my $data_dir = $primary->data_dir;
+my $junk_file = "$data_dir/junk";
+open my $jf, '>', $junk_file
+ or die "Could not create junk file: $!";
+print $jf $junk_data;
+close $jf;
+
# Create a tablespace directory.
my $source_ts_path = PostgreSQL::Test::Utils::tempdir_short();
@@ -53,6 +69,12 @@ my @test_configuration = (
'enabled' => check_pg_config("#define USE_LZ4 1")
},
{
+ 'compression_method' => 'lz4',
+ 'backup_flags' => [ '--compress', 'server-lz4:5' ],
+ 'backup_archive' => [ 'base.tar.lz4', "$tsoid.tar.lz4" ],
+ 'enabled' => check_pg_config("#define USE_LZ4 1")
+ },
+ {
'compression_method' => 'zstd',
'backup_flags' => [ '--compress', 'server-zstd' ],
'backup_archive' => [ 'base.tar.zst', "$tsoid.tar.zst" ],
diff --git a/src/bin/pg_verifybackup/t/010_client_untar.pl b/src/bin/pg_verifybackup/t/010_client_untar.pl
index d8d2b06c7ee..b62faeb5acf 100644
--- a/src/bin/pg_verifybackup/t/010_client_untar.pl
+++ b/src/bin/pg_verifybackup/t/010_client_untar.pl
@@ -15,6 +15,22 @@ my $primary = PostgreSQL::Test::Cluster->new('primary');
$primary->init(allows_streaming => 1);
$primary->start;
+# Create a file with some random data and an arbitrary size, useful to check
+# the robustness of the compression and decompression logic. The size of the
+# file is chosen to be around 640kB (10240 rows of 64 hex characters each).
+# This has proven to be large enough to detect some issues related to LZ4,
+# and small enough not to impact the runtime of the test significantly.
+my $junk_data = $primary->safe_psql(
+ 'postgres', qq(
+ SELECT string_agg(encode(sha256(i::bytea), 'hex'), '')
+ FROM generate_series(1, 10240) s(i);));
+my $data_dir = $primary->data_dir;
+my $junk_file = "$data_dir/junk";
+open my $jf, '>', $junk_file
+ or die "Could not create junk file: $!";
+print $jf $junk_data;
+close $jf;
+
my $backup_path = $primary->backup_dir . '/client-backup';
my $extract_path = $primary->backup_dir . '/extracted-backup';
@@ -38,6 +54,12 @@ my @test_configuration = (
'enabled' => check_pg_config("#define USE_LZ4 1")
},
{
+ 'compression_method' => 'lz4',
+ 'backup_flags' => [ '--compress', 'client-lz4:1' ],
+ 'backup_archive' => 'base.tar.lz4',
+ 'enabled' => check_pg_config("#define USE_LZ4 1")
+ },
+ {
'compression_method' => 'zstd',
'backup_flags' => [ '--compress', 'client-zstd:5' ],
'backup_archive' => 'base.tar.zst',
diff --git a/src/bin/pg_waldump/pg_waldump.c b/src/bin/pg_waldump/pg_waldump.c
index 51fb76efc48..13d3ec2f5be 100644
--- a/src/bin/pg_waldump/pg_waldump.c
+++ b/src/bin/pg_waldump/pg_waldump.c
@@ -656,7 +656,7 @@ XLogDumpDisplayStats(XLogDumpConfig *config, XLogStats *stats)
}
total_len = total_rec_len + total_fpi_len;
- printf("WAL statistics between %X/%X and %X/%X:\n",
+ printf("WAL statistics between %X/%08X and %X/%08X:\n",
LSN_FORMAT_ARGS(stats->startptr), LSN_FORMAT_ARGS(stats->endptr));
/*
@@ -904,7 +904,7 @@ main(int argc, char **argv)
config.filter_by_extended = true;
break;
case 'e':
- if (sscanf(optarg, "%X/%X", &xlogid, &xrecoff) != 2)
+ if (sscanf(optarg, "%X/%08X", &xlogid, &xrecoff) != 2)
{
pg_log_error("invalid WAL location: \"%s\"",
optarg);
@@ -1002,7 +1002,7 @@ main(int argc, char **argv)
config.filter_by_extended = true;
break;
case 's':
- if (sscanf(optarg, "%X/%X", &xlogid, &xrecoff) != 2)
+ if (sscanf(optarg, "%X/%08X", &xlogid, &xrecoff) != 2)
{
pg_log_error("invalid WAL location: \"%s\"",
optarg);
@@ -1140,7 +1140,7 @@ main(int argc, char **argv)
XLogSegNoOffsetToRecPtr(segno, 0, WalSegSz, private.startptr);
else if (!XLByteInSeg(private.startptr, segno, WalSegSz))
{
- pg_log_error("start WAL location %X/%X is not inside file \"%s\"",
+ pg_log_error("start WAL location %X/%08X is not inside file \"%s\"",
LSN_FORMAT_ARGS(private.startptr),
fname);
goto bad_argument;
@@ -1182,7 +1182,7 @@ main(int argc, char **argv)
if (!XLByteInSeg(private.endptr, segno, WalSegSz) &&
private.endptr != (segno + 1) * WalSegSz)
{
- pg_log_error("end WAL location %X/%X is not inside file \"%s\"",
+ pg_log_error("end WAL location %X/%08X is not inside file \"%s\"",
LSN_FORMAT_ARGS(private.endptr),
argv[argc - 1]);
goto bad_argument;
@@ -1214,7 +1214,7 @@ main(int argc, char **argv)
first_record = XLogFindNextRecord(xlogreader_state, private.startptr);
if (first_record == InvalidXLogRecPtr)
- pg_fatal("could not find a valid record after %X/%X",
+ pg_fatal("could not find a valid record after %X/%08X",
LSN_FORMAT_ARGS(private.startptr));
/*
@@ -1224,8 +1224,8 @@ main(int argc, char **argv)
*/
if (first_record != private.startptr &&
XLogSegmentOffset(private.startptr, WalSegSz) != 0)
- pg_log_info(ngettext("first record is after %X/%X, at %X/%X, skipping over %u byte",
- "first record is after %X/%X, at %X/%X, skipping over %u bytes",
+ pg_log_info(ngettext("first record is after %X/%08X, at %X/%08X, skipping over %u byte",
+ "first record is after %X/%08X, at %X/%08X, skipping over %u bytes",
(first_record - private.startptr)),
LSN_FORMAT_ARGS(private.startptr),
LSN_FORMAT_ARGS(first_record),
@@ -1309,7 +1309,7 @@ main(int argc, char **argv)
exit(0);
if (errormsg)
- pg_fatal("error in WAL record at %X/%X: %s",
+ pg_fatal("error in WAL record at %X/%08X: %s",
LSN_FORMAT_ARGS(xlogreader_state->ReadRecPtr),
errormsg);
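
All of the changes in this file zero-pad the low half of the LSN to eight hex digits. An illustrative snippet, relying on PostgreSQL's definitions of XLogRecPtr (a uint64) and LSN_FORMAT_ARGS (which splits an LSN into its high and low 32-bit halves):

    XLogRecPtr  lsn = ((uint64) 0x1 << 32) | 0x12345;
    uint32      xlogid;
    uint32      xrecoff;

    /* old format printed "1/12345"; the padded form prints "1/00012345" */
    printf("%X/%08X\n", LSN_FORMAT_ARGS(lsn));

    /* on input, "08" acts as a field width, so both spellings still parse */
    sscanf("1/00012345", "%X/%08X", &xlogid, &xrecoff);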
diff --git a/src/bin/pg_walsummary/t/002_blocks.pl b/src/bin/pg_walsummary/t/002_blocks.pl
index 270332780a4..0f98c7df82e 100644
--- a/src/bin/pg_walsummary/t/002_blocks.pl
+++ b/src/bin/pg_walsummary/t/002_blocks.pl
@@ -47,11 +47,12 @@ EOM
ok($result, "WAL summarization caught up after insert");
# The WAL summarizer should have generated some IO statistics.
-my $stats_reads = $node1->safe_psql(
+$node1->poll_query_until(
'postgres',
- qq{SELECT sum(reads) > 0 FROM pg_stat_io
- WHERE backend_type = 'walsummarizer' AND object = 'wal'});
-is($stats_reads, 't', "WAL summarizer generates statistics for WAL reads");
+ q{SELECT sum(reads) > 0 FROM pg_stat_io
+ WHERE backend_type = 'walsummarizer' AND object = 'wal'})
+ or die
+ "Timed out while waiting for WAL summarizer to generate statistics for WAL reads";
# Find the highest LSN that is summarized on disk.
my $summarized_lsn = $node1->safe_psql('postgres', <<EOM);
diff --git a/src/bin/pgbench/t/002_pgbench_no_server.pl b/src/bin/pgbench/t/002_pgbench_no_server.pl
index f975c73dd75..2cc59cc8140 100644
--- a/src/bin/pgbench/t/002_pgbench_no_server.pl
+++ b/src/bin/pgbench/t/002_pgbench_no_server.pl
@@ -233,21 +233,9 @@ for my $o (@options)
'pgbench option error: ' . $name);
}
-# Help
-pgbench(
- '--help', 0,
- [
- qr{benchmarking tool for PostgreSQL},
- qr{Usage},
- qr{Initialization options:},
- qr{Common options:},
- qr{Report bugs to}
- ],
- [qr{^$}],
- 'pgbench help');
-
-# Version
-pgbench('-V', 0, [qr{^pgbench .PostgreSQL. }], [qr{^$}], 'pgbench version');
+program_help_ok('pgbench');
+program_version_ok('pgbench');
+program_options_handling_ok('pgbench');
# list of builtins
pgbench(
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index 81a5ba844ba..0e00d73487c 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -67,8 +67,8 @@ static backslashResult exec_command_C(PsqlScanState scan_state, bool active_bran
static backslashResult exec_command_connect(PsqlScanState scan_state, bool active_branch);
static backslashResult exec_command_cd(PsqlScanState scan_state, bool active_branch,
const char *cmd);
-static backslashResult exec_command_close(PsqlScanState scan_state, bool active_branch,
- const char *cmd);
+static backslashResult exec_command_close_prepared(PsqlScanState scan_state,
+ bool active_branch, const char *cmd);
static backslashResult exec_command_conninfo(PsqlScanState scan_state, bool active_branch);
static backslashResult exec_command_copy(PsqlScanState scan_state, bool active_branch);
static backslashResult exec_command_copyright(PsqlScanState scan_state, bool active_branch);
@@ -330,8 +330,8 @@ exec_command(const char *cmd,
status = exec_command_connect(scan_state, active_branch);
else if (strcmp(cmd, "cd") == 0)
status = exec_command_cd(scan_state, active_branch, cmd);
- else if (strcmp(cmd, "close") == 0)
- status = exec_command_close(scan_state, active_branch, cmd);
+ else if (strcmp(cmd, "close_prepared") == 0)
+ status = exec_command_close_prepared(scan_state, active_branch, cmd);
else if (strcmp(cmd, "conninfo") == 0)
status = exec_command_conninfo(scan_state, active_branch);
else if (pg_strcasecmp(cmd, "copy") == 0)
@@ -728,10 +728,10 @@ exec_command_cd(PsqlScanState scan_state, bool active_branch, const char *cmd)
}
/*
- * \close -- close a previously prepared statement
+ * \close_prepared -- close a previously prepared statement
*/
static backslashResult
-exec_command_close(PsqlScanState scan_state, bool active_branch, const char *cmd)
+exec_command_close_prepared(PsqlScanState scan_state, bool active_branch, const char *cmd)
{
backslashResult status = PSQL_CMD_SKIP_LINE;
@@ -778,6 +778,7 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch)
int ssl_in_use,
password_used,
gssapi_used;
+ int version_num;
char *paramval;
if (!active_branch)
@@ -793,7 +794,9 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch)
/* Get values for the parameters */
host = PQhost(pset.db);
hostaddr = PQhostaddr(pset.db);
- protocol_version = psprintf("%d", PQprotocolVersion(pset.db));
+ version_num = PQfullProtocolVersion(pset.db);
+ protocol_version = psprintf("%d.%d", version_num / 10000,
+ version_num % 10000);
ssl_in_use = PQsslInUse(pset.db);
password_used = PQconnectionUsedPassword(pset.db);
gssapi_used = PQconnectionUsedGSSAPI(pset.db);
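
PQfullProtocolVersion() encodes the protocol version as major * 10000 + minor, so the division and modulus above split it back into its components. A worked example (30002 is an illustrative return value, meaning protocol 3.2):

    int         version_num = 30002;

    printf("%d.%d\n", version_num / 10000, version_num % 10000);   /* 3.2 */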
@@ -874,11 +877,11 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch)
printTableAddCell(&cont, _("Backend PID"), false, false);
printTableAddCell(&cont, backend_pid, false, false);
- /* TLS Connection */
- printTableAddCell(&cont, _("TLS Connection"), false, false);
+ /* SSL Connection */
+ printTableAddCell(&cont, _("SSL Connection"), false, false);
printTableAddCell(&cont, ssl_in_use ? _("true") : _("false"), false, false);
- /* TLS Information */
+ /* SSL Information */
if (ssl_in_use)
{
char *library,
@@ -895,19 +898,19 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch)
compression = (char *) PQsslAttribute(pset.db, "compression");
alpn = (char *) PQsslAttribute(pset.db, "alpn");
- printTableAddCell(&cont, _("TLS Library"), false, false);
+ printTableAddCell(&cont, _("SSL Library"), false, false);
printTableAddCell(&cont, library ? library : _("unknown"), false, false);
- printTableAddCell(&cont, _("TLS Protocol"), false, false);
+ printTableAddCell(&cont, _("SSL Protocol"), false, false);
printTableAddCell(&cont, protocol ? protocol : _("unknown"), false, false);
- printTableAddCell(&cont, _("TLS Key Bits"), false, false);
+ printTableAddCell(&cont, _("SSL Key Bits"), false, false);
printTableAddCell(&cont, key_bits ? key_bits : _("unknown"), false, false);
- printTableAddCell(&cont, _("TLS Cipher"), false, false);
+ printTableAddCell(&cont, _("SSL Cipher"), false, false);
printTableAddCell(&cont, cipher ? cipher : _("unknown"), false, false);
- printTableAddCell(&cont, _("TLS Compression"), false, false);
+ printTableAddCell(&cont, _("SSL Compression"), false, false);
printTableAddCell(&cont, (compression && strcmp(compression, "off") != 0) ?
_("true") : _("false"), false, false);
@@ -1946,7 +1949,7 @@ exec_command_gexec(PsqlScanState scan_state, bool active_branch)
{
if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF)
{
- pg_log_error("\\gexec not allowed in pipeline mode");
+ pg_log_error("\\%s not allowed in pipeline mode", "gexec");
clean_extended_state();
return PSQL_CMD_ERROR;
}
@@ -1972,7 +1975,7 @@ exec_command_gset(PsqlScanState scan_state, bool active_branch)
if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF)
{
- pg_log_error("\\gset not allowed in pipeline mode");
+ pg_log_error("\\%s not allowed in pipeline mode", "gset");
clean_extended_state();
return PSQL_CMD_ERROR;
}
@@ -3284,7 +3287,7 @@ exec_command_watch(PsqlScanState scan_state, bool active_branch,
if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF)
{
- pg_log_error("\\watch not allowed in pipeline mode");
+ pg_log_error("\\%s not allowed in pipeline mode", "watch");
clean_extended_state();
success = false;
}
@@ -4477,6 +4480,8 @@ SyncVariables(void)
{
char vbuf[32];
const char *server_version;
+ char *service_name;
+ char *service_file;
/* get stuff from connection */
pset.encoding = PQclientEncoding(pset.db);
@@ -4486,12 +4491,21 @@ SyncVariables(void)
setFmtEncoding(pset.encoding);
SetVariable(pset.vars, "DBNAME", PQdb(pset.db));
- SetVariable(pset.vars, "SERVICE", PQservice(pset.db));
SetVariable(pset.vars, "USER", PQuser(pset.db));
SetVariable(pset.vars, "HOST", PQhost(pset.db));
SetVariable(pset.vars, "PORT", PQport(pset.db));
SetVariable(pset.vars, "ENCODING", pg_encoding_to_char(pset.encoding));
+ service_name = get_conninfo_value("service");
+ SetVariable(pset.vars, "SERVICE", service_name);
+ if (service_name)
+ pg_free(service_name);
+
+ service_file = get_conninfo_value("servicefile");
+ SetVariable(pset.vars, "SERVICEFILE", service_file);
+ if (service_file)
+ pg_free(service_file);
+
/* this bit should match connection_warnings(): */
/* Try to get full text form of version, might include "devel" etc */
server_version = PQparameterStatus(pset.db, "server_version");
@@ -4521,6 +4535,7 @@ UnsyncVariables(void)
{
SetVariable(pset.vars, "DBNAME", NULL);
SetVariable(pset.vars, "SERVICE", NULL);
+ SetVariable(pset.vars, "SERVICEFILE", NULL);
SetVariable(pset.vars, "USER", NULL);
SetVariable(pset.vars, "HOST", NULL);
SetVariable(pset.vars, "PORT", NULL);
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index 3e4e444f3fd..cd329ade12b 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -1867,6 +1867,33 @@ ExecQueryAndProcessResults(const char *query,
{
FILE *copy_stream = NULL;
+ if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF)
+ {
+ /*
+ * Running COPY within a pipeline can break protocol
+ * synchronisation in multiple ways, and psql's tracking of the
+ * protocol state shows its limits here.
+ *
+ * While in COPY mode, the backend process ignores additional
+ * Sync messages and will not send the matching ReadyForQuery
+ * expected by the frontend.
+ *
+ * Additionally, libpq automatically sends a Sync with the
+ * Copy message, creating an unexpected synchronisation point.
+ * A failure during COPY would leave the pipeline in an
+ * aborted state while the backend would be in a clean state,
+ * ready to process commands.
+ *
+ * Fixing these issues would require changes to how libpq
+ * handles pipelines and COPY. Hence, for the time being, we
+ * forbid the use of COPY within a pipeline and abort the
+ * connection, to avoid an inconsistent state on the psql side
+ * when a COPY command is attempted.
+ */
+ pg_log_info("COPY in a pipeline is not supported, aborting connection");
+ exit(EXIT_BADCONN);
+ }
+
/*
* For COPY OUT, direct the output to the default place (probably
* a pager pipe) for \watch, or to pset.copyStream for \copy,
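
A minimal libpq sequence sketching the desynchronisation the new comment describes; this is an illustration of the failure mode, not code from the patch, and assumes an established PGconn *conn:

    PQenterPipelineMode(conn);
    PQsendQueryParams(conn, "COPY t FROM STDIN",
                      0, NULL, NULL, NULL, NULL, 0);
    PQpipelineSync(conn);   /* once in COPY mode, the backend ignores this */

    /*
     * No ReadyForQuery will ever match the Sync above, so the client's
     * view of pending results no longer lines up with the server's state,
     * which is why psql now aborts the connection up front.
     */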
@@ -2504,6 +2531,41 @@ session_username(void)
return PQuser(pset.db);
}
+/*
+ * Return the value of the given connection option keyword for the current
+ * connection.
+ *
+ * The caller is responsible for freeing the returned value.
+ */
+char *
+get_conninfo_value(const char *keyword)
+{
+ PQconninfoOption *opts;
+ PQconninfoOption *serviceopt = NULL;
+ char *res = NULL;
+
+ if (pset.db == NULL)
+ return NULL;
+
+ opts = PQconninfo(pset.db);
+ if (opts == NULL)
+ return NULL;
+
+ for (PQconninfoOption *opt = opts; opt->keyword != NULL; ++opt)
+ {
+ if (strcmp(opt->keyword, keyword) == 0)
+ {
+ serviceopt = opt;
+ break;
+ }
+ }
+
+ /* Take a copy of the value, as it is freed by PQconninfoFree(). */
+ if (serviceopt && serviceopt->val != NULL)
+ res = pg_strdup(serviceopt->val);
+ PQconninfoFree(opts);
+
+ return res;
+}
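
A short usage sketch mirroring the SyncVariables() changes earlier in this patch: NULL is returned when the keyword is absent or unset, and a non-NULL result is a copy the caller must free:

    char       *service_name = get_conninfo_value("service");

    if (service_name)
    {
        printf("service: %s\n", service_name);
        pg_free(service_name);      /* copy made with pg_strdup() */
    }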
/* expand_tilde
*
@@ -2601,7 +2663,7 @@ clean_extended_state(void)
switch (pset.send_mode)
{
- case PSQL_SEND_EXTENDED_CLOSE: /* \close */
+ case PSQL_SEND_EXTENDED_CLOSE: /* \close_prepared */
free(pset.stmtName);
break;
case PSQL_SEND_EXTENDED_PARSE: /* \parse */
diff --git a/src/bin/psql/common.h b/src/bin/psql/common.h
index 7f1a23de1e8..64762ab9817 100644
--- a/src/bin/psql/common.h
+++ b/src/bin/psql/common.h
@@ -39,6 +39,7 @@ extern bool SendQuery(const char *query);
extern bool is_superuser(void);
extern bool standard_strings(void);
extern const char *session_username(void);
+extern char *get_conninfo_value(const char *keyword);
extern void expand_tilde(char **filename);
extern void clean_extended_state(void);
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index 1d08268393e..7a06af48842 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -296,6 +296,7 @@ describeFunctions(const char *functypes, const char *func_pattern,
char **arg_patterns, int num_arg_patterns,
bool verbose, bool showSystem)
{
+ const char *df_options = "anptwSx+";
bool showAggregate = strchr(functypes, 'a') != NULL;
bool showNormal = strchr(functypes, 'n') != NULL;
bool showProcedure = strchr(functypes, 'p') != NULL;
@@ -310,9 +311,9 @@ describeFunctions(const char *functypes, const char *func_pattern,
/* No "Parallel" column before 9.6 */
static const bool translate_columns_pre_96[] = {false, false, false, false, true, true, false, true, true, false, false, false, false};
- if (strlen(functypes) != strspn(functypes, "anptwSx+"))
+ if (strlen(functypes) != strspn(functypes, df_options))
{
- pg_log_error("\\df only takes [anptwSx+] as options");
+ pg_log_error("\\df only takes [%s] as options", df_options);
return true;
}
@@ -6188,8 +6189,8 @@ listExtensions(const char *pattern)
"FROM pg_catalog.pg_extension e "
"LEFT JOIN pg_catalog.pg_namespace n ON n.oid = e.extnamespace "
"LEFT JOIN pg_catalog.pg_description d ON d.objoid = e.oid "
- "LEFT JOIN pg_catalog.pg_available_extensions() ae(name, default_version, comment) ON ae.name = e.extname "
- "AND d.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass\n",
+ "AND d.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass "
+ "LEFT JOIN pg_catalog.pg_available_extensions() ae(name, default_version, comment) ON ae.name = e.extname\n",
gettext_noop("Name"),
gettext_noop("Version"),
gettext_noop("Default version"),
@@ -6745,7 +6746,7 @@ describeSubscriptions(const char *pattern, bool verbose)
printQueryOpt myopt = pset.popt;
static const bool translate_columns[] = {false, false, false, false,
false, false, false, false, false, false, false, false, false, false,
- false};
+ false, false};
if (pset.sversion < 100000)
{
@@ -6813,6 +6814,10 @@ describeSubscriptions(const char *pattern, bool verbose)
appendPQExpBuffer(&buf,
", subfailover AS \"%s\"\n",
gettext_noop("Failover"));
+ if (pset.sversion >= 190000)
+ appendPQExpBuffer(&buf,
+ ", subretaindeadtuples AS \"%s\"\n",
+ gettext_noop("Retain dead tuples"));
appendPQExpBuffer(&buf,
", subsynccommit AS \"%s\"\n"
diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c
index 403b51325a7..8c62729a0d1 100644
--- a/src/bin/psql/help.c
+++ b/src/bin/psql/help.c
@@ -252,7 +252,8 @@ slashUsage(unsigned short int pager)
HELP0(" \\dO[Sx+] [PATTERN] list collations\n");
HELP0(" \\dp[Sx] [PATTERN] list table, view, and sequence access privileges\n");
HELP0(" \\dP[itnx+] [PATTERN] list [only index/table] partitioned relations [n=nested]\n");
- HELP0(" \\drds[x] [ROLEPTRN [DBPTRN]] list per-database role settings\n");
+ HELP0(" \\drds[x] [ROLEPTRN [DBPTRN]]\n"
+ " list per-database role settings\n");
HELP0(" \\drg[Sx] [PATTERN] list role grants\n");
HELP0(" \\dRp[x+] [PATTERN] list replication publications\n");
HELP0(" \\dRs[x+] [PATTERN] list replication subscriptions\n");
@@ -330,12 +331,12 @@ slashUsage(unsigned short int pager)
HELP0(" \\bind [PARAM]... set query parameters\n");
HELP0(" \\bind_named STMT_NAME [PARAM]...\n"
" set query parameters for an existing prepared statement\n");
- HELP0(" \\close STMT_NAME close an existing prepared statement\n");
+ HELP0(" \\close_prepared STMT_NAME\n"
+ " close an existing prepared statement\n");
HELP0(" \\endpipeline exit pipeline mode\n");
HELP0(" \\flush flush output data to the server\n");
HELP0(" \\flushrequest send request to the server to flush its output buffer\n");
- HELP0(" \\getresults [NUM_RES] read NUM_RES pending results. All pending results are\n"
- " read if no argument is provided\n");
+ HELP0(" \\getresults [NUM_RES] read NUM_RES pending results, or all if no argument\n");
HELP0(" \\parse STMT_NAME create a prepared statement\n");
HELP0(" \\sendpipeline send an extended query to an ongoing pipeline\n");
HELP0(" \\startpipeline enter pipeline mode\n");
@@ -463,8 +464,9 @@ helpVariables(unsigned short int pager)
" VERSION_NAME\n"
" VERSION_NUM\n"
" psql's version (in verbose string, short string, or numeric format)\n");
- HELP0(" WATCH_INTERVAL\n"
- " if set to a number, overrides the default two second \\watch interval\n");
+ HELPN(" WATCH_INTERVAL\n"
+ " number of seconds \\watch waits between executions (default %s)\n",
+ DEFAULT_WATCH_INTERVAL);
HELP0("\nDisplay settings:\n");
HELP0("Usage:\n");
@@ -746,7 +748,7 @@ void
print_copyright(void)
{
puts("PostgreSQL Database Management System\n"
- "(formerly known as Postgres, then as Postgres95)\n\n"
+ "(also known as Postgres, formerly known as Postgres95)\n\n"
"Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group\n\n"
"Portions Copyright (c) 1994, The Regents of the University of California\n\n"
"Permission to use, copy, modify, and distribute this software and its\n"
diff --git a/src/bin/psql/prompt.c b/src/bin/psql/prompt.c
index 3aa7d2d06c8..b08d7328fbf 100644
--- a/src/bin/psql/prompt.c
+++ b/src/bin/psql/prompt.c
@@ -169,8 +169,12 @@ get_prompt(promptStatus_t status, ConditionalStack cstack)
break;
/* service name */
case 's':
- if (pset.db && PQservice(pset.db))
- strlcpy(buf, PQservice(pset.db), sizeof(buf));
+ {
+ const char *service_name = GetVariable(pset.vars, "SERVICE");
+
+ if (service_name)
+ strlcpy(buf, service_name, sizeof(buf));
+ }
break;
/* backend pid */
case 'p':
diff --git a/src/bin/psql/t/001_basic.pl b/src/bin/psql/t/001_basic.pl
index 4050f9a5e3e..f42c3961e09 100644
--- a/src/bin/psql/t/001_basic.pl
+++ b/src/bin/psql/t/001_basic.pl
@@ -483,8 +483,8 @@ psql_like($node, "copy (values ('foo'),('bar')) to stdout \\g | $pipe_cmd",
my $c4 = slurp_file($g_file);
like($c4, qr/foo.*bar/s);
-# Tests with pipelines. These trigger FATAL failures in the backend,
-# so they cannot be tested via SQL.
+# Test COPY within pipelines. These abort the connection from
+# the frontend, so they cannot be tested via SQL.
$node->safe_psql('postgres', 'CREATE TABLE psql_pipeline()');
my $log_location = -s $node->logfile;
psql_fails_like(
@@ -493,35 +493,41 @@ psql_fails_like(
COPY psql_pipeline FROM STDIN;
SELECT 'val1';
\\syncpipeline
-\\getresults
\\endpipeline},
- qr/server closed the connection unexpectedly/,
- 'protocol sync loss in pipeline: direct COPY, SELECT, sync and getresult'
-);
+ qr/COPY in a pipeline is not supported, aborting connection/,
+ 'COPY FROM in pipeline: fails');
$node->wait_for_log(
qr/FATAL: .*terminating connection because protocol synchronization was lost/,
$log_location);
+# Likewise, but this time without \syncpipeline.
psql_fails_like(
$node,
qq{\\startpipeline
-COPY psql_pipeline FROM STDIN \\bind \\sendpipeline
-SELECT 'val1' \\bind \\sendpipeline
-\\syncpipeline
-\\getresults
+COPY psql_pipeline TO STDOUT;
+SELECT 'val1';
\\endpipeline},
- qr/server closed the connection unexpectedly/,
- 'protocol sync loss in pipeline: bind COPY, SELECT, sync and getresult');
+ qr/COPY in a pipeline is not supported, aborting connection/,
+ 'COPY TO in pipeline: fails');
-# This time, test without the \getresults.
psql_fails_like(
$node,
qq{\\startpipeline
-COPY psql_pipeline FROM STDIN;
+\\copy psql_pipeline from stdin;
SELECT 'val1';
\\syncpipeline
\\endpipeline},
- qr/server closed the connection unexpectedly/,
- 'protocol sync loss in pipeline: COPY, SELECT and sync');
+ qr/COPY in a pipeline is not supported, aborting connection/,
+ '\copy from in pipeline: fails');
+
+# Sync attempt after a COPY TO/FROM.
+psql_fails_like(
+ $node,
+ qq{\\startpipeline
+\\copy psql_pipeline to stdout;
+\\syncpipeline
+\\endpipeline},
+ qr/COPY in a pipeline is not supported, aborting connection/,
+ '\copy to in pipeline: fails');
done_testing();
diff --git a/src/bin/psql/tab-complete.in.c b/src/bin/psql/tab-complete.in.c
index ec65ab79fec..1f2ca946fc5 100644
--- a/src/bin/psql/tab-complete.in.c
+++ b/src/bin/psql/tab-complete.in.c
@@ -889,6 +889,14 @@ static const SchemaQuery Query_for_list_of_analyzables = {
.result = "c.relname",
};
+/*
+ * Relations supporting COPY TO/FROM are currently almost the same as
+ * those supporting ANALYZE. Although views with INSTEAD OF INSERT triggers
+ * can be used with COPY FROM, they are rarely used for this purpose,
+ * so plain views are intentionally excluded from this tab completion.
+ */
+#define Query_for_list_of_tables_for_copy Query_for_list_of_analyzables
+
/* Relations supporting index creation */
static const SchemaQuery Query_for_list_of_indexables = {
.catname = "pg_catalog.pg_class c",
@@ -1002,7 +1010,7 @@ static const SchemaQuery Query_for_trigger_of_table = {
#define Query_for_list_of_database_vars \
"SELECT conf FROM ("\
-" SELECT setdatabase, pg_catalog.split_part(unnest(setconfig),'=',1) conf"\
+" SELECT setdatabase, pg_catalog.split_part(pg_catalog.unnest(setconfig),'=',1) conf"\
" FROM pg_db_role_setting "\
" ) s, pg_database d "\
" WHERE s.setdatabase = d.oid "\
@@ -1078,9 +1086,12 @@ Keywords_for_list_of_owner_roles, "PUBLIC"
" WHERE usename LIKE '%s'"
#define Query_for_list_of_user_vars \
-" SELECT pg_catalog.split_part(pg_catalog.unnest(rolconfig),'=',1) "\
-" FROM pg_catalog.pg_roles "\
-" WHERE rolname LIKE '%s'"
+"SELECT conf FROM ("\
+" SELECT rolname, pg_catalog.split_part(pg_catalog.unnest(rolconfig),'=',1) conf"\
+" FROM pg_catalog.pg_roles"\
+" ) s"\
+" WHERE s.conf like '%s' "\
+" AND s.rolname LIKE '%s'"
#define Query_for_list_of_access_methods \
" SELECT amname "\
@@ -1190,6 +1201,19 @@ Alter_procedure_options, "COST", "IMMUTABLE", "LEAKPROOF", "NOT LEAKPROOF", \
Alter_routine_options, "CALLED ON NULL INPUT", "RETURNS NULL ON NULL INPUT", \
"STRICT", "SUPPORT"
+/* COPY options shared between FROM and TO */
+#define Copy_common_options \
+"DELIMITER", "ENCODING", "ESCAPE", "FORMAT", "HEADER", "NULL", "QUOTE"
+
+/* COPY FROM options */
+#define Copy_from_options \
+Copy_common_options, "DEFAULT", "FORCE_NOT_NULL", "FORCE_NULL", "FREEZE", \
+"LOG_VERBOSITY", "ON_ERROR", "REJECT_LIMIT"
+
+/* COPY TO options */
+#define Copy_to_options \
+Copy_common_options, "FORCE_QUOTE"
+
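
Since these are plain token-list macros, COMPLETE_WITH(Copy_to_options) expands into a single variadic call; spelled out from the definitions above, it is equivalent to:

    COMPLETE_WITH("DELIMITER", "ENCODING", "ESCAPE", "FORMAT", "HEADER",
                  "NULL", "QUOTE", "FORCE_QUOTE");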
/*
* These object types were introduced later than our support cutoff of
* server version 9.2. We use the VersionedQuery infrastructure so that
@@ -1875,7 +1899,7 @@ psql_completion(const char *text, int start, int end)
static const char *const backslash_commands[] = {
"\\a",
"\\bind", "\\bind_named",
- "\\connect", "\\conninfo", "\\C", "\\cd", "\\close", "\\copy",
+ "\\connect", "\\conninfo", "\\C", "\\cd", "\\close_prepared", "\\copy",
"\\copyright", "\\crosstabview",
"\\d", "\\da", "\\dA", "\\dAc", "\\dAf", "\\dAo", "\\dAp",
"\\db", "\\dc", "\\dconfig", "\\dC", "\\dd", "\\ddp", "\\dD",
@@ -2298,8 +2322,9 @@ match_previous_words(int pattern_id,
/* ALTER SUBSCRIPTION <name> SET ( */
else if (Matches("ALTER", "SUBSCRIPTION", MatchAny, MatchAnyN, "SET", "("))
COMPLETE_WITH("binary", "disable_on_error", "failover", "origin",
- "password_required", "run_as_owner", "slot_name",
- "streaming", "synchronous_commit", "two_phase");
+ "password_required", "retain_dead_tuples",
+ "run_as_owner", "slot_name", "streaming",
+ "synchronous_commit", "two_phase");
/* ALTER SUBSCRIPTION <name> SKIP ( */
else if (Matches("ALTER", "SUBSCRIPTION", MatchAny, MatchAnyN, "SKIP", "("))
COMPLETE_WITH("lsn");
@@ -2495,7 +2520,10 @@ match_previous_words(int pattern_id,
/* ALTER USER,ROLE <name> RESET */
else if (Matches("ALTER", "USER|ROLE", MatchAny, "RESET"))
+ {
+ set_completion_reference(prev2_wd);
COMPLETE_WITH_QUERY_PLUS(Query_for_list_of_user_vars, "ALL");
+ }
/* ALTER USER,ROLE <name> WITH */
else if (Matches("ALTER", "USER|ROLE", MatchAny, "WITH"))
@@ -2725,17 +2753,24 @@ match_previous_words(int pattern_id,
/* ALTER TABLE xxx ADD */
else if (Matches("ALTER", "TABLE", MatchAny, "ADD"))
{
- /* make sure to keep this list and the !Matches() below in sync */
- COMPLETE_WITH("COLUMN", "CONSTRAINT", "CHECK", "UNIQUE", "PRIMARY KEY",
- "EXCLUDE", "FOREIGN KEY");
+ /*
+ * make sure to keep this list and the MatchAnyExcept() below in sync
+ */
+ COMPLETE_WITH("COLUMN", "CONSTRAINT", "CHECK (", "NOT NULL", "UNIQUE",
+ "PRIMARY KEY", "EXCLUDE", "FOREIGN KEY");
}
/* ALTER TABLE xxx ADD [COLUMN] yyy */
else if (Matches("ALTER", "TABLE", MatchAny, "ADD", "COLUMN", MatchAny) ||
- Matches("ALTER", "TABLE", MatchAny, "ADD", MatchAnyExcept("COLUMN|CONSTRAINT|CHECK|UNIQUE|PRIMARY|EXCLUDE|FOREIGN")))
+ Matches("ALTER", "TABLE", MatchAny, "ADD", MatchAnyExcept("COLUMN|CONSTRAINT|CHECK|UNIQUE|PRIMARY|NOT|EXCLUDE|FOREIGN")))
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_datatypes);
/* ALTER TABLE xxx ADD CONSTRAINT yyy */
else if (Matches("ALTER", "TABLE", MatchAny, "ADD", "CONSTRAINT", MatchAny))
- COMPLETE_WITH("CHECK", "UNIQUE", "PRIMARY KEY", "EXCLUDE", "FOREIGN KEY");
+ COMPLETE_WITH("CHECK (", "NOT NULL", "UNIQUE", "PRIMARY KEY", "EXCLUDE", "FOREIGN KEY");
+ /* ALTER TABLE xxx ADD NOT NULL */
+ else if (Matches("ALTER", "TABLE", MatchAny, "ADD", "NOT", "NULL"))
+ COMPLETE_WITH_ATTR(prev4_wd);
+ else if (Matches("ALTER", "TABLE", MatchAny, "ADD", "CONSTRAINT", MatchAny, "NOT", "NULL"))
+ COMPLETE_WITH_ATTR(prev6_wd);
/* ALTER TABLE xxx ADD [CONSTRAINT yyy] (PRIMARY KEY|UNIQUE) */
else if (Matches("ALTER", "TABLE", MatchAny, "ADD", "PRIMARY", "KEY") ||
Matches("ALTER", "TABLE", MatchAny, "ADD", "UNIQUE") ||
@@ -3125,6 +3160,22 @@ match_previous_words(int pattern_id,
COMPLETE_WITH_VERSIONED_SCHEMA_QUERY(Query_for_list_of_procedures);
else if (Matches("CALL", MatchAny))
COMPLETE_WITH("(");
+/* CHECKPOINT */
+ else if (Matches("CHECKPOINT"))
+ COMPLETE_WITH("(");
+ else if (HeadMatches("CHECKPOINT", "(*") &&
+ !HeadMatches("CHECKPOINT", "(*)"))
+ {
+ /*
+ * This fires if we're in an unfinished parenthesized option list.
+ * get_previous_words treats a completed parenthesized option list as
+ * one word, so the above test is correct.
+ */
+ if (ends_with(prev_wd, '(') || ends_with(prev_wd, ','))
+ COMPLETE_WITH("MODE", "FLUSH_UNLOGGED");
+ else if (TailMatches("MODE"))
+ COMPLETE_WITH("FAST", "SPREAD");
+ }
/* CLOSE */
else if (Matches("CLOSE"))
COMPLETE_WITH_QUERY_PLUS(Query_for_list_of_cursors,
@@ -3255,7 +3306,7 @@ match_previous_words(int pattern_id,
* backslash command).
*/
else if (Matches("COPY|\\copy"))
- COMPLETE_WITH_SCHEMA_QUERY_PLUS(Query_for_list_of_tables, "(");
+ COMPLETE_WITH_SCHEMA_QUERY_PLUS(Query_for_list_of_tables_for_copy, "(");
/* Complete COPY ( with legal query commands */
else if (Matches("COPY|\\copy", "("))
COMPLETE_WITH("SELECT", "TABLE", "VALUES", "INSERT INTO", "UPDATE", "DELETE FROM", "MERGE INTO", "WITH");
@@ -3284,23 +3335,24 @@ match_previous_words(int pattern_id,
else if (Matches("COPY|\\copy", MatchAny, "FROM", MatchAny))
COMPLETE_WITH("WITH (", "WHERE");
- /* Complete COPY <sth> FROM|TO filename WITH ( */
- else if (Matches("COPY|\\copy", MatchAny, "FROM|TO", MatchAny, "WITH", "("))
- COMPLETE_WITH("FORMAT", "FREEZE", "DELIMITER", "NULL",
- "HEADER", "QUOTE", "ESCAPE", "FORCE_QUOTE",
- "FORCE_NOT_NULL", "FORCE_NULL", "ENCODING", "DEFAULT",
- "ON_ERROR", "LOG_VERBOSITY");
+ /* Complete COPY <sth> FROM filename WITH ( */
+ else if (Matches("COPY|\\copy", MatchAny, "FROM", MatchAny, "WITH", "("))
+ COMPLETE_WITH(Copy_from_options);
+
+ /* Complete COPY <sth> TO filename WITH ( */
+ else if (Matches("COPY|\\copy", MatchAny, "TO", MatchAny, "WITH", "("))
+ COMPLETE_WITH(Copy_to_options);
/* Complete COPY <sth> FROM|TO filename WITH (FORMAT */
else if (Matches("COPY|\\copy", MatchAny, "FROM|TO", MatchAny, "WITH", "(", "FORMAT"))
COMPLETE_WITH("binary", "csv", "text");
/* Complete COPY <sth> FROM filename WITH (ON_ERROR */
- else if (Matches("COPY|\\copy", MatchAny, "FROM|TO", MatchAny, "WITH", "(", "ON_ERROR"))
+ else if (Matches("COPY|\\copy", MatchAny, "FROM", MatchAny, "WITH", "(", "ON_ERROR"))
COMPLETE_WITH("stop", "ignore");
/* Complete COPY <sth> FROM filename WITH (LOG_VERBOSITY */
- else if (Matches("COPY|\\copy", MatchAny, "FROM|TO", MatchAny, "WITH", "(", "LOG_VERBOSITY"))
+ else if (Matches("COPY|\\copy", MatchAny, "FROM", MatchAny, "WITH", "(", "LOG_VERBOSITY"))
COMPLETE_WITH("silent", "default", "verbose");
/* Complete COPY <sth> FROM <sth> WITH (<options>) */
@@ -3664,9 +3716,10 @@ match_previous_words(int pattern_id,
TailMatches("CREATE", "TEMP|TEMPORARY|UNLOGGED", "TABLE", MatchAny, "(*)", "AS"))
COMPLETE_WITH("EXECUTE", "SELECT", "TABLE", "VALUES", "WITH");
/* Complete CREATE TABLE name (...) with supported options */
- else if (TailMatches("CREATE", "TABLE", MatchAny, "(*)") ||
- TailMatches("CREATE", "UNLOGGED", "TABLE", MatchAny, "(*)"))
+ else if (TailMatches("CREATE", "TABLE", MatchAny, "(*)"))
COMPLETE_WITH("AS", "INHERITS (", "PARTITION BY", "USING", "TABLESPACE", "WITH (");
+ else if (TailMatches("CREATE", "UNLOGGED", "TABLE", MatchAny, "(*)"))
+ COMPLETE_WITH("AS", "INHERITS (", "USING", "TABLESPACE", "WITH (");
else if (TailMatches("CREATE", "TEMP|TEMPORARY", "TABLE", MatchAny, "(*)"))
COMPLETE_WITH("AS", "INHERITS (", "ON COMMIT", "PARTITION BY", "USING",
"TABLESPACE", "WITH (");
@@ -3728,8 +3781,9 @@ match_previous_words(int pattern_id,
else if (Matches("CREATE", "SUBSCRIPTION", MatchAnyN, "WITH", "("))
COMPLETE_WITH("binary", "connect", "copy_data", "create_slot",
"disable_on_error", "enabled", "failover", "origin",
- "password_required", "run_as_owner", "slot_name",
- "streaming", "synchronous_commit", "two_phase");
+ "password_required", "retain_dead_tuples",
+ "run_as_owner", "slot_name", "streaming",
+ "synchronous_commit", "two_phase");
/* CREATE TRIGGER --- is allowed inside CREATE SCHEMA, so use TailMatches */
@@ -4573,10 +4627,14 @@ match_previous_words(int pattern_id,
else if (Matches("ALTER", "DEFAULT", "PRIVILEGES", MatchAnyN, "TO", MatchAny))
COMPLETE_WITH("WITH GRANT OPTION");
/* Complete "GRANT/REVOKE ... ON * *" with TO/FROM */
- else if (Matches("GRANT", MatchAnyN, "ON", MatchAny, MatchAny))
- COMPLETE_WITH("TO");
- else if (Matches("REVOKE", MatchAnyN, "ON", MatchAny, MatchAny))
- COMPLETE_WITH("FROM");
+ else if (Matches("GRANT|REVOKE", MatchAnyN, "ON", MatchAny, MatchAny) &&
+ !TailMatches("FOREIGN", "SERVER") && !TailMatches("LARGE", "OBJECT"))
+ {
+ if (Matches("GRANT", MatchAnyN, "ON", MatchAny, MatchAny))
+ COMPLETE_WITH("TO");
+ else
+ COMPLETE_WITH("FROM");
+ }
/* Complete "GRANT/REVOKE * ON ALL * IN SCHEMA *" with TO/FROM */
else if (TailMatches("GRANT|REVOKE", MatchAny, "ON", "ALL", MatchAny, "IN", "SCHEMA", MatchAny) ||
@@ -4608,6 +4666,26 @@ match_previous_words(int pattern_id,
COMPLETE_WITH("FROM");
}
+ /* Complete "GRANT/REVOKE * ON LARGE OBJECT *" with TO/FROM */
+ else if (TailMatches("GRANT|REVOKE", MatchAny, "ON", "LARGE", "OBJECT", MatchAny) ||
+ TailMatches("REVOKE", "GRANT", "OPTION", "FOR", MatchAny, "ON", "LARGE", "OBJECT", MatchAny))
+ {
+ if (TailMatches("GRANT", MatchAny, MatchAny, MatchAny, MatchAny, MatchAny))
+ COMPLETE_WITH("TO");
+ else
+ COMPLETE_WITH("FROM");
+ }
+
+ /* Complete "GRANT/REVOKE * ON LARGE OBJECTS" with TO/FROM */
+ else if (TailMatches("GRANT|REVOKE", MatchAny, "ON", "LARGE", "OBJECTS") ||
+ TailMatches("REVOKE", "GRANT", "OPTION", "FOR", MatchAny, "ON", "LARGE", "OBJECTS"))
+ {
+ if (TailMatches("GRANT", MatchAny, MatchAny, MatchAny, MatchAny))
+ COMPLETE_WITH("TO");
+ else
+ COMPLETE_WITH("FROM");
+ }
+
/* GROUP BY */
else if (TailMatches("FROM", MatchAny, "GROUP"))
COMPLETE_WITH("BY");
@@ -4943,7 +5021,7 @@ match_previous_words(int pattern_id,
/* Complete with a variable name */
else if (TailMatches("SET|RESET") &&
!TailMatches("UPDATE", MatchAny, "SET") &&
- !TailMatches("ALTER", "DATABASE", MatchAny, "RESET"))
+ !TailMatches("ALTER", "DATABASE|USER|ROLE", MatchAny, "RESET"))
COMPLETE_WITH_QUERY_VERBATIM_PLUS(Query_for_list_of_set_vars,
"CONSTRAINTS",
"TRANSACTION",
diff --git a/src/bin/psql/variables.c b/src/bin/psql/variables.c
index ae2d0e5ed3f..6b64302ebca 100644
--- a/src/bin/psql/variables.c
+++ b/src/bin/psql/variables.c
@@ -204,7 +204,7 @@ ParseVariableDouble(const char *value, const char *name, double *result, double
if ((value == NULL) || (*value == '\0'))
{
if (name)
- pg_log_error("invalid input syntax for \"%s\"", name);
+ pg_log_error("invalid input syntax for variable \"%s\"", name);
return false;
}
@@ -215,14 +215,14 @@ ParseVariableDouble(const char *value, const char *name, double *result, double
if (dblval < min)
{
if (name)
- pg_log_error("invalid value \"%s\" for \"%s\": must be greater than %.2f",
+ pg_log_error("invalid value \"%s\" for variable \"%s\": must be greater than %.2f",
value, name, min);
return false;
}
else if (dblval > max)
{
if (name)
- pg_log_error("invalid value \"%s\" for \"%s\": must be less than %.2f",
+ pg_log_error("invalid value \"%s\" for variable \"%s\": must be less than %.2f",
value, name, max);
+ return false;
}
*result = dblval;
@@ -238,13 +238,13 @@ ParseVariableDouble(const char *value, const char *name, double *result, double
(dblval == 0.0 || dblval >= HUGE_VAL || dblval <= -HUGE_VAL))
{
if (name)
- pg_log_error("\"%s\" is out of range for \"%s\"", value, name);
+ pg_log_error("value \"%s\" is out of range for variable \"%s\"", value, name);
return false;
}
else
{
if (name)
- pg_log_error("invalid value \"%s\" for \"%s\"", value, name);
+ pg_log_error("invalid value \"%s\" for variable \"%s\"", value, name);
return false;
}
}
diff --git a/src/bin/scripts/t/100_vacuumdb.pl b/src/bin/scripts/t/100_vacuumdb.pl
index 75ac24a7a55..ff56a13b46b 100644
--- a/src/bin/scripts/t/100_vacuumdb.pl
+++ b/src/bin/scripts/t/100_vacuumdb.pl
@@ -238,62 +238,105 @@ $node->command_fails_like(
'cannot use option --all and a dbname as argument at the same time');
$node->safe_psql('postgres',
- 'CREATE TABLE regression_vacuumdb_test AS select generate_series(1, 10) a, generate_series(2, 11) b;');
+ 'CREATE TABLE regression_vacuumdb_test AS select generate_series(1, 10) a, generate_series(2, 11) b;'
+);
$node->issues_sql_like(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with missing stats');
$node->issues_sql_unlike(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with no missing stats');
$node->safe_psql('postgres',
- 'CREATE INDEX regression_vacuumdb_test_idx ON regression_vacuumdb_test (mod(a, 2));');
+ 'CREATE INDEX regression_vacuumdb_test_idx ON regression_vacuumdb_test (mod(a, 2));'
+);
$node->issues_sql_like(
- [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-in-stages',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with missing index expression stats');
$node->issues_sql_unlike(
- [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-in-stages',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with no missing index expression stats');
$node->safe_psql('postgres',
- 'CREATE STATISTICS regression_vacuumdb_test_stat ON a, b FROM regression_vacuumdb_test;');
+ 'CREATE STATISTICS regression_vacuumdb_test_stat ON a, b FROM regression_vacuumdb_test;'
+);
$node->issues_sql_like(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with missing extended stats');
$node->issues_sql_unlike(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with no missing extended stats');
$node->safe_psql('postgres',
"CREATE TABLE regression_vacuumdb_child (a INT) INHERITS (regression_vacuumdb_test);\n"
- . "INSERT INTO regression_vacuumdb_child VALUES (1, 2);\n"
- . "ANALYZE regression_vacuumdb_child;\n");
+ . "INSERT INTO regression_vacuumdb_child VALUES (1, 2);\n"
+ . "ANALYZE regression_vacuumdb_child;\n");
$node->issues_sql_like(
- [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-in-stages',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with missing inherited stats');
$node->issues_sql_unlike(
- [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-in-stages',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with no missing inherited stats');
$node->safe_psql('postgres',
"CREATE TABLE regression_vacuumdb_parted (a INT) PARTITION BY LIST (a);\n"
- . "CREATE TABLE regression_vacuumdb_part1 PARTITION OF regression_vacuumdb_parted FOR VALUES IN (1);\n"
- . "INSERT INTO regression_vacuumdb_parted VALUES (1);\n"
- . "ANALYZE regression_vacuumdb_part1;\n");
+ . "CREATE TABLE regression_vacuumdb_part1 PARTITION OF regression_vacuumdb_parted FOR VALUES IN (1);\n"
+ . "INSERT INTO regression_vacuumdb_parted VALUES (1);\n"
+ . "ANALYZE regression_vacuumdb_part1;\n");
$node->issues_sql_like(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_parted', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_parted', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with missing partition stats');
$node->issues_sql_unlike(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_parted', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_parted', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with no missing partition stats');