aboutsummaryrefslogtreecommitdiff
path: root/src/bin/pg_dump/pg_dump.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/bin/pg_dump/pg_dump.c')
-rw-r--r--src/bin/pg_dump/pg_dump.c398
1 file changed, 309 insertions, 89 deletions
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 1937997ea67..f3a353a61a5 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -47,10 +47,13 @@
#include "catalog/pg_authid_d.h"
#include "catalog/pg_cast_d.h"
#include "catalog/pg_class_d.h"
+#include "catalog/pg_constraint_d.h"
#include "catalog/pg_default_acl_d.h"
#include "catalog/pg_largeobject_d.h"
+#include "catalog/pg_largeobject_metadata_d.h"
#include "catalog/pg_proc_d.h"
#include "catalog/pg_publication_d.h"
+#include "catalog/pg_shdepend_d.h"
#include "catalog/pg_subscription_d.h"
#include "catalog/pg_type_d.h"
#include "common/connect.h"
@@ -209,6 +212,12 @@ static int nbinaryUpgradeClassOids = 0;
static SequenceItem *sequences = NULL;
static int nsequences = 0;
+/*
+ * For binary upgrade, the dump ID of pg_largeobject_metadata is saved for use
+ * as a dependency for pg_shdepend and any large object comments/seclabels.
+ */
+static DumpId lo_metadata_dumpId;
+
/* Maximum number of relations to fetch in a fetchAttributeStats() call. */
#define MAX_ATTR_STATS_RELS 64
@@ -440,8 +449,6 @@ main(int argc, char **argv)
bool data_only = false;
bool schema_only = false;
bool statistics_only = false;
- bool with_data = false;
- bool with_schema = false;
bool with_statistics = false;
bool no_data = false;
bool no_schema = false;
@@ -505,6 +512,7 @@ main(int argc, char **argv)
{"section", required_argument, NULL, 5},
{"serializable-deferrable", no_argument, &dopt.serializable_deferrable, 1},
{"snapshot", required_argument, NULL, 6},
+ {"statistics", no_argument, NULL, 22},
{"statistics-only", no_argument, NULL, 18},
{"strict-names", no_argument, &strict_names, 1},
{"use-set-session-authorization", no_argument, &dopt.use_setsessauth, 1},
@@ -519,9 +527,6 @@ main(int argc, char **argv)
{"no-toast-compression", no_argument, &dopt.no_toast_compression, 1},
{"no-unlogged-table-data", no_argument, &dopt.no_unlogged_table_data, 1},
{"no-sync", no_argument, NULL, 7},
- {"with-data", no_argument, NULL, 22},
- {"with-schema", no_argument, NULL, 23},
- {"with-statistics", no_argument, NULL, 24},
{"on-conflict-do-nothing", no_argument, &dopt.do_nothing, 1},
{"rows-per-insert", required_argument, NULL, 10},
{"include-foreign-data", required_argument, NULL, 11},
@@ -789,14 +794,6 @@ main(int argc, char **argv)
break;
case 22:
- with_data = true;
- break;
-
- case 23:
- with_schema = true;
- break;
-
- case 24:
with_statistics = true;
break;
@@ -843,13 +840,17 @@ main(int argc, char **argv)
if (statistics_only && no_statistics)
pg_fatal("options --statistics-only and --no-statistics cannot be used together");
- /* reject conflicting "with-" and "no-" options */
- if (with_data && no_data)
- pg_fatal("options --with-data and --no-data cannot be used together");
- if (with_schema && no_schema)
- pg_fatal("options --with-schema and --no-schema cannot be used together");
+ /* reject conflicting "no-" options */
if (with_statistics && no_statistics)
- pg_fatal("options --with-statistics and --no-statistics cannot be used together");
+ pg_fatal("options --statistics and --no-statistics cannot be used together");
+
+ /* reject conflicting "-only" options */
+ if (data_only && with_statistics)
+ pg_fatal("options %s and %s cannot be used together",
+ "-a/--data-only", "--statistics");
+ if (schema_only && with_statistics)
+ pg_fatal("options %s and %s cannot be used together",
+ "-s/--schema-only", "--statistics");
if (schema_only && foreign_servers_include_patterns.head != NULL)
pg_fatal("options -s/--schema-only and --include-foreign-data cannot be used together");
@@ -864,16 +865,14 @@ main(int argc, char **argv)
pg_fatal("option --if-exists requires option -c/--clean");
/*
- * Set derivative flags. An "-only" option may be overridden by an
- * explicit "with-" option; e.g. "--schema-only --with-statistics" will
- * include schema and statistics. Other ambiguous or nonsensical
- * combinations, e.g. "--schema-only --no-schema", will have already
- * caused an error in one of the checks above.
+ * Set derivative flags. Ambiguous or nonsensical combinations, e.g.
+ * "--schema-only --no-schema", will have already caused an error in one
+ * of the checks above.
*/
dopt.dumpData = ((dopt.dumpData && !schema_only && !statistics_only) ||
- (data_only || with_data)) && !no_data;
+ data_only) && !no_data;
dopt.dumpSchema = ((dopt.dumpSchema && !data_only && !statistics_only) ||
- (schema_only || with_schema)) && !no_schema;
+ schema_only) && !no_schema;
dopt.dumpStatistics = ((dopt.dumpStatistics && !schema_only && !data_only) ||
(statistics_only || with_statistics)) && !no_statistics;
@@ -1086,6 +1085,36 @@ main(int argc, char **argv)
getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
/*
+ * For binary upgrade mode, dump pg_largeobject_metadata and the
+ * associated pg_shdepend rows. This is faster to restore than the
+ * equivalent set of large object commands. We can only do this for
+ * upgrades from v12 and newer; in older versions, pg_largeobject_metadata
+ * was created WITH OIDS, so the OID column is hidden and won't be dumped.
+ */
+ if (dopt.binary_upgrade && fout->remoteVersion >= 120000)
+ {
+ TableInfo *lo_metadata = findTableByOid(LargeObjectMetadataRelationId);
+ TableInfo *shdepend = findTableByOid(SharedDependRelationId);
+
+ makeTableDataInfo(&dopt, lo_metadata);
+ makeTableDataInfo(&dopt, shdepend);
+
+ /*
+ * Save pg_largeobject_metadata's dump ID for use as a dependency for
+ * pg_shdepend and any large object comments/seclabels.
+ */
+ lo_metadata_dumpId = lo_metadata->dataObj->dobj.dumpId;
+ addObjectDependency(&shdepend->dataObj->dobj, lo_metadata_dumpId);
+
+ /*
+ * Only dump large object shdepend rows for this database.
+ */
+ shdepend->dataObj->filtercond = "WHERE classid = 'pg_largeobject'::regclass "
+ "AND dbid = (SELECT oid FROM pg_database "
+ " WHERE datname = current_database())";
+ }
+
+ /*
* In binary-upgrade mode, we do not have to worry about the actual LO
* data or the associated metadata that resides in the pg_largeobject and
* pg_largeobject_metadata tables, respectively.
@@ -1226,7 +1255,7 @@ main(int argc, char **argv)
* right now.
*/
if (plainText)
- RestoreArchive(fout, false);
+ RestoreArchive(fout);
CloseArchive(fout);
@@ -1316,6 +1345,7 @@ help(const char *progname)
printf(_(" --sequence-data include sequence data in dump\n"));
printf(_(" --serializable-deferrable wait until the dump can run without anomalies\n"));
printf(_(" --snapshot=SNAPSHOT use given snapshot for the dump\n"));
+ printf(_(" --statistics dump the statistics\n"));
printf(_(" --statistics-only dump only the statistics, not schema or data\n"));
printf(_(" --strict-names require table and/or schema include patterns to\n"
" match at least one entity each\n"));
@@ -1324,9 +1354,6 @@ help(const char *progname)
printf(_(" --use-set-session-authorization\n"
" use SET SESSION AUTHORIZATION commands instead of\n"
" ALTER OWNER commands to set ownership\n"));
- printf(_(" --with-data dump the data\n"));
- printf(_(" --with-schema dump the schema\n"));
- printf(_(" --with-statistics dump the statistics\n"));
printf(_("\nConnection options:\n"));
printf(_(" -d, --dbname=DBNAME database to dump\n"));
@@ -2168,6 +2195,13 @@ selectDumpableProcLang(ProcLangInfo *plang, Archive *fout)
static void
selectDumpableAccessMethod(AccessMethodInfo *method, Archive *fout)
{
+ /* see getAccessMethods() comment about v9.6. */
+ if (fout->remoteVersion < 90600)
+ {
+ method->dobj.dump = DUMP_COMPONENT_NONE;
+ return;
+ }
+
if (checkExtensionMembership(&method->dobj, fout))
return; /* extension membership overrides all else */
@@ -3924,10 +3958,37 @@ getLOs(Archive *fout)
* as it will be copied by pg_upgrade, which simply copies the
* pg_largeobject table. We *do* however dump out anything but the
* data, as pg_upgrade copies just pg_largeobject, but not
- * pg_largeobject_metadata, after the dump is restored.
+ * pg_largeobject_metadata, after the dump is restored. In versions
+ * before v12, this is done via proper large object commands. In
+ * newer versions, we dump the content of pg_largeobject_metadata and
+ * any associated pg_shdepend rows, which is faster to restore. (On
+ * <v12, pg_largeobject_metadata was created WITH OIDS, so the OID
+ * column is hidden and won't be dumped.)
*/
if (dopt->binary_upgrade)
- loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA;
+ {
+ if (fout->remoteVersion >= 120000)
+ {
+ /*
+ * We should've saved pg_largeobject_metadata's dump ID before
+ * this point.
+ */
+ Assert(lo_metadata_dumpId);
+
+ loinfo->dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL | DUMP_COMPONENT_DEFINITION);
+
+ /*
+ * Mark the large object as dependent on
+ * pg_largeobject_metadata so that any large object
+ * comments/seclabels are dumped after it.
+ */
+ loinfo->dobj.dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
+ loinfo->dobj.dependencies[0] = lo_metadata_dumpId;
+ loinfo->dobj.nDeps = loinfo->dobj.allocDeps = 1;
+ }
+ else
+ loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA;
+ }
/*
* Create a "BLOBS" data item for the group, too. This is just a
@@ -4962,6 +5023,7 @@ getSubscriptions(Archive *fout)
int i_suboriginremotelsn;
int i_subenabled;
int i_subfailover;
+ int i_subretaindeadtuples;
int i,
ntups;
@@ -5034,10 +5096,17 @@ getSubscriptions(Archive *fout)
if (fout->remoteVersion >= 170000)
appendPQExpBufferStr(query,
- " s.subfailover\n");
+ " s.subfailover,\n");
else
appendPQExpBufferStr(query,
- " false AS subfailover\n");
+ " false AS subfailover,\n");
+
+ if (fout->remoteVersion >= 190000)
+ appendPQExpBufferStr(query,
+ " s.subretaindeadtuples\n");
+ else
+ appendPQExpBufferStr(query,
+ " false AS subretaindeadtuples\n");
appendPQExpBufferStr(query,
"FROM pg_subscription s\n");
@@ -5071,6 +5140,7 @@ getSubscriptions(Archive *fout)
i_subpasswordrequired = PQfnumber(res, "subpasswordrequired");
i_subrunasowner = PQfnumber(res, "subrunasowner");
i_subfailover = PQfnumber(res, "subfailover");
+ i_subretaindeadtuples = PQfnumber(res, "subretaindeadtuples");
i_subconninfo = PQfnumber(res, "subconninfo");
i_subslotname = PQfnumber(res, "subslotname");
i_subsynccommit = PQfnumber(res, "subsynccommit");
@@ -5104,6 +5174,8 @@ getSubscriptions(Archive *fout)
(strcmp(PQgetvalue(res, i, i_subrunasowner), "t") == 0);
subinfo[i].subfailover =
(strcmp(PQgetvalue(res, i, i_subfailover), "t") == 0);
+ subinfo[i].subretaindeadtuples =
+ (strcmp(PQgetvalue(res, i, i_subretaindeadtuples), "t") == 0);
subinfo[i].subconninfo =
pg_strdup(PQgetvalue(res, i, i_subconninfo));
if (PQgetisnull(res, i, i_subslotname))
@@ -5362,6 +5434,9 @@ dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo)
if (subinfo->subfailover)
appendPQExpBufferStr(query, ", failover = true");
+ if (subinfo->subretaindeadtuples)
+ appendPQExpBufferStr(query, ", retain_dead_tuples = true");
+
if (strcmp(subinfo->subsynccommit, "off") != 0)
appendPQExpBuffer(query, ", synchronous_commit = %s", fmtId(subinfo->subsynccommit));
@@ -6122,6 +6197,7 @@ getTypes(Archive *fout)
*/
tyinfo[i].nDomChecks = 0;
tyinfo[i].domChecks = NULL;
+ tyinfo[i].notnull = NULL;
if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
tyinfo[i].typtype == TYPTYPE_DOMAIN)
getDomainConstraints(fout, &(tyinfo[i]));
@@ -6181,6 +6257,8 @@ getOperators(Archive *fout)
int i_oprnamespace;
int i_oprowner;
int i_oprkind;
+ int i_oprleft;
+ int i_oprright;
int i_oprcode;
/*
@@ -6192,6 +6270,8 @@ getOperators(Archive *fout)
"oprnamespace, "
"oprowner, "
"oprkind, "
+ "oprleft, "
+ "oprright, "
"oprcode::oid AS oprcode "
"FROM pg_operator");
@@ -6207,6 +6287,8 @@ getOperators(Archive *fout)
i_oprnamespace = PQfnumber(res, "oprnamespace");
i_oprowner = PQfnumber(res, "oprowner");
i_oprkind = PQfnumber(res, "oprkind");
+ i_oprleft = PQfnumber(res, "oprleft");
+ i_oprright = PQfnumber(res, "oprright");
i_oprcode = PQfnumber(res, "oprcode");
for (i = 0; i < ntups; i++)
@@ -6220,6 +6302,8 @@ getOperators(Archive *fout)
findNamespace(atooid(PQgetvalue(res, i, i_oprnamespace)));
oprinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_oprowner));
oprinfo[i].oprkind = (PQgetvalue(res, i, i_oprkind))[0];
+ oprinfo[i].oprleft = atooid(PQgetvalue(res, i, i_oprleft));
+ oprinfo[i].oprright = atooid(PQgetvalue(res, i, i_oprright));
oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
/* Decide whether we want to dump it */
@@ -6248,6 +6332,7 @@ getCollations(Archive *fout)
int i_collname;
int i_collnamespace;
int i_collowner;
+ int i_collencoding;
query = createPQExpBuffer();
@@ -6258,7 +6343,8 @@ getCollations(Archive *fout)
appendPQExpBufferStr(query, "SELECT tableoid, oid, collname, "
"collnamespace, "
- "collowner "
+ "collowner, "
+ "collencoding "
"FROM pg_collation");
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
@@ -6272,6 +6358,7 @@ getCollations(Archive *fout)
i_collname = PQfnumber(res, "collname");
i_collnamespace = PQfnumber(res, "collnamespace");
i_collowner = PQfnumber(res, "collowner");
+ i_collencoding = PQfnumber(res, "collencoding");
for (i = 0; i < ntups; i++)
{
@@ -6283,6 +6370,7 @@ getCollations(Archive *fout)
collinfo[i].dobj.namespace =
findNamespace(atooid(PQgetvalue(res, i, i_collnamespace)));
collinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_collowner));
+ collinfo[i].collencoding = atoi(PQgetvalue(res, i, i_collencoding));
/* Decide whether we want to dump it */
selectDumpableObject(&(collinfo[i].dobj), fout);
@@ -6373,16 +6461,28 @@ getAccessMethods(Archive *fout)
int i_amhandler;
int i_amtype;
- /* Before 9.6, there are no user-defined access methods */
- if (fout->remoteVersion < 90600)
- return;
-
query = createPQExpBuffer();
- /* Select all access methods from pg_am table */
- appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, amtype, "
- "amhandler::pg_catalog.regproc AS amhandler "
- "FROM pg_am");
+ /*
+ * Select all access methods from pg_am table. v9.6 introduced CREATE
+ * ACCESS METHOD, so earlier versions usually have only built-in access
+ * methods. v9.6 also changed the access method API, replacing dozens of
+ * pg_am columns with amhandler. Even if a user created an access method
+ * by "INSERT INTO pg_am", we have no way to translate pre-v9.6 pg_am
+ * columns to a v9.6+ CREATE ACCESS METHOD. Hence, before v9.6, read
+ * pg_am just to facilitate findAccessMethodByOid() providing the
+ * OID-to-name mapping.
+ */
+ appendPQExpBufferStr(query, "SELECT tableoid, oid, amname, ");
+ if (fout->remoteVersion >= 90600)
+ appendPQExpBufferStr(query,
+ "amtype, "
+ "amhandler::pg_catalog.regproc AS amhandler ");
+ else
+ appendPQExpBufferStr(query,
+ "'i'::pg_catalog.\"char\" AS amtype, "
+ "'-'::pg_catalog.regproc AS amhandler ");
+ appendPQExpBufferStr(query, "FROM pg_am");
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
@@ -6431,6 +6531,7 @@ getOpclasses(Archive *fout)
OpclassInfo *opcinfo;
int i_tableoid;
int i_oid;
+ int i_opcmethod;
int i_opcname;
int i_opcnamespace;
int i_opcowner;
@@ -6440,7 +6541,7 @@ getOpclasses(Archive *fout)
* system-defined opclasses at dump-out time.
*/
- appendPQExpBufferStr(query, "SELECT tableoid, oid, opcname, "
+ appendPQExpBufferStr(query, "SELECT tableoid, oid, opcmethod, opcname, "
"opcnamespace, "
"opcowner "
"FROM pg_opclass");
@@ -6453,6 +6554,7 @@ getOpclasses(Archive *fout)
i_tableoid = PQfnumber(res, "tableoid");
i_oid = PQfnumber(res, "oid");
+ i_opcmethod = PQfnumber(res, "opcmethod");
i_opcname = PQfnumber(res, "opcname");
i_opcnamespace = PQfnumber(res, "opcnamespace");
i_opcowner = PQfnumber(res, "opcowner");
@@ -6466,6 +6568,7 @@ getOpclasses(Archive *fout)
opcinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opcname));
opcinfo[i].dobj.namespace =
findNamespace(atooid(PQgetvalue(res, i, i_opcnamespace)));
+ opcinfo[i].opcmethod = atooid(PQgetvalue(res, i, i_opcmethod));
opcinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opcowner));
/* Decide whether we want to dump it */
@@ -6491,6 +6594,7 @@ getOpfamilies(Archive *fout)
OpfamilyInfo *opfinfo;
int i_tableoid;
int i_oid;
+ int i_opfmethod;
int i_opfname;
int i_opfnamespace;
int i_opfowner;
@@ -6502,7 +6606,7 @@ getOpfamilies(Archive *fout)
* system-defined opfamilies at dump-out time.
*/
- appendPQExpBufferStr(query, "SELECT tableoid, oid, opfname, "
+ appendPQExpBufferStr(query, "SELECT tableoid, oid, opfmethod, opfname, "
"opfnamespace, "
"opfowner "
"FROM pg_opfamily");
@@ -6516,6 +6620,7 @@ getOpfamilies(Archive *fout)
i_tableoid = PQfnumber(res, "tableoid");
i_oid = PQfnumber(res, "oid");
i_opfname = PQfnumber(res, "opfname");
+ i_opfmethod = PQfnumber(res, "opfmethod");
i_opfnamespace = PQfnumber(res, "opfnamespace");
i_opfowner = PQfnumber(res, "opfowner");
@@ -6528,6 +6633,7 @@ getOpfamilies(Archive *fout)
opfinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_opfname));
opfinfo[i].dobj.namespace =
findNamespace(atooid(PQgetvalue(res, i, i_opfnamespace)));
+ opfinfo[i].opfmethod = atooid(PQgetvalue(res, i, i_opfmethod));
opfinfo[i].rolname = getRoleName(PQgetvalue(res, i, i_opfowner));
/* Decide whether we want to dump it */
@@ -8247,27 +8353,33 @@ addConstrChildIdxDeps(DumpableObject *dobj, const IndxInfo *refidx)
static void
getDomainConstraints(Archive *fout, TypeInfo *tyinfo)
{
- int i;
ConstraintInfo *constrinfo;
PQExpBuffer query = createPQExpBuffer();
PGresult *res;
int i_tableoid,
i_oid,
i_conname,
- i_consrc;
+ i_consrc,
+ i_convalidated,
+ i_contype;
int ntups;
if (!fout->is_prepared[PREPQUERY_GETDOMAINCONSTRAINTS])
{
- /* Set up query for constraint-specific details */
- appendPQExpBufferStr(query,
- "PREPARE getDomainConstraints(pg_catalog.oid) AS\n"
- "SELECT tableoid, oid, conname, "
- "pg_catalog.pg_get_constraintdef(oid) AS consrc, "
- "convalidated "
- "FROM pg_catalog.pg_constraint "
- "WHERE contypid = $1 AND contype = 'c' "
- "ORDER BY conname");
+ /*
+ * Set up query for constraint-specific details. For servers 17 and
+ * up, domains have constraints of type 'n' as well as 'c', otherwise
+ * just the latter.
+ */
+ appendPQExpBuffer(query,
+ "PREPARE getDomainConstraints(pg_catalog.oid) AS\n"
+ "SELECT tableoid, oid, conname, "
+ "pg_catalog.pg_get_constraintdef(oid) AS consrc, "
+ "convalidated, contype "
+ "FROM pg_catalog.pg_constraint "
+ "WHERE contypid = $1 AND contype IN (%s) "
+ "ORDER BY conname",
+ fout->remoteVersion < 170000 ? "'c'" : "'c', 'n'");
ExecuteSqlStatement(fout, query->data);
@@ -8286,33 +8398,50 @@ getDomainConstraints(Archive *fout, TypeInfo *tyinfo)
i_oid = PQfnumber(res, "oid");
i_conname = PQfnumber(res, "conname");
i_consrc = PQfnumber(res, "consrc");
+ i_convalidated = PQfnumber(res, "convalidated");
+ i_contype = PQfnumber(res, "contype");
constrinfo = (ConstraintInfo *) pg_malloc(ntups * sizeof(ConstraintInfo));
-
- tyinfo->nDomChecks = ntups;
tyinfo->domChecks = constrinfo;
- for (i = 0; i < ntups; i++)
+ /* 'i' tracks result rows; 'j' counts CHECK constraints */
+ for (int i = 0, j = 0; i < ntups; i++)
{
- bool validated = PQgetvalue(res, i, 4)[0] == 't';
-
- constrinfo[i].dobj.objType = DO_CONSTRAINT;
- constrinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
- constrinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
- AssignDumpId(&constrinfo[i].dobj);
- constrinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
- constrinfo[i].dobj.namespace = tyinfo->dobj.namespace;
- constrinfo[i].contable = NULL;
- constrinfo[i].condomain = tyinfo;
- constrinfo[i].contype = 'c';
- constrinfo[i].condef = pg_strdup(PQgetvalue(res, i, i_consrc));
- constrinfo[i].confrelid = InvalidOid;
- constrinfo[i].conindex = 0;
- constrinfo[i].condeferrable = false;
- constrinfo[i].condeferred = false;
- constrinfo[i].conislocal = true;
-
- constrinfo[i].separate = !validated;
+ bool validated = PQgetvalue(res, i, i_convalidated)[0] == 't';
+ char contype = (PQgetvalue(res, i, i_contype))[0];
+ ConstraintInfo *constraint;
+
+ if (contype == CONSTRAINT_CHECK)
+ {
+ constraint = &constrinfo[j++];
+ tyinfo->nDomChecks++;
+ }
+ else
+ {
+ Assert(contype == CONSTRAINT_NOTNULL);
+ Assert(tyinfo->notnull == NULL);
+ /* use last item in array for the not-null constraint */
+ tyinfo->notnull = &(constrinfo[ntups - 1]);
+ constraint = tyinfo->notnull;
+ }
+
+ constraint->dobj.objType = DO_CONSTRAINT;
+ constraint->dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
+ constraint->dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
+ AssignDumpId(&(constraint->dobj));
+ constraint->dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
+ constraint->dobj.namespace = tyinfo->dobj.namespace;
+ constraint->contable = NULL;
+ constraint->condomain = tyinfo;
+ constraint->contype = contype;
+ constraint->condef = pg_strdup(PQgetvalue(res, i, i_consrc));
+ constraint->confrelid = InvalidOid;
+ constraint->conindex = 0;
+ constraint->condeferrable = false;
+ constraint->condeferred = false;
+ constraint->conislocal = true;
+
+ constraint->separate = !validated;
/*
* Make the domain depend on the constraint, ensuring it won't be
@@ -8321,8 +8450,7 @@ getDomainConstraints(Archive *fout, TypeInfo *tyinfo)
* anyway, so this doesn't matter.
*/
if (validated)
- addObjectDependency(&tyinfo->dobj,
- constrinfo[i].dobj.dumpId);
+ addObjectDependency(&tyinfo->dobj, constraint->dobj.dumpId);
}
PQclear(res);
@@ -9039,8 +9167,20 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
if (tbinfo->relkind == RELKIND_SEQUENCE)
continue;
- /* Don't bother with uninteresting tables, either */
- if (!tbinfo->interesting)
+ /*
+ * Don't bother with uninteresting tables, either. For binary
+ * upgrades, this is bypassed for pg_largeobject_metadata and
+ * pg_shdepend so that the column names are collected for the
+ * corresponding COPY commands. Restoring the data for those catalogs
+ * is faster than restoring the equivalent set of large object
+ * commands. We can only do this for upgrades from v12 and newer; in
+ * older versions, pg_largeobject_metadata was created WITH OIDS, so
+ * the OID column is hidden and won't be dumped.
+ */
+ if (!tbinfo->interesting &&
+ !(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
+ (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
+ tbinfo->dobj.catId.oid == SharedDependRelationId)))
continue;
/* OK, we need info for this table */
@@ -9244,7 +9384,10 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
pg_fatal("unrecognized table OID %u", attrelid);
/* cross-check that we only got requested tables */
if (tbinfo->relkind == RELKIND_SEQUENCE ||
- !tbinfo->interesting)
+ (!tbinfo->interesting &&
+ !(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
+ (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
+ tbinfo->dobj.catId.oid == SharedDependRelationId))))
pg_fatal("unexpected column data for table \"%s\"",
tbinfo->dobj.name);
@@ -12517,8 +12660,36 @@ dumpDomain(Archive *fout, const TypeInfo *tyinfo)
appendPQExpBuffer(q, " COLLATE %s", fmtQualifiedDumpable(coll));
}
+ /*
+ * Print a not-null constraint if there's one. In servers older than 17
+ * these don't have names, so just print it unadorned; in newer ones they
+ * do, but most of the time it's going to be the standard generated one,
+ * so omit the name in that case also.
+ */
if (typnotnull[0] == 't')
- appendPQExpBufferStr(q, " NOT NULL");
+ {
+ if (fout->remoteVersion < 170000 || tyinfo->notnull == NULL)
+ appendPQExpBufferStr(q, " NOT NULL");
+ else
+ {
+ ConstraintInfo *notnull = tyinfo->notnull;
+
+ if (!notnull->separate)
+ {
+ char *default_name;
+
+ /* XXX should match ChooseConstraintName better */
+ default_name = psprintf("%s_not_null", tyinfo->dobj.name);
+
+ if (strcmp(default_name, notnull->dobj.name) == 0)
+ appendPQExpBufferStr(q, " NOT NULL");
+ else
+ appendPQExpBuffer(q, " CONSTRAINT %s %s",
+ fmtId(notnull->dobj.name), notnull->condef);
+ free(default_name);
+ }
+ }
+ }
if (typdefault != NULL)
{
@@ -12538,7 +12709,7 @@ dumpDomain(Archive *fout, const TypeInfo *tyinfo)
{
ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
- if (!domcheck->separate)
+ if (!domcheck->separate && domcheck->contype == 'c')
appendPQExpBuffer(q, "\n\tCONSTRAINT %s %s",
fmtId(domcheck->dobj.name), domcheck->condef);
}
@@ -12583,8 +12754,13 @@ dumpDomain(Archive *fout, const TypeInfo *tyinfo)
for (i = 0; i < tyinfo->nDomChecks; i++)
{
ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
- PQExpBuffer conprefix = createPQExpBuffer();
+ PQExpBuffer conprefix;
+
+ /* but only if the constraint itself was dumped here */
+ if (domcheck->separate)
+ continue;
+ conprefix = createPQExpBuffer();
appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
fmtId(domcheck->dobj.name));
@@ -12597,6 +12773,25 @@ dumpDomain(Archive *fout, const TypeInfo *tyinfo)
destroyPQExpBuffer(conprefix);
}
+ /*
+ * And a comment on the not-null constraint, if there's one -- but only if
+ * the constraint itself was dumped here
+ */
+ if (tyinfo->notnull != NULL && !tyinfo->notnull->separate)
+ {
+ PQExpBuffer conprefix = createPQExpBuffer();
+
+ appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
+ fmtId(tyinfo->notnull->dobj.name));
+
+ if (tyinfo->notnull->dobj.dump & DUMP_COMPONENT_COMMENT)
+ dumpComment(fout, conprefix->data, qtypname,
+ tyinfo->dobj.namespace->dobj.name,
+ tyinfo->rolname,
+ tyinfo->notnull->dobj.catId, 0, tyinfo->dobj.dumpId);
+ destroyPQExpBuffer(conprefix);
+ }
+
destroyPQExpBuffer(q);
destroyPQExpBuffer(delq);
destroyPQExpBuffer(query);
@@ -18458,14 +18653,23 @@ dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
.dropStmt = delq->data));
}
}
- else if (coninfo->contype == 'c' && tbinfo == NULL)
+ else if (tbinfo == NULL)
{
- /* CHECK constraint on a domain */
+ /* CHECK or NOT NULL constraint on a domain */
TypeInfo *tyinfo = coninfo->condomain;
+ Assert(coninfo->contype == 'c' || coninfo->contype == 'n');
+
/* Ignore if not to be dumped separately */
if (coninfo->separate)
{
+ const char *keyword;
+
+ if (coninfo->contype == 'c')
+ keyword = "CHECK CONSTRAINT";
+ else
+ keyword = "CONSTRAINT";
+
appendPQExpBuffer(q, "ALTER DOMAIN %s\n",
fmtQualifiedDumpable(tyinfo));
appendPQExpBuffer(q, " ADD CONSTRAINT %s %s;\n",
@@ -18484,10 +18688,26 @@ dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
ARCHIVE_OPTS(.tag = tag,
.namespace = tyinfo->dobj.namespace->dobj.name,
.owner = tyinfo->rolname,
- .description = "CHECK CONSTRAINT",
+ .description = keyword,
.section = SECTION_POST_DATA,
.createStmt = q->data,
.dropStmt = delq->data));
+
+ if (coninfo->dobj.dump & DUMP_COMPONENT_COMMENT)
+ {
+ PQExpBuffer conprefix = createPQExpBuffer();
+ char *qtypname = pg_strdup(fmtId(tyinfo->dobj.name));
+
+ appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
+ fmtId(coninfo->dobj.name));
+
+ dumpComment(fout, conprefix->data, qtypname,
+ tyinfo->dobj.namespace->dobj.name,
+ tyinfo->rolname,
+ coninfo->dobj.catId, 0, tyinfo->dobj.dumpId);
+ destroyPQExpBuffer(conprefix);
+ free(qtypname);
+ }
}
}
else