Diffstat (limited to 'src/bin')
-rw-r--r--   src/bin/pg_dump/pg_backup_archiver.c        15
-rw-r--r--   src/bin/pg_dump/pg_dump.c                   268
-rw-r--r--   src/bin/pg_dump/pg_dump.h                     5
-rw-r--r--   src/bin/pg_dump/pg_dump_sort.c               15
-rw-r--r--   src/bin/pg_dump/t/002_pg_dump.pl             34
-rw-r--r--   src/bin/pg_upgrade/check.c                   96
-rw-r--r--   src/bin/pg_upgrade/info.c                    25
-rw-r--r--   src/bin/pg_upgrade/pg_upgrade.c              60
-rw-r--r--   src/bin/pg_upgrade/pg_upgrade.h               4
-rw-r--r--   src/bin/pg_upgrade/t/004_subscription.pl     85
-rw-r--r--   src/bin/psql/describe.c                       6
-rw-r--r--   src/bin/psql/tab-complete.in.c               10
12 files changed, 514 insertions(+), 109 deletions(-)
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 197c1295d93..30e0da31aa3 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -31,6 +31,8 @@
 #endif
 
 #include "catalog/pg_class_d.h"
+#include "catalog/pg_largeobject_metadata_d.h"
+#include "catalog/pg_shdepend_d.h"
 #include "common/string.h"
 #include "compress_io.h"
 #include "dumputils.h"
@@ -2974,6 +2976,19 @@ _tocEntryRequired(TocEntry *te, teSection curSection, ArchiveHandle *AH)
 	int			res = REQ_SCHEMA | REQ_DATA;
 	RestoreOptions *ropt = AH->public.ropt;
 
+	/*
+	 * For binary upgrade mode, dump pg_largeobject_metadata and the
+	 * associated pg_shdepend rows. This is faster to restore than the
+	 * equivalent set of large object commands. We can only do this for
+	 * upgrades from v12 and newer; in older versions, pg_largeobject_metadata
+	 * was created WITH OIDS, so the OID column is hidden and won't be dumped.
+	 */
+	if (ropt->binary_upgrade && AH->public.remoteVersion >= 120000 &&
+		strcmp(te->desc, "TABLE DATA") == 0 &&
+		(te->catalogId.oid == LargeObjectMetadataRelationId ||
+		 te->catalogId.oid == SharedDependRelationId))
+		return REQ_DATA;
+
 	/* These items are treated specially */
 	if (strcmp(te->desc, "ENCODING") == 0 ||
 		strcmp(te->desc, "STDSTRINGS") == 0 ||
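The shortcut above changes what a binary-upgrade archive restores for large object metadata. As a rough sketch (the OID and role names below are hypothetical, and the exact COPY layout is up to pg_dump), the old path replayed one command group per large object:

SELECT pg_catalog.lo_create('16401');
ALTER LARGE OBJECT 16401 OWNER TO alice;
GRANT SELECT ON LARGE OBJECT 16401 TO bob;

while the new path bulk-loads the catalog itself, e.g.:

COPY pg_catalog.pg_largeobject_metadata (oid, lomowner, lomacl) FROM stdin;

One COPY in place of a long stream of per-object commands is where the restore speedup comes from when many large objects exist.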
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index c6226175528..6298edb26b5 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -47,10 +47,13 @@
 #include "catalog/pg_authid_d.h"
 #include "catalog/pg_cast_d.h"
 #include "catalog/pg_class_d.h"
+#include "catalog/pg_constraint_d.h"
 #include "catalog/pg_default_acl_d.h"
 #include "catalog/pg_largeobject_d.h"
+#include "catalog/pg_largeobject_metadata_d.h"
 #include "catalog/pg_proc_d.h"
 #include "catalog/pg_publication_d.h"
+#include "catalog/pg_shdepend_d.h"
 #include "catalog/pg_subscription_d.h"
 #include "catalog/pg_type_d.h"
 #include "common/connect.h"
@@ -209,6 +212,12 @@ static int	nbinaryUpgradeClassOids = 0;
 static SequenceItem *sequences = NULL;
 static int	nsequences = 0;
 
+/*
+ * For binary upgrade, the dump ID of pg_largeobject_metadata is saved for use
+ * as a dependency for pg_shdepend and any large object comments/seclabels.
+ */
+static DumpId lo_metadata_dumpId;
+
 /* Maximum number of relations to fetch in a fetchAttributeStats() call. */
 #define MAX_ATTR_STATS_RELS 64
@@ -1086,6 +1095,36 @@ main(int argc, char **argv)
 			getTableData(&dopt, tblinfo, numTables, RELKIND_SEQUENCE);
 
 	/*
+	 * For binary upgrade mode, dump pg_largeobject_metadata and the
+	 * associated pg_shdepend rows. This is faster to restore than the
+	 * equivalent set of large object commands. We can only do this for
+	 * upgrades from v12 and newer; in older versions, pg_largeobject_metadata
+	 * was created WITH OIDS, so the OID column is hidden and won't be dumped.
+	 */
+	if (dopt.binary_upgrade && fout->remoteVersion >= 120000)
+	{
+		TableInfo  *lo_metadata = findTableByOid(LargeObjectMetadataRelationId);
+		TableInfo  *shdepend = findTableByOid(SharedDependRelationId);
+
+		makeTableDataInfo(&dopt, lo_metadata);
+		makeTableDataInfo(&dopt, shdepend);
+
+		/*
+		 * Save pg_largeobject_metadata's dump ID for use as a dependency for
+		 * pg_shdepend and any large object comments/seclabels.
+		 */
+		lo_metadata_dumpId = lo_metadata->dataObj->dobj.dumpId;
+		addObjectDependency(&shdepend->dataObj->dobj, lo_metadata_dumpId);
+
+		/*
+		 * Only dump large object shdepend rows for this database.
+		 */
+		shdepend->dataObj->filtercond = "WHERE classid = 'pg_largeobject'::regclass "
+			"AND dbid = (SELECT oid FROM pg_database "
+			"            WHERE datname = current_database())";
+	}
+
+	/*
 	 * In binary-upgrade mode, we do not have to worry about the actual LO
 	 * data or the associated metadata that resides in the pg_largeobject and
 	 * pg_largeobject_metadata tables, respectively.
@@ -3924,10 +3963,37 @@ getLOs(Archive *fout)
 		 * as it will be copied by pg_upgrade, which simply copies the
 		 * pg_largeobject table. We *do* however dump out anything but the
 		 * data, as pg_upgrade copies just pg_largeobject, but not
-		 * pg_largeobject_metadata, after the dump is restored.
+		 * pg_largeobject_metadata, after the dump is restored. In versions
+		 * before v12, this is done via proper large object commands. In
+		 * newer versions, we dump the content of pg_largeobject_metadata and
+		 * any associated pg_shdepend rows, which is faster to restore. (On
+		 * <v12, pg_largeobject_metadata was created WITH OIDS, so the OID
+		 * column is hidden and won't be dumped.)
 		 */
 		if (dopt->binary_upgrade)
-			loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA;
+		{
+			if (fout->remoteVersion >= 120000)
+			{
+				/*
+				 * We should've saved pg_largeobject_metadata's dump ID before
+				 * this point.
+				 */
+				Assert(lo_metadata_dumpId);
+
+				loinfo->dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL | DUMP_COMPONENT_DEFINITION);
+
+				/*
+				 * Mark the large object as dependent on
+				 * pg_largeobject_metadata so that any large object
+				 * comments/seclabels are dumped after it.
+				 */
+				loinfo->dobj.dependencies = (DumpId *) pg_malloc(sizeof(DumpId));
+				loinfo->dobj.dependencies[0] = lo_metadata_dumpId;
+				loinfo->dobj.nDeps = loinfo->dobj.allocDeps = 1;
+			}
+			else
+				loinfo->dobj.dump &= ~DUMP_COMPONENT_DATA;
+		}
 
 		/*
 		 * Create a "BLOBS" data item for the group, too. This is just a
@@ -4962,6 +5028,7 @@ getSubscriptions(Archive *fout)
 	int			i_suboriginremotelsn;
 	int			i_subenabled;
 	int			i_subfailover;
+	int			i_subretaindeadtuples;
 	int			i,
 				ntups;
 
@@ -5034,10 +5101,17 @@ getSubscriptions(Archive *fout)
 
 	if (fout->remoteVersion >= 170000)
 		appendPQExpBufferStr(query,
-							 " s.subfailover\n");
+							 " s.subfailover,\n");
 	else
 		appendPQExpBufferStr(query,
-							 " false AS subfailover\n");
+							 " false AS subfailover,\n");
+
+	if (fout->remoteVersion >= 190000)
+		appendPQExpBufferStr(query,
+							 " s.subretaindeadtuples\n");
+	else
+		appendPQExpBufferStr(query,
+							 " false AS subretaindeadtuples\n");
 
 	appendPQExpBufferStr(query,
 						 "FROM pg_subscription s\n");
@@ -5071,6 +5145,7 @@ getSubscriptions(Archive *fout)
 	i_subpasswordrequired = PQfnumber(res, "subpasswordrequired");
 	i_subrunasowner = PQfnumber(res, "subrunasowner");
 	i_subfailover = PQfnumber(res, "subfailover");
+	i_subretaindeadtuples = PQfnumber(res, "subretaindeadtuples");
 	i_subconninfo = PQfnumber(res, "subconninfo");
 	i_subslotname = PQfnumber(res, "subslotname");
 	i_subsynccommit = PQfnumber(res, "subsynccommit");
@@ -5104,6 +5179,8 @@ getSubscriptions(Archive *fout)
 			(strcmp(PQgetvalue(res, i, i_subrunasowner), "t") == 0);
 		subinfo[i].subfailover =
 			(strcmp(PQgetvalue(res, i, i_subfailover), "t") == 0);
+		subinfo[i].subretaindeadtuples =
+			(strcmp(PQgetvalue(res, i, i_subretaindeadtuples), "t") == 0);
 		subinfo[i].subconninfo =
 			pg_strdup(PQgetvalue(res, i, i_subconninfo));
 		if (PQgetisnull(res, i, i_subslotname))
@@ -5362,6 +5439,9 @@ dumpSubscription(Archive *fout, const SubscriptionInfo *subinfo)
 	if (subinfo->subfailover)
 		appendPQExpBufferStr(query, ", failover = true");
 
+	if (subinfo->subretaindeadtuples)
+		appendPQExpBufferStr(query, ", retain_dead_tuples = true");
+
 	if (strcmp(subinfo->subsynccommit, "off") != 0)
 		appendPQExpBuffer(query, ", synchronous_commit = %s",
 						  fmtId(subinfo->subsynccommit));
@@ -6122,6 +6202,7 @@ getTypes(Archive *fout)
 		 */
 		tyinfo[i].nDomChecks = 0;
 		tyinfo[i].domChecks = NULL;
+		tyinfo[i].notnull = NULL;
 		if ((tyinfo[i].dobj.dump & DUMP_COMPONENT_DEFINITION) &&
 			tyinfo[i].typtype == TYPTYPE_DOMAIN)
 			getDomainConstraints(fout, &(tyinfo[i]));
@@ -8247,27 +8328,33 @@ addConstrChildIdxDeps(DumpableObject *dobj, const IndxInfo *refidx)
 static void
 getDomainConstraints(Archive *fout, TypeInfo *tyinfo)
 {
-	int			i;
 	ConstraintInfo *constrinfo;
 	PQExpBuffer query = createPQExpBuffer();
 	PGresult   *res;
 	int			i_tableoid,
 				i_oid,
 				i_conname,
-				i_consrc;
+				i_consrc,
+				i_convalidated,
+				i_contype;
 	int			ntups;
 
 	if (!fout->is_prepared[PREPQUERY_GETDOMAINCONSTRAINTS])
 	{
-		/* Set up query for constraint-specific details */
-		appendPQExpBufferStr(query,
-							 "PREPARE getDomainConstraints(pg_catalog.oid) AS\n"
-							 "SELECT tableoid, oid, conname, "
-							 "pg_catalog.pg_get_constraintdef(oid) AS consrc, "
-							 "convalidated "
-							 "FROM pg_catalog.pg_constraint "
-							 "WHERE contypid = $1 AND contype = 'c' "
-							 "ORDER BY conname");
+		/*
+		 * Set up query for constraint-specific details. For servers 17 and
+		 * up, domains have constraints of type 'n' as well as 'c', otherwise
+		 * just the latter.
+		 */
+		appendPQExpBuffer(query,
+						  "PREPARE getDomainConstraints(pg_catalog.oid) AS\n"
+						  "SELECT tableoid, oid, conname, "
+						  "pg_catalog.pg_get_constraintdef(oid) AS consrc, "
+						  "convalidated, contype "
+						  "FROM pg_catalog.pg_constraint "
+						  "WHERE contypid = $1 AND contype IN (%s) "
+						  "ORDER BY conname",
+						  fout->remoteVersion < 170000 ? "'c'" : "'c', 'n'");
 
 		ExecuteSqlStatement(fout, query->data);
@@ -8286,33 +8373,50 @@ getDomainConstraints(Archive *fout, TypeInfo *tyinfo)
 	i_oid = PQfnumber(res, "oid");
 	i_conname = PQfnumber(res, "conname");
 	i_consrc = PQfnumber(res, "consrc");
+	i_convalidated = PQfnumber(res, "convalidated");
+	i_contype = PQfnumber(res, "contype");
 
 	constrinfo = (ConstraintInfo *) pg_malloc(ntups * sizeof(ConstraintInfo));
-
-	tyinfo->nDomChecks = ntups;
 	tyinfo->domChecks = constrinfo;
 
-	for (i = 0; i < ntups; i++)
+	/* 'i' tracks result rows; 'j' counts CHECK constraints */
+	for (int i = 0, j = 0; i < ntups; i++)
 	{
-		bool		validated = PQgetvalue(res, i, 4)[0] == 't';
-
-		constrinfo[i].dobj.objType = DO_CONSTRAINT;
-		constrinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
-		constrinfo[i].dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
-		AssignDumpId(&constrinfo[i].dobj);
-		constrinfo[i].dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
-		constrinfo[i].dobj.namespace = tyinfo->dobj.namespace;
-		constrinfo[i].contable = NULL;
-		constrinfo[i].condomain = tyinfo;
-		constrinfo[i].contype = 'c';
-		constrinfo[i].condef = pg_strdup(PQgetvalue(res, i, i_consrc));
-		constrinfo[i].confrelid = InvalidOid;
-		constrinfo[i].conindex = 0;
-		constrinfo[i].condeferrable = false;
-		constrinfo[i].condeferred = false;
-		constrinfo[i].conislocal = true;
-
-		constrinfo[i].separate = !validated;
+		bool		validated = PQgetvalue(res, i, i_convalidated)[0] == 't';
+		char		contype = (PQgetvalue(res, i, i_contype))[0];
+		ConstraintInfo *constraint;
+
+		if (contype == CONSTRAINT_CHECK)
+		{
+			constraint = &constrinfo[j++];
+			tyinfo->nDomChecks++;
+		}
+		else
+		{
+			Assert(contype == CONSTRAINT_NOTNULL);
+			Assert(tyinfo->notnull == NULL);
+			/* use last item in array for the not-null constraint */
+			tyinfo->notnull = &(constrinfo[ntups - 1]);
+			constraint = tyinfo->notnull;
+		}
+
+		constraint->dobj.objType = DO_CONSTRAINT;
+		constraint->dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
+		constraint->dobj.catId.oid = atooid(PQgetvalue(res, i, i_oid));
+		AssignDumpId(&(constraint->dobj));
+		constraint->dobj.name = pg_strdup(PQgetvalue(res, i, i_conname));
+		constraint->dobj.namespace = tyinfo->dobj.namespace;
+		constraint->contable = NULL;
+		constraint->condomain = tyinfo;
+		constraint->contype = contype;
+		constraint->condef = pg_strdup(PQgetvalue(res, i, i_consrc));
+		constraint->confrelid = InvalidOid;
+		constraint->conindex = 0;
+		constraint->condeferrable = false;
+		constraint->condeferred = false;
+		constraint->conislocal = true;
+
+		constraint->separate = !validated;
 
 		/*
 		 * Make the domain depend on the constraint, ensuring it won't be
@@ -8321,8 +8425,7 @@ getDomainConstraints(Archive *fout, TypeInfo *tyinfo)
 		 * anyway, so this doesn't matter.
 		 */
 		if (validated)
-			addObjectDependency(&tyinfo->dobj,
-								constrinfo[i].dobj.dumpId);
+			addObjectDependency(&tyinfo->dobj, constraint->dobj.dumpId);
 	}
 
 	PQclear(res);
@@ -9039,8 +9142,20 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
 		if (tbinfo->relkind == RELKIND_SEQUENCE)
 			continue;
 
-		/* Don't bother with uninteresting tables, either */
-		if (!tbinfo->interesting)
+		/*
+		 * Don't bother with uninteresting tables, either. For binary
+		 * upgrades, this is bypassed for pg_largeobject_metadata and
+		 * pg_shdepend so that the column names are collected for the
+		 * corresponding COPY commands. Restoring the data for those catalogs
+		 * is faster than restoring the equivalent set of large object
+		 * commands. We can only do this for upgrades from v12 and newer; in
+		 * older versions, pg_largeobject_metadata was created WITH OIDS, so
+		 * the OID column is hidden and won't be dumped.
+		 */
+		if (!tbinfo->interesting &&
+			!(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
+			  (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
+			   tbinfo->dobj.catId.oid == SharedDependRelationId)))
 			continue;
 
 		/* OK, we need info for this table */
@@ -9244,7 +9359,10 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
 			pg_fatal("unrecognized table OID %u", attrelid);
 		/* cross-check that we only got requested tables */
 		if (tbinfo->relkind == RELKIND_SEQUENCE ||
-			!tbinfo->interesting)
+			(!tbinfo->interesting &&
+			 !(fout->dopt->binary_upgrade && fout->remoteVersion >= 120000 &&
+			   (tbinfo->dobj.catId.oid == LargeObjectMetadataRelationId ||
+				tbinfo->dobj.catId.oid == SharedDependRelationId))))
 			pg_fatal("unexpected column data for table \"%s\"",
 					 tbinfo->dobj.name);
@@ -12517,8 +12635,36 @@ dumpDomain(Archive *fout, const TypeInfo *tyinfo)
 		appendPQExpBuffer(q, " COLLATE %s", fmtQualifiedDumpable(coll));
 	}
 
+	/*
+	 * Print a not-null constraint if there's one. In servers older than 17
+	 * these don't have names, so just print it unadorned; in newer ones they
+	 * do, but most of the time it's going to be the standard generated one,
+	 * so omit the name in that case also.
+	 */
 	if (typnotnull[0] == 't')
-		appendPQExpBufferStr(q, " NOT NULL");
+	{
+		if (fout->remoteVersion < 170000 || tyinfo->notnull == NULL)
+			appendPQExpBufferStr(q, " NOT NULL");
+		else
+		{
+			ConstraintInfo *notnull = tyinfo->notnull;
+
+			if (!notnull->separate)
+			{
+				char	   *default_name;
+
+				/* XXX should match ChooseConstraintName better */
+				default_name = psprintf("%s_not_null", tyinfo->dobj.name);
+
+				if (strcmp(default_name, notnull->dobj.name) == 0)
+					appendPQExpBufferStr(q, " NOT NULL");
+				else
+					appendPQExpBuffer(q, " CONSTRAINT %s %s",
+									  fmtId(notnull->dobj.name), notnull->condef);
+				free(default_name);
+			}
+		}
+	}
 
 	if (typdefault != NULL)
 	{
@@ -12538,7 +12684,7 @@ dumpDomain(Archive *fout, const TypeInfo *tyinfo)
 	{
 		ConstraintInfo *domcheck = &(tyinfo->domChecks[i]);
 
-		if (!domcheck->separate)
+		if (!domcheck->separate && domcheck->contype == 'c')
 			appendPQExpBuffer(q, "\n\tCONSTRAINT %s %s",
 							  fmtId(domcheck->dobj.name), domcheck->condef);
 	}
@@ -12602,6 +12748,25 @@ dumpDomain(Archive *fout, const TypeInfo *tyinfo)
 		destroyPQExpBuffer(conprefix);
 	}
 
+	/*
+	 * And a comment on the not-null constraint, if there's one -- but only if
+	 * the constraint itself was dumped here
+	 */
+	if (tyinfo->notnull != NULL && !tyinfo->notnull->separate)
+	{
+		PQExpBuffer conprefix = createPQExpBuffer();
+
+		appendPQExpBuffer(conprefix, "CONSTRAINT %s ON DOMAIN",
+						  fmtId(tyinfo->notnull->dobj.name));
+
+		if (tyinfo->notnull->dobj.dump & DUMP_COMPONENT_COMMENT)
+			dumpComment(fout, conprefix->data, qtypname,
+						tyinfo->dobj.namespace->dobj.name,
+						tyinfo->rolname,
+						tyinfo->notnull->dobj.catId, 0, tyinfo->dobj.dumpId);
+		destroyPQExpBuffer(conprefix);
+	}
+
 	destroyPQExpBuffer(q);
 	destroyPQExpBuffer(delq);
 	destroyPQExpBuffer(query);
@@ -18463,14 +18628,23 @@ dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
 									 .dropStmt = delq->data));
 		}
 	}
-	else if (coninfo->contype == 'c' && tbinfo == NULL)
+	else if (tbinfo == NULL)
 	{
-		/* CHECK constraint on a domain */
+		/* CHECK or NOT NULL constraint on a domain */
 		TypeInfo   *tyinfo = coninfo->condomain;
 
+		Assert(coninfo->contype == 'c' || coninfo->contype == 'n');
+
 		/* Ignore if not to be dumped separately */
 		if (coninfo->separate)
 		{
+			const char *keyword;
+
+			if (coninfo->contype == 'c')
+				keyword = "CHECK CONSTRAINT";
+			else
+				keyword = "CONSTRAINT";
+
 			appendPQExpBuffer(q, "ALTER DOMAIN %s\n",
 							  fmtQualifiedDumpable(tyinfo));
 			appendPQExpBuffer(q, "    ADD CONSTRAINT %s %s;\n",
@@ -18489,7 +18663,7 @@ dumpConstraint(Archive *fout, const ConstraintInfo *coninfo)
 							 ARCHIVE_OPTS(.tag = tag,
 										  .namespace = tyinfo->dobj.namespace->dobj.name,
 										  .owner = tyinfo->rolname,
-										  .description = "CHECK CONSTRAINT",
+										  .description = keyword,
 										  .section = SECTION_POST_DATA,
 										  .createStmt = q->data,
 										  .dropStmt = delq->data));
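For reference, the filter condition attached to the pg_shdepend data object above is equivalent to the following standalone query against the source database; it is runnable as-is in psql and shows exactly which shared-dependency rows get dumped (only those describing large objects of the current database):

SELECT classid, objid, refclassid, refobjid, deptype
FROM pg_catalog.pg_shdepend
WHERE classid = 'pg_largeobject'::regclass
  AND dbid = (SELECT oid FROM pg_database
              WHERE datname = current_database());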
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index 39eef1d6617..93a4475d51b 100644
--- a/src/bin/pg_dump/pg_dump.h
+++ b/src/bin/pg_dump/pg_dump.h
@@ -222,7 +222,9 @@ typedef struct _typeInfo
 	bool		isDefined;		/* true if typisdefined */
 	/* If needed, we'll create a "shell type" entry for it; link that here: */
 	struct _shellTypeInfo *shellType;	/* shell-type entry, or NULL */
-	/* If it's a domain, we store links to its constraints here: */
+	/* If it's a domain, its not-null constraint is here: */
+	struct _constraintInfo *notnull;
+	/* If it's a domain, we store links to its CHECK constraints here: */
 	int			nDomChecks;
 	struct _constraintInfo *domChecks;
 } TypeInfo;
@@ -709,6 +711,7 @@ typedef struct _SubscriptionInfo
 	bool		subpasswordrequired;
 	bool		subrunasowner;
 	bool		subfailover;
+	bool		subretaindeadtuples;
 	char	   *subconninfo;
 	char	   *subslotname;
 	char	   *subsynccommit;
diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c
index 538e7dcb493..f99a0797ea7 100644
--- a/src/bin/pg_dump/pg_dump_sort.c
+++ b/src/bin/pg_dump/pg_dump_sort.c
@@ -907,7 +907,7 @@ repairTableAttrDefMultiLoop(DumpableObject *tableobj,
 }
 
 /*
- * CHECK constraints on domains work just like those on tables ...
+ * CHECK and NOT NULL constraints on domains work just like those on tables ...
 */
static void
repairDomainConstraintLoop(DumpableObject *domainobj,
@@ -1173,11 +1173,12 @@ repairDependencyLoop(DumpableObject **loop,
 		}
 	}
 
-	/* Domain and CHECK constraint */
+	/* Domain and CHECK or NOT NULL constraint */
 	if (nLoop == 2 &&
 		loop[0]->objType == DO_TYPE &&
 		loop[1]->objType == DO_CONSTRAINT &&
-		((ConstraintInfo *) loop[1])->contype == 'c' &&
+		(((ConstraintInfo *) loop[1])->contype == 'c' ||
+		 ((ConstraintInfo *) loop[1])->contype == 'n') &&
 		((ConstraintInfo *) loop[1])->condomain == (TypeInfo *) loop[0])
 	{
 		repairDomainConstraintLoop(loop[0], loop[1]);
@@ -1186,14 +1187,15 @@ repairDependencyLoop(DumpableObject **loop,
 	if (nLoop == 2 &&
 		loop[1]->objType == DO_TYPE &&
 		loop[0]->objType == DO_CONSTRAINT &&
-		((ConstraintInfo *) loop[0])->contype == 'c' &&
+		(((ConstraintInfo *) loop[0])->contype == 'c' ||
+		 ((ConstraintInfo *) loop[0])->contype == 'n') &&
 		((ConstraintInfo *) loop[0])->condomain == (TypeInfo *) loop[1])
 	{
 		repairDomainConstraintLoop(loop[1], loop[0]);
 		return;
 	}
 
-	/* Indirect loop involving domain and CHECK constraint */
+	/* Indirect loop involving domain and CHECK or NOT NULL constraint */
 	if (nLoop > 2)
 	{
 		for (i = 0; i < nLoop; i++)
@@ -1203,7 +1205,8 @@ repairDependencyLoop(DumpableObject **loop,
 			for (j = 0; j < nLoop; j++)
 			{
 				if (loop[j]->objType == DO_CONSTRAINT &&
-					((ConstraintInfo *) loop[j])->contype == 'c' &&
+					(((ConstraintInfo *) loop[j])->contype == 'c' ||
+					 ((ConstraintInfo *) loop[j])->contype == 'n') &&
 					((ConstraintInfo *) loop[j])->condomain == (TypeInfo *) loop[i])
 				{
 					repairDomainConstraintMultiLoop(loop[i], loop[j]);
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index 2485d8f360e..6c7ec80e271 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -1087,6 +1087,7 @@ my %tests = (
 			test_schema_plus_large_objects => 1,
 		},
 		unlike => {
+			binary_upgrade => 1,
 			no_large_objects => 1,
 			no_owner => 1,
 			schema_only => 1,
@@ -1605,6 +1606,7 @@ my %tests = (
 			test_schema_plus_large_objects => 1,
 		},
 		unlike => {
+			binary_upgrade => 1,
 			schema_only => 1,
 			schema_only_with_statistics => 1,
 			no_large_objects => 1,
@@ -2377,17 +2379,19 @@ my %tests = (
 		create_sql => 'CREATE DOMAIN dump_test.us_postal_code AS TEXT COLLATE "C"
 					   DEFAULT \'10014\'
+					   CONSTRAINT nn NOT NULL
 					   CHECK(VALUE ~ \'^\d{5}$\' OR
 							 VALUE ~ \'^\d{5}-\d{4}$\');
+					   COMMENT ON CONSTRAINT nn
+						 ON DOMAIN dump_test.us_postal_code IS \'not null\';
 					   COMMENT ON CONSTRAINT us_postal_code_check
 						 ON DOMAIN dump_test.us_postal_code IS \'check it\';',
 		regexp => qr/^
-			\QCREATE DOMAIN dump_test.us_postal_code AS text COLLATE pg_catalog."C" DEFAULT '10014'::text\E\n\s+
+			\QCREATE DOMAIN dump_test.us_postal_code AS text COLLATE pg_catalog."C" CONSTRAINT nn NOT NULL DEFAULT '10014'::text\E\n\s+
 			\QCONSTRAINT us_postal_code_check CHECK \E
 			\Q(((VALUE ~ '^\d{5}\E \$\Q'::text) OR (VALUE ~ '^\d{5}-\d{4}\E\$
 			\Q'::text)));\E(.|\n)*
-			\QCOMMENT ON CONSTRAINT us_postal_code_check ON DOMAIN dump_test.us_postal_code IS 'check it';\E
 			/xm,
 		like =>
 		  { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
@@ -2397,6 +2401,30 @@ my %tests = (
 		},
 	},
 
+	'COMMENT ON CONSTRAINT ON DOMAIN (1)' => {
+		regexp => qr/^
+			\QCOMMENT ON CONSTRAINT nn ON DOMAIN dump_test.us_postal_code IS 'not null';\E
+			/xm,
+		like =>
+		  { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+		unlike => {
+			exclude_dump_test_schema => 1,
+			only_dump_measurement => 1,
+		},
+	},
+
+	'COMMENT ON CONSTRAINT ON DOMAIN (2)' => {
+		regexp => qr/^
+			\QCOMMENT ON CONSTRAINT us_postal_code_check ON DOMAIN dump_test.us_postal_code IS 'check it';\E
+			/xm,
+		like =>
+		  { %full_runs, %dump_test_schema_runs, section_pre_data => 1, },
+		unlike => {
+			exclude_dump_test_schema => 1,
+			only_dump_measurement => 1,
+		},
+	},
+
 	'CREATE FUNCTION dump_test.pltestlang_call_handler' => {
 		create_order => 17,
 		create_sql => 'CREATE FUNCTION dump_test.pltestlang_call_handler()
@@ -4612,9 +4640,9 @@ my %tests = (
 			no_schema => 1,
 			section_data => 1,
 			test_schema_plus_large_objects => 1,
-			binary_upgrade => 1,
 		},
 		unlike => {
+			binary_upgrade => 1,
 			no_large_objects => 1,
 			no_privs => 1,
 			schema_only => 1,
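Unescaped, the SQL the updated test creates is the following (taken directly from create_sql above, with the Perl quoting removed):

CREATE DOMAIN dump_test.us_postal_code AS TEXT COLLATE "C"
    DEFAULT '10014'
    CONSTRAINT nn NOT NULL
    CHECK (VALUE ~ '^\d{5}$' OR VALUE ~ '^\d{5}-\d{4}$');
COMMENT ON CONSTRAINT nn
    ON DOMAIN dump_test.us_postal_code IS 'not null';
COMMENT ON CONSTRAINT us_postal_code_check
    ON DOMAIN dump_test.us_postal_code IS 'check it';

Per the adjusted regexp, pg_dump now emits the named not-null constraint inline in the CREATE DOMAIN output, and the two comments are verified by the separate 'COMMENT ON CONSTRAINT ON DOMAIN' test entries added above.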
diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c
index 30579ef2051..5e6403f0773 100644
--- a/src/bin/pg_upgrade/check.c
+++ b/src/bin/pg_upgrade/check.c
@@ -28,7 +28,7 @@ static void check_for_pg_role_prefix(ClusterInfo *cluster);
 static void check_for_new_tablespace_dir(void);
 static void check_for_user_defined_encoding_conversions(ClusterInfo *cluster);
 static void check_for_unicode_update(ClusterInfo *cluster);
-static void check_new_cluster_logical_replication_slots(void);
+static void check_new_cluster_replication_slots(void);
 static void check_new_cluster_subscription_configuration(void);
 static void check_old_cluster_for_valid_slots(void);
 static void check_old_cluster_subscription_state(void);
@@ -631,7 +631,7 @@ check_and_dump_old_cluster(void)
 		 * Before that the logical slots are not upgraded, so we will not be
 		 * able to upgrade the logical replication clusters completely.
 		 */
-		get_subscription_count(&old_cluster);
+		get_subscription_info(&old_cluster);
 		check_old_cluster_subscription_state();
 	}
 
@@ -764,7 +764,7 @@ check_new_cluster(void)
 
 	check_for_new_tablespace_dir();
 
-	check_new_cluster_logical_replication_slots();
+	check_new_cluster_replication_slots();
 
 	check_new_cluster_subscription_configuration();
 }
@@ -2040,48 +2040,80 @@ check_for_unicode_update(ClusterInfo *cluster)
 }
 
 /*
- * check_new_cluster_logical_replication_slots()
+ * check_new_cluster_replication_slots()
 *
- * Verify that there are no logical replication slots on the new cluster and
- * that the parameter settings necessary for creating slots are sufficient.
+ * Validate the new cluster's readiness for migrating replication slots:
+ * - Ensure there are no existing logical replication slots on the new
+ *   cluster when migrating logical slots.
+ * - Ensure the conflict detection slot does not exist on the new cluster
+ *   when migrating subscriptions with retain_dead_tuples enabled.
+ * - Ensure that the parameter settings on the new cluster necessary for
+ *   creating slots are sufficient.
 */
static void
-check_new_cluster_logical_replication_slots(void)
+check_new_cluster_replication_slots(void)
{
 	PGresult   *res;
 	PGconn	   *conn;
 	int			nslots_on_old;
 	int			nslots_on_new;
+	int			rdt_slot_on_new;
 	int			max_replication_slots;
 	char	   *wal_level;
+	int			i_nslots_on_new;
+	int			i_rdt_slot_on_new;
 
-	/* Logical slots can be migrated since PG17. */
+	/*
+	 * Logical slots can be migrated since PG17 and a physical slot
+	 * CONFLICT_DETECTION_SLOT can be migrated since PG19.
+	 */
 	if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1600)
 		return;
 
 	nslots_on_old = count_old_cluster_logical_slots();
 
-	/* Quick return if there are no logical slots to be migrated. */
-	if (nslots_on_old == 0)
+	/*
+	 * Quick return if there are no slots to be migrated and no subscriptions
+	 * have the retain_dead_tuples option enabled.
+	 */
+	if (nslots_on_old == 0 && !old_cluster.sub_retain_dead_tuples)
 		return;
 
 	conn = connectToServer(&new_cluster, "template1");
 
-	prep_status("Checking for new cluster logical replication slots");
+	prep_status("Checking for new cluster replication slots");
 
-	res = executeQueryOrDie(conn, "SELECT count(*) "
-							"FROM pg_catalog.pg_replication_slots "
-							"WHERE slot_type = 'logical' AND "
-							"temporary IS FALSE;");
+	res = executeQueryOrDie(conn, "SELECT %s AS nslots_on_new, %s AS rdt_slot_on_new "
+							"FROM pg_catalog.pg_replication_slots",
+							nslots_on_old > 0
+							? "COUNT(*) FILTER (WHERE slot_type = 'logical' AND temporary IS FALSE)"
+							: "0",
+							old_cluster.sub_retain_dead_tuples
+							? "COUNT(*) FILTER (WHERE slot_name = 'pg_conflict_detection')"
+							: "0");
 
 	if (PQntuples(res) != 1)
-		pg_fatal("could not count the number of logical replication slots");
+		pg_fatal("could not count the number of replication slots");
 
-	nslots_on_new = atoi(PQgetvalue(res, 0, 0));
+	i_nslots_on_new = PQfnumber(res, "nslots_on_new");
+	i_rdt_slot_on_new = PQfnumber(res, "rdt_slot_on_new");
+
+	nslots_on_new = atoi(PQgetvalue(res, 0, i_nslots_on_new));
 
 	if (nslots_on_new)
+	{
+		Assert(nslots_on_old);
 		pg_fatal("expected 0 logical replication slots but found %d",
 				 nslots_on_new);
+	}
+
+	rdt_slot_on_new = atoi(PQgetvalue(res, 0, i_rdt_slot_on_new));
+
+	if (rdt_slot_on_new)
+	{
+		Assert(old_cluster.sub_retain_dead_tuples);
+		pg_fatal("The replication slot \"pg_conflict_detection\" already exists on the new cluster");
+	}
 
 	PQclear(res);
@@ -2094,12 +2126,24 @@ check_new_cluster_replication_slots(void)
 
 	wal_level = PQgetvalue(res, 0, 0);
 
-	if (strcmp(wal_level, "logical") != 0)
+	if (nslots_on_old > 0 && strcmp(wal_level, "logical") != 0)
 		pg_fatal("\"wal_level\" must be \"logical\" but is set to \"%s\"",
 				 wal_level);
 
+	if (old_cluster.sub_retain_dead_tuples &&
+		strcmp(wal_level, "minimal") == 0)
+		pg_fatal("\"wal_level\" must be \"replica\" or \"logical\" but is set to \"%s\"",
+				 wal_level);
+
 	max_replication_slots = atoi(PQgetvalue(res, 1, 0));
 
+	if (old_cluster.sub_retain_dead_tuples &&
+		nslots_on_old + 1 > max_replication_slots)
+		pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to the number of "
+				 "logical replication slots on the old cluster plus one additional slot required "
+				 "for retaining conflict detection information (%d)",
+				 max_replication_slots, nslots_on_old + 1);
+
 	if (nslots_on_old > max_replication_slots)
 		pg_fatal("\"max_replication_slots\" (%d) must be greater than or equal to the number of "
 				 "logical replication slots (%d) on the old cluster",
@@ -2211,6 +2255,22 @@ check_old_cluster_for_valid_slots(void)
 					"The slot \"%s\" has not consumed the WAL yet\n",
 					slot->slotname);
 		}
+
+		/*
+		 * The name "pg_conflict_detection" (defined as
+		 * CONFLICT_DETECTION_SLOT) has been reserved for the logical
+		 * replication conflict detection slot since PG19.
+		 */
+		if (strcmp(slot->slotname, "pg_conflict_detection") == 0)
+		{
+			if (script == NULL &&
+				(script = fopen_priv(output_path, "w")) == NULL)
+				pg_fatal("could not open file \"%s\": %m", output_path);
+
+			fprintf(script,
+					"The slot name \"%s\" is reserved\n",
+					slot->slotname);
+		}
 	}
 }
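When both conditions apply, the probe the rewritten check sends to the new cluster reduces to the query below (runnable directly; when either condition is absent, the corresponding %s is substituted with a literal 0, as the code above shows):

SELECT COUNT(*) FILTER (WHERE slot_type = 'logical'
                        AND temporary IS FALSE) AS nslots_on_new,
       COUNT(*) FILTER (WHERE slot_name = 'pg_conflict_detection') AS rdt_slot_on_new
FROM pg_catalog.pg_replication_slots;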
diff --git a/src/bin/pg_upgrade/info.c b/src/bin/pg_upgrade/info.c
index 4b7a56f5b3b..a437067cdca 100644
--- a/src/bin/pg_upgrade/info.c
+++ b/src/bin/pg_upgrade/info.c
@@ -752,20 +752,33 @@ count_old_cluster_logical_slots(void)
 }
 
 /*
- * get_subscription_count()
+ * get_subscription_info()
 *
- * Gets the number of subscriptions in the cluster.
+ * Gets subscription information from the cluster.
 */
void
-get_subscription_count(ClusterInfo *cluster)
+get_subscription_info(ClusterInfo *cluster)
{
 	PGconn	   *conn;
 	PGresult   *res;
+	int			i_nsub;
+	int			i_retain_dead_tuples;
 
 	conn = connectToServer(cluster, "template1");
-	res = executeQueryOrDie(conn, "SELECT count(*) "
-							"FROM pg_catalog.pg_subscription");
-	cluster->nsubs = atoi(PQgetvalue(res, 0, 0));
+	if (GET_MAJOR_VERSION(cluster->major_version) >= 1900)
+		res = executeQueryOrDie(conn, "SELECT count(*) AS nsub,"
+								"COUNT(CASE WHEN subretaindeadtuples THEN 1 END) > 0 AS retain_dead_tuples "
+								"FROM pg_catalog.pg_subscription");
+	else
+		res = executeQueryOrDie(conn, "SELECT count(*) AS nsub,"
+								"'f' AS retain_dead_tuples "
+								"FROM pg_catalog.pg_subscription");
+
+	i_nsub = PQfnumber(res, "nsub");
+	i_retain_dead_tuples = PQfnumber(res, "retain_dead_tuples");
+
+	cluster->nsubs = atoi(PQgetvalue(res, 0, i_nsub));
+	cluster->sub_retain_dead_tuples = (strcmp(PQgetvalue(res, 0, i_retain_dead_tuples), "t") == 0);
 
 	PQclear(res);
 	PQfinish(conn);
diff --git a/src/bin/pg_upgrade/pg_upgrade.c b/src/bin/pg_upgrade/pg_upgrade.c
index 536e49d2616..d5cd5bf0b3a 100644
--- a/src/bin/pg_upgrade/pg_upgrade.c
+++ b/src/bin/pg_upgrade/pg_upgrade.c
@@ -67,6 +67,7 @@ static void set_frozenxids(bool minmxid_only);
 static void make_outputdirs(char *pgdata);
 static void setup(char *argv0);
 static void create_logical_replication_slots(void);
+static void create_conflict_detection_slot(void);
 
 ClusterInfo old_cluster,
 			new_cluster;
@@ -88,6 +89,7 @@ int
 main(int argc, char **argv)
 {
 	char	   *deletion_script_file_name = NULL;
+	bool		migrate_logical_slots;
 
 	/*
 	 * pg_upgrade doesn't currently use common/logging.c, but initialize it
@@ -198,18 +200,39 @@ main(int argc, char **argv)
 			   new_cluster.pgdata);
 	check_ok();
 
+	migrate_logical_slots = count_old_cluster_logical_slots();
+
 	/*
-	 * Migrate the logical slots to the new cluster. Note that we need to do
-	 * this after resetting WAL because otherwise the required WAL would be
-	 * removed and slots would become unusable. There is a possibility that
-	 * background processes might generate some WAL before we could create the
-	 * slots in the new cluster but we can ignore that WAL as that won't be
-	 * required downstream.
+	 * Migrate replication slots to the new cluster.
+	 *
+	 * Note that we must migrate logical slots after resetting WAL because
+	 * otherwise the required WAL would be removed and slots would become
+	 * unusable. There is a possibility that background processes might
+	 * generate some WAL before we could create the slots in the new cluster
+	 * but we can ignore that WAL as that won't be required downstream.
+	 *
+	 * The conflict detection slot is not affected by concerns related to WAL,
+	 * as it only retains dead tuples; it is created here for consistency.
+	 * Note that the new conflict detection slot uses the latest transaction
+	 * ID as xmin, so it cannot protect dead tuples that existed before the
+	 * upgrade. Additionally, commit timestamps and origin data are not
+	 * preserved during the upgrade. So, even after creating the slot, the
+	 * upgraded subscriber may be unable to detect conflicts or log relevant
+	 * commit timestamps and origins when applying changes from the publisher
+	 * that occurred before the upgrade, especially if those changes were not
+	 * replicated. It can only protect tuples that might be deleted after the
+	 * new cluster starts.
 	 */
-	if (count_old_cluster_logical_slots())
+	if (migrate_logical_slots || old_cluster.sub_retain_dead_tuples)
 	{
 		start_postmaster(&new_cluster, true);
-		create_logical_replication_slots();
+
+		if (migrate_logical_slots)
+			create_logical_replication_slots();
+
+		if (old_cluster.sub_retain_dead_tuples)
+			create_conflict_detection_slot();
+
 		stop_postmaster(false);
 	}
 
@@ -1025,3 +1048,24 @@ create_logical_replication_slots(void)
 
 	return;
 }
+
+/*
+ * create_conflict_detection_slot()
+ *
+ * Create a replication slot to retain information necessary for conflict
+ * detection such as dead tuples, commit timestamps, and origins, for migrated
+ * subscriptions with retain_dead_tuples enabled.
+ */
+static void
+create_conflict_detection_slot(void)
+{
+	PGconn	   *conn_new_template1;
+
+	prep_status("Creating the replication conflict detection slot");
+
+	conn_new_template1 = connectToServer(&new_cluster, "template1");
+	PQclear(executeQueryOrDie(conn_new_template1,
+							  "SELECT pg_catalog.binary_upgrade_create_conflict_detection_slot()"));
+	PQfinish(conn_new_template1);
+
+	check_ok();
+}
diff --git a/src/bin/pg_upgrade/pg_upgrade.h b/src/bin/pg_upgrade/pg_upgrade.h
index 69c965bb7d0..e9401430e69 100644
--- a/src/bin/pg_upgrade/pg_upgrade.h
+++ b/src/bin/pg_upgrade/pg_upgrade.h
@@ -302,6 +302,8 @@ typedef struct
 	uint32		bin_version;	/* version returned from pg_ctl */
 	const char *tablespace_suffix;	/* directory specification */
 	int			nsubs;			/* number of subscriptions */
+	bool		sub_retain_dead_tuples; /* whether a subscription enables
+										 * retain_dead_tuples */
 } ClusterInfo;
 
@@ -441,7 +443,7 @@ FileNameMap *gen_db_file_maps(DbInfo *old_db,
 							  const char *new_pgdata);
 void		get_db_rel_and_slot_infos(ClusterInfo *cluster);
 int			count_old_cluster_logical_slots(void);
-void		get_subscription_count(ClusterInfo *cluster);
+void		get_subscription_info(ClusterInfo *cluster);
 
 /* option.c */
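A minimal sketch of the scenario the test below exercises (the connection string here is a placeholder for the test's $connstr): a subscription with retain_dead_tuples enabled on the old cluster makes pg_upgrade create the conflict detection slot, which can then be checked on the upgraded cluster:

-- on the old subscriber, before running pg_upgrade
CREATE SUBSCRIPTION regress_sub4
    CONNECTION 'host=publisher dbname=postgres'
    PUBLICATION regress_pub4
    WITH (failover = true, retain_dead_tuples = true);

-- on the new cluster, after the upgrade
SELECT xmin IS NOT NULL AS has_xmin
FROM pg_replication_slots
WHERE slot_name = 'pg_conflict_detection';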
diff --git a/src/bin/pg_upgrade/t/004_subscription.pl b/src/bin/pg_upgrade/t/004_subscription.pl
index e46f02c6cc6..77387be0f9d 100644
--- a/src/bin/pg_upgrade/t/004_subscription.pl
+++ b/src/bin/pg_upgrade/t/004_subscription.pl
@@ -22,13 +22,13 @@ $publisher->start;
 
 # Initialize the old subscriber node
 my $old_sub = PostgreSQL::Test::Cluster->new('old_sub');
-$old_sub->init;
+$old_sub->init(allows_streaming => 'physical');
 $old_sub->start;
 my $oldbindir = $old_sub->config_data('--bindir');
 
 # Initialize the new subscriber
 my $new_sub = PostgreSQL::Test::Cluster->new('new_sub');
-$new_sub->init;
+$new_sub->init(allows_streaming => 'physical');
 my $newbindir = $new_sub->config_data('--bindir');
 
 # In a VPATH build, we'll be started in the source directory, but we want
@@ -90,6 +90,54 @@ $old_sub->start;
 $old_sub->safe_psql('postgres', "DROP SUBSCRIPTION regress_sub1;");
 
 # ------------------------------------------------------
+# Check that pg_upgrade fails when max_replication_slots configured in the new
+# cluster is less than the number of logical slots in the old cluster + 1 when
+# a subscription's retain_dead_tuples option is enabled.
+# ------------------------------------------------------
+# It is sufficient to use a disabled subscription to test upgrade failure.
+
+$publisher->safe_psql('postgres', "CREATE PUBLICATION regress_pub1");
+$old_sub->safe_psql('postgres',
+	"CREATE SUBSCRIPTION regress_sub1 CONNECTION '$connstr' PUBLICATION regress_pub1 WITH (enabled = false, retain_dead_tuples = true)"
+);
+
+$old_sub->stop;
+
+$new_sub->append_conf('postgresql.conf', 'max_replication_slots = 0');
+
+# pg_upgrade will fail because the new cluster has insufficient
+# max_replication_slots.
+command_checks_all(
+	[
+		'pg_upgrade', '--no-sync',
+		'--old-datadir' => $old_sub->data_dir,
+		'--new-datadir' => $new_sub->data_dir,
+		'--old-bindir' => $oldbindir,
+		'--new-bindir' => $newbindir,
+		'--socketdir' => $new_sub->host,
+		'--old-port' => $old_sub->port,
+		'--new-port' => $new_sub->port,
+		$mode, '--check',
+	],
+	1,
+	[
+		qr/"max_replication_slots" \(0\) must be greater than or equal to the number of logical replication slots on the old cluster plus one additional slot required for retaining conflict detection information \(1\)/
+	],
+	[qr//],
+	'run of pg_upgrade where the new cluster has insufficient max_replication_slots'
+);
+
+# Reset max_replication_slots
+$new_sub->append_conf('postgresql.conf', 'max_replication_slots = 10');
+
+# Cleanup
+$publisher->safe_psql('postgres', "DROP PUBLICATION regress_pub1");
+$old_sub->start;
+$old_sub->safe_psql('postgres', "DROP SUBSCRIPTION regress_sub1;");
+
+# ------------------------------------------------------
 # Check that pg_upgrade refuses to run if:
 # a) there's a subscription with tables in a state other than 'r' (ready) or
 #    'i' (init) and/or
@@ -200,8 +248,9 @@ $old_sub->safe_psql(
 rmtree($new_sub->data_dir . "/pg_upgrade_output.d");
 
 # Verify that the upgrade should be successful with tables in 'ready'/'init'
-# state along with retaining the replication origin's remote lsn, subscription's
-# running status, and failover option.
+# state along with retaining the replication origin's remote lsn,
+# subscription's running status, failover option, and retain_dead_tuples
+# option.
 $publisher->safe_psql(
 	'postgres', qq[
 		CREATE TABLE tab_upgraded1(id int);
@@ -211,7 +260,7 @@ $publisher->safe_psql(
 $old_sub->safe_psql(
 	'postgres', qq[
 		CREATE TABLE tab_upgraded1(id int);
-		CREATE SUBSCRIPTION regress_sub4 CONNECTION '$connstr' PUBLICATION regress_pub4 WITH (failover = true);
+		CREATE SUBSCRIPTION regress_sub4 CONNECTION '$connstr' PUBLICATION regress_pub4 WITH (failover = true, retain_dead_tuples = true);
 ]);
 
 # Wait till the table tab_upgraded1 reaches 'ready' state
@@ -270,7 +319,8 @@ $new_sub->append_conf('postgresql.conf',
 # Check that pg_upgrade is successful when all tables are in ready or in
 # init state (tab_upgraded1 table is in ready state and tab_upgraded2 table is
 # in init state) along with retaining the replication origin's remote lsn,
-# subscription's running status, and failover option.
+# subscription's running status, failover option, and retain_dead_tuples
+# option.
 # ------------------------------------------------------
 command_ok(
 	[
@@ -293,7 +343,8 @@ ok( !-d $new_sub->data_dir . "/pg_upgrade_output.d",
 # ------------------------------------------------------
 # Check that the data inserted to the publisher when the new subscriber is down
 # will be replicated once it is started. Also check that the old subscription
-# states and relations origins are all preserved.
+# states and relations origins are all preserved, and that the conflict
+# detection slot is created.
 # ------------------------------------------------------
 $publisher->safe_psql(
 	'postgres', qq[
@@ -303,15 +354,16 @@ $publisher->safe_psql(
 
 $new_sub->start;
 
-# The subscription's running status and failover option should be preserved
-# in the upgraded instance. So regress_sub4 should still have subenabled and
-# subfailover set to true, while regress_sub5 should have both set to false.
+# The subscription's running status, failover option, and retain_dead_tuples
+# option should be preserved in the upgraded instance. So regress_sub4 should
+# still have subenabled, subfailover, and subretaindeadtuples set to true,
+# while regress_sub5 should have all of them set to false.
 $result = $new_sub->safe_psql('postgres',
-	"SELECT subname, subenabled, subfailover FROM pg_subscription ORDER BY subname"
+	"SELECT subname, subenabled, subfailover, subretaindeadtuples FROM pg_subscription ORDER BY subname"
 );
-is( $result, qq(regress_sub4|t|t
-regress_sub5|f|f),
-	"check that the subscription's running status and failover are preserved"
+is( $result, qq(regress_sub4|t|t|t
+regress_sub5|f|f|f),
+	"check that the subscription's running status, failover, and retain_dead_tuples are preserved"
 );
 
 # Subscription relations should be preserved
@@ -330,6 +382,11 @@ $result = $new_sub->safe_psql('postgres',
 );
 is($result, qq($remote_lsn), "remote_lsn should have been preserved");
 
+# The conflict detection slot should be created
+$result = $new_sub->safe_psql('postgres',
+	"SELECT xmin IS NOT NULL from pg_replication_slots WHERE slot_name = 'pg_conflict_detection'");
+is($result, qq(t), "conflict detection slot exists");
+
 # Resume the initial sync and wait until all tables of subscription
 # 'regress_sub5' are synchronized
 $new_sub->append_conf('postgresql.conf',
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index dd25d2fe7b8..7a06af48842 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -6746,7 +6746,7 @@ describeSubscriptions(const char *pattern, bool verbose)
 	printQueryOpt myopt = pset.popt;
 	static const bool translate_columns[] = {false, false, false, false,
 		false, false, false, false, false, false, false, false, false, false,
-		false};
+		false, false};
 
 	if (pset.sversion < 100000)
 	{
@@ -6814,6 +6814,10 @@ describeSubscriptions(const char *pattern, bool verbose)
 			appendPQExpBuffer(&buf,
 							  ", subfailover AS \"%s\"\n",
 							  gettext_noop("Failover"));
+		if (pset.sversion >= 190000)
+			appendPQExpBuffer(&buf,
+							  ", subretaindeadtuples AS \"%s\"\n",
+							  gettext_noop("Retain dead tuples"));
 
 		appendPQExpBuffer(&buf,
 						  ", subsynccommit AS \"%s\"\n"
"copy_data", "create_slot", "disable_on_error", "enabled", "failover", "origin", - "password_required", "run_as_owner", "slot_name", - "streaming", "synchronous_commit", "two_phase"); + "password_required", "retain_dead_tuples", + "run_as_owner", "slot_name", "streaming", + "synchronous_commit", "two_phase"); /* CREATE TRIGGER --- is allowed inside CREATE SCHEMA, so use TailMatches */ |