Diffstat (limited to 'src/backend')
-rw-r--r--  src/backend/access/brin/brin_minmax_multi.c  2
-rw-r--r--  src/backend/access/common/heaptuple.c  8
-rw-r--r--  src/backend/access/common/printsimple.c  2
-rw-r--r--  src/backend/access/common/reloptions.c  25
-rw-r--r--  src/backend/access/common/toast_compression.c  14
-rw-r--r--  src/backend/access/common/toast_internals.c  2
-rw-r--r--  src/backend/access/common/tupdesc.c  21
-rw-r--r--  src/backend/access/gin/gininsert.c  2
-rw-r--r--  src/backend/access/gist/gistutil.c  14
-rw-r--r--  src/backend/access/gist/gistvalidate.c  6
-rw-r--r--  src/backend/access/heap/heapam.c  6
-rw-r--r--  src/backend/access/heap/heapam_handler.c  2
-rw-r--r--  src/backend/access/heap/heapam_xlog.c  7
-rw-r--r--  src/backend/access/heap/vacuumlazy.c  121
-rw-r--r--  src/backend/access/heap/visibilitymap.c  2
-rw-r--r--  src/backend/access/index/amapi.c  13
-rw-r--r--  src/backend/access/nbtree/nbtcompare.c  4
-rw-r--r--  src/backend/access/nbtree/nbtpreprocesskeys.c  412
-rw-r--r--  src/backend/access/nbtree/nbtree.c  32
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c  542
-rw-r--r--  src/backend/access/nbtree/nbtutils.c  390
-rw-r--r--  src/backend/access/rmgrdesc/replorigindesc.c  2
-rw-r--r--  src/backend/access/rmgrdesc/xactdesc.c  6
-rw-r--r--  src/backend/access/rmgrdesc/xlogdesc.c  6
-rw-r--r--  src/backend/access/spgist/spgutils.c  4
-rw-r--r--  src/backend/access/table/toast_helper.c  2
-rw-r--r--  src/backend/access/transam/clog.c  67
-rw-r--r--  src/backend/access/transam/commit_ts.c  73
-rw-r--r--  src/backend/access/transam/multixact.c  136
-rw-r--r--  src/backend/access/transam/slru.c  25
-rw-r--r--  src/backend/access/transam/subtrans.c  34
-rw-r--r--  src/backend/access/transam/timeline.c  4
-rw-r--r--  src/backend/access/transam/twophase.c  285
-rw-r--r--  src/backend/access/transam/xact.c  31
-rw-r--r--  src/backend/access/transam/xlog.c  193
-rw-r--r--  src/backend/access/transam/xlogbackup.c  8
-rw-r--r--  src/backend/access/transam/xloginsert.c  12
-rw-r--r--  src/backend/access/transam/xlogprefetcher.c  16
-rw-r--r--  src/backend/access/transam/xlogreader.c  82
-rw-r--r--  src/backend/access/transam/xlogrecovery.c  181
-rw-r--r--  src/backend/access/transam/xlogutils.c  2
-rw-r--r--  src/backend/backup/backup_manifest.c  2
-rw-r--r--  src/backend/backup/basebackup_copy.c  2
-rw-r--r--  src/backend/backup/basebackup_incremental.c  14
-rw-r--r--  src/backend/bootstrap/bootstrap.c  2
-rw-r--r--  src/backend/catalog/dependency.c  11
-rw-r--r--  src/backend/catalog/heap.c  96
-rw-r--r--  src/backend/catalog/index.c  10
-rw-r--r--  src/backend/catalog/pg_subscription.c  22
-rw-r--r--  src/backend/catalog/system_views.sql  14
-rw-r--r--  src/backend/commands/analyze.c  30
-rw-r--r--  src/backend/commands/cluster.c  2
-rw-r--r--  src/backend/commands/copy.c  42
-rw-r--r--  src/backend/commands/copyfromparse.c  19
-rw-r--r--  src/backend/commands/copyto.c  2
-rw-r--r--  src/backend/commands/dbcommands.c  55
-rw-r--r--  src/backend/commands/explain.c  21
-rw-r--r--  src/backend/commands/foreigncmds.c  16
-rw-r--r--  src/backend/commands/indexcmds.c  42
-rw-r--r--  src/backend/commands/matview.c  3
-rw-r--r--  src/backend/commands/publicationcmds.c  24
-rw-r--r--  src/backend/commands/schemacmds.c  1
-rw-r--r--  src/backend/commands/subscriptioncmds.c  408
-rw-r--r--  src/backend/commands/tablecmds.c  140
-rw-r--r--  src/backend/commands/tablespace.c  2
-rw-r--r--  src/backend/commands/trigger.c  74
-rw-r--r--  src/backend/commands/typecmds.c  14
-rw-r--r--  src/backend/commands/vacuum.c  121
-rw-r--r--  src/backend/executor/execExprInterp.c  1
-rw-r--r--  src/backend/executor/execGrouping.c  4
-rw-r--r--  src/backend/executor/execIndexing.c  4
-rw-r--r--  src/backend/executor/execParallel.c  1
-rw-r--r--  src/backend/executor/execReplication.c  255
-rw-r--r--  src/backend/executor/nodeModifyTable.c  136
-rw-r--r--  src/backend/executor/nodeTidrangescan.c  6
-rw-r--r--  src/backend/jit/llvm/Makefile  2
-rw-r--r--  src/backend/jit/llvm/meson.build  2
-rw-r--r--  src/backend/lib/README  4
-rw-r--r--  src/backend/libpq/auth.c  12
-rw-r--r--  src/backend/libpq/be-secure-gssapi.c  6
-rw-r--r--  src/backend/libpq/be-secure-openssl.c  4
-rw-r--r--  src/backend/libpq/hba.c  38
-rw-r--r--  src/backend/libpq/pg_ident.conf.sample  26
-rw-r--r--  src/backend/libpq/pqcomm.c  1
-rw-r--r--  src/backend/libpq/pqmq.c  16
-rw-r--r--  src/backend/main/main.c  16
-rw-r--r--  src/backend/nodes/gen_node_support.pl  2
-rw-r--r--  src/backend/nodes/outfuncs.c  2
-rw-r--r--  src/backend/nodes/queryjumblefuncs.c  280
-rw-r--r--  src/backend/nodes/readfuncs.c  2
-rw-r--r--  src/backend/optimizer/path/costsize.c  72
-rw-r--r--  src/backend/optimizer/path/joinpath.c  56
-rw-r--r--  src/backend/optimizer/path/joinrels.c  60
-rw-r--r--  src/backend/optimizer/plan/createplan.c  151
-rw-r--r--  src/backend/optimizer/plan/initsplan.c  24
-rw-r--r--  src/backend/optimizer/plan/planner.c  41
-rw-r--r--  src/backend/optimizer/plan/subselect.c  46
-rw-r--r--  src/backend/optimizer/prep/prepjointree.c  320
-rw-r--r--  src/backend/optimizer/util/clauses.c  99
-rw-r--r--  src/backend/optimizer/util/inherit.c  10
-rw-r--r--  src/backend/optimizer/util/paramassign.c  109
-rw-r--r--  src/backend/optimizer/util/pathnode.c  84
-rw-r--r--  src/backend/optimizer/util/placeholder.c  40
-rw-r--r--  src/backend/optimizer/util/plancat.c  127
-rw-r--r--  src/backend/parser/analyze.c  90
-rw-r--r--  src/backend/parser/gram.y  393
-rw-r--r--  src/backend/parser/parse_expr.c  4
-rw-r--r--  src/backend/parser/parse_utilcmd.c  22
-rw-r--r--  src/backend/partitioning/partbounds.c  57
-rw-r--r--  src/backend/postmaster/autovacuum.c  36
-rw-r--r--  src/backend/postmaster/bgworker.c  1
-rw-r--r--  src/backend/postmaster/checkpointer.c  232
-rw-r--r--  src/backend/postmaster/pgarch.c  12
-rw-r--r--  src/backend/postmaster/pmchild.c  18
-rw-r--r--  src/backend/postmaster/postmaster.c  35
-rw-r--r--  src/backend/postmaster/walsummarizer.c  28
-rw-r--r--  src/backend/regex/regc_pg_locale.c  429
-rw-r--r--  src/backend/replication/libpqwalreceiver/libpqwalreceiver.c  36
-rw-r--r--  src/backend/replication/logical/applyparallelworker.c  3
-rw-r--r--  src/backend/replication/logical/conflict.c  22
-rw-r--r--  src/backend/replication/logical/launcher.c  272
-rw-r--r--  src/backend/replication/logical/logical.c  32
-rw-r--r--  src/backend/replication/logical/origin.c  6
-rw-r--r--  src/backend/replication/logical/proto.c  2
-rw-r--r--  src/backend/replication/logical/reorderbuffer.c  202
-rw-r--r--  src/backend/replication/logical/slotsync.c  20
-rw-r--r--  src/backend/replication/logical/snapbuild.c  58
-rw-r--r--  src/backend/replication/logical/tablesync.c  56
-rw-r--r--  src/backend/replication/logical/worker.c  810
-rw-r--r--  src/backend/replication/pgoutput/pgoutput.c  8
-rw-r--r--  src/backend/replication/repl_gram.y  4
-rw-r--r--  src/backend/replication/repl_scanner.l  2
-rw-r--r--  src/backend/replication/slot.c  169
-rw-r--r--  src/backend/replication/slotfuncs.c  2
-rw-r--r--  src/backend/replication/syncrep.c  4
-rw-r--r--  src/backend/replication/syncrep_scanner.l  11
-rw-r--r--  src/backend/replication/walreceiver.c  16
-rw-r--r--  src/backend/replication/walsender.c  133
-rw-r--r--  src/backend/rewrite/rewriteHandler.c  5
-rw-r--r--  src/backend/statistics/mcv.c  2
-rw-r--r--  src/backend/storage/aio/README.md  5
-rw-r--r--  src/backend/storage/aio/aio.c  27
-rw-r--r--  src/backend/storage/aio/aio_callback.c  7
-rw-r--r--  src/backend/storage/aio/method_io_uring.c  218
-rw-r--r--  src/backend/storage/aio/method_worker.c  69
-rw-r--r--  src/backend/storage/buffer/bufmgr.c  33
-rw-r--r--  src/backend/storage/buffer/localbuf.c  32
-rw-r--r--  src/backend/storage/file/fd.c  19
-rw-r--r--  src/backend/storage/file/fileset.c  2
-rw-r--r--  src/backend/storage/ipc/dsm_registry.c  314
-rw-r--r--  src/backend/storage/ipc/latch.c  8
-rw-r--r--  src/backend/storage/ipc/procarray.c  72
-rw-r--r--  src/backend/storage/ipc/procsignal.c  6
-rw-r--r--  src/backend/storage/ipc/shmem.c  4
-rw-r--r--  src/backend/storage/ipc/standby.c  4
-rw-r--r--  src/backend/storage/lmgr/generate-lwlocknames.pl  113
-rw-r--r--  src/backend/storage/lmgr/lock.c  22
-rw-r--r--  src/backend/storage/lmgr/lwlock.c  48
-rw-r--r--  src/backend/storage/lmgr/predicate.c  11
-rw-r--r--  src/backend/tcop/backend_startup.c  39
-rw-r--r--  src/backend/tcop/postgres.c  5
-rw-r--r--  src/backend/tcop/pquery.c  25
-rw-r--r--  src/backend/tcop/utility.c  26
-rw-r--r--  src/backend/tsearch/dict_ispell.c  18
-rw-r--r--  src/backend/tsearch/dict_synonym.c  1
-rw-r--r--  src/backend/tsearch/dict_thesaurus.c  7
-rw-r--r--  src/backend/tsearch/ts_locale.c  4
-rw-r--r--  src/backend/tsearch/ts_selfuncs.c  2
-rw-r--r--  src/backend/tsearch/wparser_def.c  2
-rw-r--r--  src/backend/utils/activity/backend_status.c  10
-rw-r--r--  src/backend/utils/activity/pgstat.c  52
-rw-r--r--  src/backend/utils/activity/pgstat_backend.c  14
-rw-r--r--  src/backend/utils/activity/pgstat_io.c  10
-rw-r--r--  src/backend/utils/activity/pgstat_relation.c  4
-rw-r--r--  src/backend/utils/activity/pgstat_shmem.c  5
-rw-r--r--  src/backend/utils/activity/pgstat_slru.c  10
-rw-r--r--  src/backend/utils/activity/pgstat_wal.c  20
-rw-r--r--  src/backend/utils/activity/wait_event_names.txt  10
-rw-r--r--  src/backend/utils/adt/Makefile  1
-rw-r--r--  src/backend/utils/adt/acl.c  33
-rw-r--r--  src/backend/utils/adt/bytea.c  1114
-rw-r--r--  src/backend/utils/adt/date.c  86
-rw-r--r--  src/backend/utils/adt/float.c  22
-rw-r--r--  src/backend/utils/adt/formatting.c  5
-rw-r--r--  src/backend/utils/adt/inet_net_pton.c  3
-rw-r--r--  src/backend/utils/adt/jsonb_gin.c  4
-rw-r--r--  src/backend/utils/adt/jsonb_op.c  8
-rw-r--r--  src/backend/utils/adt/jsonb_util.c  43
-rw-r--r--  src/backend/utils/adt/jsonfuncs.c  4
-rw-r--r--  src/backend/utils/adt/jsonpath_exec.c  4
-rw-r--r--  src/backend/utils/adt/like.c  22
-rw-r--r--  src/backend/utils/adt/like_support.c  7
-rw-r--r--  src/backend/utils/adt/mcxtfuncs.c  2
-rw-r--r--  src/backend/utils/adt/meson.build  1
-rw-r--r--  src/backend/utils/adt/multirangetypes.c  9
-rw-r--r--  src/backend/utils/adt/network.c  2
-rw-r--r--  src/backend/utils/adt/network_spgist.c  1
-rw-r--r--  src/backend/utils/adt/numeric.c  20
-rw-r--r--  src/backend/utils/adt/pg_locale.c  122
-rw-r--r--  src/backend/utils/adt/pg_locale_builtin.c  113
-rw-r--r--  src/backend/utils/adt/pg_locale_icu.c  130
-rw-r--r--  src/backend/utils/adt/pg_locale_libc.c  361
-rw-r--r--  src/backend/utils/adt/pg_lsn.c  2
-rw-r--r--  src/backend/utils/adt/pg_upgrade_support.c  19
-rw-r--r--  src/backend/utils/adt/pgstatfuncs.c  16
-rw-r--r--  src/backend/utils/adt/rangetypes.c  8
-rw-r--r--  src/backend/utils/adt/regproc.c  118
-rw-r--r--  src/backend/utils/adt/ri_triggers.c  2
-rw-r--r--  src/backend/utils/adt/selfuncs.c  21
-rw-r--r--  src/backend/utils/adt/tid.c  2
-rw-r--r--  src/backend/utils/adt/timestamp.c  101
-rw-r--r--  src/backend/utils/adt/tsvector_op.c  24
-rw-r--r--  src/backend/utils/adt/varlena.c  1098
-rw-r--r--  src/backend/utils/adt/xml.c  171
-rw-r--r--  src/backend/utils/cache/catcache.c  3
-rw-r--r--  src/backend/utils/cache/evtcache.c  16
-rw-r--r--  src/backend/utils/cache/plancache.c  29
-rw-r--r--  src/backend/utils/cache/ts_cache.c  4
-rw-r--r--  src/backend/utils/cache/typcache.c  13
-rw-r--r--  src/backend/utils/error/elog.c  15
-rw-r--r--  src/backend/utils/fmgr/dfmgr.c  16
-rw-r--r--  src/backend/utils/hash/dynahash.c  62
-rw-r--r--  src/backend/utils/init/miscinit.c  1
-rw-r--r--  src/backend/utils/init/postinit.c  11
-rw-r--r--  src/backend/utils/misc/guc.c  59
-rw-r--r--  src/backend/utils/misc/guc_tables.c  18
-rw-r--r--  src/backend/utils/misc/injection_point.c  46
-rw-r--r--  src/backend/utils/misc/postgresql.conf.sample  4
-rw-r--r--  src/backend/utils/misc/ps_status.c  16
-rw-r--r--  src/backend/utils/mmgr/alignedalloc.c  18
-rw-r--r--  src/backend/utils/mmgr/aset.c  71
-rw-r--r--  src/backend/utils/mmgr/bump.c  31
-rw-r--r--  src/backend/utils/mmgr/dsa.c  15
-rw-r--r--  src/backend/utils/mmgr/generation.c  29
-rw-r--r--  src/backend/utils/mmgr/mcxt.c  116
-rw-r--r--  src/backend/utils/mmgr/slab.c  32
236 files changed, 9709 insertions, 5169 deletions
diff --git a/src/backend/access/brin/brin_minmax_multi.c b/src/backend/access/brin/brin_minmax_multi.c
index 0d1507a2a36..b85a70a0db2 100644
--- a/src/backend/access/brin/brin_minmax_multi.c
+++ b/src/backend/access/brin/brin_minmax_multi.c
@@ -624,7 +624,7 @@ brin_range_serialize(Ranges *range)
for (i = 0; i < nvalues; i++)
{
- len += VARSIZE_ANY(range->values[i]);
+ len += VARSIZE_ANY(DatumGetPointer(range->values[i]));
}
}
else if (typlen == -2) /* cstring */
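
Note: this hunk is the first of many Datum-conversion fixes in this patch (heaptuple.c, reloptions.c, gininsert.c, and others follow the same shape). VARSIZE_ANY() and the other varlena macros operate on pointers, so a Datum must be converted explicitly with DatumGetPointer() rather than passed through an implicit cast. A minimal sketch of the corrected pattern, assuming the PostgreSQL backend headers (varlena_datum_size is an illustrative helper, not part of the patch):

#include "postgres.h"
#include "varatt.h"

/* Illustrative only: on-disk size of a non-external varlena Datum */
static Size
varlena_datum_size(Datum d)
{
	struct varlena *vlptr = (struct varlena *) DatumGetPointer(d);

	return VARSIZE_ANY(vlptr);
}
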
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 969d1028cae..a410b5eb99b 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -189,7 +189,7 @@ getmissingattr(TupleDesc tupleDesc,
if (att->attlen > 0)
key.len = att->attlen;
else
- key.len = VARSIZE_ANY(attrmiss->am_value);
+ key.len = VARSIZE_ANY(DatumGetPointer(attrmiss->am_value));
key.value = attrmiss->am_value;
entry = hash_search(missing_cache, &key, HASH_ENTER, &found);
@@ -901,9 +901,9 @@ expand_tuple(HeapTuple *targetHeapTuple,
att->attlen,
attrmiss[attnum].am_value);
- targetDataLen = att_addlength_pointer(targetDataLen,
- att->attlen,
- attrmiss[attnum].am_value);
+ targetDataLen = att_addlength_datum(targetDataLen,
+ att->attlen,
+ attrmiss[attnum].am_value);
}
else
{
diff --git a/src/backend/access/common/printsimple.c b/src/backend/access/common/printsimple.c
index f346ab3e812..a09c8fcd332 100644
--- a/src/backend/access/common/printsimple.c
+++ b/src/backend/access/common/printsimple.c
@@ -123,7 +123,7 @@ printsimple(TupleTableSlot *slot, DestReceiver *self)
case OIDOID:
{
- Oid num = ObjectIdGetDatum(value);
+ Oid num = DatumGetObjectId(value);
char str[10]; /* 10 digits */
int len;
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 46c1dce222d..594a657ea1a 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -1190,8 +1190,8 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
for (i = 0; i < noldoptions; i++)
{
- char *text_str = VARDATA(oldoptions[i]);
- int text_len = VARSIZE(oldoptions[i]) - VARHDRSZ;
+ char *text_str = VARDATA(DatumGetPointer(oldoptions[i]));
+ int text_len = VARSIZE(DatumGetPointer(oldoptions[i])) - VARHDRSZ;
/* Search for a match in defList */
foreach(cell, defList)
@@ -1243,8 +1243,9 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
}
else
{
- text *t;
+ const char *name;
const char *value;
+ text *t;
Size len;
/*
@@ -1291,11 +1292,19 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
* have just "name", assume "name=true" is meant. Note: the
* namespace is not output.
*/
+ name = def->defname;
if (def->arg != NULL)
value = defGetString(def);
else
value = "true";
+ /* Insist that name not contain "=", else "a=b=c" is ambiguous */
+ if (strchr(name, '=') != NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("invalid option name \"%s\": must not contain \"=\"",
+ name)));
+
/*
* This is not a great place for this test, but there's no other
* convenient place to filter the option out. As WITH (oids =
@@ -1303,7 +1312,7 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
* amount of ugly.
*/
if (acceptOidsOff && def->defnamespace == NULL &&
- strcmp(def->defname, "oids") == 0)
+ strcmp(name, "oids") == 0)
{
if (defGetBoolean(def))
ereport(ERROR,
@@ -1313,11 +1322,11 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
continue;
}
- len = VARHDRSZ + strlen(def->defname) + 1 + strlen(value);
+ len = VARHDRSZ + strlen(name) + 1 + strlen(value);
/* +1 leaves room for sprintf's trailing null */
t = (text *) palloc(len + 1);
SET_VARSIZE(t, len);
- sprintf(VARDATA(t), "%s=%s", def->defname, value);
+ sprintf(VARDATA(t), "%s=%s", name, value);
astate = accumArrayResult(astate, PointerGetDatum(t),
false, TEXTOID,
@@ -1447,8 +1456,8 @@ parseRelOptionsInternal(Datum options, bool validate,
for (i = 0; i < noptions; i++)
{
- char *text_str = VARDATA(optiondatums[i]);
- int text_len = VARSIZE(optiondatums[i]) - VARHDRSZ;
+ char *text_str = VARDATA(DatumGetPointer(optiondatums[i]));
+ int text_len = VARSIZE(DatumGetPointer(optiondatums[i])) - VARHDRSZ;
int j;
/* Search for a match in reloptions */
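
The new '=' check above exists because reloptions are flattened into "name=value" text datums. A sketch of that encoding, assuming backend headers (make_reloption_text is an illustrative helper): without the check, name "a=b" with value "c" and name "a" with value "b=c" would both serialize to "a=b=c" and could not be decoded unambiguously.

#include "postgres.h"
#include "varatt.h"

static text *
make_reloption_text(const char *name, const char *value)
{
	Size		len = VARHDRSZ + strlen(name) + 1 + strlen(value);
	text	   *t = (text *) palloc(len + 1);	/* +1 for sprintf's NUL */

	Assert(strchr(name, '=') == NULL);	/* caller must have validated */
	SET_VARSIZE(t, len);
	sprintf(VARDATA(t), "%s=%s", name, value);
	return t;
}
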
diff --git a/src/backend/access/common/toast_compression.c b/src/backend/access/common/toast_compression.c
index 21f2f4af97e..926f1e4008a 100644
--- a/src/backend/access/common/toast_compression.c
+++ b/src/backend/access/common/toast_compression.c
@@ -25,11 +25,11 @@
/* GUC */
int default_toast_compression = TOAST_PGLZ_COMPRESSION;
-#define NO_LZ4_SUPPORT() \
+#define NO_COMPRESSION_SUPPORT(method) \
ereport(ERROR, \
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), \
- errmsg("compression method lz4 not supported"), \
- errdetail("This functionality requires the server to be built with lz4 support.")))
+ errmsg("compression method %s not supported", method), \
+ errdetail("This functionality requires the server to be built with %s support.", method)))
/*
* Compress a varlena using PGLZ.
@@ -139,7 +139,7 @@ struct varlena *
lz4_compress_datum(const struct varlena *value)
{
#ifndef USE_LZ4
- NO_LZ4_SUPPORT();
+ NO_COMPRESSION_SUPPORT("lz4");
return NULL; /* keep compiler quiet */
#else
int32 valsize;
@@ -182,7 +182,7 @@ struct varlena *
lz4_decompress_datum(const struct varlena *value)
{
#ifndef USE_LZ4
- NO_LZ4_SUPPORT();
+ NO_COMPRESSION_SUPPORT("lz4");
return NULL; /* keep compiler quiet */
#else
int32 rawsize;
@@ -215,7 +215,7 @@ struct varlena *
lz4_decompress_datum_slice(const struct varlena *value, int32 slicelength)
{
#ifndef USE_LZ4
- NO_LZ4_SUPPORT();
+ NO_COMPRESSION_SUPPORT("lz4");
return NULL; /* keep compiler quiet */
#else
int32 rawsize;
@@ -289,7 +289,7 @@ CompressionNameToMethod(const char *compression)
else if (strcmp(compression, "lz4") == 0)
{
#ifndef USE_LZ4
- NO_LZ4_SUPPORT();
+ NO_COMPRESSION_SUPPORT("lz4");
#endif
return TOAST_LZ4_COMPRESSION;
}
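
The macro rename generalizes the error report so that any compile-time-optional compression method can reuse it. A sketch of the intended usage under that assumption (USE_ZSTD and the function below are purely illustrative; the patch itself only applies the macro to lz4):

#ifndef USE_ZSTD				/* hypothetical build flag */
static struct varlena *
zstd_compress_datum_sketch(const struct varlena *value)
{
	NO_COMPRESSION_SUPPORT("zstd");
	return NULL;				/* keep compiler quiet */
}
#endif
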
diff --git a/src/backend/access/common/toast_internals.c b/src/backend/access/common/toast_internals.c
index 7d8be8346ce..196e06115e9 100644
--- a/src/backend/access/common/toast_internals.c
+++ b/src/backend/access/common/toast_internals.c
@@ -144,7 +144,7 @@ toast_save_datum(Relation rel, Datum value,
int num_indexes;
int validIndex;
- Assert(!VARATT_IS_EXTERNAL(value));
+ Assert(!VARATT_IS_EXTERNAL(dval));
/*
* Open the toast relation and its indexes. We can use the index to check
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index ffd0c78f905..be60005ae46 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -142,11 +142,18 @@ void
verify_compact_attribute(TupleDesc tupdesc, int attnum)
{
#ifdef USE_ASSERT_CHECKING
- CompactAttribute *cattr = &tupdesc->compact_attrs[attnum];
+ CompactAttribute cattr;
Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum);
CompactAttribute tmp;
/*
+ * Make a temp copy of the TupleDesc's CompactAttribute. This may be a
+ * shared TupleDesc and the attcacheoff might get changed by another
+ * backend.
+ */
+ memcpy(&cattr, &tupdesc->compact_attrs[attnum], sizeof(CompactAttribute));
+
+ /*
* Populate the temporary CompactAttribute from the corresponding
* Form_pg_attribute
*/
@@ -156,11 +163,11 @@ verify_compact_attribute(TupleDesc tupdesc, int attnum)
* Make the attcacheoff match since it's been reset to -1 by
* populate_compact_attribute_internal. Same with attnullability.
*/
- tmp.attcacheoff = cattr->attcacheoff;
- tmp.attnullability = cattr->attnullability;
+ tmp.attcacheoff = cattr.attcacheoff;
+ tmp.attnullability = cattr.attnullability;
/* Check the freshly populated CompactAttribute matches the TupleDesc's */
- Assert(memcmp(&tmp, cattr, sizeof(CompactAttribute)) == 0);
+ Assert(memcmp(&tmp, &cattr, sizeof(CompactAttribute)) == 0);
#endif
}
@@ -808,10 +815,10 @@ hashRowType(TupleDesc desc)
uint32 s;
int i;
- s = hash_combine(0, hash_uint32(desc->natts));
- s = hash_combine(s, hash_uint32(desc->tdtypeid));
+ s = hash_combine(0, hash_bytes_uint32(desc->natts));
+ s = hash_combine(s, hash_bytes_uint32(desc->tdtypeid));
for (i = 0; i < desc->natts; ++i)
- s = hash_combine(s, hash_uint32(TupleDescAttr(desc, i)->atttypid));
+ s = hash_combine(s, hash_bytes_uint32(TupleDescAttr(desc, i)->atttypid));
return s;
}
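
The verify_compact_attribute() fix is an instance of a general pattern for assertion checks against shared state: snapshot the struct into a local with memcpy() and run every check against the stable copy, so a concurrent update (here, of attcacheoff) cannot fire a spurious assertion between two reads. A minimal generic sketch (SharedThing is an invented stand-in type):

#include "postgres.h"

typedef struct SharedThing
{
	int			cached_offset;	/* may be updated by other backends */
	int			other_field;	/* stable */
} SharedThing;

static void
verify_shared_thing(const SharedThing *shared, int expected_other)
{
	SharedThing copy;

	/* One racy read; the check below runs on the stable local copy */
	memcpy(&copy, shared, sizeof(SharedThing));
	Assert(copy.other_field == expected_other);
}
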
diff --git a/src/backend/access/gin/gininsert.c b/src/backend/access/gin/gininsert.c
index a65acd89104..47b1898a064 100644
--- a/src/backend/access/gin/gininsert.c
+++ b/src/backend/access/gin/gininsert.c
@@ -2233,7 +2233,7 @@ _gin_build_tuple(OffsetNumber attrnum, unsigned char category,
else if (typlen > 0)
keylen = typlen;
else if (typlen == -1)
- keylen = VARSIZE_ANY(key);
+ keylen = VARSIZE_ANY(DatumGetPointer(key));
else if (typlen == -2)
keylen = strlen(DatumGetPointer(key)) + 1;
else
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index a6b701943d3..c0aa7d0222f 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -1058,11 +1058,11 @@ gistGetFakeLSN(Relation rel)
}
/*
- * This is a stratnum support function for GiST opclasses that use the
- * RT*StrategyNumber constants.
+ * This is a stratnum translation support function for GiST opclasses that use
+ * the RT*StrategyNumber constants.
*/
Datum
-gist_stratnum_common(PG_FUNCTION_ARGS)
+gist_translate_cmptype_common(PG_FUNCTION_ARGS)
{
CompareType cmptype = PG_GETARG_INT32(0);
@@ -1090,9 +1090,9 @@ gist_stratnum_common(PG_FUNCTION_ARGS)
/*
* Returns the opclass's private stratnum used for the given compare type.
*
- * Calls the opclass's GIST_STRATNUM_PROC support function, if any,
- * and returns the result.
- * Returns InvalidStrategy if the function is not defined.
+ * Calls the opclass's GIST_TRANSLATE_CMPTYPE_PROC support function, if any,
+ * and returns the result. Returns InvalidStrategy if the function is not
+ * defined.
*/
StrategyNumber
gisttranslatecmptype(CompareType cmptype, Oid opfamily)
@@ -1101,7 +1101,7 @@ gisttranslatecmptype(CompareType cmptype, Oid opfamily)
Datum result;
/* Check whether the function is provided. */
- funcid = get_opfamily_proc(opfamily, ANYOID, ANYOID, GIST_STRATNUM_PROC);
+ funcid = get_opfamily_proc(opfamily, ANYOID, ANYOID, GIST_TRANSLATE_CMPTYPE_PROC);
if (!OidIsValid(funcid))
return InvalidStrategy;
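
For context, the renamed support function is looked up and invoked through the standard opfamily-proc machinery. A hedged sketch of that lookup-and-call shape, assuming the usual fmgr and lsyscache headers (translate_cmptype_sketch is illustrative; gisttranslatecmptype() is the real entry point):

#include "postgres.h"
#include "access/cmptype.h"
#include "access/gist.h"
#include "access/stratnum.h"
#include "catalog/pg_type.h"
#include "fmgr.h"
#include "utils/lsyscache.h"

static StrategyNumber
translate_cmptype_sketch(CompareType cmptype, Oid opfamily)
{
	Oid			funcid;

	/* The support function is optional; missing means "no translation" */
	funcid = get_opfamily_proc(opfamily, ANYOID, ANYOID,
							   GIST_TRANSLATE_CMPTYPE_PROC);
	if (!OidIsValid(funcid))
		return InvalidStrategy;
	return DatumGetUInt16(OidFunctionCall1(funcid,
										   Int32GetDatum((int32) cmptype)));
}
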
diff --git a/src/backend/access/gist/gistvalidate.c b/src/backend/access/gist/gistvalidate.c
index 2a49e6d20f0..2ed6f74fce9 100644
--- a/src/backend/access/gist/gistvalidate.c
+++ b/src/backend/access/gist/gistvalidate.c
@@ -138,7 +138,7 @@ gistvalidate(Oid opclassoid)
ok = check_amproc_signature(procform->amproc, VOIDOID, true,
1, 1, INTERNALOID);
break;
- case GIST_STRATNUM_PROC:
+ case GIST_TRANSLATE_CMPTYPE_PROC:
ok = check_amproc_signature(procform->amproc, INT2OID, true,
1, 1, INT4OID) &&
procform->amproclefttype == ANYOID &&
@@ -265,7 +265,7 @@ gistvalidate(Oid opclassoid)
if (i == GIST_DISTANCE_PROC || i == GIST_FETCH_PROC ||
i == GIST_COMPRESS_PROC || i == GIST_DECOMPRESS_PROC ||
i == GIST_OPTIONS_PROC || i == GIST_SORTSUPPORT_PROC ||
- i == GIST_STRATNUM_PROC)
+ i == GIST_TRANSLATE_CMPTYPE_PROC)
continue; /* optional methods */
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
@@ -336,7 +336,7 @@ gistadjustmembers(Oid opfamilyoid,
case GIST_FETCH_PROC:
case GIST_OPTIONS_PROC:
case GIST_SORTSUPPORT_PROC:
- case GIST_STRATNUM_PROC:
+ case GIST_TRANSLATE_CMPTYPE_PROC:
/* Optional, so force it to be a soft family dependency */
op->ref_is_hard = false;
op->ref_is_family = true;
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 2be7f817c78..0dcd6ee817e 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -4982,7 +4982,7 @@ l3:
case LockWaitError:
if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
status, infomask, relation,
- NULL, log_lock_failure))
+ NULL, log_lock_failures))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
@@ -5020,7 +5020,7 @@ l3:
}
break;
case LockWaitError:
- if (!ConditionalXactLockTableWait(xwait, log_lock_failure))
+ if (!ConditionalXactLockTableWait(xwait, log_lock_failures))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
@@ -5285,7 +5285,7 @@ heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
break;
case LockWaitError:
- if (!ConditionalLockTupleTuplock(relation, tid, mode, log_lock_failure))
+ if (!ConditionalLockTupleTuplock(relation, tid, mode, log_lock_failures))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index ac082fefa77..cb4bc35c93e 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -464,7 +464,7 @@ tuple_lock_retry:
return TM_WouldBlock;
break;
case LockWaitError:
- if (!ConditionalXactLockTableWait(SnapshotDirty.xmax, log_lock_failure))
+ if (!ConditionalXactLockTableWait(SnapshotDirty.xmax, log_lock_failures))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
diff --git a/src/backend/access/heap/heapam_xlog.c b/src/backend/access/heap/heapam_xlog.c
index 30f4c2d3c67..eb4bd3d6ae3 100644
--- a/src/backend/access/heap/heapam_xlog.c
+++ b/src/backend/access/heap/heapam_xlog.c
@@ -438,6 +438,9 @@ heap_xlog_insert(XLogReaderState *record)
ItemPointerSetBlockNumber(&target_tid, blkno);
ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
+ /* No freezing in the heap_insert() code path */
+ Assert(!(xlrec->flags & XLH_INSERT_ALL_FROZEN_SET));
+
/*
* The visibility map may need to be fixed even if the heap page is
* already up-to-date.
@@ -508,10 +511,6 @@ heap_xlog_insert(XLogReaderState *record)
if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
PageClearAllVisible(page);
- /* XLH_INSERT_ALL_FROZEN_SET implies that all tuples are visible */
- if (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)
- PageSetAllVisible(page);
-
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 708674d8fcf..14036c27e87 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -423,7 +423,7 @@ typedef struct LVSavedErrInfo
/* non-export function prototypes */
static void lazy_scan_heap(LVRelState *vacrel);
static void heap_vacuum_eager_scan_setup(LVRelState *vacrel,
- VacuumParams *params);
+ const VacuumParams params);
static BlockNumber heap_vac_scan_next_block(ReadStream *stream,
void *callback_private_data,
void *per_buffer_data);
@@ -431,7 +431,7 @@ static void find_next_unskippable_block(LVRelState *vacrel, bool *skipsallvis);
static bool lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf,
BlockNumber blkno, Page page,
bool sharelock, Buffer vmbuffer);
-static void lazy_scan_prune(LVRelState *vacrel, Buffer buf,
+static int lazy_scan_prune(LVRelState *vacrel, Buffer buf,
BlockNumber blkno, Page page,
Buffer vmbuffer, bool all_visible_according_to_vm,
bool *has_lpdead_items, bool *vm_page_frozen);
@@ -485,7 +485,7 @@ static void restore_vacuum_error_info(LVRelState *vacrel,
* vacuum options or for relfrozenxid/relminmxid advancement.
*/
static void
-heap_vacuum_eager_scan_setup(LVRelState *vacrel, VacuumParams *params)
+heap_vacuum_eager_scan_setup(LVRelState *vacrel, const VacuumParams params)
{
uint32 randseed;
BlockNumber allvisible;
@@ -504,7 +504,7 @@ heap_vacuum_eager_scan_setup(LVRelState *vacrel, VacuumParams *params)
vacrel->eager_scan_remaining_successes = 0;
/* If eager scanning is explicitly disabled, just return. */
- if (params->max_eager_freeze_failure_rate == 0)
+ if (params.max_eager_freeze_failure_rate == 0)
return;
/*
@@ -581,11 +581,11 @@ heap_vacuum_eager_scan_setup(LVRelState *vacrel, VacuumParams *params)
vacrel->next_eager_scan_region_start = randseed % EAGER_SCAN_REGION_SIZE;
- Assert(params->max_eager_freeze_failure_rate > 0 &&
- params->max_eager_freeze_failure_rate <= 1);
+ Assert(params.max_eager_freeze_failure_rate > 0 &&
+ params.max_eager_freeze_failure_rate <= 1);
vacrel->eager_scan_max_fails_per_region =
- params->max_eager_freeze_failure_rate *
+ params.max_eager_freeze_failure_rate *
EAGER_SCAN_REGION_SIZE;
/*
@@ -612,7 +612,7 @@ heap_vacuum_eager_scan_setup(LVRelState *vacrel, VacuumParams *params)
* and locked the relation.
*/
void
-heap_vacuum_rel(Relation rel, VacuumParams *params,
+heap_vacuum_rel(Relation rel, const VacuumParams params,
BufferAccessStrategy bstrategy)
{
LVRelState *vacrel;
@@ -634,9 +634,9 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
ErrorContextCallback errcallback;
char **indnames = NULL;
- verbose = (params->options & VACOPT_VERBOSE) != 0;
+ verbose = (params.options & VACOPT_VERBOSE) != 0;
instrument = (verbose || (AmAutoVacuumWorkerProcess() &&
- params->log_min_duration >= 0));
+ params.log_min_duration >= 0));
if (instrument)
{
pg_rusage_init(&ru0);
@@ -699,9 +699,9 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
* The truncate param allows user to avoid attempting relation truncation,
* though it can't force truncation to happen.
*/
- Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED);
- Assert(params->truncate != VACOPTVALUE_UNSPECIFIED &&
- params->truncate != VACOPTVALUE_AUTO);
+ Assert(params.index_cleanup != VACOPTVALUE_UNSPECIFIED);
+ Assert(params.truncate != VACOPTVALUE_UNSPECIFIED &&
+ params.truncate != VACOPTVALUE_AUTO);
/*
* While VacuumFailSafeActive is reset to false before calling this, we
@@ -711,14 +711,14 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
vacrel->consider_bypass_optimization = true;
vacrel->do_index_vacuuming = true;
vacrel->do_index_cleanup = true;
- vacrel->do_rel_truncate = (params->truncate != VACOPTVALUE_DISABLED);
- if (params->index_cleanup == VACOPTVALUE_DISABLED)
+ vacrel->do_rel_truncate = (params.truncate != VACOPTVALUE_DISABLED);
+ if (params.index_cleanup == VACOPTVALUE_DISABLED)
{
/* Force disable index vacuuming up-front */
vacrel->do_index_vacuuming = false;
vacrel->do_index_cleanup = false;
}
- else if (params->index_cleanup == VACOPTVALUE_ENABLED)
+ else if (params.index_cleanup == VACOPTVALUE_ENABLED)
{
/* Force index vacuuming. Note that failsafe can still bypass. */
vacrel->consider_bypass_optimization = false;
@@ -726,7 +726,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
else
{
/* Default/auto, make all decisions dynamically */
- Assert(params->index_cleanup == VACOPTVALUE_AUTO);
+ Assert(params.index_cleanup == VACOPTVALUE_AUTO);
}
/* Initialize page counters explicitly (be tidy) */
@@ -757,7 +757,6 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
vacrel->vm_new_visible_pages = 0;
vacrel->vm_new_visible_frozen_pages = 0;
vacrel->vm_new_frozen_pages = 0;
- vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
/*
* Get cutoffs that determine which deleted tuples are considered DEAD,
@@ -776,7 +775,9 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
* to increase the number of dead tuples it can prune away.)
*/
vacrel->aggressive = vacuum_get_cutoffs(rel, params, &vacrel->cutoffs);
+ vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
vacrel->vistest = GlobalVisTestFor(rel);
+
/* Initialize state used to track oldest extant XID/MXID */
vacrel->NewRelfrozenXid = vacrel->cutoffs.OldestXmin;
vacrel->NewRelminMxid = vacrel->cutoffs.OldestMxact;
@@ -788,7 +789,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
*/
vacrel->skippedallvis = false;
skipwithvm = true;
- if (params->options & VACOPT_DISABLE_PAGE_SKIPPING)
+ if (params.options & VACOPT_DISABLE_PAGE_SKIPPING)
{
/*
* Force aggressive mode, and disable skipping blocks using the
@@ -829,7 +830,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
* is already dangerously old.)
*/
lazy_check_wraparound_failsafe(vacrel);
- dead_items_alloc(vacrel, params->nworkers);
+ dead_items_alloc(vacrel, params.nworkers);
/*
* Call lazy_scan_heap to perform all required heap pruning, index
@@ -946,9 +947,9 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
{
TimestampTz endtime = GetCurrentTimestamp();
- if (verbose || params->log_min_duration == 0 ||
+ if (verbose || params.log_min_duration == 0 ||
TimestampDifferenceExceeds(starttime, endtime,
- params->log_min_duration))
+ params.log_min_duration))
{
long secs_dur;
int usecs_dur;
@@ -983,10 +984,10 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
* Aggressiveness already reported earlier, in dedicated
* VACUUM VERBOSE ereport
*/
- Assert(!params->is_wraparound);
+ Assert(!params.is_wraparound);
msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");
}
- else if (params->is_wraparound)
+ else if (params.is_wraparound)
{
/*
* While it's possible for a VACUUM to be both is_wraparound
@@ -1244,6 +1245,7 @@ lazy_scan_heap(LVRelState *vacrel)
Buffer buf;
Page page;
uint8 blk_info = 0;
+ int ndeleted = 0;
bool has_lpdead_items;
void *per_buffer_data = NULL;
bool vm_page_frozen = false;
@@ -1386,10 +1388,10 @@ lazy_scan_heap(LVRelState *vacrel)
* line pointers previously marked LP_DEAD.
*/
if (got_cleanup_lock)
- lazy_scan_prune(vacrel, buf, blkno, page,
- vmbuffer,
- blk_info & VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM,
- &has_lpdead_items, &vm_page_frozen);
+ ndeleted = lazy_scan_prune(vacrel, buf, blkno, page,
+ vmbuffer,
+ blk_info & VAC_BLK_ALL_VISIBLE_ACCORDING_TO_VM,
+ &has_lpdead_items, &vm_page_frozen);
/*
* Count an eagerly scanned page as a failure or a success.
@@ -1427,7 +1429,7 @@ lazy_scan_heap(LVRelState *vacrel)
*/
if (vacrel->eager_scan_max_fails_per_region > 0)
ereport(vacrel->verbose ? INFO : DEBUG2,
- (errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of \"%s.%s.%s\"",
+ (errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of relation \"%s.%s.%s\"",
orig_eager_scan_success_limit,
vacrel->dbname, vacrel->relnamespace,
vacrel->relname)));
@@ -1480,7 +1482,7 @@ lazy_scan_heap(LVRelState *vacrel)
* table has indexes. There will only be newly-freed space if we
* held the cleanup lock and lazy_scan_prune() was called.
*/
- if (got_cleanup_lock && vacrel->nindexes == 0 && has_lpdead_items &&
+ if (got_cleanup_lock && vacrel->nindexes == 0 && ndeleted > 0 &&
blkno - next_fsm_block_to_vacuum >= VACUUM_FSM_EVERY_PAGES)
{
FreeSpaceMapVacuumRange(vacrel->rel, next_fsm_block_to_vacuum,
@@ -1871,8 +1873,6 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
*/
if (!PageIsAllVisible(page))
{
- uint8 old_vmbits;
-
START_CRIT_SECTION();
/* mark buffer dirty before writing a WAL record */
@@ -1892,24 +1892,16 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
log_newpage_buffer(buf, true);
PageSetAllVisible(page);
- old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
- InvalidXLogRecPtr,
- vmbuffer, InvalidTransactionId,
- VISIBILITYMAP_ALL_VISIBLE |
- VISIBILITYMAP_ALL_FROZEN);
+ visibilitymap_set(vacrel->rel, blkno, buf,
+ InvalidXLogRecPtr,
+ vmbuffer, InvalidTransactionId,
+ VISIBILITYMAP_ALL_VISIBLE |
+ VISIBILITYMAP_ALL_FROZEN);
END_CRIT_SECTION();
- /*
- * If the page wasn't already set all-visible and/or all-frozen in
- * the VM, count it as newly set for logging.
- */
- if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
- {
- vacrel->vm_new_visible_pages++;
- vacrel->vm_new_visible_frozen_pages++;
- }
- else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0)
- vacrel->vm_new_frozen_pages++;
+ /* Count the newly all-frozen pages for logging */
+ vacrel->vm_new_visible_pages++;
+ vacrel->vm_new_visible_frozen_pages++;
}
freespace = PageGetHeapFreeSpace(page);
@@ -1945,8 +1937,10 @@ cmpOffsetNumbers(const void *a, const void *b)
* *vm_page_frozen is set to true if the page is newly set all-frozen in the
* VM. The caller currently only uses this for determining whether an eagerly
* scanned page was successfully set all-frozen.
+ *
+ * Returns the number of tuples deleted from the page during HOT pruning.
*/
-static void
+static int
lazy_scan_prune(LVRelState *vacrel,
Buffer buf,
BlockNumber blkno,
@@ -2217,6 +2211,8 @@ lazy_scan_prune(LVRelState *vacrel,
*vm_page_frozen = true;
}
}
+
+ return presult.ndeleted;
}
/*
@@ -2914,7 +2910,6 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
&all_frozen))
{
- uint8 old_vmbits;
uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
if (all_frozen)
@@ -2924,25 +2919,15 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
}
PageSetAllVisible(page);
- old_vmbits = visibilitymap_set(vacrel->rel, blkno, buffer,
- InvalidXLogRecPtr,
- vmbuffer, visibility_cutoff_xid,
- flags);
-
- /*
- * If the page wasn't already set all-visible and/or all-frozen in the
- * VM, count it as newly set for logging.
- */
- if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
- {
- vacrel->vm_new_visible_pages++;
- if (all_frozen)
- vacrel->vm_new_visible_frozen_pages++;
- }
+ visibilitymap_set(vacrel->rel, blkno, buffer,
+ InvalidXLogRecPtr,
+ vmbuffer, visibility_cutoff_xid,
+ flags);
- else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 &&
- all_frozen)
- vacrel->vm_new_frozen_pages++;
+ /* Count the newly set VM page for logging */
+ vacrel->vm_new_visible_pages++;
+ if (all_frozen)
+ vacrel->vm_new_visible_frozen_pages++;
}
/* Revert to the previous phase information for error traceback */
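
A large share of this file's churn comes from heap_vacuum_rel() and its helpers now taking `const VacuumParams params` by value instead of through a pointer: field access switches from `->` to `.`, and the callee provably cannot modify the caller's copy. A toy sketch of the difference (Params is an invented stand-in for VacuumParams):

#include <stdbool.h>

typedef struct Params
{
	int			options;
	double		max_eager_freeze_failure_rate;
} Params;

/* Before: pointer parameter; the callee could write through it */
static bool
eager_scan_enabled_old(const Params *params)
{
	return params->max_eager_freeze_failure_rate > 0;
}

/* After: by-value parameter; the callee owns a private copy */
static bool
eager_scan_enabled_new(const Params params)
{
	return params.max_eager_freeze_failure_rate > 0;
}
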
diff --git a/src/backend/access/heap/visibilitymap.c b/src/backend/access/heap/visibilitymap.c
index 745a04ef26e..8f918e00af7 100644
--- a/src/backend/access/heap/visibilitymap.c
+++ b/src/backend/access/heap/visibilitymap.c
@@ -364,7 +364,7 @@ visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
{
*vmbuf = vm_readbuf(rel, mapBlock, false);
if (!BufferIsValid(*vmbuf))
- return false;
+ return (uint8) 0;
}
map = PageGetContents(BufferGetPage(*vmbuf));
diff --git a/src/backend/access/index/amapi.c b/src/backend/access/index/amapi.c
index f0f4f974bce..60684c53422 100644
--- a/src/backend/access/index/amapi.c
+++ b/src/backend/access/index/amapi.c
@@ -42,6 +42,19 @@ GetIndexAmRoutine(Oid amhandler)
elog(ERROR, "index access method handler function %u did not return an IndexAmRoutine struct",
amhandler);
+ /* Assert that all required callbacks are present. */
+ Assert(routine->ambuild != NULL);
+ Assert(routine->ambuildempty != NULL);
+ Assert(routine->aminsert != NULL);
+ Assert(routine->ambulkdelete != NULL);
+ Assert(routine->amvacuumcleanup != NULL);
+ Assert(routine->amcostestimate != NULL);
+ Assert(routine->amoptions != NULL);
+ Assert(routine->amvalidate != NULL);
+ Assert(routine->ambeginscan != NULL);
+ Assert(routine->amrescan != NULL);
+ Assert(routine->amendscan != NULL);
+
return routine;
}
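
The added assertions make a missing callback fail loudly when the handler is loaded, instead of as a NULL function call much later. The same defensive pattern in miniature (FakeAmRoutine is an invented type, not the real IndexAmRoutine):

#include <assert.h>
#include <stddef.h>

typedef struct FakeAmRoutine
{
	void		(*build) (void);
	void		(*insert) (void);
} FakeAmRoutine;

static void
check_fake_am_routine(const FakeAmRoutine *routine)
{
	/* Every mandatory callback must have been filled in by the handler */
	assert(routine->build != NULL);
	assert(routine->insert != NULL);
}
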
diff --git a/src/backend/access/nbtree/nbtcompare.c b/src/backend/access/nbtree/nbtcompare.c
index 4da5a3c1d16..e1b52acd20d 100644
--- a/src/backend/access/nbtree/nbtcompare.c
+++ b/src/backend/access/nbtree/nbtcompare.c
@@ -555,7 +555,7 @@ btcharcmp(PG_FUNCTION_ARGS)
static Datum
char_decrement(Relation rel, Datum existing, bool *underflow)
{
- uint8 cexisting = UInt8GetDatum(existing);
+ uint8 cexisting = DatumGetUInt8(existing);
if (cexisting == 0)
{
@@ -571,7 +571,7 @@ char_decrement(Relation rel, Datum existing, bool *underflow)
static Datum
char_increment(Relation rel, Datum existing, bool *overflow)
{
- uint8 cexisting = UInt8GetDatum(existing);
+ uint8 cexisting = DatumGetUInt8(existing);
if (cexisting == UCHAR_MAX)
{
diff --git a/src/backend/access/nbtree/nbtpreprocesskeys.c b/src/backend/access/nbtree/nbtpreprocesskeys.c
index a136e4bbfdf..21c519cd108 100644
--- a/src/backend/access/nbtree/nbtpreprocesskeys.c
+++ b/src/backend/access/nbtree/nbtpreprocesskeys.c
@@ -16,6 +16,7 @@
#include "postgres.h"
#include "access/nbtree.h"
+#include "common/int.h"
#include "lib/qunique.h"
#include "utils/array.h"
#include "utils/lsyscache.h"
@@ -56,6 +57,8 @@ static void _bt_skiparray_strat_decrement(IndexScanDesc scan, ScanKey arraysk,
BTArrayKeyInfo *array);
static void _bt_skiparray_strat_increment(IndexScanDesc scan, ScanKey arraysk,
BTArrayKeyInfo *array);
+static void _bt_unmark_keys(IndexScanDesc scan, int *keyDataMap);
+static int _bt_reorder_array_cmp(const void *a, const void *b);
static ScanKey _bt_preprocess_array_keys(IndexScanDesc scan, int *new_numberOfKeys);
static void _bt_preprocess_array_keys_final(IndexScanDesc scan, int *keyDataMap);
static int _bt_num_array_keys(IndexScanDesc scan, Oid *skip_eq_ops_out,
@@ -96,7 +99,7 @@ static int _bt_compare_array_elements(const void *a, const void *b, void *arg);
* incomplete sets of cross-type operators, we may fail to detect redundant
* or contradictory keys, but we can survive that.)
*
- * The output keys must be sorted by index attribute. Presently we expect
+ * Required output keys are sorted by index attribute. Presently we expect
* (but verify) that the input keys are already so sorted --- this is done
* by match_clauses_to_index() in indxpath.c. Some reordering of the keys
* within each attribute may be done as a byproduct of the processing here.
@@ -127,29 +130,36 @@ static int _bt_compare_array_elements(const void *a, const void *b, void *arg);
* This has the potential to be much more efficient than a full index scan
* (though it behaves like a full scan when there's many distinct "x" values).
*
- * If possible, redundant keys are eliminated: we keep only the tightest
+ * Typically, redundant keys are eliminated: we keep only the tightest
* >/>= bound and the tightest </<= bound, and if there's an = key then
* that's the only one returned. (So, we return either a single = key,
* or one or two boundary-condition keys for each attr.) However, if we
* cannot compare two keys for lack of a suitable cross-type operator,
- * we cannot eliminate either. If there are two such keys of the same
- * operator strategy, the second one is just pushed into the output array
- * without further processing here. We may also emit both >/>= or both
- * </<= keys if we can't compare them. The logic about required keys still
- * works if we don't eliminate redundant keys.
- *
- * Note that one reason we need direction-sensitive required-key flags is
- * precisely that we may not be able to eliminate redundant keys. Suppose
- * we have "x > 4::int AND x > 10::bigint", and we are unable to determine
- * which key is more restrictive for lack of a suitable cross-type operator.
- * _bt_first will arbitrarily pick one of the keys to do the initial
- * positioning with. If it picks x > 4, then the x > 10 condition will fail
- * until we reach index entries > 10; but we can't stop the scan just because
- * x > 10 is failing. On the other hand, if we are scanning backwards, then
- * failure of either key is indeed enough to stop the scan. (In general, when
- * inequality keys are present, the initial-positioning code only promises to
- * position before the first possible match, not exactly at the first match,
- * for a forward scan; or after the last match for a backward scan.)
+ * we cannot eliminate either key.
+ *
+ * When all redundant keys could not be eliminated, we'll output a key array
+ * that can more or less be treated as if it had no redundant keys. Suppose
+ * we have "x > 4::int AND x > 10::bigint AND x < 70", and we are unable to
+ * determine which > key is more restrictive for lack of a suitable cross-type
+ * operator. We'll arbitrarily pick one of the > keys; the other > key won't
+ * be marked required. Obviously, the scan will be less efficient if we
+ * choose x > 4 over x > 10 -- but it can still largely proceed as if there
+ * was only a single > condition. "x > 10" will be placed at the end of the
+ * so->keyData[] output array. It'll always be evaluated last, after the keys
+ * that could be marked required in the usual way (after "x > 4 AND x < 70").
+ * This can sometimes result in so->keyData[] keys that aren't even in index
+ * attribute order (if the qual involves multiple attributes). The scan's
+ * required keys will still be in attribute order, though, so it can't matter.
+ *
+ * This scheme ensures that _bt_first always uses the same set of keys at the
+ * start of a forwards scan as those _bt_checkkeys uses to determine when to
+ * end a similar backwards scan (and vice-versa). _bt_advance_array_keys
+ * depends on this: it expects to be able to reliably predict what the next
+ * _bt_first call will do by testing whether _bt_checkkeys' routines report
+ * that the final tuple on the page is past the end of matches for the scan's
+ * keys with the scan direction flipped. If it is (if continuescan=false),
+ * then it follows that calling _bt_first will, at a minimum, relocate the
+ * scan to the very next leaf page (in the current scan direction).
*
* As a byproduct of this work, we can detect contradictory quals such
* as "x = 1 AND x > 2". If we see that, we return so->qual_ok = false,
@@ -188,7 +198,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
int numberOfEqualCols;
ScanKey inkeys;
BTScanKeyPreproc xform[BTMaxStrategyNumber];
- bool test_result;
+ bool test_result,
+ redundant_key_kept = false;
AttrNumber attno;
ScanKey arrayKeyData;
int *keyDataMap = NULL;
@@ -388,7 +399,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
xform[j].inkey = NULL;
xform[j].inkeyi = -1;
}
- /* else, cannot determine redundancy, keep both keys */
+ else
+ redundant_key_kept = true;
}
/* track number of attrs for which we have "=" keys */
numberOfEqualCols++;
@@ -409,6 +421,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
else
xform[BTLessStrategyNumber - 1].inkey = NULL;
}
+ else
+ redundant_key_kept = true;
}
/* try to keep only one of >, >= */
@@ -426,6 +440,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
else
xform[BTGreaterStrategyNumber - 1].inkey = NULL;
}
+ else
+ redundant_key_kept = true;
}
/*
@@ -466,25 +482,6 @@ _bt_preprocess_keys(IndexScanDesc scan)
/* check strategy this key's operator corresponds to */
j = inkey->sk_strategy - 1;
- /* if row comparison, push it directly to the output array */
- if (inkey->sk_flags & SK_ROW_HEADER)
- {
- ScanKey outkey = &so->keyData[new_numberOfKeys++];
-
- memcpy(outkey, inkey, sizeof(ScanKeyData));
- if (arrayKeyData)
- keyDataMap[new_numberOfKeys - 1] = i;
- if (numberOfEqualCols == attno - 1)
- _bt_mark_scankey_required(outkey);
-
- /*
- * We don't support RowCompare using equality; such a qual would
- * mess up the numberOfEqualCols tracking.
- */
- Assert(j != (BTEqualStrategyNumber - 1));
- continue;
- }
-
if (inkey->sk_strategy == BTEqualStrategyNumber &&
(inkey->sk_flags & SK_SEARCHARRAY))
{
@@ -593,9 +590,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
* the new scan key.
*
* Note: We do things this way around so that our arrays are
- * always in the same order as their corresponding scan keys,
- * even with incomplete opfamilies. _bt_advance_array_keys
- * depends on this.
+ * always in the same order as their corresponding scan keys.
+ * _bt_preprocess_array_keys_final expects this.
*/
ScanKey outkey = &so->keyData[new_numberOfKeys++];
@@ -607,6 +603,7 @@ _bt_preprocess_keys(IndexScanDesc scan)
xform[j].inkey = inkey;
xform[j].inkeyi = i;
xform[j].arrayidx = arrayidx;
+ redundant_key_kept = true;
}
}
}
@@ -622,6 +619,15 @@ _bt_preprocess_keys(IndexScanDesc scan)
if (arrayKeyData)
_bt_preprocess_array_keys_final(scan, keyDataMap);
+ /*
+ * If there are remaining redundant inequality keys, we must make sure
+ * that each index attribute has no more than one required >/>= key, and
+ * no more than one required </<= key. Attributes that have one or more
+ * required = keys now must keep only one required key (the first = key).
+ */
+ if (unlikely(redundant_key_kept) && so->qual_ok)
+ _bt_unmark_keys(scan, keyDataMap);
+
/* Could pfree arrayKeyData/keyDataMap now, but not worth the cycles */
}
@@ -746,9 +752,12 @@ _bt_fix_scankey_strategy(ScanKey skey, int16 *indoption)
*
* Depending on the operator type, the key may be required for both scan
* directions or just one. Also, if the key is a row comparison header,
- * we have to mark its first subsidiary ScanKey as required. (Subsequent
- * subsidiary ScanKeys are normally for lower-order columns, and thus
- * cannot be required, since they're after the first non-equality scankey.)
+ * we have to mark the appropriate subsidiary ScanKeys as required. In such
+ * cases, the first subsidiary key is required, but subsequent ones are
+ * required only as long as they correspond to successive index columns and
+ * match the leading column as to sort direction. Otherwise the row
+ * comparison ordering is different from the index ordering and so we can't
+ * stop the scan on the basis of those lower-order columns.
*
* Note: when we set required-key flag bits in a subsidiary scankey, we are
* scribbling on a data structure belonging to the index AM's caller, not on
@@ -786,12 +795,25 @@ _bt_mark_scankey_required(ScanKey skey)
if (skey->sk_flags & SK_ROW_HEADER)
{
ScanKey subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
+ AttrNumber attno = skey->sk_attno;
/* First subkey should be same column/operator as the header */
- Assert(subkey->sk_flags & SK_ROW_MEMBER);
- Assert(subkey->sk_attno == skey->sk_attno);
+ Assert(subkey->sk_attno == attno);
Assert(subkey->sk_strategy == skey->sk_strategy);
- subkey->sk_flags |= addflags;
+
+ for (;;)
+ {
+ Assert(subkey->sk_flags & SK_ROW_MEMBER);
+ if (subkey->sk_attno != attno)
+ break; /* non-adjacent key, so not required */
+ if (subkey->sk_strategy != skey->sk_strategy)
+ break; /* wrong direction, so not required */
+ subkey->sk_flags |= addflags;
+ if (subkey->sk_flags & SK_ROW_END)
+ break;
+ subkey++;
+ attno++;
+ }
}
}
@@ -847,8 +869,7 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
cmp_op;
StrategyNumber strat;
- Assert(!((leftarg->sk_flags | rightarg->sk_flags) &
- (SK_ROW_HEADER | SK_ROW_MEMBER)));
+ Assert(!((leftarg->sk_flags | rightarg->sk_flags) & SK_ROW_MEMBER));
/*
* First, deal with cases where one or both args are NULL. This should
@@ -925,6 +946,16 @@ _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
}
/*
+ * We don't yet know how to determine redundancy when it involves a row
+ * compare key (barring simple cases involving IS NULL/IS NOT NULL)
+ */
+ if ((leftarg->sk_flags | rightarg->sk_flags) & SK_ROW_HEADER)
+ {
+ Assert(!((leftarg->sk_flags | rightarg->sk_flags) & SK_BT_SKIP));
+ return false;
+ }
+
+ /*
* If either leftarg or rightarg are equality-type array scankeys, we need
* specialized handling (since by now we know that IS NULL wasn't used)
*/
@@ -1468,6 +1499,283 @@ _bt_skiparray_strat_increment(IndexScanDesc scan, ScanKey arraysk,
}
/*
+ * _bt_unmark_keys() -- make superfluous required keys nonrequired after all
+ *
+ * When _bt_preprocess_keys fails to eliminate one or more redundant keys, it
+ * calls here to make sure that no index attribute has more than one > or >=
+ * key marked required, and no more than one required < or <= key. Attributes
+ * with = keys will always get one = key as their required key. All other
+ * keys that were initially marked required get "unmarked" here. That way,
+ * _bt_first and _bt_checkkeys will reliably agree on which keys to use to
+ * start and/or to end the scan.
+ *
+ * We also relocate keys that become/started out nonrequired to the end of
+ * so->keyData[]. That way, _bt_first and _bt_checkkeys cannot fail to reach
+ * a required key due to some earlier nonrequired key getting in the way.
+ *
+ * Only call here when _bt_compare_scankey_args returned false at least once
+ * (otherwise, calling here will just waste cycles).
+ */
+static void
+_bt_unmark_keys(IndexScanDesc scan, int *keyDataMap)
+{
+ BTScanOpaque so = (BTScanOpaque) scan->opaque;
+ AttrNumber attno;
+ bool *unmarkikey;
+ int nunmark,
+ nunmarked,
+ nkept,
+ firsti;
+ ScanKey keepKeys,
+ unmarkKeys;
+ FmgrInfo *keepOrderProcs = NULL,
+ *unmarkOrderProcs = NULL;
+ bool haveReqEquals,
+ haveReqForward,
+ haveReqBackward;
+
+ /*
+ * Do an initial pass over so->keyData[] that determines which keys to
+ * keep as required. We expect so->keyData[] to still be in attribute
+ * order when we're called (though we don't expect any particular order
+ * among each attribute's keys).
+ *
+ * When both equality and inequality keys remain on a single attribute, we
+ * *must* make sure that exactly one of the equalities remains required.
+ * Any requiredness markings that we might leave on later keys/attributes
+ * are predicated on there being required = keys on all prior columns.
+ */
+ unmarkikey = palloc0(so->numberOfKeys * sizeof(bool));
+ nunmark = 0;
+
+ /* Set things up for first key's attribute */
+ attno = so->keyData[0].sk_attno;
+ firsti = 0;
+ haveReqEquals = false;
+ haveReqForward = false;
+ haveReqBackward = false;
+ for (int i = 0; i < so->numberOfKeys; i++)
+ {
+ ScanKey origkey = &so->keyData[i];
+
+ if (origkey->sk_attno != attno)
+ {
+ /* Reset for next attribute */
+ attno = origkey->sk_attno;
+ firsti = i;
+
+ haveReqEquals = false;
+ haveReqForward = false;
+ haveReqBackward = false;
+ }
+
+ /* Equalities get priority over inequalities */
+ if (haveReqEquals)
+ {
+ /*
+ * We already found the first "=" key for this attribute. We've
+ * already decided that all its other keys will be unmarked.
+ */
+ Assert(!(origkey->sk_flags & SK_SEARCHNULL));
+ unmarkikey[i] = true;
+ nunmark++;
+ continue;
+ }
+ else if ((origkey->sk_flags & SK_BT_REQFWD) &&
+ (origkey->sk_flags & SK_BT_REQBKWD))
+ {
+ /*
+ * Found the first "=" key for attno. All other attno keys will
+ * be unmarked.
+ */
+ Assert(origkey->sk_strategy == BTEqualStrategyNumber);
+
+ haveReqEquals = true;
+ for (int j = firsti; j < i; j++)
+ {
+ /* Unmark any prior inequality keys on attno after all */
+ if (!unmarkikey[j])
+ {
+ unmarkikey[j] = true;
+ nunmark++;
+ }
+ }
+ continue;
+ }
+
+ /* Deal with inequalities next */
+ if ((origkey->sk_flags & SK_BT_REQFWD) && !haveReqForward)
+ {
+ haveReqForward = true;
+ continue;
+ }
+ else if ((origkey->sk_flags & SK_BT_REQBKWD) && !haveReqBackward)
+ {
+ haveReqBackward = true;
+ continue;
+ }
+
+ /*
+ * We have either a redundant inequality key that will be unmarked, or
+ * we have a key that wasn't marked required in the first place
+ */
+ unmarkikey[i] = true;
+ nunmark++;
+ }
+
+ /* Should only be called when _bt_compare_scankey_args reported failure */
+ Assert(nunmark > 0);
+
+ /*
+ * Next, allocate temp arrays: one for required keys that'll remain
+ * required, the other for all remaining keys
+ */
+ unmarkKeys = palloc(nunmark * sizeof(ScanKeyData));
+ keepKeys = palloc((so->numberOfKeys - nunmark) * sizeof(ScanKeyData));
+ nunmarked = 0;
+ nkept = 0;
+ if (so->numArrayKeys)
+ {
+ unmarkOrderProcs = palloc(nunmark * sizeof(FmgrInfo));
+ keepOrderProcs = palloc((so->numberOfKeys - nunmark) * sizeof(FmgrInfo));
+ }
+
+ /*
+ * Next, copy the contents of so->keyData[] into the appropriate temp
+ * array.
+ *
+ * Scans with = array keys need us to maintain invariants around the order
+ * of so->orderProcs[] and so->arrayKeys[] relative to so->keyData[]. See
+ * _bt_preprocess_array_keys_final for a full explanation.
+ */
+ for (int i = 0; i < so->numberOfKeys; i++)
+ {
+ ScanKey origkey = &so->keyData[i];
+ ScanKey unmark;
+
+ if (!unmarkikey[i])
+ {
+ /*
+ * Key gets to keep its original requiredness markings.
+ *
+ * Key will stay in its original position, unless we're going to
+ * unmark an earlier key (in which case this key gets moved back).
+ */
+ memcpy(keepKeys + nkept, origkey, sizeof(ScanKeyData));
+
+ if (so->numArrayKeys)
+ {
+ keyDataMap[i] = nkept;
+ memcpy(keepOrderProcs + nkept, &so->orderProcs[i],
+ sizeof(FmgrInfo));
+ }
+
+ nkept++;
+ continue;
+ }
+
+ /*
+ * Key will be unmarked as needed, and moved to the end of the array,
+ * next to other keys that will become (or always were) nonrequired
+ */
+ unmark = unmarkKeys + nunmarked;
+ memcpy(unmark, origkey, sizeof(ScanKeyData));
+
+ if (so->numArrayKeys)
+ {
+ keyDataMap[i] = (so->numberOfKeys - nunmark) + nunmarked;
+ memcpy(&unmarkOrderProcs[nunmarked], &so->orderProcs[i],
+ sizeof(FmgrInfo));
+ }
+
+ /*
+ * Preprocessing only generates skip arrays when it knows that they'll
+ * be the only required = key on the attr. We'll never unmark them.
+ */
+ Assert(!(unmark->sk_flags & SK_BT_SKIP));
+
+ /*
+ * Also shouldn't have to unmark an IS NULL or an IS NOT NULL key.
+ * They aren't cross-type, so an incomplete opfamily can't matter.
+ */
+ Assert(!(unmark->sk_flags & SK_ISNULL) ||
+ !(unmark->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)));
+
+ /* Clear requiredness flags on redundant key (and on any subkeys) */
+ unmark->sk_flags &= ~(SK_BT_REQFWD | SK_BT_REQBKWD);
+ if (unmark->sk_flags & SK_ROW_HEADER)
+ {
+ ScanKey subkey = (ScanKey) DatumGetPointer(unmark->sk_argument);
+
+ Assert(subkey->sk_strategy == unmark->sk_strategy);
+ for (;;)
+ {
+ Assert(subkey->sk_flags & SK_ROW_MEMBER);
+ subkey->sk_flags &= ~(SK_BT_REQFWD | SK_BT_REQBKWD);
+ if (subkey->sk_flags & SK_ROW_END)
+ break;
+ subkey++;
+ }
+ }
+
+ nunmarked++;
+ }
+
+ /* Copy both temp arrays back into so->keyData[] to reorder */
+ Assert(nkept == so->numberOfKeys - nunmark);
+ Assert(nunmarked == nunmark);
+ memcpy(so->keyData, keepKeys, sizeof(ScanKeyData) * nkept);
+ memcpy(so->keyData + nkept, unmarkKeys, sizeof(ScanKeyData) * nunmarked);
+
+ /* Done with temp arrays */
+ pfree(unmarkikey);
+ pfree(keepKeys);
+ pfree(unmarkKeys);
+
+ /*
+ * Now copy back the so->orderProcs[] temp entries needed by scans with
+ * = array keys (just like with the so->keyData[] temp arrays)
+ */
+ if (so->numArrayKeys)
+ {
+ memcpy(so->orderProcs, keepOrderProcs, sizeof(FmgrInfo) * nkept);
+ memcpy(so->orderProcs + nkept, unmarkOrderProcs,
+ sizeof(FmgrInfo) * nunmarked);
+
+ /* Also fix up array->scan_key references */
+ for (int arridx = 0; arridx < so->numArrayKeys; arridx++)
+ {
+ BTArrayKeyInfo *array = &so->arrayKeys[arridx];
+
+ array->scan_key = keyDataMap[array->scan_key];
+ }
+
+ /*
+ * Sort so->arrayKeys[] based on its new BTArrayKeyInfo.scan_key
+ * offsets, so that its order matches so->keyData[] order as expected
+ */
+ qsort(so->arrayKeys, so->numArrayKeys, sizeof(BTArrayKeyInfo),
+ _bt_reorder_array_cmp);
+
+ /* Done with temp arrays */
+ pfree(unmarkOrderProcs);
+ pfree(keepOrderProcs);
+ }
+}
+
+/*
+ * qsort comparator for reordering so->arrayKeys[] BTArrayKeyInfo entries
+ */
+static int
+_bt_reorder_array_cmp(const void *a, const void *b)
+{
+ BTArrayKeyInfo *arraya = (BTArrayKeyInfo *) a;
+ BTArrayKeyInfo *arrayb = (BTArrayKeyInfo *) b;
+
+ return pg_cmp_s32(arraya->scan_key, arrayb->scan_key);
+}
+
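Taken together, the unmark/keep logic above amounts to a stable two-way partition of so->keyData[] plus an old-to-new index map (keyDataMap[]) that the array bookkeeping is remapped through. A minimal standalone sketch of the same technique, under hypothetical names (plain C, not PostgreSQL code):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Stable two-way partition with an old->new index map: kept entries
 * stay at the front and unmarked entries move to the tail, with both
 * groups preserving their original relative order (as with the
 * so->keyData[] copy-back above).
 */
static void
partition_with_map(int *items, const bool *unmark, int *map, int n)
{
	int		nunmark = 0;
	int		nkept = 0;
	int		ntail = 0;
	int	   *keep;
	int	   *tail;

	/* count tail entries first, so tail positions can be computed */
	for (int i = 0; i < n; i++)
		nunmark += unmark[i] ? 1 : 0;

	keep = malloc(n * sizeof(int));
	tail = malloc(n * sizeof(int));

	for (int i = 0; i < n; i++)
	{
		if (!unmark[i])
		{
			map[i] = nkept;		/* stays towards the front */
			keep[nkept++] = items[i];
		}
		else
		{
			map[i] = (n - nunmark) + ntail; /* moves to the tail */
			tail[ntail++] = items[i];
		}
	}

	/* copy both halves back, like the keepKeys/unmarkKeys copy-back */
	memcpy(items, keep, nkept * sizeof(int));
	memcpy(items + nkept, tail, ntail * sizeof(int));
	free(keep);
	free(tail);
}

int
main(void)
{
	int		items[] = {10, 11, 12, 13, 14};
	bool	unmark[] = {false, true, false, true, false};
	int		map[5];

	partition_with_map(items, unmark, map, 5);
	for (int i = 0; i < 5; i++)
		printf("%d ", items[i]);	/* prints: 10 12 14 11 13 */
	printf("\n");
	for (int i = 0; i < 5; i++)
		printf("%d ", map[i]);		/* prints: 0 3 1 4 2 */
	printf("\n");
	return 0;
}

The patch applies the same move to so->orderProcs[], then qsorts so->arrayKeys[] on the remapped scan_key offsets (via _bt_reorder_array_cmp above) so that everything stays aligned with the new so->keyData[] order.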
+/*
* _bt_preprocess_array_keys() -- Preprocess SK_SEARCHARRAY scan keys
*
* If there are any SK_SEARCHARRAY scan keys, deconstruct the array(s) and
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 765659887af..fdff960c130 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -228,6 +228,8 @@ btgettuple(IndexScanDesc scan, ScanDirection dir)
BTScanOpaque so = (BTScanOpaque) scan->opaque;
bool res;
+ Assert(scan->heapRelation != NULL);
+
/* btree indexes are never lossy */
scan->xs_recheck = false;
@@ -289,6 +291,8 @@ btgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
int64 ntids = 0;
ItemPointer heapTid;
+ Assert(scan->heapRelation == NULL);
+
/* Each loop iteration performs another primitive index scan */
do
{
@@ -393,6 +397,34 @@ btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
BTScanPosInvalidate(so->currPos);
}
+ /*
+ * We prefer to eagerly drop leaf page pins before btgettuple returns.
+ * This avoids making VACUUM wait to acquire a cleanup lock on the page.
+ *
+ * We cannot safely drop leaf page pins during index-only scans due to a
+ * race condition involving VACUUM setting pages all-visible in the VM.
+ * It's also unsafe for plain index scans that use a non-MVCC snapshot.
+ *
+ * When we drop pins eagerly, the mechanism that marks so->killedItems[]
+ * index tuples LP_DEAD has to deal with concurrent TID recycling races.
+ * The scheme used to detect unsafe TID recycling won't work when scanning
+ * unlogged relations (since it involves saving an affected page's LSN).
+ * Opt out of eager pin dropping during unlogged relation scans for now
+ * (this is preferable to opting out of kill_prior_tuple LP_DEAD setting).
+ *
+ * Also opt out of dropping leaf page pins eagerly during bitmap scans.
+ * Pins cannot be held for more than an instant during bitmap scans either
+ * way, so we might as well avoid wasting cycles on acquiring page LSNs.
+ *
+ * See nbtree/README section on making concurrent TID recycling safe.
+ *
+ * Note: so->dropPin should never change across rescans.
+ */
+ so->dropPin = (!scan->xs_want_itup &&
+ IsMVCCSnapshot(scan->xs_snapshot) &&
+ RelationNeedsWAL(scan->indexRelation) &&
+ scan->heapRelation != NULL);
+
so->markItemIndex = -1;
so->needPrimScan = false;
so->scanBehind = false;
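Read as a checklist, the predicate above only enables eager pin dropping for the most common kind of scan. A quick sketch of how familiar scan types come out, inferred from the four conditions (a reading aid, not from the patch):

/*
 * Plain index scan, MVCC snapshot, logged heap relation -> dropPin set
 * Index-only scan (scan->xs_want_itup)                  -> not set
 * Non-MVCC snapshot (e.g. SnapshotAny)                  -> not set
 * Unlogged or temp index (!RelationNeedsWAL)            -> not set
 * Bitmap index scan (scan->heapRelation == NULL)        -> not set
 */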
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index fe9a3886913..d69798795b4 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -25,7 +25,7 @@
#include "utils/rel.h"
-static void _bt_drop_lock_and_maybe_pin(IndexScanDesc scan, BTScanPos sp);
+static inline void _bt_drop_lock_and_maybe_pin(Relation rel, BTScanOpaque so);
static Buffer _bt_moveright(Relation rel, Relation heaprel, BTScanInsert key,
Buffer buf, bool forupdate, BTStack stack,
int access);
@@ -57,24 +57,29 @@ static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
/*
* _bt_drop_lock_and_maybe_pin()
*
- * Unlock the buffer; and if it is safe to release the pin, do that, too.
- * This will prevent vacuum from stalling in a blocked state trying to read a
- * page when a cursor is sitting on it.
- *
- * See nbtree/README section on making concurrent TID recycling safe.
+ * Unlock so->currPos.buf. If this is a so->dropPin scan, drop the pin, too.
+ * Dropping the pin prevents VACUUM from blocking on acquiring a cleanup lock.
*/
-static void
-_bt_drop_lock_and_maybe_pin(IndexScanDesc scan, BTScanPos sp)
+static inline void
+_bt_drop_lock_and_maybe_pin(Relation rel, BTScanOpaque so)
{
- _bt_unlockbuf(scan->indexRelation, sp->buf);
-
- if (IsMVCCSnapshot(scan->xs_snapshot) &&
- RelationNeedsWAL(scan->indexRelation) &&
- !scan->xs_want_itup)
+ if (!so->dropPin)
{
- ReleaseBuffer(sp->buf);
- sp->buf = InvalidBuffer;
+ /* Just drop the lock (not the pin) */
+ _bt_unlockbuf(rel, so->currPos.buf);
+ return;
}
+
+ /*
+ * Drop both the lock and the pin.
+ *
+ * Have to set so->currPos.lsn so that _bt_killitems has a way to detect
+ * when concurrent heap TID recycling by VACUUM might have taken place.
+ */
+ Assert(RelationNeedsWAL(rel));
+ so->currPos.lsn = BufferGetLSNAtomic(so->currPos.buf);
+ _bt_relbuf(rel, so->currPos.buf);
+ so->currPos.buf = InvalidBuffer;
}
/*
@@ -866,8 +871,8 @@ _bt_compare(Relation rel,
* if backwards scan, the last item) in the tree that satisfies the
* qualifications in the scan key. On success exit, data about the
* matching tuple(s) on the page has been loaded into so->currPos. We'll
- * drop all locks and hold onto a pin on page's buffer, except when
- * _bt_drop_lock_and_maybe_pin dropped the pin to avoid blocking VACUUM.
+ * drop all locks and hold onto a pin on page's buffer, except during
+ * so->dropPin scans, when we drop both the lock and the pin.
* _bt_returnitem sets the next item to return to scan on success exit.
*
* If there are no matching items in the index, we return false, with no
@@ -887,9 +892,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
OffsetNumber offnum;
BTScanInsertData inskey;
ScanKey startKeys[INDEX_MAX_KEYS];
- ScanKeyData notnullkeys[INDEX_MAX_KEYS];
+ ScanKeyData notnullkey;
int keysz = 0;
- StrategyNumber strat_total;
+ StrategyNumber strat_total = InvalidStrategy;
BlockNumber blkno = InvalidBlockNumber,
lastcurrblkno;
@@ -955,46 +960,51 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/*----------
* Examine the scan keys to discover where we need to start the scan.
+ * The selected scan keys (at most one per index column) are remembered by
+ * storing their addresses into the local startKeys[] array. The final
+ * startKeys[] entry's strategy is set in strat_total. (Actually, there
+ * are a couple of cases where we force a less/more restrictive strategy.)
*
- * We want to identify the keys that can be used as starting boundaries;
- * these are =, >, or >= keys for a forward scan or =, <, <= keys for
- * a backwards scan. We can use keys for multiple attributes so long as
- * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
- * a > or < boundary or find an attribute with no boundary (which can be
- * thought of as the same as "> -infinity"), we can't use keys for any
- * attributes to its right, because it would break our simplistic notion
- * of what initial positioning strategy to use.
+ * We must use the key that was marked required (in the direction opposite
+ * our own scan's) during preprocessing. Each index attribute can only
+ * have one such required key. In general, the keys that we use to find
+ * an initial position when scanning forwards are the same keys that end
+ * the scan on the leaf level when scanning backwards (and vice-versa).
*
* When the scan keys include cross-type operators, _bt_preprocess_keys
- * may not be able to eliminate redundant keys; in such cases we will
- * arbitrarily pick a usable one for each attribute. This is correct
- * but possibly not optimal behavior. (For example, with keys like
- * "x >= 4 AND x >= 5" we would elect to scan starting at x=4 when
- * x=5 would be more efficient.) Since the situation only arises given
- * a poorly-worded query plus an incomplete opfamily, live with it.
+ * may not be able to eliminate redundant keys; in such cases it will
+ * arbitrarily pick a usable key for each attribute (and scan direction),
+ * ensuring that there is no more than one key required in each direction.
+ * We stop considering further keys once we reach the first nonrequired
+ * key (which must come after all required keys), so this can't affect us.
+ *
+ * The required keys that we use as starting boundaries have to be =, >,
+ * or >= keys for a forward scan or =, <, <= keys for a backwards scan.
+ * We can use keys for multiple attributes so long as the prior attributes
+ * had only =, >= (resp. =, <=) keys. These rules are very similar to the
+ * rules that preprocessing used to determine which keys to mark required.
+ * We cannot always use every required key as a positioning key, though.
+ * Skip arrays necessitate independently applying our own rules here.
+ * Skip arrays are always generally considered = array keys, but we'll
+ * nevertheless treat them as inequalities at certain points of the scan.
+ * When that happens, it _might_ have implications for the number of
+ * required keys that we can safely use for initial positioning purposes.
*
- * When both equality and inequality keys appear for a single attribute
- * (again, only possible when cross-type operators appear), we *must*
- * select one of the equality keys for the starting point, because
- * _bt_checkkeys() will stop the scan as soon as an equality qual fails.
- * For example, if we have keys like "x >= 4 AND x = 10" and we elect to
- * start at x=4, we will fail and stop before reaching x=10. If multiple
- * equality quals survive preprocessing, however, it doesn't matter which
- * one we use --- by definition, they are either redundant or
- * contradictory.
+ * For example, a forward scan with a skip array on its leading attribute
+ * (with no low_compare/high_compare) will have at least two required scan
+ * keys, but we won't use any of them as boundary keys during the scan's
+ * initial call here. Our positioning key during the first call here can
+ * be thought of as representing "> -infinity". Similarly, if such a skip
+ * array's low_compare is "a > 'foo'", then we position using "a > 'foo'"
+ * during the scan's initial call here; a lower-order key such as "b = 42"
+ * can't be used until the "a" array advances beyond MINVAL/low_compare.
*
- * In practice we rarely see any "attribute boundary key gaps" here.
- * Preprocessing can usually backfill skip array keys for any attributes
- * that were omitted from the original scan->keyData[] input keys. All
- * array keys are always considered = keys, but we'll sometimes need to
- * treat the current key value as if we were using an inequality strategy.
- * This happens with range skip arrays, which store inequality keys in the
- * array's low_compare/high_compare fields (used to find the first/last
- * set of matches, when = key will lack a usable sk_argument value).
- * These are always preferred over any redundant "standard" inequality
- * keys on the same column (per the usual rule about preferring = keys).
- * Note also that any column with an = skip array key can never have an
- * additional, contradictory = key.
+ * On the other hand, if such a skip array's low_compare was "a >= 'foo'",
+ * then we _can_ use "a >= 'foo' AND b = 42" during the initial call here.
+ * A subsequent call here might have us use "a = 'fop' AND b = 42". Note
+ * that we treat = and >= as equivalent when scanning forwards (just as we
+ * treat = and <= as equivalent when scanning backwards). We effectively
+ * do the same thing (though with a distinct "a" element/value) each time.
*
* All keys (with the exception of SK_SEARCHNULL keys and SK_BT_SKIP
* array keys whose array is "null_elem=true") imply a NOT NULL qualifier.
@@ -1006,41 +1016,38 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* traversing a lot of null entries at the start of the scan.
*
* In this loop, row-comparison keys are treated the same as keys on their
- * first (leftmost) columns. We'll add on lower-order columns of the row
- * comparison below, if possible.
+ * first (leftmost) columns. Below, we'll add all lower-order columns of
+ * the row comparison that were marked required during preprocessing.
*
- * The selected scan keys (at most one per index column) are remembered by
- * storing their addresses into the local startKeys[] array.
- *
- * _bt_checkkeys/_bt_advance_array_keys decide whether and when to start
- * the next primitive index scan (for scans with array keys) based in part
- * on an understanding of how it'll enable us to reposition the scan.
- * They're directly aware of how we'll sometimes cons up an explicit
- * SK_SEARCHNOTNULL key. They'll even end primitive scans by applying a
- * symmetric "deduce NOT NULL" rule of their own. This allows top-level
- * scans to skip large groups of NULLs through repeated deductions about
- * key strictness (for a required inequality key) and whether NULLs in the
- * key's index column are stored last or first (relative to non-NULLs).
+ * _bt_advance_array_keys needs to know exactly how we'll reposition the
+ * scan (should it opt to schedule another primitive index scan). It is
+ * critical that primscans only be scheduled when they'll definitely make
+ * some useful progress. _bt_advance_array_keys does this by calling
+ * _bt_checkkeys routines that report whether a tuple is past the end of
+ * matches for the scan's keys (given the scan's current array elements).
+ * If the page's final tuple is "after the end of matches" for a scan that
+ * uses the *opposite* scan direction, then it must follow that it's also
+ * "before the start of matches" for the actual current scan direction.
+ * It is therefore essential that all of our initial positioning rules are
+ * symmetric with _bt_checkkeys's corresponding continuescan=false rule.
* If you update anything here, _bt_checkkeys/_bt_advance_array_keys might
* need to be kept in sync.
*----------
*/
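/*
 * Illustrative aside (not from the patch): given a forward scan with the
 * qual "a = 5 AND b >= 10 AND b < 77", preprocessing marks "a = 5"
 * required in both directions, "b >= 10" required in the backward
 * direction, and "b < 77" required in the forward direction. The loop
 * below picks startKeys[] = {"a = 5", "b >= 10"} and sets strat_total to
 * the final entry's strategy (>=), so the scan descends to the first
 * leaf tuple satisfying "(a, b) >= (5, 10)". The "b < 77" key plays no
 * part in positioning; it terminates the scan on the leaf level instead.
 */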
- strat_total = BTEqualStrategyNumber;
if (so->numberOfKeys > 0)
{
AttrNumber curattr;
- ScanKey chosen;
+ ScanKey bkey;
ScanKey impliesNN;
ScanKey cur;
/*
- * chosen is the so-far-chosen key for the current attribute, if any.
- * We don't cast the decision in stone until we reach keys for the
- * next attribute.
+ * bkey will be set to the key that preprocessing left behind as the
+ * boundary key for this attribute, in this scan direction (if any)
*/
cur = so->keyData;
curattr = 1;
- chosen = NULL;
+ bkey = NULL;
/* Also remember any scankey that implies a NOT NULL constraint */
impliesNN = NULL;
@@ -1053,23 +1060,29 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
{
if (i >= so->numberOfKeys || cur->sk_attno != curattr)
{
+ /* Done looking for the curattr boundary key */
+ Assert(bkey == NULL ||
+ (bkey->sk_attno == curattr &&
+ (bkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD))));
+ Assert(impliesNN == NULL ||
+ (impliesNN->sk_attno == curattr &&
+ (impliesNN->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD))));
+
/*
- * Done looking at keys for curattr.
- *
* If this is a scan key for a skip array whose current
* element is MINVAL, choose low_compare (when scanning
* backwards it'll be MAXVAL, and we'll choose high_compare).
*
- * Note: if the array's low_compare key makes 'chosen' NULL,
+ * Note: if the array's low_compare key makes 'bkey' NULL,
* then we behave as if the array's first element is -inf,
* except when !array->null_elem implies a usable NOT NULL
* constraint.
*/
- if (chosen != NULL &&
- (chosen->sk_flags & (SK_BT_MINVAL | SK_BT_MAXVAL)))
+ if (bkey != NULL &&
+ (bkey->sk_flags & (SK_BT_MINVAL | SK_BT_MAXVAL)))
{
- int ikey = chosen - so->keyData;
- ScanKey skipequalitykey = chosen;
+ int ikey = bkey - so->keyData;
+ ScanKey skipequalitykey = bkey;
BTArrayKeyInfo *array = NULL;
for (int arridx = 0; arridx < so->numArrayKeys; arridx++)
@@ -1082,42 +1095,41 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
if (ScanDirectionIsForward(dir))
{
Assert(!(skipequalitykey->sk_flags & SK_BT_MAXVAL));
- chosen = array->low_compare;
+ bkey = array->low_compare;
}
else
{
Assert(!(skipequalitykey->sk_flags & SK_BT_MINVAL));
- chosen = array->high_compare;
+ bkey = array->high_compare;
}
- Assert(chosen == NULL ||
- chosen->sk_attno == skipequalitykey->sk_attno);
+ Assert(bkey == NULL ||
+ bkey->sk_attno == skipequalitykey->sk_attno);
if (!array->null_elem)
impliesNN = skipequalitykey;
else
- Assert(chosen == NULL && impliesNN == NULL);
+ Assert(bkey == NULL && impliesNN == NULL);
}
/*
* If we didn't find a usable boundary key, see if we can
* deduce a NOT NULL key
*/
- if (chosen == NULL && impliesNN != NULL &&
+ if (bkey == NULL && impliesNN != NULL &&
((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ?
ScanDirectionIsForward(dir) :
ScanDirectionIsBackward(dir)))
{
- /* Yes, so build the key in notnullkeys[keysz] */
- chosen = &notnullkeys[keysz];
- ScanKeyEntryInitialize(chosen,
+ /* Final startKeys[] entry will be deduced NOT NULL key */
+ bkey = &notnullkey;
+ ScanKeyEntryInitialize(bkey,
(SK_SEARCHNOTNULL | SK_ISNULL |
(impliesNN->sk_flags &
(SK_BT_DESC | SK_BT_NULLS_FIRST))),
curattr,
- ((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ?
- BTGreaterStrategyNumber :
- BTLessStrategyNumber),
+ ScanDirectionIsForward(dir) ?
+ BTGreaterStrategyNumber : BTLessStrategyNumber,
InvalidOid,
InvalidOid,
InvalidOid,
@@ -1125,12 +1137,12 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
}
/*
- * If we still didn't find a usable boundary key, quit; else
- * save the boundary key pointer in startKeys.
+ * If preprocessing didn't leave a usable boundary key, quit;
+ * else save the boundary key pointer in startKeys[]
*/
- if (chosen == NULL)
+ if (bkey == NULL)
break;
- startKeys[keysz++] = chosen;
+ startKeys[keysz++] = bkey;
/*
* We can only consider adding more boundary keys when the one
@@ -1138,7 +1150,7 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* (during backwards scans we can only do so when the key that
* we just added to startKeys[] uses the = or <= strategy)
*/
- strat_total = chosen->sk_strategy;
+ strat_total = bkey->sk_strategy;
if (strat_total == BTGreaterStrategyNumber ||
strat_total == BTLessStrategyNumber)
break;
@@ -1149,19 +1161,19 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* make strat_total > or < (and stop adding boundary keys).
* This can only happen with opclasses that lack skip support.
*/
- if (chosen->sk_flags & (SK_BT_NEXT | SK_BT_PRIOR))
+ if (bkey->sk_flags & (SK_BT_NEXT | SK_BT_PRIOR))
{
- Assert(chosen->sk_flags & SK_BT_SKIP);
+ Assert(bkey->sk_flags & SK_BT_SKIP);
Assert(strat_total == BTEqualStrategyNumber);
if (ScanDirectionIsForward(dir))
{
- Assert(!(chosen->sk_flags & SK_BT_PRIOR));
+ Assert(!(bkey->sk_flags & SK_BT_PRIOR));
strat_total = BTGreaterStrategyNumber;
}
else
{
- Assert(!(chosen->sk_flags & SK_BT_NEXT));
+ Assert(!(bkey->sk_flags & SK_BT_NEXT));
strat_total = BTLessStrategyNumber;
}
@@ -1175,24 +1187,30 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
/*
* Done if that was the last scan key output by preprocessing.
- * Also done if there is a gap index attribute that lacks a
- * usable key (only possible when preprocessing was unable to
- * generate a skip array key to "fill in the gap").
+ * Also done if we've now examined all keys marked required.
*/
if (i >= so->numberOfKeys ||
- cur->sk_attno != curattr + 1)
+ !(cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)))
break;
/*
* Reset for next attr.
*/
+ Assert(cur->sk_attno == curattr + 1);
curattr = cur->sk_attno;
- chosen = NULL;
+ bkey = NULL;
impliesNN = NULL;
}
/*
- * Can we use this key as a starting boundary for this attr?
+ * If we've located the starting boundary key for curattr, we have
+ * no interest in curattr's other required key
+ */
+ if (bkey != NULL)
+ continue;
+
+ /*
+ * Is this key the starting boundary key for curattr?
*
* If not, does it imply a NOT NULL constraint? (Because
* SK_SEARCHNULL keys are always assigned BTEqualStrategyNumber,
@@ -1202,27 +1220,20 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
{
case BTLessStrategyNumber:
case BTLessEqualStrategyNumber:
- if (chosen == NULL)
- {
- if (ScanDirectionIsBackward(dir))
- chosen = cur;
- else
- impliesNN = cur;
- }
+ if (ScanDirectionIsBackward(dir))
+ bkey = cur;
+ else if (impliesNN == NULL)
+ impliesNN = cur;
break;
case BTEqualStrategyNumber:
- /* override any non-equality choice */
- chosen = cur;
+ bkey = cur;
break;
case BTGreaterEqualStrategyNumber:
case BTGreaterStrategyNumber:
- if (chosen == NULL)
- {
- if (ScanDirectionIsForward(dir))
- chosen = cur;
- else
- impliesNN = cur;
- }
+ if (ScanDirectionIsForward(dir))
+ bkey = cur;
+ else if (impliesNN == NULL)
+ impliesNN = cur;
break;
}
}
@@ -1248,16 +1259,18 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
Assert(keysz <= INDEX_MAX_KEYS);
for (int i = 0; i < keysz; i++)
{
- ScanKey cur = startKeys[i];
+ ScanKey bkey = startKeys[i];
- Assert(cur->sk_attno == i + 1);
+ Assert(bkey->sk_attno == i + 1);
- if (cur->sk_flags & SK_ROW_HEADER)
+ if (bkey->sk_flags & SK_ROW_HEADER)
{
/*
* Row comparison header: look to the first row member instead
*/
- ScanKey subkey = (ScanKey) DatumGetPointer(cur->sk_argument);
+ ScanKey subkey = (ScanKey) DatumGetPointer(bkey->sk_argument);
+ bool loosen_strat = false,
+ tighten_strat = false;
/*
* Cannot be a NULL in the first row member: _bt_preprocess_keys
@@ -1265,122 +1278,160 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* ever getting this far
*/
Assert(subkey->sk_flags & SK_ROW_MEMBER);
- Assert(subkey->sk_attno == cur->sk_attno);
+ Assert(subkey->sk_attno == bkey->sk_attno);
Assert(!(subkey->sk_flags & SK_ISNULL));
/*
+ * This is either a > or >= key (during backwards scans it is
+ * either < or <=) that was marked required during preprocessing.
+ * Later so->keyData[] keys can't have been marked required, so
+ * our row compare header key must be the final startKeys[] entry.
+ */
+ Assert(subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD));
+ Assert(i == keysz - 1);
+
+ /*
* The member scankeys are already in insertion format (ie, they
* have sk_func = 3-way-comparison function)
*/
memcpy(inskey.scankeys + i, subkey, sizeof(ScanKeyData));
/*
- * If the row comparison is the last positioning key we accepted,
- * try to add additional keys from the lower-order row members.
- * (If we accepted independent conditions on additional index
- * columns, we use those instead --- doesn't seem worth trying to
- * determine which is more restrictive.) Note that this is OK
- * even if the row comparison is of ">" or "<" type, because the
- * condition applied to all but the last row member is effectively
- * ">=" or "<=", and so the extra keys don't break the positioning
- * scheme. But, by the same token, if we aren't able to use all
- * the row members, then the part of the row comparison that we
- * did use has to be treated as just a ">=" or "<=" condition, and
- * so we'd better adjust strat_total accordingly.
+ * Now look to later row compare members.
+ *
+ * If there's an "index attribute gap" between two row compare
+ * members, the second member won't have been marked required, and
+ * so can't be used as a starting boundary key here. The part of
+ * the row comparison that we do still use has to be treated as a
+ * ">=" or "<=" condition. For example, a qual "(a, c) > (1, 42)"
+ * with an omitted intervening index attribute "b" will use an
+ * insertion scan key "a >= 1". Even the first "a = 1" tuple on
+ * the leaf level might satisfy the row compare qual.
+ *
+ * We're able to use a _more_ restrictive strategy when we reach a
+ * NULL row compare member, since they're always unsatisfiable.
+ * For example, a qual "(a, b, c) >= (1, NULL, 77)" will use an
+ * insertion scan key "a > 1". All tuples where "a = 1" cannot
+ * possibly satisfy the row compare qual, so this is safe.
*/
- if (i == keysz - 1)
+ Assert(!(subkey->sk_flags & SK_ROW_END));
+ for (;;)
{
- bool used_all_subkeys = false;
+ subkey++;
+ Assert(subkey->sk_flags & SK_ROW_MEMBER);
- Assert(!(subkey->sk_flags & SK_ROW_END));
- for (;;)
+ if (subkey->sk_flags & SK_ISNULL)
{
- subkey++;
- Assert(subkey->sk_flags & SK_ROW_MEMBER);
- if (subkey->sk_attno != keysz + 1)
- break; /* out-of-sequence, can't use it */
- if (subkey->sk_strategy != cur->sk_strategy)
- break; /* wrong direction, can't use it */
- if (subkey->sk_flags & SK_ISNULL)
- break; /* can't use null keys */
- Assert(keysz < INDEX_MAX_KEYS);
- memcpy(inskey.scankeys + keysz, subkey,
- sizeof(ScanKeyData));
- keysz++;
- if (subkey->sk_flags & SK_ROW_END)
- {
- used_all_subkeys = true;
- break;
- }
+ /*
+ * NULL member key, can only use earlier keys.
+ *
+ * We deliberately avoid checking if this key is marked
+ * required. All earlier keys are required, and this key
+ * is unsatisfiable either way, so we can't miss anything.
+ */
+ tighten_strat = true;
+ break;
}
- if (!used_all_subkeys)
+
+ if (!(subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)))
{
- switch (strat_total)
- {
- case BTLessStrategyNumber:
- strat_total = BTLessEqualStrategyNumber;
- break;
- case BTGreaterStrategyNumber:
- strat_total = BTGreaterEqualStrategyNumber;
- break;
- }
+ /* nonrequired member key, can only use earlier keys */
+ loosen_strat = true;
+ break;
}
- break; /* done with outer loop */
+
+ Assert(subkey->sk_attno == keysz + 1);
+ Assert(subkey->sk_strategy == bkey->sk_strategy);
+ Assert(keysz < INDEX_MAX_KEYS);
+
+ memcpy(inskey.scankeys + keysz, subkey,
+ sizeof(ScanKeyData));
+ keysz++;
+ if (subkey->sk_flags & SK_ROW_END)
+ break;
}
- }
- else
- {
- /*
- * Ordinary comparison key. Transform the search-style scan key
- * to an insertion scan key by replacing the sk_func with the
- * appropriate btree comparison function.
- *
- * If scankey operator is not a cross-type comparison, we can use
- * the cached comparison function; otherwise gotta look it up in
- * the catalogs. (That can't lead to infinite recursion, since no
- * indexscan initiated by syscache lookup will use cross-data-type
- * operators.)
- *
- * We support the convention that sk_subtype == InvalidOid means
- * the opclass input type; this is a hack to simplify life for
- * ScanKeyInit().
- */
- if (cur->sk_subtype == rel->rd_opcintype[i] ||
- cur->sk_subtype == InvalidOid)
+ Assert(!(loosen_strat && tighten_strat));
+ if (loosen_strat)
{
- FmgrInfo *procinfo;
-
- procinfo = index_getprocinfo(rel, cur->sk_attno, BTORDER_PROC);
- ScanKeyEntryInitializeWithInfo(inskey.scankeys + i,
- cur->sk_flags,
- cur->sk_attno,
- InvalidStrategy,
- cur->sk_subtype,
- cur->sk_collation,
- procinfo,
- cur->sk_argument);
+ /* Use less restrictive strategy (and fewer member keys) */
+ switch (strat_total)
+ {
+ case BTLessStrategyNumber:
+ strat_total = BTLessEqualStrategyNumber;
+ break;
+ case BTGreaterStrategyNumber:
+ strat_total = BTGreaterEqualStrategyNumber;
+ break;
+ }
}
- else
+ if (tighten_strat)
{
- RegProcedure cmp_proc;
-
- cmp_proc = get_opfamily_proc(rel->rd_opfamily[i],
- rel->rd_opcintype[i],
- cur->sk_subtype,
- BTORDER_PROC);
- if (!RegProcedureIsValid(cmp_proc))
- elog(ERROR, "missing support function %d(%u,%u) for attribute %d of index \"%s\"",
- BTORDER_PROC, rel->rd_opcintype[i], cur->sk_subtype,
- cur->sk_attno, RelationGetRelationName(rel));
- ScanKeyEntryInitialize(inskey.scankeys + i,
- cur->sk_flags,
- cur->sk_attno,
- InvalidStrategy,
- cur->sk_subtype,
- cur->sk_collation,
- cmp_proc,
- cur->sk_argument);
+ /* Use more restrictive strategy (and fewer member keys) */
+ switch (strat_total)
+ {
+ case BTLessEqualStrategyNumber:
+ strat_total = BTLessStrategyNumber;
+ break;
+ case BTGreaterEqualStrategyNumber:
+ strat_total = BTGreaterStrategyNumber;
+ break;
+ }
}
+
+ /* done adding to inskey (row comparison keys always come last) */
+ break;
+ }
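+ /*
+  * Illustrative aside (not from the patch): with a forward scan and the
+  * qual "(a, c) > (1, 42)", the "c" member isn't marked required (index
+  * attribute "b" was omitted), so the loop above breaks with
+  * loosen_strat=true; only the "a" member goes into inskey, and
+  * strat_total is loosened from > to >=, positioning at the first
+  * "a >= 1" tuple. With "(a, b, c) >= (1, NULL, 77)", the NULL "b"
+  * member sets tighten_strat=true instead: strat_total is tightened
+  * from >= to >, since no "a = 1" tuple can possibly match.
+  */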
+
+ /*
+ * Ordinary comparison key/search-style key.
+ *
+ * Transform the search-style scan key to an insertion scan key by
+ * replacing the sk_func with the appropriate btree 3-way-comparison
+ * function.
+ *
+ * If scankey operator is not a cross-type comparison, we can use the
+ * cached comparison function; otherwise gotta look it up in the
+ * catalogs. (That can't lead to infinite recursion, since no
+ * indexscan initiated by syscache lookup will use cross-data-type
+ * operators.)
+ *
+ * We support the convention that sk_subtype == InvalidOid means the
+ * opclass input type; this hack simplifies life for ScanKeyInit().
+ */
+ if (bkey->sk_subtype == rel->rd_opcintype[i] ||
+ bkey->sk_subtype == InvalidOid)
+ {
+ FmgrInfo *procinfo;
+
+ procinfo = index_getprocinfo(rel, bkey->sk_attno, BTORDER_PROC);
+ ScanKeyEntryInitializeWithInfo(inskey.scankeys + i,
+ bkey->sk_flags,
+ bkey->sk_attno,
+ InvalidStrategy,
+ bkey->sk_subtype,
+ bkey->sk_collation,
+ procinfo,
+ bkey->sk_argument);
+ }
+ else
+ {
+ RegProcedure cmp_proc;
+
+ cmp_proc = get_opfamily_proc(rel->rd_opfamily[i],
+ rel->rd_opcintype[i],
+ bkey->sk_subtype, BTORDER_PROC);
+ if (!RegProcedureIsValid(cmp_proc))
+ elog(ERROR, "missing support function %d(%u,%u) for attribute %d of index \"%s\"",
+ BTORDER_PROC, rel->rd_opcintype[i], bkey->sk_subtype,
+ bkey->sk_attno, RelationGetRelationName(rel));
+ ScanKeyEntryInitialize(inskey.scankeys + i,
+ bkey->sk_flags,
+ bkey->sk_attno,
+ InvalidStrategy,
+ bkey->sk_subtype,
+ bkey->sk_collation,
+ cmp_proc,
+ bkey->sk_argument);
}
}
@@ -1469,6 +1520,8 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
if (!BufferIsValid(so->currPos.buf))
{
+ Assert(!so->needPrimScan);
+
/*
* We only get here if the index is completely empty. Lock relation
* because nothing finer to lock exists. Without a buffer lock, it's
@@ -1487,7 +1540,6 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
if (!BufferIsValid(so->currPos.buf))
{
- Assert(!so->needPrimScan);
_bt_parallel_done(scan);
return false;
}
@@ -1610,7 +1662,13 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum,
so->currPos.currPage = BufferGetBlockNumber(so->currPos.buf);
so->currPos.prevPage = opaque->btpo_prev;
so->currPos.nextPage = opaque->btpo_next;
+ /* delay setting so->currPos.lsn until _bt_drop_lock_and_maybe_pin */
+ so->currPos.dir = dir;
+ so->currPos.nextTupleOffset = 0;
+ /* either moreRight or moreLeft should be set now (may be unset later) */
+ Assert(ScanDirectionIsForward(dir) ? so->currPos.moreRight :
+ so->currPos.moreLeft);
Assert(!P_IGNORE(opaque));
Assert(BTScanPosIsPinned(so->currPos));
Assert(!so->needPrimScan);
@@ -1626,14 +1684,6 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum,
so->currPos.currPage);
}
- /* initialize remaining currPos fields related to current page */
- so->currPos.lsn = BufferGetLSNAtomic(so->currPos.buf);
- so->currPos.dir = dir;
- so->currPos.nextTupleOffset = 0;
- /* either moreLeft or moreRight should be set now (may be unset later) */
- Assert(ScanDirectionIsForward(dir) ? so->currPos.moreRight :
- so->currPos.moreLeft);
-
PredicateLockPage(rel, so->currPos.currPage, scan->xs_snapshot);
/* initialize local variables */
@@ -2107,10 +2157,9 @@ _bt_returnitem(IndexScanDesc scan, BTScanOpaque so)
*
* Wrapper on _bt_readnextpage that performs final steps for the current page.
*
- * On entry, if so->currPos.buf is valid the buffer is pinned but not locked.
- * If there's no pin held, it's because _bt_drop_lock_and_maybe_pin dropped
- * the pin eagerly earlier on. The scan must have so->currPos.currPage set to
- * a valid block, in any case.
+ * On entry, so->currPos must be valid. Its buffer will be pinned, though
+ * never locked. (Actually, when so->dropPin there won't even be a pin held,
+ * though so->currPos.currPage must still be set to a valid block number.)
*/
static bool
_bt_steppage(IndexScanDesc scan, ScanDirection dir)
@@ -2251,12 +2300,14 @@ _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, ScanDirection dir)
*/
if (_bt_readpage(scan, dir, offnum, true))
{
+ Relation rel = scan->indexRelation;
+
/*
* _bt_readpage succeeded. Drop the lock (and maybe the pin) on
* so->currPos.buf in preparation for btgettuple returning tuples.
*/
Assert(BTScanPosIsPinned(so->currPos));
- _bt_drop_lock_and_maybe_pin(scan, &so->currPos);
+ _bt_drop_lock_and_maybe_pin(rel, so);
return true;
}
@@ -2278,9 +2329,12 @@ _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, ScanDirection dir)
* previously-saved right link or left link. lastcurrblkno is the page that
* was current at the point where the blkno link was saved, which we use to
* reason about concurrent page splits/page deletions during backwards scans.
+ * In the common case where seized=false, blkno is either so->currPos.nextPage
+ * or so->currPos.prevPage, and lastcurrblkno is so->currPos.currPage.
*
- * On entry, caller shouldn't hold any locks or pins on any page (we work
- * directly off of blkno and lastcurrblkno instead). Parallel scan callers
+ * On entry, so->currPos shouldn't be locked by caller. so->currPos.buf must
+ * be InvalidBuffer/unpinned as needed by caller (note that lastcurrblkno
+ * won't need to be read again in almost all cases). Parallel scan callers
* that seized the scan before calling here should pass seized=true; such a
* caller's blkno and lastcurrblkno arguments come from the seized scan.
* seized=false callers just pass us the blkno/lastcurrblkno taken from their
@@ -2294,11 +2348,11 @@ _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, ScanDirection dir)
*
* On success exit, so->currPos is updated to contain data from the next
* interesting page, and we return true. We hold a pin on the buffer on
- * success exit, except when _bt_drop_lock_and_maybe_pin decided it was safe
- * to eagerly drop the pin (to avoid blocking VACUUM).
+ * success exit (except during so->dropPin index scans, when we drop the pin
+ * eagerly to avoid blocking VACUUM).
*
- * If there are no more matching records in the given direction, we drop all
- * locks and pins, invalidate so->currPos, and return false.
+ * If there are no more matching records in the given direction, we invalidate
+ * so->currPos (while ensuring it retains no locks or pins), and return false.
*
* We always release the scan for a parallel scan caller, regardless of
* success or failure; we'll call _bt_parallel_release as soon as possible.
@@ -2413,7 +2467,7 @@ _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno,
*/
Assert(so->currPos.currPage == blkno);
Assert(BTScanPosIsPinned(so->currPos));
- _bt_drop_lock_and_maybe_pin(scan, &so->currPos);
+ _bt_drop_lock_and_maybe_pin(rel, so);
return true;
}
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 1a15dfcb7d3..9aed207995f 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -44,7 +44,6 @@ static bool _bt_array_decrement(Relation rel, ScanKey skey, BTArrayKeyInfo *arra
static bool _bt_array_increment(Relation rel, ScanKey skey, BTArrayKeyInfo *array);
static bool _bt_advance_array_keys_increment(IndexScanDesc scan, ScanDirection dir,
bool *skip_array_set);
-static void _bt_rewind_nonrequired_arrays(IndexScanDesc scan, ScanDirection dir);
static bool _bt_tuple_before_array_skeys(IndexScanDesc scan, ScanDirection dir,
IndexTuple tuple, TupleDesc tupdesc, int tupnatts,
bool readpagetup, int sktrig, bool *scanBehind);
@@ -52,7 +51,6 @@ static bool _bt_advance_array_keys(IndexScanDesc scan, BTReadPageState *pstate,
IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
int sktrig, bool sktrig_required);
#ifdef USE_ASSERT_CHECKING
-static bool _bt_verify_arrays_bt_first(IndexScanDesc scan, ScanDirection dir);
static bool _bt_verify_keys_with_arraykeys(IndexScanDesc scan);
#endif
static bool _bt_oppodir_checkkeys(IndexScanDesc scan, ScanDirection dir,
@@ -1035,73 +1033,6 @@ _bt_advance_array_keys_increment(IndexScanDesc scan, ScanDirection dir,
}
/*
- * _bt_rewind_nonrequired_arrays() -- Rewind SAOP arrays not marked required
- *
- * Called when _bt_advance_array_keys decides to start a new primitive index
- * scan on the basis of the current scan position being before the position
- * that _bt_first is capable of repositioning the scan to by applying an
- * inequality operator required in the opposite-to-scan direction only.
- *
- * Although equality strategy scan keys (for both arrays and non-arrays alike)
- * are either marked required in both directions or in neither direction,
- * there is a sense in which non-required arrays behave like required arrays.
- * With a qual such as "WHERE a IN (100, 200) AND b >= 3 AND c IN (5, 6, 7)",
- * the scan key on "c" is non-required, but nevertheless enables positioning
- * the scan at the first tuple >= "(100, 3, 5)" on the leaf level during the
- * first descent of the tree by _bt_first. Later on, there could also be a
- * second descent, that places the scan right before tuples >= "(200, 3, 5)".
- * _bt_first must never be allowed to build an insertion scan key whose "c"
- * entry is set to a value other than 5, the "c" array's first element/value.
- * (Actually, it's the first in the current scan direction. This example uses
- * a forward scan.)
- *
- * Calling here resets the array scan key elements for the scan's non-required
- * arrays. This is strictly necessary for correctness in a subset of cases
- * involving "required in opposite direction"-triggered primitive index scans.
- * Not all callers are at risk of _bt_first using a non-required array like
- * this, but advancement always resets the arrays when another primitive scan
- * is scheduled, just to keep things simple. Array advancement even makes
- * sure to reset non-required arrays during scans that have no inequalities.
- * (Advancement still won't call here when there are no inequalities, though
- * that's just because it's all handled indirectly instead.)
- *
- * Note: _bt_verify_arrays_bt_first is called by an assertion to enforce that
- * everybody got this right.
- *
- * Note: In practice almost all SAOP arrays are marked required during
- * preprocessing (if necessary by generating skip arrays). It is hardly ever
- * truly necessary to call here, but consistently doing so is simpler.
- */
-static void
-_bt_rewind_nonrequired_arrays(IndexScanDesc scan, ScanDirection dir)
-{
- Relation rel = scan->indexRelation;
- BTScanOpaque so = (BTScanOpaque) scan->opaque;
- int arrayidx = 0;
-
- for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
- {
- ScanKey cur = so->keyData + ikey;
- BTArrayKeyInfo *array = NULL;
-
- if (!(cur->sk_flags & SK_SEARCHARRAY) ||
- cur->sk_strategy != BTEqualStrategyNumber)
- continue;
-
- array = &so->arrayKeys[arrayidx++];
- Assert(array->scan_key == ikey);
-
- if ((cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)))
- continue;
-
- Assert(array->num_elems != -1); /* No non-required skip arrays */
-
- _bt_array_set_low_or_high(rel, cur, array,
- ScanDirectionIsForward(dir));
- }
-}
-
-/*
* _bt_tuple_before_array_skeys() -- too early to advance required arrays?
*
* We always compare the tuple using the current array keys (which we assume
@@ -1380,8 +1311,6 @@ _bt_start_prim_scan(IndexScanDesc scan, ScanDirection dir)
*/
if (so->needPrimScan)
{
- Assert(_bt_verify_arrays_bt_first(scan, dir));
-
/*
* Flag was set -- must call _bt_first again, which will reset the
* scan's needPrimScan flag
@@ -2007,14 +1936,7 @@ _bt_advance_array_keys(IndexScanDesc scan, BTReadPageState *pstate,
*/
else if (has_required_opposite_direction_only && pstate->finaltup &&
unlikely(!_bt_oppodir_checkkeys(scan, dir, pstate->finaltup)))
- {
- /*
- * Make sure that any SAOP arrays that were not marked required by
- * preprocessing are reset to their first element for this direction
- */
- _bt_rewind_nonrequired_arrays(scan, dir);
goto new_prim_scan;
- }
continue_scan:
@@ -2045,8 +1967,6 @@ continue_scan:
*/
so->oppositeDirCheck = has_required_opposite_direction_only;
- _bt_rewind_nonrequired_arrays(scan, dir);
-
/*
* skip by setting "look ahead" mechanism's offnum for forwards scans
* (backwards scans check scanBehind flag directly instead)
@@ -2143,48 +2063,6 @@ end_toplevel_scan:
#ifdef USE_ASSERT_CHECKING
/*
- * Verify that the scan's qual state matches what we expect at the point that
- * _bt_start_prim_scan is about to start a just-scheduled new primitive scan.
- *
- * We enforce a rule against non-required array scan keys: they must start out
- * with whatever element is the first for the scan's current scan direction.
- * See _bt_rewind_nonrequired_arrays comments for an explanation.
- */
-static bool
-_bt_verify_arrays_bt_first(IndexScanDesc scan, ScanDirection dir)
-{
- BTScanOpaque so = (BTScanOpaque) scan->opaque;
- int arrayidx = 0;
-
- for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
- {
- ScanKey cur = so->keyData + ikey;
- BTArrayKeyInfo *array = NULL;
- int first_elem_dir;
-
- if (!(cur->sk_flags & SK_SEARCHARRAY) ||
- cur->sk_strategy != BTEqualStrategyNumber)
- continue;
-
- array = &so->arrayKeys[arrayidx++];
-
- if (((cur->sk_flags & SK_BT_REQFWD) && ScanDirectionIsForward(dir)) ||
- ((cur->sk_flags & SK_BT_REQBKWD) && ScanDirectionIsBackward(dir)))
- continue;
-
- if (ScanDirectionIsForward(dir))
- first_elem_dir = 0;
- else
- first_elem_dir = array->num_elems - 1;
-
- if (array->cur_elem != first_elem_dir)
- return false;
- }
-
- return _bt_verify_keys_with_arraykeys(scan);
-}
-
-/*
* Verify that the scan's "so->keyData[]" scan keys are in agreement with
* its array key state
*/
@@ -2194,6 +2072,7 @@ _bt_verify_keys_with_arraykeys(IndexScanDesc scan)
BTScanOpaque so = (BTScanOpaque) scan->opaque;
int last_sk_attno = InvalidAttrNumber,
arrayidx = 0;
+ bool nonrequiredseen = false;
if (!so->qual_ok)
return false;
@@ -2217,8 +2096,16 @@ _bt_verify_keys_with_arraykeys(IndexScanDesc scan)
if (array->num_elems != -1 &&
cur->sk_argument != array->elem_values[array->cur_elem])
return false;
- if (last_sk_attno > cur->sk_attno)
- return false;
+ if (cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD))
+ {
+ if (last_sk_attno > cur->sk_attno)
+ return false;
+ if (nonrequiredseen)
+ return false;
+ }
+ else
+ nonrequiredseen = true;
+
last_sk_attno = cur->sk_attno;
}
@@ -2551,37 +2438,12 @@ _bt_set_startikey(IndexScanDesc scan, BTReadPageState *pstate)
if (!(key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)))
{
/* Scan key isn't marked required (corner case) */
- Assert(!(key->sk_flags & SK_ROW_HEADER));
break; /* unsafe */
}
if (key->sk_flags & SK_ROW_HEADER)
{
- /*
- * RowCompare inequality.
- *
- * Only the first subkey from a RowCompare can ever be marked
- * required (that happens when the row header is marked required).
- * There is no simple, general way for us to transitively deduce
- * whether or not every tuple on the page satisfies a RowCompare
- * key based only on firsttup and lasttup -- so we just give up.
- */
- if (!start_past_saop_eq && !so->skipScan)
- break; /* unsafe to go further */
-
- /*
- * We have to be even more careful with RowCompares that come
- * after an array: we assume it's unsafe to even bypass the array.
- * Calling _bt_start_array_keys to recover the scan's arrays
- * following use of forcenonrequired mode isn't compatible with
- * _bt_check_rowcompare's continuescan=false behavior with NULL
- * row compare members. _bt_advance_array_keys must not make a
- * decision on the basis of a key not being satisfied in the
- * opposite-to-scan direction until the scan reaches a leaf page
- * where the same key begins to be satisfied in scan direction.
- * The _bt_first !used_all_subkeys behavior makes this limitation
- * hard to work around some other way.
- */
- return; /* completely unsafe to set pstate.startikey */
+ /* RowCompare inequalities currently aren't supported */
+ break; /* "unsafe" */
}
if (key->sk_strategy != BTEqualStrategyNumber)
{
@@ -3078,6 +2940,31 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
Assert(subkey->sk_flags & SK_ROW_MEMBER);
+ /* When a NULL row member is compared, the row never matches */
+ if (subkey->sk_flags & SK_ISNULL)
+ {
+ /*
+ * Unlike the simple-scankey case, this isn't a disallowed case
+ * (except when it's the first row element that has the NULL arg).
+ * But it can never match. If all the earlier row comparison
+ * columns are required for the scan direction, we can stop the
+ * scan, because there can't be another tuple that will succeed.
+ */
+ Assert(subkey != (ScanKey) DatumGetPointer(skey->sk_argument));
+ subkey--;
+ if (forcenonrequired)
+ {
+ /* treating scan's keys as non-required */
+ }
+ else if ((subkey->sk_flags & SK_BT_REQFWD) &&
+ ScanDirectionIsForward(dir))
+ *continuescan = false;
+ else if ((subkey->sk_flags & SK_BT_REQBKWD) &&
+ ScanDirectionIsBackward(dir))
+ *continuescan = false;
+ return false;
+ }
+
if (subkey->sk_attno > tupnatts)
{
/*
@@ -3087,11 +2974,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
* attribute passes the qual.
*/
Assert(BTreeTupleIsPivot(tuple));
- cmpresult = 0;
- if (subkey->sk_flags & SK_ROW_END)
- break;
- subkey++;
- continue;
+ return true;
}
datum = index_getattr(tuple,
@@ -3101,6 +2984,8 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
if (isNull)
{
+ int reqflags;
+
if (forcenonrequired)
{
/* treating scan's keys as non-required */
@@ -3111,15 +2996,35 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
* Since NULLs are sorted before non-NULLs, we know we have
* reached the lower limit of the range of values for this
* index attr. On a backward scan, we can stop if this qual
- * is one of the "must match" subset. We can stop regardless
- * of whether the qual is > or <, so long as it's required,
- * because it's not possible for any future tuples to pass. On
- * a forward scan, however, we must keep going, because we may
- * have initially positioned to the start of the index.
- * (_bt_advance_array_keys also relies on this behavior during
- * forward scans.)
+ * is one of the "must match" subset. However, on a forwards
+ * scan, we must keep going, because we may have initially
+ * positioned to the start of the index.
+ *
+ * All required NULLS FIRST > row members can use NULL tuple
+ * values to end backwards scans, just like with other values.
+ * A qual "WHERE (a, b, c) > (9, 42, 'foo')" can terminate a
+ * backwards scan upon reaching the index's rightmost "a = 9"
+ * tuple whose "b" column contains a NULL (if not sooner).
+ * Since "b" is NULLS FIRST, we can treat its NULLs as "<" 42.
*/
- if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
+ reqflags = SK_BT_REQBKWD;
+
+ /*
+ * When a most significant required NULLS FIRST < row compare
+ * member sees NULL tuple values during a backwards scan, it
+ * signals the end of matches for the whole row compare/scan.
+ * A qual "WHERE (a, b, c) < (9, 42, 'foo')" will terminate a
+ * backwards scan upon reaching the rightmost tuple whose "a"
+ * column has a NULL. The "a" NULL value is "<" 9, and yet
+ * our < row compare will still end the scan. (This isn't
+ * safe with later/lower-order row members. Notice that it
+ * can only happen with an "a" NULL some time after the scan
+ * completely stops needing to use its "b" and "c" members.)
+ */
+ if (subkey == (ScanKey) DatumGetPointer(skey->sk_argument))
+ reqflags |= SK_BT_REQFWD; /* safe, first row member */
+
+ if ((subkey->sk_flags & reqflags) &&
ScanDirectionIsBackward(dir))
*continuescan = false;
}
@@ -3129,15 +3034,35 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
* Since NULLs are sorted after non-NULLs, we know we have
* reached the upper limit of the range of values for this
* index attr. On a forward scan, we can stop if this qual is
- * one of the "must match" subset. We can stop regardless of
- * whether the qual is > or <, so long as it's required,
- * because it's not possible for any future tuples to pass. On
- * a backward scan, however, we must keep going, because we
- * may have initially positioned to the end of the index.
- * (_bt_advance_array_keys also relies on this behavior during
- * backward scans.)
+ * one of the "must match" subset. However, on a backward
+ * scan, we must keep going, because we may have initially
+ * positioned to the end of the index.
+ *
+ * All required NULLS LAST < row members can use NULL tuple
+ * values to end forwards scans, just like with other values.
+ * A qual "WHERE (a, b, c) < (9, 42, 'foo')" can terminate a
+ * forwards scan upon reaching the index's leftmost "a = 9"
+ * tuple whose "b" column contains a NULL (if not sooner).
+ * Since "b" is NULLS LAST, we can treat its NULLs as ">" 42.
+ */
+ reqflags = SK_BT_REQFWD;
+
+ /*
+ * When a most significant required NULLS LAST > row compare
+ * member sees NULL tuple values during a forwards scan, it
+ * signals the end of matches for the whole row compare/scan.
+ * A qual "WHERE (a, b, c) > (9, 42, 'foo')" will terminate a
+ * forwards scan upon reaching the leftmost tuple whose "a"
+ * column has a NULL. The "a" NULL value is ">" 9, and yet
+ * our > row compare will end the scan. (This isn't safe with
+ * later/lower-order row members. Notice that it can only
+ * happen with an "a" NULL some time after the scan completely
+ * stops needing to use its "b" and "c" members.)
*/
- if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
+ if (subkey == (ScanKey) DatumGetPointer(skey->sk_argument))
+ reqflags |= SK_BT_REQBKWD; /* safe, first row member */
+
+ if ((subkey->sk_flags & reqflags) &&
ScanDirectionIsForward(dir))
*continuescan = false;
}
@@ -3148,30 +3073,6 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, int tupnatts,
return false;
}
- if (subkey->sk_flags & SK_ISNULL)
- {
- /*
- * Unlike the simple-scankey case, this isn't a disallowed case
- * (except when it's the first row element that has the NULL arg).
- * But it can never match. If all the earlier row comparison
- * columns are required for the scan direction, we can stop the
- * scan, because there can't be another tuple that will succeed.
- */
- Assert(subkey != (ScanKey) DatumGetPointer(skey->sk_argument));
- subkey--;
- if (forcenonrequired)
- {
- /* treating scan's keys as non-required */
- }
- else if ((subkey->sk_flags & SK_BT_REQFWD) &&
- ScanDirectionIsForward(dir))
- *continuescan = false;
- else if ((subkey->sk_flags & SK_BT_REQBKWD) &&
- ScanDirectionIsBackward(dir))
- *continuescan = false;
- return false;
- }
-
/* Perform the test --- three-way comparison not bool operator */
cmpresult = DatumGetInt32(FunctionCall2Coll(&subkey->sk_func,
subkey->sk_collation,
@@ -3330,87 +3231,85 @@ _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
* current page and killed tuples thereon (generally, this should only be
* called if so->numKilled > 0).
*
- * The caller does not have a lock on the page and may or may not have the
- * page pinned in a buffer. Note that read-lock is sufficient for setting
- * LP_DEAD status (which is only a hint).
- *
- * We match items by heap TID before assuming they are the right ones to
- * delete. We cope with cases where items have moved right due to insertions.
- * If an item has moved off the current page due to a split, we'll fail to
- * find it and do nothing (this is not an error case --- we assume the item
- * will eventually get marked in a future indexscan).
+ * Caller should not have a lock on the so->currPos page, but must hold a
+ * buffer pin when !so->dropPin. When we return, it still won't be locked.
+ * It'll continue to hold whatever pins were held before calling here.
*
- * Note that if we hold a pin on the target page continuously from initially
- * reading the items until applying this function, VACUUM cannot have deleted
- * any items from the page, and so there is no need to search left from the
- * recorded offset. (This observation also guarantees that the item is still
- * the right one to delete, which might otherwise be questionable since heap
- * TIDs can get recycled.) This holds true even if the page has been modified
- * by inserts and page splits, so there is no need to consult the LSN.
+ * We match items by heap TID before assuming they are the right ones to set
+ * LP_DEAD. If the scan is one that holds a buffer pin on the target page
+ * continuously from initially reading the items until applying this function
+ * (if it is a !so->dropPin scan), VACUUM cannot have deleted any items on the
+ * page, so the page's TIDs can't have been recycled by now. There's no risk
+ * that we'll confuse a new index tuple that happens to use a recycled TID
+ * with a now-removed tuple with the same TID (that used to be on this same
+ * page). We can't rely on that during scans that drop buffer pins eagerly
+ * (so->dropPin scans), though, so we must condition setting LP_DEAD bits on
+ * the page LSN having not changed since back when _bt_readpage saw the page.
+ * We totally give up on setting LP_DEAD bits when the page LSN changed.
*
- * If the pin was released after reading the page, then we re-read it. If it
- * has been modified since we read it (as determined by the LSN), we dare not
- * flag any entries because it is possible that the old entry was vacuumed
- * away and the TID was re-used by a completely different heap tuple.
+ * We give up much less often during !so->dropPin scans, but it still happens.
+ * We cope with cases where items have moved right due to insertions. If an
+ * item has moved off the current page due to a split, we'll fail to find it
+ * and just give up on it.
*/
void
_bt_killitems(IndexScanDesc scan)
{
+ Relation rel = scan->indexRelation;
BTScanOpaque so = (BTScanOpaque) scan->opaque;
Page page;
BTPageOpaque opaque;
OffsetNumber minoff;
OffsetNumber maxoff;
- int i;
int numKilled = so->numKilled;
bool killedsomething = false;
- bool droppedpin PG_USED_FOR_ASSERTS_ONLY;
+ Buffer buf;
+ Assert(numKilled > 0);
Assert(BTScanPosIsValid(so->currPos));
+ Assert(scan->heapRelation != NULL); /* can't be a bitmap index scan */
- /*
- * Always reset the scan state, so we don't look for same items on other
- * pages.
- */
+ /* Always invalidate so->killedItems[] before leaving so->currPos */
so->numKilled = 0;
- if (BTScanPosIsPinned(so->currPos))
+ if (!so->dropPin)
{
/*
* We have held the pin on this page since we read the index tuples,
* so all we need to do is lock it. The pin will have prevented
- * re-use of any TID on the page, so there is no need to check the
- * LSN.
+ * concurrent VACUUMs from recycling any of the TIDs on the page.
*/
- droppedpin = false;
- _bt_lockbuf(scan->indexRelation, so->currPos.buf, BT_READ);
-
- page = BufferGetPage(so->currPos.buf);
+ Assert(BTScanPosIsPinned(so->currPos));
+ buf = so->currPos.buf;
+ _bt_lockbuf(rel, buf, BT_READ);
}
else
{
- Buffer buf;
+ XLogRecPtr latestlsn;
- droppedpin = true;
- /* Attempt to re-read the buffer, getting pin and lock. */
- buf = _bt_getbuf(scan->indexRelation, so->currPos.currPage, BT_READ);
+ Assert(!BTScanPosIsPinned(so->currPos));
+ Assert(RelationNeedsWAL(rel));
+ buf = _bt_getbuf(rel, so->currPos.currPage, BT_READ);
- page = BufferGetPage(buf);
- if (BufferGetLSNAtomic(buf) == so->currPos.lsn)
- so->currPos.buf = buf;
- else
+ latestlsn = BufferGetLSNAtomic(buf);
+ Assert(!XLogRecPtrIsInvalid(so->currPos.lsn));
+ Assert(so->currPos.lsn <= latestlsn);
+ if (so->currPos.lsn != latestlsn)
{
- /* Modified while not pinned means hinting is not safe. */
- _bt_relbuf(scan->indexRelation, buf);
+ /* Modified, give up on hinting */
+ _bt_relbuf(rel, buf);
return;
}
+
+ /* Unmodified, hinting is safe */
}
+ page = BufferGetPage(buf);
opaque = BTPageGetOpaque(page);
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
- for (i = 0; i < numKilled; i++)
+ for (int i = 0; i < numKilled; i++)
{
int itemIndex = so->killedItems[i];
BTScanPosItem *kitem = &so->currPos.items[itemIndex];
@@ -3442,7 +3341,7 @@ _bt_killitems(IndexScanDesc scan)
* correctness.
*
* Note that the page may have been modified in almost any way
- * since we first read it (in the !droppedpin case), so it's
+ * since we first read it (in the !so->dropPin case), so it's
* possible that this posting list tuple wasn't a posting list
* tuple when we first encountered its heap TIDs.
*/
@@ -3458,7 +3357,7 @@ _bt_killitems(IndexScanDesc scan)
* though only in the common case where the page can't
* have been concurrently modified
*/
- Assert(kitem->indexOffset == offnum || !droppedpin);
+ Assert(kitem->indexOffset == offnum || !so->dropPin);
/*
* Read-ahead to later kitems here.
@@ -3522,10 +3421,13 @@ _bt_killitems(IndexScanDesc scan)
if (killedsomething)
{
opaque->btpo_flags |= BTP_HAS_GARBAGE;
- MarkBufferDirtyHint(so->currPos.buf, true);
+ MarkBufferDirtyHint(buf, true);
}
- _bt_unlockbuf(scan->indexRelation, so->currPos.buf);
+ if (!so->dropPin)
+ _bt_unlockbuf(rel, buf);
+ else
+ _bt_relbuf(rel, buf);
}
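
A minimal sketch of the safety rule the rewritten header comment describes, using a hypothetical helper that does not exist in the patch: with the pin held, no TID can have been recycled, so hinting is always safe; in dropPin scans, hinting is safe only while the page LSN is unchanged.

    /* Illustrative sketch only -- not PostgreSQL source */
    static bool
    hints_are_safe(bool drop_pin, XLogRecPtr lsn_at_read, XLogRecPtr lsn_now)
    {
        if (!drop_pin)
            return true;        /* pin held: VACUUM could not recycle TIDs */

        /*
         * Pin was dropped, so a concurrent VACUUM may have run.  Any change
         * to the page (detected via its LSN) means a killed TID might now
         * point at an unrelated heap tuple, so give up entirely.
         */
        return lsn_at_read == lsn_now;
    }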
diff --git a/src/backend/access/rmgrdesc/replorigindesc.c b/src/backend/access/rmgrdesc/replorigindesc.c
index 5dd74233996..35e3af2903e 100644
--- a/src/backend/access/rmgrdesc/replorigindesc.c
+++ b/src/backend/access/rmgrdesc/replorigindesc.c
@@ -29,7 +29,7 @@ replorigin_desc(StringInfo buf, XLogReaderState *record)
xlrec = (xl_replorigin_set *) rec;
- appendStringInfo(buf, "set %u; lsn %X/%X; force: %d",
+ appendStringInfo(buf, "set %u; lsn %X/%08X; force: %d",
xlrec->node_id,
LSN_FORMAT_ARGS(xlrec->remote_lsn),
xlrec->force);
diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c
index 305598e2865..f0f696855b9 100644
--- a/src/backend/access/rmgrdesc/xactdesc.c
+++ b/src/backend/access/rmgrdesc/xactdesc.c
@@ -359,7 +359,7 @@ xact_desc_commit(StringInfo buf, uint8 info, xl_xact_commit *xlrec, RepOriginId
if (parsed.xinfo & XACT_XINFO_HAS_ORIGIN)
{
- appendStringInfo(buf, "; origin: node %u, lsn %X/%X, at %s",
+ appendStringInfo(buf, "; origin: node %u, lsn %X/%08X, at %s",
origin_id,
LSN_FORMAT_ARGS(parsed.origin_lsn),
timestamptz_to_str(parsed.origin_timestamp));
@@ -384,7 +384,7 @@ xact_desc_abort(StringInfo buf, uint8 info, xl_xact_abort *xlrec, RepOriginId or
if (parsed.xinfo & XACT_XINFO_HAS_ORIGIN)
{
- appendStringInfo(buf, "; origin: node %u, lsn %X/%X, at %s",
+ appendStringInfo(buf, "; origin: node %u, lsn %X/%08X, at %s",
origin_id,
LSN_FORMAT_ARGS(parsed.origin_lsn),
timestamptz_to_str(parsed.origin_timestamp));
@@ -418,7 +418,7 @@ xact_desc_prepare(StringInfo buf, uint8 info, xl_xact_prepare *xlrec, RepOriginI
* way as PrepareRedoAdd().
*/
if (origin_id != InvalidRepOriginId)
- appendStringInfo(buf, "; origin: node %u, lsn %X/%X, at %s",
+ appendStringInfo(buf, "; origin: node %u, lsn %X/%08X, at %s",
origin_id,
LSN_FORMAT_ARGS(parsed.origin_lsn),
timestamptz_to_str(parsed.origin_timestamp));
diff --git a/src/backend/access/rmgrdesc/xlogdesc.c b/src/backend/access/rmgrdesc/xlogdesc.c
index 58040f28656..cd6c2a2f650 100644
--- a/src/backend/access/rmgrdesc/xlogdesc.c
+++ b/src/backend/access/rmgrdesc/xlogdesc.c
@@ -65,7 +65,7 @@ xlog_desc(StringInfo buf, XLogReaderState *record)
{
CheckPoint *checkpoint = (CheckPoint *) rec;
- appendStringInfo(buf, "redo %X/%X; "
+ appendStringInfo(buf, "redo %X/%08X; "
"tli %u; prev tli %u; fpw %s; wal_level %s; xid %u:%u; oid %u; multi %u; offset %u; "
"oldest xid %u in DB %u; oldest multi %u in DB %u; "
"oldest/newest commit timestamp xid: %u/%u; "
@@ -111,7 +111,7 @@ xlog_desc(StringInfo buf, XLogReaderState *record)
XLogRecPtr startpoint;
memcpy(&startpoint, rec, sizeof(XLogRecPtr));
- appendStringInfo(buf, "%X/%X", LSN_FORMAT_ARGS(startpoint));
+ appendStringInfo(buf, "%X/%08X", LSN_FORMAT_ARGS(startpoint));
}
else if (info == XLOG_PARAMETER_CHANGE)
{
@@ -156,7 +156,7 @@ xlog_desc(StringInfo buf, XLogReaderState *record)
xl_overwrite_contrecord xlrec;
memcpy(&xlrec, rec, sizeof(xl_overwrite_contrecord));
- appendStringInfo(buf, "lsn %X/%X; time %s",
+ appendStringInfo(buf, "lsn %X/%08X; time %s",
LSN_FORMAT_ARGS(xlrec.overwritten_lsn),
timestamptz_to_str(xlrec.overwrite_time));
}
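
All of the %X/%X to %X/%08X changes in this commit serve the same purpose: LSN_FORMAT_ARGS() splits a 64-bit LSN into 32-bit halves, and zero-padding the low half to eight hex digits makes printed LSNs fixed-width. A standalone sketch of the effect, with hypothetical LSN_HI/LSN_LO macros standing in for LSN_FORMAT_ARGS():

    #include <stdio.h>
    #include <stdint.h>

    #define LSN_HI(lsn) ((uint32_t) ((lsn) >> 32))
    #define LSN_LO(lsn) ((uint32_t) (lsn))

    int
    main(void)
    {
        uint64_t lsn = 0x200000008ULL;

        printf("%X/%X\n", LSN_HI(lsn), LSN_LO(lsn));    /* old style: 2/8 */
        printf("%X/%08X\n", LSN_HI(lsn), LSN_LO(lsn));  /* new style: 2/00000008 */
        return 0;
    }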
diff --git a/src/backend/access/spgist/spgutils.c b/src/backend/access/spgist/spgutils.c
index 95fea74e296..9b86c016acb 100644
--- a/src/backend/access/spgist/spgutils.c
+++ b/src/backend/access/spgist/spgutils.c
@@ -785,7 +785,7 @@ SpGistGetInnerTypeSize(SpGistTypeDesc *att, Datum datum)
else if (att->attlen > 0)
size = att->attlen;
else
- size = VARSIZE_ANY(datum);
+ size = VARSIZE_ANY(DatumGetPointer(datum));
return MAXALIGN(size);
}
@@ -804,7 +804,7 @@ memcpyInnerDatum(void *target, SpGistTypeDesc *att, Datum datum)
}
else
{
- size = (att->attlen > 0) ? att->attlen : VARSIZE_ANY(datum);
+ size = (att->attlen > 0) ? att->attlen : VARSIZE_ANY(DatumGetPointer(datum));
memcpy(target, DatumGetPointer(datum), size);
}
}
diff --git a/src/backend/access/table/toast_helper.c b/src/backend/access/table/toast_helper.c
index b60fab0a4d2..11f97d65367 100644
--- a/src/backend/access/table/toast_helper.c
+++ b/src/backend/access/table/toast_helper.c
@@ -330,7 +330,7 @@ toast_delete_external(Relation rel, const Datum *values, const bool *isnull,
if (isnull[i])
continue;
- else if (VARATT_IS_EXTERNAL_ONDISK(value))
+ else if (VARATT_IS_EXTERNAL_ONDISK(DatumGetPointer(value)))
toast_delete_datum(rel, value, is_speculative);
}
}
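
This hunk and the spgutils.c hunks above share a theme: VARSIZE_ANY() and VARATT_IS_EXTERNAL_ONDISK() inspect varlena header bytes, so they must be handed a pointer rather than a raw Datum. A toy model of the fix, with simplified stand-ins for the real definitions:

    #include <stddef.h>
    #include <stdint.h>

    typedef uintptr_t Datum;                    /* simplified stand-in */
    #define DatumGetPointer(d)  ((char *) (d))  /* simplified stand-in */
    #define VARSIZE_ANY_TOY(p)  ((size_t) ((const unsigned char *) (p))[0])

    static size_t
    toast_value_size(Datum value)
    {
        /* unwrap the Datum first; the macro reads bytes behind a pointer */
        return VARSIZE_ANY_TOY(DatumGetPointer(value));
    }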
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 48f10bec91e..e80fbe109cf 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -110,9 +110,7 @@ static SlruCtlData XactCtlData;
#define XactCtl (&XactCtlData)
-static int ZeroCLOGPage(int64 pageno, bool writeXlog);
static bool CLOGPagePrecedes(int64 page1, int64 page2);
-static void WriteZeroPageXlogRec(int64 pageno);
static void WriteTruncateXlogRec(int64 pageno, TransactionId oldestXact,
Oid oldestXactDb);
static void TransactionIdSetPageStatus(TransactionId xid, int nsubxids,
@@ -832,41 +830,8 @@ check_transaction_buffers(int *newval, void **extra, GucSource source)
void
BootStrapCLOG(void)
{
- int slotno;
- LWLock *lock = SimpleLruGetBankLock(XactCtl, 0);
-
- LWLockAcquire(lock, LW_EXCLUSIVE);
-
- /* Create and zero the first page of the commit log */
- slotno = ZeroCLOGPage(0, false);
-
- /* Make sure it's written out */
- SimpleLruWritePage(XactCtl, slotno);
- Assert(!XactCtl->shared->page_dirty[slotno]);
-
- LWLockRelease(lock);
-}
-
-/*
- * Initialize (or reinitialize) a page of CLOG to zeroes.
- * If writeXlog is true, also emit an XLOG record saying we did this.
- *
- * The page is not actually written, just set up in shared memory.
- * The slot number of the new page is returned.
- *
- * Control lock must be held at entry, and will be held at exit.
- */
-static int
-ZeroCLOGPage(int64 pageno, bool writeXlog)
-{
- int slotno;
-
- slotno = SimpleLruZeroPage(XactCtl, pageno);
-
- if (writeXlog)
- WriteZeroPageXlogRec(pageno);
-
- return slotno;
+ /* Zero the initial page and flush it to disk */
+ SimpleLruZeroAndWritePage(XactCtl, 0);
}
/*
@@ -974,8 +939,9 @@ ExtendCLOG(TransactionId newestXact)
LWLockAcquire(lock, LW_EXCLUSIVE);
- /* Zero the page and make an XLOG entry about it */
- ZeroCLOGPage(pageno, true);
+ /* Zero the page and make a WAL entry about it */
+ SimpleLruZeroPage(XactCtl, pageno);
+ XLogSimpleInsertInt64(RM_CLOG_ID, CLOG_ZEROPAGE, pageno);
LWLockRelease(lock);
}
@@ -1068,17 +1034,6 @@ CLOGPagePrecedes(int64 page1, int64 page2)
/*
- * Write a ZEROPAGE xlog record
- */
-static void
-WriteZeroPageXlogRec(int64 pageno)
-{
- XLogBeginInsert();
- XLogRegisterData(&pageno, sizeof(pageno));
- (void) XLogInsert(RM_CLOG_ID, CLOG_ZEROPAGE);
-}
-
-/*
* Write a TRUNCATE xlog record
*
* We must flush the xlog record to disk before returning --- see notes
@@ -1114,19 +1069,9 @@ clog_redo(XLogReaderState *record)
if (info == CLOG_ZEROPAGE)
{
int64 pageno;
- int slotno;
- LWLock *lock;
memcpy(&pageno, XLogRecGetData(record), sizeof(pageno));
-
- lock = SimpleLruGetBankLock(XactCtl, pageno);
- LWLockAcquire(lock, LW_EXCLUSIVE);
-
- slotno = ZeroCLOGPage(pageno, false);
- SimpleLruWritePage(XactCtl, slotno);
- Assert(!XactCtl->shared->page_dirty[slotno]);
-
- LWLockRelease(lock);
+ SimpleLruZeroAndWritePage(XactCtl, pageno);
}
else if (info == CLOG_TRUNCATE)
{
diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c
index 113fae1437a..370b38e048b 100644
--- a/src/backend/access/transam/commit_ts.c
+++ b/src/backend/access/transam/commit_ts.c
@@ -114,11 +114,9 @@ static void SetXidCommitTsInPage(TransactionId xid, int nsubxids,
static void TransactionIdSetCommitTs(TransactionId xid, TimestampTz ts,
RepOriginId nodeid, int slotno);
static void error_commit_ts_disabled(void);
-static int ZeroCommitTsPage(int64 pageno, bool writeXlog);
static bool CommitTsPagePrecedes(int64 page1, int64 page2);
static void ActivateCommitTs(void);
static void DeactivateCommitTs(void);
-static void WriteZeroPageXlogRec(int64 pageno);
static void WriteTruncateXlogRec(int64 pageno, TransactionId oldestXid);
/*
@@ -603,28 +601,6 @@ BootStrapCommitTs(void)
}
/*
- * Initialize (or reinitialize) a page of CommitTs to zeroes.
- * If writeXlog is true, also emit an XLOG record saying we did this.
- *
- * The page is not actually written, just set up in shared memory.
- * The slot number of the new page is returned.
- *
- * Control lock must be held at entry, and will be held at exit.
- */
-static int
-ZeroCommitTsPage(int64 pageno, bool writeXlog)
-{
- int slotno;
-
- slotno = SimpleLruZeroPage(CommitTsCtl, pageno);
-
- if (writeXlog)
- WriteZeroPageXlogRec(pageno);
-
- return slotno;
-}
-
-/*
* This must be called ONCE during postmaster or standalone-backend startup,
* after StartupXLOG has initialized TransamVariables->nextXid.
*/
@@ -707,6 +683,13 @@ ActivateCommitTs(void)
TransactionId xid;
int64 pageno;
+ /*
+ * During bootstrap, we should not register commit timestamps, so skip
+ * the activation in this case.
+ */
+ if (IsBootstrapProcessingMode())
+ return;
+
/* If we've done this already, there's nothing to do */
LWLockAcquire(CommitTsLock, LW_EXCLUSIVE);
if (commitTsShared->commitTsActive)
@@ -747,16 +730,7 @@ ActivateCommitTs(void)
/* Create the current segment file, if necessary */
if (!SimpleLruDoesPhysicalPageExist(CommitTsCtl, pageno))
- {
- LWLock *lock = SimpleLruGetBankLock(CommitTsCtl, pageno);
- int slotno;
-
- LWLockAcquire(lock, LW_EXCLUSIVE);
- slotno = ZeroCommitTsPage(pageno, false);
- SimpleLruWritePage(CommitTsCtl, slotno);
- Assert(!CommitTsCtl->shared->page_dirty[slotno]);
- LWLockRelease(lock);
- }
+ SimpleLruZeroAndWritePage(CommitTsCtl, pageno);
/* Change the activation status in shared memory. */
LWLockAcquire(CommitTsLock, LW_EXCLUSIVE);
@@ -867,8 +841,12 @@ ExtendCommitTs(TransactionId newestXact)
LWLockAcquire(lock, LW_EXCLUSIVE);
- /* Zero the page and make an XLOG entry about it */
- ZeroCommitTsPage(pageno, !InRecovery);
+ /* Zero the page ... */
+ SimpleLruZeroPage(CommitTsCtl, pageno);
+
+ /* and make a WAL entry about that, unless we're in REDO */
+ if (!InRecovery)
+ XLogSimpleInsertInt64(RM_COMMIT_TS_ID, COMMIT_TS_ZEROPAGE, pageno);
LWLockRelease(lock);
}
@@ -983,17 +961,6 @@ CommitTsPagePrecedes(int64 page1, int64 page2)
/*
- * Write a ZEROPAGE xlog record
- */
-static void
-WriteZeroPageXlogRec(int64 pageno)
-{
- XLogBeginInsert();
- XLogRegisterData(&pageno, sizeof(pageno));
- (void) XLogInsert(RM_COMMIT_TS_ID, COMMIT_TS_ZEROPAGE);
-}
-
-/*
* Write a TRUNCATE xlog record
*/
static void
@@ -1023,19 +990,9 @@ commit_ts_redo(XLogReaderState *record)
if (info == COMMIT_TS_ZEROPAGE)
{
int64 pageno;
- int slotno;
- LWLock *lock;
memcpy(&pageno, XLogRecGetData(record), sizeof(pageno));
-
- lock = SimpleLruGetBankLock(CommitTsCtl, pageno);
- LWLockAcquire(lock, LW_EXCLUSIVE);
-
- slotno = ZeroCommitTsPage(pageno, false);
- SimpleLruWritePage(CommitTsCtl, slotno);
- Assert(!CommitTsCtl->shared->page_dirty[slotno]);
-
- LWLockRelease(lock);
+ SimpleLruZeroAndWritePage(CommitTsCtl, pageno);
}
else if (info == COMMIT_TS_TRUNCATE)
{
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index 3c06ac45532..3cb09c3d598 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -401,8 +401,6 @@ static void mXactCachePut(MultiXactId multi, int nmembers,
static char *mxstatus_to_string(MultiXactStatus status);
/* management of SLRU infrastructure */
-static int ZeroMultiXactOffsetPage(int64 pageno, bool writeXlog);
-static int ZeroMultiXactMemberPage(int64 pageno, bool writeXlog);
static bool MultiXactOffsetPagePrecedes(int64 page1, int64 page2);
static bool MultiXactMemberPagePrecedes(int64 page1, int64 page2);
static bool MultiXactOffsetPrecedes(MultiXactOffset offset1,
@@ -413,7 +411,6 @@ static bool MultiXactOffsetWouldWrap(MultiXactOffset boundary,
MultiXactOffset start, uint32 distance);
static bool SetOffsetVacuumLimit(bool is_startup);
static bool find_multixact_start(MultiXactId multi, MultiXactOffset *result);
-static void WriteMZeroPageXlogRec(int64 pageno, uint8 info);
static void WriteMTruncateXlogRec(Oid oldestMultiDB,
MultiXactId startTruncOff,
MultiXactId endTruncOff,
@@ -1847,7 +1844,7 @@ AtPrepare_MultiXact(void)
* Clean up after successful PREPARE TRANSACTION
*/
void
-PostPrepare_MultiXact(TransactionId xid)
+PostPrepare_MultiXact(FullTransactionId fxid)
{
MultiXactId myOldestMember;
@@ -1858,7 +1855,7 @@ PostPrepare_MultiXact(TransactionId xid)
myOldestMember = OldestMemberMXactId[MyProcNumber];
if (MultiXactIdIsValid(myOldestMember))
{
- ProcNumber dummyProcNumber = TwoPhaseGetDummyProcNumber(xid, false);
+ ProcNumber dummyProcNumber = TwoPhaseGetDummyProcNumber(fxid, false);
/*
* Even though storing MultiXactId is atomic, acquire lock to make
@@ -1896,10 +1893,10 @@ PostPrepare_MultiXact(TransactionId xid)
* Recover the state of a prepared transaction at startup
*/
void
-multixact_twophase_recover(TransactionId xid, uint16 info,
+multixact_twophase_recover(FullTransactionId fxid, uint16 info,
void *recdata, uint32 len)
{
- ProcNumber dummyProcNumber = TwoPhaseGetDummyProcNumber(xid, false);
+ ProcNumber dummyProcNumber = TwoPhaseGetDummyProcNumber(fxid, false);
MultiXactId oldestMember;
/*
@@ -1917,10 +1914,10 @@ multixact_twophase_recover(TransactionId xid, uint16 info,
* Similar to AtEOXact_MultiXact but for COMMIT PREPARED
*/
void
-multixact_twophase_postcommit(TransactionId xid, uint16 info,
+multixact_twophase_postcommit(FullTransactionId fxid, uint16 info,
void *recdata, uint32 len)
{
- ProcNumber dummyProcNumber = TwoPhaseGetDummyProcNumber(xid, true);
+ ProcNumber dummyProcNumber = TwoPhaseGetDummyProcNumber(fxid, true);
Assert(len == sizeof(MultiXactId));
@@ -1932,10 +1929,10 @@ multixact_twophase_postcommit(TransactionId xid, uint16 info,
* This is actually just the same as the COMMIT case.
*/
void
-multixact_twophase_postabort(TransactionId xid, uint16 info,
+multixact_twophase_postabort(FullTransactionId fxid, uint16 info,
void *recdata, uint32 len)
{
- multixact_twophase_postcommit(xid, info, recdata, len);
+ multixact_twophase_postcommit(fxid, info, recdata, len);
}
/*
@@ -2033,70 +2030,9 @@ check_multixact_member_buffers(int *newval, void **extra, GucSource source)
void
BootStrapMultiXact(void)
{
- int slotno;
- LWLock *lock;
-
- lock = SimpleLruGetBankLock(MultiXactOffsetCtl, 0);
- LWLockAcquire(lock, LW_EXCLUSIVE);
-
- /* Create and zero the first page of the offsets log */
- slotno = ZeroMultiXactOffsetPage(0, false);
-
- /* Make sure it's written out */
- SimpleLruWritePage(MultiXactOffsetCtl, slotno);
- Assert(!MultiXactOffsetCtl->shared->page_dirty[slotno]);
-
- LWLockRelease(lock);
-
- lock = SimpleLruGetBankLock(MultiXactMemberCtl, 0);
- LWLockAcquire(lock, LW_EXCLUSIVE);
-
- /* Create and zero the first page of the members log */
- slotno = ZeroMultiXactMemberPage(0, false);
-
- /* Make sure it's written out */
- SimpleLruWritePage(MultiXactMemberCtl, slotno);
- Assert(!MultiXactMemberCtl->shared->page_dirty[slotno]);
-
- LWLockRelease(lock);
-}
-
-/*
- * Initialize (or reinitialize) a page of MultiXactOffset to zeroes.
- * If writeXlog is true, also emit an XLOG record saying we did this.
- *
- * The page is not actually written, just set up in shared memory.
- * The slot number of the new page is returned.
- *
- * Control lock must be held at entry, and will be held at exit.
- */
-static int
-ZeroMultiXactOffsetPage(int64 pageno, bool writeXlog)
-{
- int slotno;
-
- slotno = SimpleLruZeroPage(MultiXactOffsetCtl, pageno);
-
- if (writeXlog)
- WriteMZeroPageXlogRec(pageno, XLOG_MULTIXACT_ZERO_OFF_PAGE);
-
- return slotno;
-}
-
-/*
- * Ditto, for MultiXactMember
- */
-static int
-ZeroMultiXactMemberPage(int64 pageno, bool writeXlog)
-{
- int slotno;
-
- slotno = SimpleLruZeroPage(MultiXactMemberCtl, pageno);
-
- if (writeXlog)
- WriteMZeroPageXlogRec(pageno, XLOG_MULTIXACT_ZERO_MEM_PAGE);
-
- return slotno;
+ /* Zero the initial pages and flush them to disk */
+ SimpleLruZeroAndWritePage(MultiXactOffsetCtl, 0);
+ SimpleLruZeroAndWritePage(MultiXactMemberCtl, 0);
}
/*
@@ -2134,7 +2070,7 @@ MaybeExtendOffsetSlru(void)
* with creating a new segment file even if the page we're writing is
* not the first in it, so this is enough.
*/
- slotno = ZeroMultiXactOffsetPage(pageno, false);
+ slotno = SimpleLruZeroPage(MultiXactOffsetCtl, pageno);
SimpleLruWritePage(MultiXactOffsetCtl, slotno);
}
@@ -2568,8 +2504,10 @@ ExtendMultiXactOffset(MultiXactId multi)
LWLockAcquire(lock, LW_EXCLUSIVE);
- /* Zero the page and make an XLOG entry about it */
- ZeroMultiXactOffsetPage(pageno, true);
+ /* Zero the page and make a WAL entry about it */
+ SimpleLruZeroPage(MultiXactOffsetCtl, pageno);
+ XLogSimpleInsertInt64(RM_MULTIXACT_ID, XLOG_MULTIXACT_ZERO_OFF_PAGE,
+ pageno);
LWLockRelease(lock);
}
@@ -2611,8 +2549,10 @@ ExtendMultiXactMember(MultiXactOffset offset, int nmembers)
LWLockAcquire(lock, LW_EXCLUSIVE);
- /* Zero the page and make an XLOG entry about it */
- ZeroMultiXactMemberPage(pageno, true);
+ /* Zero the page and make a WAL entry about it */
+ SimpleLruZeroPage(MultiXactMemberCtl, pageno);
+ XLogSimpleInsertInt64(RM_MULTIXACT_ID,
+ XLOG_MULTIXACT_ZERO_MEM_PAGE, pageno);
LWLockRelease(lock);
}
@@ -3348,18 +3288,6 @@ MultiXactOffsetPrecedes(MultiXactOffset offset1, MultiXactOffset offset2)
}
/*
- * Write an xlog record reflecting the zeroing of either a MEMBERs or
- * OFFSETs page (info shows which)
- */
-static void
-WriteMZeroPageXlogRec(int64 pageno, uint8 info)
-{
- XLogBeginInsert();
- XLogRegisterData(&pageno, sizeof(pageno));
- (void) XLogInsert(RM_MULTIXACT_ID, info);
-}
-
-/*
* Write a TRUNCATE xlog record
*
* We must flush the xlog record to disk before returning --- see notes in
@@ -3401,36 +3329,16 @@ multixact_redo(XLogReaderState *record)
if (info == XLOG_MULTIXACT_ZERO_OFF_PAGE)
{
int64 pageno;
- int slotno;
- LWLock *lock;
memcpy(&pageno, XLogRecGetData(record), sizeof(pageno));
-
- lock = SimpleLruGetBankLock(MultiXactOffsetCtl, pageno);
- LWLockAcquire(lock, LW_EXCLUSIVE);
-
- slotno = ZeroMultiXactOffsetPage(pageno, false);
- SimpleLruWritePage(MultiXactOffsetCtl, slotno);
- Assert(!MultiXactOffsetCtl->shared->page_dirty[slotno]);
-
- LWLockRelease(lock);
+ SimpleLruZeroAndWritePage(MultiXactOffsetCtl, pageno);
}
else if (info == XLOG_MULTIXACT_ZERO_MEM_PAGE)
{
int64 pageno;
- int slotno;
- LWLock *lock;
memcpy(&pageno, XLogRecGetData(record), sizeof(pageno));
-
- lock = SimpleLruGetBankLock(MultiXactMemberCtl, pageno);
- LWLockAcquire(lock, LW_EXCLUSIVE);
-
- slotno = ZeroMultiXactMemberPage(pageno, false);
- SimpleLruWritePage(MultiXactMemberCtl, slotno);
- Assert(!MultiXactMemberCtl->shared->page_dirty[slotno]);
-
- LWLockRelease(lock);
+ SimpleLruZeroAndWritePage(MultiXactMemberCtl, pageno);
}
else if (info == XLOG_MULTIXACT_CREATE_ID)
{
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index fe56286d9a9..10ec259f382 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -434,6 +434,31 @@ SimpleLruZeroLSNs(SlruCtl ctl, int slotno)
}
/*
+ * This is a convenience wrapper for the common case of zeroing a page and
+ * immediately flushing it to disk.
+ *
+ * Control lock is acquired and released here.
+ */
+void
+SimpleLruZeroAndWritePage(SlruCtl ctl, int64 pageno)
+{
+ int slotno;
+ LWLock *lock;
+
+ lock = SimpleLruGetBankLock(ctl, pageno);
+ LWLockAcquire(lock, LW_EXCLUSIVE);
+
+ /* Create and zero the page */
+ slotno = SimpleLruZeroPage(ctl, pageno);
+
+ /* Make sure it's written out */
+ SimpleLruWritePage(ctl, slotno);
+ Assert(!ctl->shared->page_dirty[slotno]);
+
+ LWLockRelease(lock);
+}
+
+/*
* Wait for any active I/O on a page slot to finish. (This does not
* guarantee that new I/O hasn't been started before we return, though.
* In fact the slot might not even contain the same page anymore.)
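
Together with the clog.c, commit_ts.c, multixact.c, and subtrans.c hunks, the refactoring leaves exactly two calling patterns. A sketch of both against a hypothetical ExampleCtl SLRU (the rmgr ID and record info byte are placeholders):

    /* Bootstrap and redo paths: zero the page and flush it right away;
     * SimpleLruZeroAndWritePage takes and releases the bank lock itself. */
    SimpleLruZeroAndWritePage(ExampleCtl, pageno);

    /* Extend paths: zero the page in shared memory only, WAL-logging the
     * action ourselves -- skipped during replay, as in ExtendCommitTs(). */
    static void
    example_extend(int64 pageno)
    {
        LWLock *lock = SimpleLruGetBankLock(ExampleCtl, pageno);

        LWLockAcquire(lock, LW_EXCLUSIVE);
        SimpleLruZeroPage(ExampleCtl, pageno);
        if (!InRecovery)
            XLogSimpleInsertInt64(RM_EXAMPLE_ID, EXAMPLE_ZEROPAGE, pageno);
        LWLockRelease(lock);
    }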
diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c
index 15153618fad..09aace9e09f 100644
--- a/src/backend/access/transam/subtrans.c
+++ b/src/backend/access/transam/subtrans.c
@@ -74,7 +74,6 @@ static SlruCtlData SubTransCtlData;
#define SubTransCtl (&SubTransCtlData)
-static int ZeroSUBTRANSPage(int64 pageno);
static bool SubTransPagePrecedes(int64 page1, int64 page2);
@@ -269,33 +268,8 @@ check_subtrans_buffers(int *newval, void **extra, GucSource source)
void
BootStrapSUBTRANS(void)
{
- int slotno;
- LWLock *lock = SimpleLruGetBankLock(SubTransCtl, 0);
-
- LWLockAcquire(lock, LW_EXCLUSIVE);
-
- /* Create and zero the first page of the subtrans log */
- slotno = ZeroSUBTRANSPage(0);
-
- /* Make sure it's written out */
- SimpleLruWritePage(SubTransCtl, slotno);
- Assert(!SubTransCtl->shared->page_dirty[slotno]);
-
- LWLockRelease(lock);
-}
-
-/*
- * Initialize (or reinitialize) a page of SUBTRANS to zeroes.
- *
- * The page is not actually written, just set up in shared memory.
- * The slot number of the new page is returned.
- *
- * Control lock must be held at entry, and will be held at exit.
- */
-static int
-ZeroSUBTRANSPage(int64 pageno)
-{
- return SimpleLruZeroPage(SubTransCtl, pageno);
+ /* Zero the initial page and flush it to disk */
+ SimpleLruZeroAndWritePage(SubTransCtl, 0);
}
/*
@@ -335,7 +309,7 @@ StartupSUBTRANS(TransactionId oldestActiveXID)
prevlock = lock;
}
- (void) ZeroSUBTRANSPage(startPage);
+ (void) SimpleLruZeroPage(SubTransCtl, startPage);
if (startPage == endPage)
break;
@@ -395,7 +369,7 @@ ExtendSUBTRANS(TransactionId newestXact)
LWLockAcquire(lock, LW_EXCLUSIVE);
/* Zero the page */
- ZeroSUBTRANSPage(pageno);
+ SimpleLruZeroPage(SubTransCtl, pageno);
LWLockRelease(lock);
}
diff --git a/src/backend/access/transam/timeline.c b/src/backend/access/transam/timeline.c
index a27f27cc037..186eb91f609 100644
--- a/src/backend/access/transam/timeline.c
+++ b/src/backend/access/transam/timeline.c
@@ -154,7 +154,7 @@ readTimeLineHistory(TimeLineID targetTLI)
if (*ptr == '\0' || *ptr == '#')
continue;
- nfields = sscanf(fline, "%u\t%X/%X", &tli, &switchpoint_hi, &switchpoint_lo);
+ nfields = sscanf(fline, "%u\t%X/%08X", &tli, &switchpoint_hi, &switchpoint_lo);
if (nfields < 1)
{
@@ -399,7 +399,7 @@ writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI,
* parent file failed to end with one.
*/
snprintf(buffer, sizeof(buffer),
- "%s%u\t%X/%X\t%s\n",
+ "%s%u\t%X/%08X\t%s\n",
(srcfd < 0) ? "" : "\n",
parentTLI,
LSN_FORMAT_ARGS(switchpoint),
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 73a80559194..7918176fc58 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -159,7 +159,7 @@ typedef struct GlobalTransactionData
*/
XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */
XLogRecPtr prepare_end_lsn; /* XLOG offset of prepare record end */
- TransactionId xid; /* The GXACT id */
+ FullTransactionId fxid; /* The GXACT full xid */
Oid owner; /* ID of user that executed the xact */
ProcNumber locking_backend; /* backend currently working on the xact */
@@ -197,6 +197,7 @@ static GlobalTransaction MyLockedGxact = NULL;
static bool twophaseExitRegistered = false;
+static void PrepareRedoRemoveFull(FullTransactionId fxid, bool giveWarning);
static void RecordTransactionCommitPrepared(TransactionId xid,
int nchildren,
TransactionId *children,
@@ -216,19 +217,19 @@ static void RecordTransactionAbortPrepared(TransactionId xid,
int nstats,
xl_xact_stats_item *stats,
const char *gid);
-static void ProcessRecords(char *bufptr, TransactionId xid,
+static void ProcessRecords(char *bufptr, FullTransactionId fxid,
const TwoPhaseCallback callbacks[]);
static void RemoveGXact(GlobalTransaction gxact);
static void XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len);
-static char *ProcessTwoPhaseBuffer(TransactionId xid,
+static char *ProcessTwoPhaseBuffer(FullTransactionId fxid,
XLogRecPtr prepare_start_lsn,
bool fromdisk, bool setParent, bool setNextXid);
-static void MarkAsPreparingGuts(GlobalTransaction gxact, TransactionId xid,
+static void MarkAsPreparingGuts(GlobalTransaction gxact, FullTransactionId fxid,
const char *gid, TimestampTz prepared_at, Oid owner,
Oid databaseid);
-static void RemoveTwoPhaseFile(TransactionId xid, bool giveWarning);
-static void RecreateTwoPhaseFile(TransactionId xid, void *content, int len);
+static void RemoveTwoPhaseFile(FullTransactionId fxid, bool giveWarning);
+static void RecreateTwoPhaseFile(FullTransactionId fxid, void *content, int len);
/*
* Initialization of shared memory
@@ -356,7 +357,7 @@ PostPrepare_Twophase(void)
* Reserve the GID for the given transaction.
*/
GlobalTransaction
-MarkAsPreparing(TransactionId xid, const char *gid,
+MarkAsPreparing(FullTransactionId fxid, const char *gid,
TimestampTz prepared_at, Oid owner, Oid databaseid)
{
GlobalTransaction gxact;
@@ -407,7 +408,7 @@ MarkAsPreparing(TransactionId xid, const char *gid,
gxact = TwoPhaseState->freeGXacts;
TwoPhaseState->freeGXacts = gxact->next;
- MarkAsPreparingGuts(gxact, xid, gid, prepared_at, owner, databaseid);
+ MarkAsPreparingGuts(gxact, fxid, gid, prepared_at, owner, databaseid);
gxact->ondisk = false;
@@ -430,11 +431,13 @@ MarkAsPreparing(TransactionId xid, const char *gid,
* Note: This function should be called with appropriate locks held.
*/
static void
-MarkAsPreparingGuts(GlobalTransaction gxact, TransactionId xid, const char *gid,
- TimestampTz prepared_at, Oid owner, Oid databaseid)
+MarkAsPreparingGuts(GlobalTransaction gxact, FullTransactionId fxid,
+ const char *gid, TimestampTz prepared_at, Oid owner,
+ Oid databaseid)
{
PGPROC *proc;
int i;
+ TransactionId xid = XidFromFullTransactionId(fxid);
Assert(LWLockHeldByMeInMode(TwoPhaseStateLock, LW_EXCLUSIVE));
@@ -479,7 +482,7 @@ MarkAsPreparingGuts(GlobalTransaction gxact, TransactionId xid, const char *gid,
proc->subxidStatus.count = 0;
gxact->prepared_at = prepared_at;
- gxact->xid = xid;
+ gxact->fxid = fxid;
gxact->owner = owner;
gxact->locking_backend = MyProcNumber;
gxact->valid = false;
@@ -797,12 +800,12 @@ pg_prepared_xact(PG_FUNCTION_ARGS)
* caller had better hold it.
*/
static GlobalTransaction
-TwoPhaseGetGXact(TransactionId xid, bool lock_held)
+TwoPhaseGetGXact(FullTransactionId fxid, bool lock_held)
{
GlobalTransaction result = NULL;
int i;
- static TransactionId cached_xid = InvalidTransactionId;
+ static FullTransactionId cached_fxid = {InvalidTransactionId};
static GlobalTransaction cached_gxact = NULL;
Assert(!lock_held || LWLockHeldByMe(TwoPhaseStateLock));
@@ -811,7 +814,7 @@ TwoPhaseGetGXact(TransactionId xid, bool lock_held)
* During a recovery, COMMIT PREPARED, or ABORT PREPARED, we'll be called
* repeatedly for the same XID. We can save work with a simple cache.
*/
- if (xid == cached_xid)
+ if (FullTransactionIdEquals(fxid, cached_fxid))
return cached_gxact;
if (!lock_held)
@@ -821,7 +824,7 @@ TwoPhaseGetGXact(TransactionId xid, bool lock_held)
{
GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
- if (gxact->xid == xid)
+ if (FullTransactionIdEquals(gxact->fxid, fxid))
{
result = gxact;
break;
@@ -832,9 +835,10 @@ TwoPhaseGetGXact(TransactionId xid, bool lock_held)
LWLockRelease(TwoPhaseStateLock);
if (result == NULL) /* should not happen */
- elog(ERROR, "failed to find GlobalTransaction for xid %u", xid);
+ elog(ERROR, "failed to find GlobalTransaction for xid %u",
+ XidFromFullTransactionId(fxid));
- cached_xid = xid;
+ cached_fxid = fxid;
cached_gxact = result;
return result;
@@ -881,7 +885,7 @@ TwoPhaseGetXidByVirtualXID(VirtualTransactionId vxid,
*have_more = true;
break;
}
- result = gxact->xid;
+ result = XidFromFullTransactionId(gxact->fxid);
}
}
@@ -892,7 +896,7 @@ TwoPhaseGetXidByVirtualXID(VirtualTransactionId vxid,
/*
* TwoPhaseGetDummyProcNumber
- * Get the dummy proc number for prepared transaction specified by XID
+ * Get the dummy proc number for a prepared transaction
*
* Dummy proc numbers are similar to proc numbers of real backends. They
* start at MaxBackends, and are unique across all currently active real
@@ -900,24 +904,24 @@ TwoPhaseGetXidByVirtualXID(VirtualTransactionId vxid,
* TwoPhaseStateLock will not be taken, so the caller had better hold it.
*/
ProcNumber
-TwoPhaseGetDummyProcNumber(TransactionId xid, bool lock_held)
+TwoPhaseGetDummyProcNumber(FullTransactionId fxid, bool lock_held)
{
- GlobalTransaction gxact = TwoPhaseGetGXact(xid, lock_held);
+ GlobalTransaction gxact = TwoPhaseGetGXact(fxid, lock_held);
return gxact->pgprocno;
}
/*
* TwoPhaseGetDummyProc
- * Get the PGPROC that represents a prepared transaction specified by XID
+ * Get the PGPROC that represents a prepared transaction
*
* If lock_held is set to true, TwoPhaseStateLock will not be taken, so the
* caller had better hold it.
*/
PGPROC *
-TwoPhaseGetDummyProc(TransactionId xid, bool lock_held)
+TwoPhaseGetDummyProc(FullTransactionId fxid, bool lock_held)
{
- GlobalTransaction gxact = TwoPhaseGetGXact(xid, lock_held);
+ GlobalTransaction gxact = TwoPhaseGetGXact(fxid, lock_held);
return GetPGProcByNumber(gxact->pgprocno);
}
@@ -942,10 +946,8 @@ AdjustToFullTransactionId(TransactionId xid)
}
static inline int
-TwoPhaseFilePath(char *path, TransactionId xid)
+TwoPhaseFilePath(char *path, FullTransactionId fxid)
{
- FullTransactionId fxid = AdjustToFullTransactionId(xid);
-
return snprintf(path, MAXPGPATH, TWOPHASE_DIR "/%08X%08X",
EpochFromFullTransactionId(fxid),
XidFromFullTransactionId(fxid));
@@ -1049,7 +1051,7 @@ void
StartPrepare(GlobalTransaction gxact)
{
PGPROC *proc = GetPGProcByNumber(gxact->pgprocno);
- TransactionId xid = gxact->xid;
+ TransactionId xid = XidFromFullTransactionId(gxact->fxid);
TwoPhaseFileHeader hdr;
TransactionId *children;
RelFileLocator *commitrels;
@@ -1181,7 +1183,11 @@ EndPrepare(GlobalTransaction gxact)
* starting immediately after the WAL record is inserted could complete
* without fsync'ing our state file. (This is essentially the same kind
* of race condition as the COMMIT-to-clog-write case that
- * RecordTransactionCommit uses DELAY_CHKPT_START for; see notes there.)
+ * RecordTransactionCommit uses DELAY_CHKPT_IN_COMMIT for; see notes
+ * there.) Note that DELAY_CHKPT_IN_COMMIT is used to find transactions in
+ * the critical commit section. We need to know about such transactions
+ * for conflict detection in logical replication. See
+ * GetOldestActiveTransactionId(true, false) and its use.
*
* We save the PREPARE record's location in the gxact for later use by
* CheckPointTwoPhase.
@@ -1281,10 +1287,11 @@ RegisterTwoPhaseRecord(TwoPhaseRmgrId rmid, uint16 info,
* If it looks OK (has a valid magic number and CRC), return the palloc'd
* contents of the file, issuing an error when finding corrupted data. If
* missing_ok is true, which indicates that missing files can be safely
- * ignored, then return NULL. This state can be reached when doing recovery.
+ * ignored, then return NULL. This state can be reached when doing recovery
+ * after discarding two-phase files from frozen epochs.
*/
static char *
-ReadTwoPhaseFile(TransactionId xid, bool missing_ok)
+ReadTwoPhaseFile(FullTransactionId fxid, bool missing_ok)
{
char path[MAXPGPATH];
char *buf;
@@ -1296,7 +1303,7 @@ ReadTwoPhaseFile(TransactionId xid, bool missing_ok)
file_crc;
int r;
- TwoPhaseFilePath(path, xid);
+ TwoPhaseFilePath(path, fxid);
fd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
if (fd < 0)
@@ -1426,12 +1433,12 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len)
if (errormsg)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read two-phase state from WAL at %X/%X: %s",
+ errmsg("could not read two-phase state from WAL at %X/%08X: %s",
LSN_FORMAT_ARGS(lsn), errormsg)));
else
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read two-phase state from WAL at %X/%X",
+ errmsg("could not read two-phase state from WAL at %X/%08X",
LSN_FORMAT_ARGS(lsn))));
}
@@ -1439,7 +1446,7 @@ XlogReadTwoPhaseData(XLogRecPtr lsn, char **buf, int *len)
(XLogRecGetInfo(xlogreader) & XLOG_XACT_OPMASK) != XLOG_XACT_PREPARE)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("expected two-phase state data is not present in WAL at %X/%X",
+ errmsg("expected two-phase state data is not present in WAL at %X/%08X",
LSN_FORMAT_ARGS(lsn))));
if (len != NULL)
@@ -1461,6 +1468,7 @@ StandbyTransactionIdIsPrepared(TransactionId xid)
char *buf;
TwoPhaseFileHeader *hdr;
bool result;
+ FullTransactionId fxid;
Assert(TransactionIdIsValid(xid));
@@ -1468,7 +1476,8 @@ StandbyTransactionIdIsPrepared(TransactionId xid)
return false; /* nothing to do */
/* Read and validate file */
- buf = ReadTwoPhaseFile(xid, true);
+ fxid = AdjustToFullTransactionId(xid);
+ buf = ReadTwoPhaseFile(fxid, true);
if (buf == NULL)
return false;
@@ -1488,6 +1497,7 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
{
GlobalTransaction gxact;
PGPROC *proc;
+ FullTransactionId fxid;
TransactionId xid;
bool ondisk;
char *buf;
@@ -1509,7 +1519,8 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
*/
gxact = LockGXact(gid, GetUserId());
proc = GetPGProcByNumber(gxact->pgprocno);
- xid = gxact->xid;
+ fxid = gxact->fxid;
+ xid = XidFromFullTransactionId(fxid);
/*
* Read and validate 2PC state data. State data will typically be stored
@@ -1517,7 +1528,7 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
* to disk if for some reason they have lived for a long time.
*/
if (gxact->ondisk)
- buf = ReadTwoPhaseFile(xid, false);
+ buf = ReadTwoPhaseFile(fxid, false);
else
XlogReadTwoPhaseData(gxact->prepare_start_lsn, &buf, NULL);
@@ -1636,11 +1647,11 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
/* And now do the callbacks */
if (isCommit)
- ProcessRecords(bufptr, xid, twophase_postcommit_callbacks);
+ ProcessRecords(bufptr, fxid, twophase_postcommit_callbacks);
else
- ProcessRecords(bufptr, xid, twophase_postabort_callbacks);
+ ProcessRecords(bufptr, fxid, twophase_postabort_callbacks);
- PredicateLockTwoPhaseFinish(xid, isCommit);
+ PredicateLockTwoPhaseFinish(fxid, isCommit);
/*
* Read this value while holding the two-phase lock, as the on-disk 2PC
@@ -1664,7 +1675,7 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
* And now we can clean up any files we may have left.
*/
if (ondisk)
- RemoveTwoPhaseFile(xid, true);
+ RemoveTwoPhaseFile(fxid, true);
MyLockedGxact = NULL;
@@ -1677,7 +1688,7 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
* Scan 2PC state data in memory and call the indicated callbacks for each 2PC record.
*/
static void
-ProcessRecords(char *bufptr, TransactionId xid,
+ProcessRecords(char *bufptr, FullTransactionId fxid,
const TwoPhaseCallback callbacks[])
{
for (;;)
@@ -1691,24 +1702,28 @@ ProcessRecords(char *bufptr, TransactionId xid,
bufptr += MAXALIGN(sizeof(TwoPhaseRecordOnDisk));
if (callbacks[record->rmid] != NULL)
- callbacks[record->rmid] (xid, record->info, bufptr, record->len);
+ callbacks[record->rmid] (fxid, record->info, bufptr, record->len);
bufptr += MAXALIGN(record->len);
}
}
/*
- * Remove the 2PC file for the specified XID.
+ * Remove the 2PC file.
*
* If giveWarning is false, do not complain about file-not-present;
* this is an expected case during WAL replay.
+ *
+ * This routine is used at early stages of recovery, where future and
+ * past orphaned files are checked, hence the FullTransactionId is needed
+ * to build a complete file name fit for removal.
*/
static void
-RemoveTwoPhaseFile(TransactionId xid, bool giveWarning)
+RemoveTwoPhaseFile(FullTransactionId fxid, bool giveWarning)
{
char path[MAXPGPATH];
- TwoPhaseFilePath(path, xid);
+ TwoPhaseFilePath(path, fxid);
if (unlink(path))
if (errno != ENOENT || giveWarning)
ereport(WARNING,
@@ -1723,7 +1738,7 @@ RemoveTwoPhaseFile(TransactionId xid, bool giveWarning)
* Note: content and len don't include CRC.
*/
static void
-RecreateTwoPhaseFile(TransactionId xid, void *content, int len)
+RecreateTwoPhaseFile(FullTransactionId fxid, void *content, int len)
{
char path[MAXPGPATH];
pg_crc32c statefile_crc;
@@ -1734,7 +1749,7 @@ RecreateTwoPhaseFile(TransactionId xid, void *content, int len)
COMP_CRC32C(statefile_crc, content, len);
FIN_CRC32C(statefile_crc);
- TwoPhaseFilePath(path, xid);
+ TwoPhaseFilePath(path, fxid);
fd = OpenTransientFile(path,
O_CREAT | O_TRUNC | O_WRONLY | PG_BINARY);
@@ -1846,7 +1861,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
int len;
XlogReadTwoPhaseData(gxact->prepare_start_lsn, &buf, &len);
- RecreateTwoPhaseFile(gxact->xid, buf, len);
+ RecreateTwoPhaseFile(gxact->fxid, buf, len);
gxact->ondisk = true;
gxact->prepare_start_lsn = InvalidXLogRecPtr;
gxact->prepare_end_lsn = InvalidXLogRecPtr;
@@ -1897,19 +1912,17 @@ restoreTwoPhaseData(void)
if (strlen(clde->d_name) == 16 &&
strspn(clde->d_name, "0123456789ABCDEF") == 16)
{
- TransactionId xid;
FullTransactionId fxid;
char *buf;
fxid = FullTransactionIdFromU64(strtou64(clde->d_name, NULL, 16));
- xid = XidFromFullTransactionId(fxid);
- buf = ProcessTwoPhaseBuffer(xid, InvalidXLogRecPtr,
+ buf = ProcessTwoPhaseBuffer(fxid, InvalidXLogRecPtr,
true, false, false);
if (buf == NULL)
continue;
- PrepareRedoAdd(buf, InvalidXLogRecPtr,
+ PrepareRedoAdd(fxid, buf, InvalidXLogRecPtr,
InvalidXLogRecPtr, InvalidRepOriginId);
}
}
@@ -1968,9 +1981,7 @@ PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p)
Assert(gxact->inredo);
- xid = gxact->xid;
-
- buf = ProcessTwoPhaseBuffer(xid,
+ buf = ProcessTwoPhaseBuffer(gxact->fxid,
gxact->prepare_start_lsn,
gxact->ondisk, false, true);
@@ -1981,6 +1992,7 @@ PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p)
* OK, we think this file is valid. Incorporate xid into the
* running-minimum result.
*/
+ xid = XidFromFullTransactionId(gxact->fxid);
if (TransactionIdPrecedes(xid, result))
result = xid;
@@ -2036,15 +2048,12 @@ StandbyRecoverPreparedTransactions(void)
LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
- TransactionId xid;
char *buf;
GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
Assert(gxact->inredo);
- xid = gxact->xid;
-
- buf = ProcessTwoPhaseBuffer(xid,
+ buf = ProcessTwoPhaseBuffer(gxact->fxid,
gxact->prepare_start_lsn,
gxact->ondisk, true, false);
if (buf != NULL)
@@ -2077,16 +2086,14 @@ RecoverPreparedTransactions(void)
LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
{
- TransactionId xid;
char *buf;
GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
+ FullTransactionId fxid = gxact->fxid;
char *bufptr;
TwoPhaseFileHeader *hdr;
TransactionId *subxids;
const char *gid;
- xid = gxact->xid;
-
/*
* Reconstruct subtrans state for the transaction --- needed because
* pg_subtrans is not preserved over a restart. Note that we are
@@ -2096,17 +2103,20 @@ RecoverPreparedTransactions(void)
* SubTransSetParent has been set before, if the prepared transaction
* generated xid assignment records.
*/
- buf = ProcessTwoPhaseBuffer(xid,
+ buf = ProcessTwoPhaseBuffer(gxact->fxid,
gxact->prepare_start_lsn,
gxact->ondisk, true, false);
if (buf == NULL)
continue;
ereport(LOG,
- (errmsg("recovering prepared transaction %u from shared memory", xid)));
+ (errmsg("recovering prepared transaction %u of epoch %u from shared memory",
+ XidFromFullTransactionId(gxact->fxid),
+ EpochFromFullTransactionId(gxact->fxid))));
hdr = (TwoPhaseFileHeader *) buf;
- Assert(TransactionIdEquals(hdr->xid, xid));
+ Assert(TransactionIdEquals(hdr->xid,
+ XidFromFullTransactionId(gxact->fxid)));
bufptr = buf + MAXALIGN(sizeof(TwoPhaseFileHeader));
gid = (const char *) bufptr;
bufptr += MAXALIGN(hdr->gidlen);
@@ -2122,7 +2132,7 @@ RecoverPreparedTransactions(void)
* Recreate its GXACT and dummy PGPROC. But, check whether it was
* added in redo and already has a shmem entry for it.
*/
- MarkAsPreparingGuts(gxact, xid, gid,
+ MarkAsPreparingGuts(gxact, gxact->fxid, gid,
hdr->prepared_at,
hdr->owner, hdr->database);
@@ -2137,7 +2147,7 @@ RecoverPreparedTransactions(void)
/*
* Recover other state (notably locks) using resource managers.
*/
- ProcessRecords(bufptr, xid, twophase_recover_callbacks);
+ ProcessRecords(bufptr, fxid, twophase_recover_callbacks);
/*
* Release locks held by the standby process after we process each
@@ -2145,7 +2155,7 @@ RecoverPreparedTransactions(void)
* additional locks at any one time.
*/
if (InHotStandby)
- StandbyReleaseLockTree(xid, hdr->nsubxacts, subxids);
+ StandbyReleaseLockTree(hdr->xid, hdr->nsubxacts, subxids);
/*
* We're done with recovering this transaction. Clear MyLockedGxact,
@@ -2164,7 +2174,7 @@ RecoverPreparedTransactions(void)
/*
* ProcessTwoPhaseBuffer
*
- * Given a transaction id, read it either from disk or read it directly
+ * Given a FullTransactionId, read it either from disk or read it directly
* via shmem xlog record pointer using the provided "prepare_start_lsn".
*
* If setParent is true, set up subtransaction parent linkages.
@@ -2173,13 +2183,12 @@ RecoverPreparedTransactions(void)
* value scanned.
*/
static char *
-ProcessTwoPhaseBuffer(TransactionId xid,
+ProcessTwoPhaseBuffer(FullTransactionId fxid,
XLogRecPtr prepare_start_lsn,
bool fromdisk,
bool setParent, bool setNextXid)
{
FullTransactionId nextXid = TransamVariables->nextXid;
- TransactionId origNextXid = XidFromFullTransactionId(nextXid);
TransactionId *subxids;
char *buf;
TwoPhaseFileHeader *hdr;
@@ -2191,41 +2200,46 @@ ProcessTwoPhaseBuffer(TransactionId xid,
Assert(prepare_start_lsn != InvalidXLogRecPtr);
/* Already processed? */
- if (TransactionIdDidCommit(xid) || TransactionIdDidAbort(xid))
+ if (TransactionIdDidCommit(XidFromFullTransactionId(fxid)) ||
+ TransactionIdDidAbort(XidFromFullTransactionId(fxid)))
{
if (fromdisk)
{
ereport(WARNING,
- (errmsg("removing stale two-phase state file for transaction %u",
- xid)));
- RemoveTwoPhaseFile(xid, true);
+ (errmsg("removing stale two-phase state file for transaction %u of epoch %u",
+ XidFromFullTransactionId(fxid),
+ EpochFromFullTransactionId(fxid))));
+ RemoveTwoPhaseFile(fxid, true);
}
else
{
ereport(WARNING,
- (errmsg("removing stale two-phase state from memory for transaction %u",
- xid)));
- PrepareRedoRemove(xid, true);
+ (errmsg("removing stale two-phase state from memory for transaction %u of epoch %u",
+ XidFromFullTransactionId(fxid),
+ EpochFromFullTransactionId(fxid))));
+ PrepareRedoRemoveFull(fxid, true);
}
return NULL;
}
/* Reject XID if too new */
- if (TransactionIdFollowsOrEquals(xid, origNextXid))
+ if (FullTransactionIdFollowsOrEquals(fxid, nextXid))
{
if (fromdisk)
{
ereport(WARNING,
- (errmsg("removing future two-phase state file for transaction %u",
- xid)));
- RemoveTwoPhaseFile(xid, true);
+ (errmsg("removing future two-phase state file for transaction %u of epoch %u",
+ XidFromFullTransactionId(fxid),
+ EpochFromFullTransactionId(fxid))));
+ RemoveTwoPhaseFile(fxid, true);
}
else
{
ereport(WARNING,
- (errmsg("removing future two-phase state from memory for transaction %u",
- xid)));
- PrepareRedoRemove(xid, true);
+ (errmsg("removing future two-phase state from memory for transaction %u of epoch %u",
+ XidFromFullTransactionId(fxid),
+ EpochFromFullTransactionId(fxid))));
+ PrepareRedoRemoveFull(fxid, true);
}
return NULL;
}
@@ -2233,7 +2247,7 @@ ProcessTwoPhaseBuffer(TransactionId xid,
if (fromdisk)
{
/* Read and validate file */
- buf = ReadTwoPhaseFile(xid, false);
+ buf = ReadTwoPhaseFile(fxid, false);
}
else
{
@@ -2243,18 +2257,20 @@ ProcessTwoPhaseBuffer(TransactionId xid,
/* Deconstruct header */
hdr = (TwoPhaseFileHeader *) buf;
- if (!TransactionIdEquals(hdr->xid, xid))
+ if (!TransactionIdEquals(hdr->xid, XidFromFullTransactionId(fxid)))
{
if (fromdisk)
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("corrupted two-phase state file for transaction %u",
- xid)));
+ errmsg("corrupted two-phase state file for transaction %u of epoch %u",
+ XidFromFullTransactionId(fxid),
+ EpochFromFullTransactionId(fxid))));
else
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("corrupted two-phase state in memory for transaction %u",
- xid)));
+ errmsg("corrupted two-phase state in memory for transaction %u of epoch %u",
+ XidFromFullTransactionId(fxid),
+ EpochFromFullTransactionId(fxid))));
}
/*
@@ -2268,14 +2284,14 @@ ProcessTwoPhaseBuffer(TransactionId xid,
{
TransactionId subxid = subxids[i];
- Assert(TransactionIdFollows(subxid, xid));
+ Assert(TransactionIdFollows(subxid, XidFromFullTransactionId(fxid)));
/* update nextXid if needed */
if (setNextXid)
AdvanceNextFullTransactionIdPastXid(subxid);
if (setParent)
- SubTransSetParent(subxid, xid);
+ SubTransSetParent(subxid, XidFromFullTransactionId(fxid));
}
return buf;
@@ -2286,7 +2302,7 @@ ProcessTwoPhaseBuffer(TransactionId xid,
* RecordTransactionCommitPrepared
*
* This is basically the same as RecordTransactionCommit (q.v. if you change
- * this function): in particular, we must set DELAY_CHKPT_START to avoid a
+ * this function): in particular, we must set DELAY_CHKPT_IN_COMMIT to avoid a
* race condition.
*
* We know the transaction made at least one XLOG entry (its PREPARE),
@@ -2306,7 +2322,7 @@ RecordTransactionCommitPrepared(TransactionId xid,
const char *gid)
{
XLogRecPtr recptr;
- TimestampTz committs = GetCurrentTimestamp();
+ TimestampTz committs;
bool replorigin;
/*
@@ -2319,8 +2335,24 @@ RecordTransactionCommitPrepared(TransactionId xid,
START_CRIT_SECTION();
/* See notes in RecordTransactionCommit */
- Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
- MyProc->delayChkptFlags |= DELAY_CHKPT_START;
+ Assert((MyProc->delayChkptFlags & DELAY_CHKPT_IN_COMMIT) == 0);
+ MyProc->delayChkptFlags |= DELAY_CHKPT_IN_COMMIT;
+
+ /*
+ * Ensure that the DELAY_CHKPT_IN_COMMIT flag write is globally visible
+ * before the commit timestamp is written.
+ */
+ pg_write_barrier();
+
+ /*
+ * Note that it is important to set the committs value after marking
+ * ourselves as being in the commit critical section
+ * (DELAY_CHKPT_IN_COMMIT). This is because we want to ensure that all
+ * transactions that have acquired a commit timestamp are finished before
+ * we allow the logical replication client to advance its xid, which is
+ * used to hold back dead rows for conflict detection.
+ * See comments atop worker.c.
+ */
+ committs = GetCurrentTimestamp();
/*
* Emit the XLOG commit record. Note that we mark 2PC commits as
@@ -2369,7 +2401,7 @@ RecordTransactionCommitPrepared(TransactionId xid,
TransactionIdCommitTree(xid, nchildren, children);
/* Checkpoint can proceed now */
- MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
+ MyProc->delayChkptFlags &= ~DELAY_CHKPT_IN_COMMIT;
END_CRIT_SECTION();
@@ -2466,8 +2498,9 @@ RecordTransactionAbortPrepared(TransactionId xid,
* data, the entry is marked as located on disk.
*/
void
-PrepareRedoAdd(char *buf, XLogRecPtr start_lsn,
- XLogRecPtr end_lsn, RepOriginId origin_id)
+PrepareRedoAdd(FullTransactionId fxid, char *buf,
+ XLogRecPtr start_lsn, XLogRecPtr end_lsn,
+ RepOriginId origin_id)
{
TwoPhaseFileHeader *hdr = (TwoPhaseFileHeader *) buf;
char *bufptr;
@@ -2477,6 +2510,13 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn,
Assert(LWLockHeldByMeInMode(TwoPhaseStateLock, LW_EXCLUSIVE));
Assert(RecoveryInProgress());
+ if (!FullTransactionIdIsValid(fxid))
+ {
+ Assert(InRecovery);
+ fxid = FullTransactionIdFromAllowableAt(TransamVariables->nextXid,
+ hdr->xid);
+ }
+
bufptr = buf + MAXALIGN(sizeof(TwoPhaseFileHeader));
gid = (const char *) bufptr;
@@ -2505,14 +2545,15 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn,
{
char path[MAXPGPATH];
- TwoPhaseFilePath(path, hdr->xid);
+ Assert(InRecovery);
+ TwoPhaseFilePath(path, fxid);
if (access(path, F_OK) == 0)
{
ereport(reachedConsistency ? ERROR : WARNING,
(errmsg("could not recover two-phase state file for transaction %u",
hdr->xid),
- errdetail("Two-phase state file has been found in WAL record %X/%X, but this transaction has already been restored from disk.",
+ errdetail("Two-phase state file has been found in WAL record %X/%08X, but this transaction has already been restored from disk.",
LSN_FORMAT_ARGS(start_lsn))));
return;
}
@@ -2536,7 +2577,7 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn,
gxact->prepared_at = hdr->prepared_at;
gxact->prepare_start_lsn = start_lsn;
gxact->prepare_end_lsn = end_lsn;
- gxact->xid = hdr->xid;
+ gxact->fxid = fxid;
gxact->owner = hdr->owner;
gxact->locking_backend = INVALID_PROC_NUMBER;
gxact->valid = false;
@@ -2555,11 +2596,13 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn,
false /* backward */ , false /* WAL */ );
}
- elog(DEBUG2, "added 2PC data in shared memory for transaction %u", gxact->xid);
+ elog(DEBUG2, "added 2PC data in shared memory for transaction %u of epoch %u",
+ XidFromFullTransactionId(gxact->fxid),
+ EpochFromFullTransactionId(gxact->fxid));
}
/*
- * PrepareRedoRemove
+ * PrepareRedoRemoveFull
*
* Remove the corresponding gxact entry from TwoPhaseState. Also remove
* the 2PC file if a prepared transaction was saved via an earlier checkpoint.
@@ -2567,8 +2610,8 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn,
* Caller must hold TwoPhaseStateLock in exclusive mode, because TwoPhaseState
* is updated.
*/
-void
-PrepareRedoRemove(TransactionId xid, bool giveWarning)
+static void
+PrepareRedoRemoveFull(FullTransactionId fxid, bool giveWarning)
{
GlobalTransaction gxact = NULL;
int i;
@@ -2581,7 +2624,7 @@ PrepareRedoRemove(TransactionId xid, bool giveWarning)
{
gxact = TwoPhaseState->prepXacts[i];
- if (gxact->xid == xid)
+ if (FullTransactionIdEquals(gxact->fxid, fxid))
{
Assert(gxact->inredo);
found = true;
@@ -2598,13 +2641,29 @@ PrepareRedoRemove(TransactionId xid, bool giveWarning)
/*
* And now we can clean up any files we may have left.
*/
- elog(DEBUG2, "removing 2PC data for transaction %u", xid);
+ elog(DEBUG2, "removing 2PC data for transaction %u of epoch %u ",
+ XidFromFullTransactionId(fxid),
+ EpochFromFullTransactionId(fxid));
+
if (gxact->ondisk)
- RemoveTwoPhaseFile(xid, giveWarning);
+ RemoveTwoPhaseFile(fxid, giveWarning);
+
RemoveGXact(gxact);
}
/*
+ * Wrapper of PrepareRedoRemoveFull(), for TransactionIds.
+ */
+void
+PrepareRedoRemove(TransactionId xid, bool giveWarning)
+{
+ FullTransactionId fxid =
+ FullTransactionIdFromAllowableAt(TransamVariables->nextXid, xid);
+
+ PrepareRedoRemoveFull(fxid, giveWarning);
+}
+
+/*
* LookupGXact
* Check if the prepared transaction with the given GID, lsn and timestamp
* exists.
@@ -2648,7 +2707,7 @@ LookupGXact(const char *gid, XLogRecPtr prepare_end_lsn,
* between publisher and subscriber.
*/
if (gxact->ondisk)
- buf = ReadTwoPhaseFile(gxact->xid, false);
+ buf = ReadTwoPhaseFile(gxact->fxid, false);
else
{
Assert(gxact->prepare_start_lsn);
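
Since gxact->fxid now carries the epoch, the 16-hex-digit state file name can be rebuilt losslessly from the FullTransactionId alone, which is what lets recovery classify orphaned files from other epochs. A self-contained sketch of the naming scheme produced by TwoPhaseFilePath() above:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint64_t fxid = ((uint64_t) 1 << 32) | 0x2A8;   /* epoch 1, xid 680 */
        char     path[64];

        snprintf(path, sizeof(path), "pg_twophase/%08X%08X",
                 (uint32_t) (fxid >> 32), (uint32_t) fxid);
        puts(path);             /* -> pg_twophase/00000001000002A8 */
        return 0;
    }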
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index b885513f765..b46e7e9c2a6 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -1431,10 +1431,22 @@ RecordTransactionCommit(void)
* without holding the ProcArrayLock, since we're the only one
* modifying it. This makes checkpoint's determination of which xacts
* are delaying the checkpoint a bit fuzzy, but it doesn't matter.
+ *
+ * Note, it is important to get the commit timestamp after marking the
+ * transaction in the commit critical section. See
+ * RecordTransactionCommitPrepared.
*/
- Assert((MyProc->delayChkptFlags & DELAY_CHKPT_START) == 0);
+ Assert((MyProc->delayChkptFlags & DELAY_CHKPT_IN_COMMIT) == 0);
START_CRIT_SECTION();
- MyProc->delayChkptFlags |= DELAY_CHKPT_START;
+ MyProc->delayChkptFlags |= DELAY_CHKPT_IN_COMMIT;
+
+ Assert(xactStopTimestamp == 0);
+
+ /*
+ * Ensure that the DELAY_CHKPT_IN_COMMIT flag write is globally visible
+ * before the commit timestamp is written.
+ */
+ pg_write_barrier();
/*
* Insert the commit XLOG record.
@@ -1537,7 +1549,7 @@ RecordTransactionCommit(void)
*/
if (markXidCommitted)
{
- MyProc->delayChkptFlags &= ~DELAY_CHKPT_START;
+ MyProc->delayChkptFlags &= ~DELAY_CHKPT_IN_COMMIT;
END_CRIT_SECTION();
}
@@ -2515,7 +2527,7 @@ static void
PrepareTransaction(void)
{
TransactionState s = CurrentTransactionState;
- TransactionId xid = GetCurrentTransactionId();
+ FullTransactionId fxid = GetCurrentFullTransactionId();
GlobalTransaction gxact;
TimestampTz prepared_at;
@@ -2644,7 +2656,7 @@ PrepareTransaction(void)
* Reserve the GID for this transaction. This could fail if the requested
* GID is invalid or already in use.
*/
- gxact = MarkAsPreparing(xid, prepareGID, prepared_at,
+ gxact = MarkAsPreparing(fxid, prepareGID, prepared_at,
GetUserId(), MyDatabaseId);
prepareGID = NULL;
@@ -2694,7 +2706,7 @@ PrepareTransaction(void)
* ProcArrayClearTransaction(). Otherwise, a GetLockConflicts() would
* conclude "xact already committed or aborted" for our locks.
*/
- PostPrepare_Locks(xid);
+ PostPrepare_Locks(fxid);
/*
* Let others know about no transaction in progress by me. This has to be
@@ -2738,9 +2750,9 @@ PrepareTransaction(void)
PostPrepare_smgr();
- PostPrepare_MultiXact(xid);
+ PostPrepare_MultiXact(fxid);
- PostPrepare_PredicateLocks(xid);
+ PostPrepare_PredicateLocks(fxid);
ResourceOwnerRelease(TopTransactionResourceOwner,
RESOURCE_RELEASE_LOCKS,
@@ -6420,7 +6432,8 @@ xact_redo(XLogReaderState *record)
* gxact entry.
*/
LWLockAcquire(TwoPhaseStateLock, LW_EXCLUSIVE);
- PrepareRedoAdd(XLogRecGetData(record),
+ PrepareRedoAdd(InvalidFullTransactionId,
+ XLogRecGetData(record),
record->ReadRecPtr,
record->EndRecPtr,
XLogRecGetOrigin(record));
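
The barrier added to both RecordTransactionCommit() and RecordTransactionCommitPrepared() orders two stores: the DELAY_CHKPT_IN_COMMIT flag must become visible before the commit timestamp is acquired. A reader relying on that ordering must read in the opposite order with a matching read barrier. A schematic sketch; the reader side here is illustrative, not the actual GetOldestActiveTransactionId() code:

    /* writer: committing backend, as in the hunks above */
    MyProc->delayChkptFlags |= DELAY_CHKPT_IN_COMMIT;
    pg_write_barrier();                 /* flag store is visible first */
    committs = GetCurrentTimestamp();

    /* reader: schematic checkpoint-side check */
    flags = proc->delayChkptFlags;      /* read the flag first ... */
    pg_read_barrier();
    /* ... then any state derived after the commit timestamp was taken */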
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 1914859b2ee..9a4de1616bc 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -96,6 +96,7 @@
#include "utils/guc_hooks.h"
#include "utils/guc_tables.h"
#include "utils/injection_point.h"
+#include "utils/pgstat_internal.h"
#include "utils/ps_status.h"
#include "utils/relmapper.h"
#include "utils/snapmgr.h"
@@ -449,7 +450,6 @@ typedef struct XLogCtlData
/* Protected by info_lck: */
XLogwrtRqst LogwrtRqst;
XLogRecPtr RedoRecPtr; /* a recent copy of Insert->RedoRecPtr */
- FullTransactionId ckptFullXid; /* nextXid of latest checkpoint */
XLogRecPtr asyncXactLSN; /* LSN of newest async commit/abort */
XLogRecPtr replicationSlotMinLSN; /* oldest LSN needed by any slot */
@@ -703,7 +703,7 @@ static void InitControlFile(uint64 sysidentifier, uint32 data_checksum_version);
static void WriteControlFile(void);
static void ReadControlFile(void);
static void UpdateControlFile(void);
-static char *str_time(pg_time_t tnow);
+static char *str_time(pg_time_t tnow, char *buf, size_t bufsize);
static int get_sync_bit(int method);
@@ -1028,7 +1028,7 @@ XLogInsertRecord(XLogRecData *rdata,
oldCxt = MemoryContextSwitchTo(walDebugCxt);
initStringInfo(&buf);
- appendStringInfo(&buf, "INSERT @ %X/%X: ", LSN_FORMAT_ARGS(EndPos));
+ appendStringInfo(&buf, "INSERT @ %X/%08X: ", LSN_FORMAT_ARGS(EndPos));
/*
* We have to piece together the WAL record data from the XLogRecData
@@ -1092,6 +1092,9 @@ XLogInsertRecord(XLogRecData *rdata,
pgWalUsage.wal_bytes += rechdr->xl_tot_len;
pgWalUsage.wal_records++;
pgWalUsage.wal_fpi += num_fpi;
+
+ /* Required for the flush of pending WAL statistics */
+ pgstat_report_fixed = true;
}
return EndPos;
@@ -1549,8 +1552,8 @@ WaitXLogInsertionsToFinish(XLogRecPtr upto)
if (upto > reservedUpto)
{
ereport(LOG,
- (errmsg("request to flush past end of generated WAL; request %X/%X, current position %X/%X",
- LSN_FORMAT_ARGS(upto), LSN_FORMAT_ARGS(reservedUpto))));
+ errmsg("request to flush past end of generated WAL; request %X/%08X, current position %X/%08X",
+ LSN_FORMAT_ARGS(upto), LSN_FORMAT_ARGS(reservedUpto)));
upto = reservedUpto;
}
@@ -1716,7 +1719,7 @@ GetXLogBuffer(XLogRecPtr ptr, TimeLineID tli)
endptr = pg_atomic_read_u64(&XLogCtl->xlblocks[idx]);
if (expectedEndPtr != endptr)
- elog(PANIC, "could not find WAL buffer for %X/%X",
+ elog(PANIC, "could not find WAL buffer for %X/%08X",
LSN_FORMAT_ARGS(ptr));
}
else
@@ -1776,7 +1779,7 @@ WALReadFromBuffers(char *dstbuf, XLogRecPtr startptr, Size count,
inserted = pg_atomic_read_u64(&XLogCtl->logInsertResult);
if (startptr + count > inserted)
ereport(ERROR,
- errmsg("cannot read past end of generated WAL: requested %X/%X, current position %X/%X",
+ errmsg("cannot read past end of generated WAL: requested %X/%08X, current position %X/%08X",
LSN_FORMAT_ARGS(startptr + count),
LSN_FORMAT_ARGS(inserted)));
@@ -2109,6 +2112,12 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic)
LWLockRelease(WALWriteLock);
pgWalUsage.wal_buffers_full++;
TRACE_POSTGRESQL_WAL_BUFFER_WRITE_DIRTY_DONE();
+
+ /*
+ * Required for the flush of pending WAL stats data, as
+ * pgWalUsage was just updated.
+ */
+ pgstat_report_fixed = true;
}
}
}
@@ -2281,7 +2290,7 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic)
#ifdef WAL_DEBUG
if (XLOG_DEBUG && npages > 0)
{
- elog(DEBUG1, "initialized %d pages, up to %X/%X",
+ elog(DEBUG1, "initialized %d pages, up to %X/%08X",
npages, LSN_FORMAT_ARGS(NewPageEndPtr));
}
#endif
@@ -2347,25 +2356,6 @@ check_wal_segment_size(int *newval, void **extra, GucSource source)
}
/*
- * GUC check_hook for max_slot_wal_keep_size
- *
- * We don't allow the value of max_slot_wal_keep_size other than -1 during the
- * binary upgrade. See start_postmaster() in pg_upgrade for more details.
- */
-bool
-check_max_slot_wal_keep_size(int *newval, void **extra, GucSource source)
-{
- if (IsBinaryUpgrade && *newval != -1)
- {
- GUC_check_errdetail("\"%s\" must be set to -1 during binary upgrade mode.",
- "max_slot_wal_keep_size");
- return false;
- }
-
- return true;
-}
-
-/*
* At a checkpoint, how many WAL segments to recycle as preallocated future
* XLOG segments? Returns the highest segment that should be preallocated.
*/
@@ -2492,7 +2482,7 @@ XLogWrite(XLogwrtRqst WriteRqst, TimeLineID tli, bool flexible)
XLogRecPtr EndPtr = pg_atomic_read_u64(&XLogCtl->xlblocks[curridx]);
if (LogwrtResult.Write >= EndPtr)
- elog(PANIC, "xlog write request %X/%X is past end of log %X/%X",
+ elog(PANIC, "xlog write request %X/%08X is past end of log %X/%08X",
LSN_FORMAT_ARGS(LogwrtResult.Write),
LSN_FORMAT_ARGS(EndPtr));
@@ -2892,7 +2882,7 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
newMinRecoveryPoint = GetCurrentReplayRecPtr(&newMinRecoveryPointTLI);
if (!force && newMinRecoveryPoint < lsn)
elog(WARNING,
- "xlog min recovery request %X/%X is past current point %X/%X",
+ "xlog min recovery request %X/%08X is past current point %X/%08X",
LSN_FORMAT_ARGS(lsn), LSN_FORMAT_ARGS(newMinRecoveryPoint));
/* update control file */
@@ -2905,9 +2895,9 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
LocalMinRecoveryPointTLI = newMinRecoveryPointTLI;
ereport(DEBUG2,
- (errmsg_internal("updated min recovery point to %X/%X on timeline %u",
- LSN_FORMAT_ARGS(newMinRecoveryPoint),
- newMinRecoveryPointTLI)));
+ errmsg_internal("updated min recovery point to %X/%08X on timeline %u",
+ LSN_FORMAT_ARGS(newMinRecoveryPoint),
+ newMinRecoveryPointTLI));
}
}
LWLockRelease(ControlFileLock);
@@ -2945,7 +2935,7 @@ XLogFlush(XLogRecPtr record)
#ifdef WAL_DEBUG
if (XLOG_DEBUG)
- elog(LOG, "xlog flush request %X/%X; write %X/%X; flush %X/%X",
+ elog(LOG, "xlog flush request %X/%08X; write %X/%08X; flush %X/%08X",
LSN_FORMAT_ARGS(record),
LSN_FORMAT_ARGS(LogwrtResult.Write),
LSN_FORMAT_ARGS(LogwrtResult.Flush));
@@ -3078,7 +3068,7 @@ XLogFlush(XLogRecPtr record)
*/
if (LogwrtResult.Flush < record)
elog(ERROR,
- "xlog flush request %X/%X is not satisfied --- flushed only to %X/%X",
+ "xlog flush request %X/%08X is not satisfied --- flushed only to %X/%08X",
LSN_FORMAT_ARGS(record),
LSN_FORMAT_ARGS(LogwrtResult.Flush));
}
@@ -3205,7 +3195,7 @@ XLogBackgroundFlush(void)
#ifdef WAL_DEBUG
if (XLOG_DEBUG)
- elog(LOG, "xlog bg flush request write %X/%X; flush: %X/%X, current is write %X/%X; flush %X/%X",
+ elog(LOG, "xlog bg flush request write %X/%08X; flush: %X/%08X, current is write %X/%08X; flush %X/%08X",
LSN_FORMAT_ARGS(WriteRqst.Write),
LSN_FORMAT_ARGS(WriteRqst.Flush),
LSN_FORMAT_ARGS(LogwrtResult.Write),
@@ -5381,11 +5371,9 @@ BootStrapXLOG(uint32 data_checksum_version)
}
static char *
-str_time(pg_time_t tnow)
+str_time(pg_time_t tnow, char *buf, size_t bufsize)
{
- char *buf = palloc(128);
-
- pg_strftime(buf, 128,
+ pg_strftime(buf, bufsize,
"%Y-%m-%d %H:%M:%S %Z",
pg_localtime(&tnow, log_timezone));
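
Moving the buffer to the caller removes a 128-byte palloc on every call, so the string can live on the stack for the duration of the message it feeds. A hypothetical caller under the new signature (the log message here is made up for illustration; the StartupXLOG() hunks below follow the same pattern):

	char		timebuf[128];

	/* timebuf is caller-owned; nothing to pfree afterwards */
	elog(LOG, "last checkpoint at %s",
		 str_time(ControlFile->checkPointCopy.time,
				  timebuf, sizeof(timebuf)));
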
@@ -5628,6 +5616,7 @@ StartupXLOG(void)
XLogRecPtr missingContrecPtr;
TransactionId oldestActiveXID;
bool promoted = false;
+ char timebuf[128];
/*
* We should have an aux process resource owner to use, and we should not
@@ -5656,25 +5645,29 @@ StartupXLOG(void)
*/
ereport(IsPostmasterEnvironment ? LOG : NOTICE,
(errmsg("database system was shut down at %s",
- str_time(ControlFile->time))));
+ str_time(ControlFile->time,
+ timebuf, sizeof(timebuf)))));
break;
case DB_SHUTDOWNED_IN_RECOVERY:
ereport(LOG,
(errmsg("database system was shut down in recovery at %s",
- str_time(ControlFile->time))));
+ str_time(ControlFile->time,
+ timebuf, sizeof(timebuf)))));
break;
case DB_SHUTDOWNING:
ereport(LOG,
(errmsg("database system shutdown was interrupted; last known up at %s",
- str_time(ControlFile->time))));
+ str_time(ControlFile->time,
+ timebuf, sizeof(timebuf)))));
break;
case DB_IN_CRASH_RECOVERY:
ereport(LOG,
(errmsg("database system was interrupted while in recovery at %s",
- str_time(ControlFile->time)),
+ str_time(ControlFile->time,
+ timebuf, sizeof(timebuf))),
errhint("This probably means that some data is corrupted and"
" you will have to use the last backup for recovery.")));
break;
@@ -5682,7 +5675,8 @@ StartupXLOG(void)
case DB_IN_ARCHIVE_RECOVERY:
ereport(LOG,
(errmsg("database system was interrupted while in recovery at log time %s",
- str_time(ControlFile->checkPointCopy.time)),
+ str_time(ControlFile->checkPointCopy.time,
+ timebuf, sizeof(timebuf))),
errhint("If this has occurred more than once some data might be corrupted"
" and you might need to choose an earlier recovery target.")));
break;
@@ -5690,7 +5684,8 @@ StartupXLOG(void)
case DB_IN_PRODUCTION:
ereport(LOG,
(errmsg("database system was interrupted; last known up at %s",
- str_time(ControlFile->time))));
+ str_time(ControlFile->time,
+ timebuf, sizeof(timebuf)))));
break;
default:
@@ -5763,7 +5758,6 @@ StartupXLOG(void)
SetMultiXactIdLimit(checkPoint.oldestMulti, checkPoint.oldestMultiDB, true);
SetCommitTsLimit(checkPoint.oldestCommitTsXid,
checkPoint.newestCommitTsXid);
- XLogCtl->ckptFullXid = checkPoint.nextXid;
/*
* Clear out any old relcache cache files. This is *necessary* if we do
@@ -6336,6 +6330,12 @@ StartupXLOG(void)
*/
CompleteCommitTsInitialization();
+ /* Clean up EndOfWalRecoveryInfo data to appease Valgrind leak checking */
+ if (endOfRecoveryInfo->lastPage)
+ pfree(endOfRecoveryInfo->lastPage);
+ pfree(endOfRecoveryInfo->recoveryStopReason);
+ pfree(endOfRecoveryInfo);
+
/*
* All done with end-of-recovery actions.
*
@@ -6505,7 +6505,7 @@ PerformRecoveryXLogAction(void)
else
{
RequestCheckpoint(CHECKPOINT_END_OF_RECOVERY |
- CHECKPOINT_IMMEDIATE |
+ CHECKPOINT_FAST |
CHECKPOINT_WAIT);
}
@@ -6814,7 +6814,7 @@ ShutdownXLOG(int code, Datum arg)
WalSndWaitStopping();
if (RecoveryInProgress())
- CreateRestartPoint(CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_IMMEDIATE);
+ CreateRestartPoint(CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_FAST);
else
{
/*
@@ -6826,7 +6826,7 @@ ShutdownXLOG(int code, Datum arg)
if (XLogArchivingActive())
RequestXLogSwitch(false);
- CreateCheckPoint(CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_IMMEDIATE);
+ CreateCheckPoint(CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_FAST);
}
}
@@ -6842,24 +6842,24 @@ LogCheckpointStart(int flags, bool restartpoint)
(errmsg("restartpoint starting:%s%s%s%s%s%s%s%s",
(flags & CHECKPOINT_IS_SHUTDOWN) ? " shutdown" : "",
(flags & CHECKPOINT_END_OF_RECOVERY) ? " end-of-recovery" : "",
- (flags & CHECKPOINT_IMMEDIATE) ? " immediate" : "",
+ (flags & CHECKPOINT_FAST) ? " fast" : "",
(flags & CHECKPOINT_FORCE) ? " force" : "",
(flags & CHECKPOINT_WAIT) ? " wait" : "",
(flags & CHECKPOINT_CAUSE_XLOG) ? " wal" : "",
(flags & CHECKPOINT_CAUSE_TIME) ? " time" : "",
- (flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" : "")));
+ (flags & CHECKPOINT_FLUSH_UNLOGGED) ? " flush-unlogged" : "")));
else
ereport(LOG,
/* translator: the placeholders show checkpoint options */
(errmsg("checkpoint starting:%s%s%s%s%s%s%s%s",
(flags & CHECKPOINT_IS_SHUTDOWN) ? " shutdown" : "",
(flags & CHECKPOINT_END_OF_RECOVERY) ? " end-of-recovery" : "",
- (flags & CHECKPOINT_IMMEDIATE) ? " immediate" : "",
+ (flags & CHECKPOINT_FAST) ? " fast" : "",
(flags & CHECKPOINT_FORCE) ? " force" : "",
(flags & CHECKPOINT_WAIT) ? " wait" : "",
(flags & CHECKPOINT_CAUSE_XLOG) ? " wal" : "",
(flags & CHECKPOINT_CAUSE_TIME) ? " time" : "",
- (flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" : "")));
+ (flags & CHECKPOINT_FLUSH_UNLOGGED) ? " flush-unlogged" : "")));
}
/*
@@ -6921,7 +6921,7 @@ LogCheckpointEnd(bool restartpoint)
"%d removed, %d recycled; write=%ld.%03d s, "
"sync=%ld.%03d s, total=%ld.%03d s; sync files=%d, "
"longest=%ld.%03d s, average=%ld.%03d s; distance=%d kB, "
- "estimate=%d kB; lsn=%X/%X, redo lsn=%X/%X",
+ "estimate=%d kB; lsn=%X/%08X, redo lsn=%X/%08X",
CheckpointStats.ckpt_bufs_written,
(double) CheckpointStats.ckpt_bufs_written * 100 / NBuffers,
CheckpointStats.ckpt_slru_written,
@@ -6945,7 +6945,7 @@ LogCheckpointEnd(bool restartpoint)
"%d removed, %d recycled; write=%ld.%03d s, "
"sync=%ld.%03d s, total=%ld.%03d s; sync files=%d, "
"longest=%ld.%03d s, average=%ld.%03d s; distance=%d kB, "
- "estimate=%d kB; lsn=%X/%X, redo lsn=%X/%X",
+ "estimate=%d kB; lsn=%X/%08X, redo lsn=%X/%08X",
CheckpointStats.ckpt_bufs_written,
(double) CheckpointStats.ckpt_bufs_written * 100 / NBuffers,
CheckpointStats.ckpt_slru_written,
@@ -7042,12 +7042,12 @@ update_checkpoint_display(int flags, bool restartpoint, bool reset)
* flags is a bitwise OR of the following:
* CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
* CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
- * CHECKPOINT_IMMEDIATE: finish the checkpoint ASAP,
- * ignoring checkpoint_completion_target parameter.
+ * CHECKPOINT_FAST: finish the checkpoint ASAP, ignoring
+ * checkpoint_completion_target parameter.
* CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
* since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
* CHECKPOINT_END_OF_RECOVERY).
- * CHECKPOINT_FLUSH_ALL: also flush buffers of unlogged tables.
+ * CHECKPOINT_FLUSH_UNLOGGED: also flush buffers of unlogged tables.
*
* Note: flags contains other bits, of interest here only for logging purposes.
* In particular note that this routine is synchronous and does not pay
@@ -7142,7 +7142,7 @@ CreateCheckPoint(int flags)
* starting snapshot of locks and transactions.
*/
if (!shutdown && XLogStandbyInfoActive())
- checkPoint.oldestActiveXid = GetOldestActiveTransactionId();
+ checkPoint.oldestActiveXid = GetOldestActiveTransactionId(false, true);
else
checkPoint.oldestActiveXid = InvalidTransactionId;
@@ -7456,11 +7456,6 @@ CreateCheckPoint(int flags)
UpdateControlFile();
LWLockRelease(ControlFileLock);
- /* Update shared-memory copy of checkpoint XID/epoch */
- SpinLockAcquire(&XLogCtl->info_lck);
- XLogCtl->ckptFullXid = checkPoint.nextXid;
- SpinLockRelease(&XLogCtl->info_lck);
-
/*
* We are now done with critical updates; no need for system panic if we
* have trouble while fooling with old log segments.
@@ -7498,6 +7493,10 @@ CreateCheckPoint(int flags)
if (PriorRedoPtr != InvalidXLogRecPtr)
UpdateCheckPointDistanceEstimate(RedoRecPtr - PriorRedoPtr);
+#ifdef USE_INJECTION_POINTS
+ INJECTION_POINT("checkpoint-before-old-wal-removal", NULL);
+#endif
+
/*
* Delete old log files, those no longer needed for last checkpoint to
* prevent the disk holding the xlog from growing full.
@@ -7637,7 +7636,7 @@ CreateOverwriteContrecordRecord(XLogRecPtr aborted_lsn, XLogRecPtr pagePtr,
if (!RecoveryInProgress())
elog(ERROR, "can only be used at end of recovery");
if (pagePtr % XLOG_BLCKSZ != 0)
- elog(ERROR, "invalid position for missing continuation record %X/%X",
+ elog(ERROR, "invalid position for missing continuation record %X/%08X",
LSN_FORMAT_ARGS(pagePtr));
/* The current WAL insert position should be right after the page header */
@@ -7648,7 +7647,7 @@ CreateOverwriteContrecordRecord(XLogRecPtr aborted_lsn, XLogRecPtr pagePtr,
startPos += SizeOfXLogShortPHD;
recptr = GetXLogInsertRecPtr();
if (recptr != startPos)
- elog(ERROR, "invalid WAL insert position %X/%X for OVERWRITE_CONTRECORD",
+ elog(ERROR, "invalid WAL insert position %X/%08X for OVERWRITE_CONTRECORD",
LSN_FORMAT_ARGS(recptr));
START_CRIT_SECTION();
@@ -7678,7 +7677,7 @@ CreateOverwriteContrecordRecord(XLogRecPtr aborted_lsn, XLogRecPtr pagePtr,
/* check that the record was inserted to the right place */
if (ProcLastRecPtr != startPos)
- elog(ERROR, "OVERWRITE_CONTRECORD was inserted to unexpected position %X/%X",
+ elog(ERROR, "OVERWRITE_CONTRECORD was inserted to unexpected position %X/%08X",
LSN_FORMAT_ARGS(ProcLastRecPtr));
XLogFlush(recptr);
@@ -7747,8 +7746,7 @@ RecoveryRestartPoint(const CheckPoint *checkPoint, XLogReaderState *record)
if (XLogHaveInvalidPages())
{
elog(DEBUG2,
- "could not record restart point at %X/%X because there "
- "are unresolved references to invalid pages",
+ "could not record restart point at %X/%08X because there are unresolved references to invalid pages",
LSN_FORMAT_ARGS(checkPoint->redo));
return;
}
@@ -7828,8 +7826,8 @@ CreateRestartPoint(int flags)
lastCheckPoint.redo <= ControlFile->checkPointCopy.redo)
{
ereport(DEBUG2,
- (errmsg_internal("skipping restartpoint, already performed at %X/%X",
- LSN_FORMAT_ARGS(lastCheckPoint.redo))));
+ errmsg_internal("skipping restartpoint, already performed at %X/%08X",
+ LSN_FORMAT_ARGS(lastCheckPoint.redo)));
UpdateMinRecoveryPoint(InvalidXLogRecPtr, true);
if (flags & CHECKPOINT_IS_SHUTDOWN)
@@ -8013,10 +8011,10 @@ CreateRestartPoint(int flags)
xtime = GetLatestXTime();
ereport((log_checkpoints ? LOG : DEBUG2),
- (errmsg("recovery restart point at %X/%X",
- LSN_FORMAT_ARGS(lastCheckPoint.redo)),
- xtime ? errdetail("Last completed transaction was at log time %s.",
- timestamptz_to_str(xtime)) : 0));
+ errmsg("recovery restart point at %X/%08X",
+ LSN_FORMAT_ARGS(lastCheckPoint.redo)),
+ xtime ? errdetail("Last completed transaction was at log time %s.",
+ timestamptz_to_str(xtime)) : 0);
/*
* Finally, execute archive_cleanup_command, if any.
@@ -8147,17 +8145,19 @@ KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo)
XLByteToSeg(recptr, currSegNo, wal_segment_size);
segno = currSegNo;
- /*
- * Calculate how many segments are kept by slots first, adjusting for
- * max_slot_wal_keep_size.
- */
+ /* Calculate how many segments are kept by slots. */
keep = XLogGetReplicationSlotMinimumLSN();
if (keep != InvalidXLogRecPtr && keep < recptr)
{
XLByteToSeg(keep, segno, wal_segment_size);
- /* Cap by max_slot_wal_keep_size ... */
- if (max_slot_wal_keep_size_mb >= 0)
+ /*
+ * Account for max_slot_wal_keep_size to avoid keeping more than
+ * configured. However, don't do that during a binary upgrade: if
+ * slots were to be invalidated because of this, it would not be
+ * possible to preserve logical slots during the upgrade.
+ */
+ if (max_slot_wal_keep_size_mb >= 0 && !IsBinaryUpgrade)
{
uint64 slot_keep_segs;
@@ -8277,8 +8277,8 @@ XLogRestorePoint(const char *rpName)
RecPtr = XLogInsert(RM_XLOG_ID, XLOG_RESTORE_POINT);
ereport(LOG,
- (errmsg("restore point \"%s\" created at %X/%X",
- rpName, LSN_FORMAT_ARGS(RecPtr))));
+ errmsg("restore point \"%s\" created at %X/%08X",
+ rpName, LSN_FORMAT_ARGS(RecPtr)));
return RecPtr;
}
@@ -8530,11 +8530,6 @@ xlog_redo(XLogReaderState *record)
ControlFile->checkPointCopy.nextXid = checkPoint.nextXid;
LWLockRelease(ControlFileLock);
- /* Update shared-memory copy of checkpoint XID/epoch */
- SpinLockAcquire(&XLogCtl->info_lck);
- XLogCtl->ckptFullXid = checkPoint.nextXid;
- SpinLockRelease(&XLogCtl->info_lck);
-
/*
* We should've already switched to the new TLI before replaying this
* record.
@@ -8591,11 +8586,6 @@ xlog_redo(XLogReaderState *record)
ControlFile->checkPointCopy.nextXid = checkPoint.nextXid;
LWLockRelease(ControlFileLock);
- /* Update shared-memory copy of checkpoint XID/epoch */
- SpinLockAcquire(&XLogCtl->info_lck);
- XLogCtl->ckptFullXid = checkPoint.nextXid;
- SpinLockRelease(&XLogCtl->info_lck);
-
/* TLI should not change in an on-line checkpoint */
(void) GetCurrentReplayRecPtr(&replayTLI);
if (checkPoint.ThisTimeLineID != replayTLI)
@@ -8943,9 +8933,8 @@ issue_xlog_fsync(int fd, XLogSegNo segno, TimeLineID tli)
* backup state and tablespace map.
*
* Input parameters are "state" (the backup state), "fast" (if true, we do
- * the checkpoint in immediate mode to make it faster), and "tablespaces"
- * (if non-NULL, indicates a list of tablespaceinfo structs describing the
- * cluster's tablespaces.).
+ * the checkpoint in fast mode), and "tablespaces" (if non-NULL, a list
+ * of tablespaceinfo structs describing the cluster's tablespaces).
*
* The tablespace map contents are appended to passed-in parameter
* tablespace_map and the caller is responsible for including it in the backup
@@ -9022,7 +9011,7 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces,
* work correctly, it is critical that sessionBackupState is only updated
* after this block is over.
*/
- PG_ENSURE_ERROR_CLEANUP(do_pg_abort_backup, DatumGetBool(true));
+ PG_ENSURE_ERROR_CLEANUP(do_pg_abort_backup, BoolGetDatum(true));
{
bool gotUniqueStartpoint = false;
DIR *tblspcdir;
@@ -9073,11 +9062,11 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces,
* during recovery means that checkpointer is running, we can use
* RequestCheckpoint() to establish a restartpoint.
*
- * We use CHECKPOINT_IMMEDIATE only if requested by user (via
- * passing fast = true). Otherwise this can take awhile.
+ * We use CHECKPOINT_FAST only if requested by the user (via passing
+ * fast = true). Otherwise this can take a while.
*/
RequestCheckpoint(CHECKPOINT_FORCE | CHECKPOINT_WAIT |
- (fast ? CHECKPOINT_IMMEDIATE : 0));
+ (fast ? CHECKPOINT_FAST : 0));
/*
* Now we need to fetch the checkpoint record location, and also
@@ -9261,7 +9250,7 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces,
state->starttime = (pg_time_t) time(NULL);
}
- PG_END_ENSURE_ERROR_CLEANUP(do_pg_abort_backup, DatumGetBool(true));
+ PG_END_ENSURE_ERROR_CLEANUP(do_pg_abort_backup, BoolGetDatum(true));
state->started_in_recovery = backup_started_in_recovery;
@@ -9601,7 +9590,7 @@ register_persistent_abort_backup_handler(void)
if (already_done)
return;
- before_shmem_exit(do_pg_abort_backup, DatumGetBool(false));
+ before_shmem_exit(do_pg_abort_backup, BoolGetDatum(false));
already_done = true;
}
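
The three DatumGetBool() to BoolGetDatum() changes above fix the direction of the conversion: BoolGetDatum() wraps a C bool into a Datum for the callback argument, while DatumGetBool() unwraps a Datum back into a bool. A sketch of the intended round trip (the registration helper and the warning text are invented for illustration):

/* callback: unwrap the Datum back into a bool */
static void
do_pg_abort_backup(int code, Datum arg)
{
	bool		emit_warning = DatumGetBool(arg);

	if (emit_warning)
		elog(WARNING, "aborting backup");	/* message shortened for the sketch */
}

/* registration: wrap the bool into a Datum */
static void
register_handler(void)
{
	before_shmem_exit(do_pg_abort_backup, BoolGetDatum(false));
}
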
diff --git a/src/backend/access/transam/xlogbackup.c b/src/backend/access/transam/xlogbackup.c
index 342590e0a46..cda4b38b7d6 100644
--- a/src/backend/access/transam/xlogbackup.c
+++ b/src/backend/access/transam/xlogbackup.c
@@ -42,7 +42,7 @@ build_backup_content(BackupState *state, bool ishistoryfile)
XLByteToSeg(state->startpoint, startsegno, wal_segment_size);
XLogFileName(startxlogfile, state->starttli, startsegno, wal_segment_size);
- appendStringInfo(result, "START WAL LOCATION: %X/%X (file %s)\n",
+ appendStringInfo(result, "START WAL LOCATION: %X/%08X (file %s)\n",
LSN_FORMAT_ARGS(state->startpoint), startxlogfile);
if (ishistoryfile)
@@ -52,11 +52,11 @@ build_backup_content(BackupState *state, bool ishistoryfile)
XLByteToSeg(state->stoppoint, stopsegno, wal_segment_size);
XLogFileName(stopxlogfile, state->stoptli, stopsegno, wal_segment_size);
- appendStringInfo(result, "STOP WAL LOCATION: %X/%X (file %s)\n",
+ appendStringInfo(result, "STOP WAL LOCATION: %X/%08X (file %s)\n",
LSN_FORMAT_ARGS(state->stoppoint), stopxlogfile);
}
- appendStringInfo(result, "CHECKPOINT LOCATION: %X/%X\n",
+ appendStringInfo(result, "CHECKPOINT LOCATION: %X/%08X\n",
LSN_FORMAT_ARGS(state->checkpointloc));
appendStringInfoString(result, "BACKUP METHOD: streamed\n");
appendStringInfo(result, "BACKUP FROM: %s\n",
@@ -81,7 +81,7 @@ build_backup_content(BackupState *state, bool ishistoryfile)
Assert(XLogRecPtrIsInvalid(state->istartpoint) == (state->istarttli == 0));
if (!XLogRecPtrIsInvalid(state->istartpoint))
{
- appendStringInfo(result, "INCREMENTAL FROM LSN: %X/%X\n",
+ appendStringInfo(result, "INCREMENTAL FROM LSN: %X/%08X\n",
LSN_FORMAT_ARGS(state->istartpoint));
appendStringInfo(result, "INCREMENTAL FROM TLI: %u\n",
state->istarttli);
diff --git a/src/backend/access/transam/xloginsert.c b/src/backend/access/transam/xloginsert.c
index 5ee9d0b028e..c7571429e8e 100644
--- a/src/backend/access/transam/xloginsert.c
+++ b/src/backend/access/transam/xloginsert.c
@@ -530,6 +530,18 @@ XLogInsert(RmgrId rmid, uint8 info)
}
/*
+ * Simple wrapper around XLogInsert() to insert a WAL record with
+ * elementary contents (currently only an int64 value is supported).
+ */
+XLogRecPtr
+XLogSimpleInsertInt64(RmgrId rmid, uint8 info, int64 value)
+{
+ XLogBeginInsert();
+ XLogRegisterData(&value, sizeof(value));
+ return XLogInsert(rmid, info);
+}
+
+/*
* Assemble a WAL record from the registered data and buffers into an
* XLogRecData chain, ready for insertion with XLogInsertRecord().
*
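
A hypothetical caller of the new XLogSimpleInsertInt64() helper; the rmgr ID and info byte below are placeholders, not identifiers introduced by this patch:

	int64		counter = 42;
	XLogRecPtr	recptr;

	/* one call instead of XLogBeginInsert() + XLogRegisterData() + XLogInsert() */
	recptr = XLogSimpleInsertInt64(RM_MYMODULE_ID, XLOG_MY_COUNTER, counter);
	XLogFlush(recptr);
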
diff --git a/src/backend/access/transam/xlogprefetcher.c b/src/backend/access/transam/xlogprefetcher.c
index 7735562db01..ed3aacabc98 100644
--- a/src/backend/access/transam/xlogprefetcher.c
+++ b/src/backend/access/transam/xlogprefetcher.c
@@ -546,7 +546,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
- "suppressing all readahead until %X/%X is replayed due to possible TLI change",
+ "suppressing all readahead until %X/%08X is replayed due to possible TLI change",
LSN_FORMAT_ARGS(record->lsn));
#endif
@@ -579,7 +579,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
- "suppressing prefetch in database %u until %X/%X is replayed due to raw file copy",
+ "suppressing prefetch in database %u until %X/%08X is replayed due to raw file copy",
rlocator.dbOid,
LSN_FORMAT_ARGS(record->lsn));
#endif
@@ -607,7 +607,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
- "suppressing prefetch in relation %u/%u/%u until %X/%X is replayed, which creates the relation",
+ "suppressing prefetch in relation %u/%u/%u until %X/%08X is replayed, which creates the relation",
xlrec->rlocator.spcOid,
xlrec->rlocator.dbOid,
xlrec->rlocator.relNumber,
@@ -630,7 +630,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
- "suppressing prefetch in relation %u/%u/%u from block %u until %X/%X is replayed, which truncates the relation",
+ "suppressing prefetch in relation %u/%u/%u from block %u until %X/%08X is replayed, which truncates the relation",
xlrec->rlocator.spcOid,
xlrec->rlocator.dbOid,
xlrec->rlocator.relNumber,
@@ -729,7 +729,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
{
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
- "suppressing all prefetch in relation %u/%u/%u until %X/%X is replayed, because the relation does not exist on disk",
+ "suppressing all prefetch in relation %u/%u/%u until %X/%08X is replayed, because the relation does not exist on disk",
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber,
@@ -750,7 +750,7 @@ XLogPrefetcherNextBlock(uintptr_t pgsr_private, XLogRecPtr *lsn)
{
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
- "suppressing prefetch in relation %u/%u/%u from block %u until %X/%X is replayed, because the relation is too small",
+ "suppressing prefetch in relation %u/%u/%u from block %u until %X/%08X is replayed, because the relation is too small",
reln->smgr_rlocator.locator.spcOid,
reln->smgr_rlocator.locator.dbOid,
reln->smgr_rlocator.locator.relNumber,
@@ -928,7 +928,7 @@ XLogPrefetcherIsFiltered(XLogPrefetcher *prefetcher, RelFileLocator rlocator,
{
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
- "prefetch of %u/%u/%u block %u suppressed; filtering until LSN %X/%X is replayed (blocks >= %u filtered)",
+ "prefetch of %u/%u/%u block %u suppressed; filtering until LSN %X/%08X is replayed (blocks >= %u filtered)",
rlocator.spcOid, rlocator.dbOid, rlocator.relNumber, blockno,
LSN_FORMAT_ARGS(filter->filter_until_replayed),
filter->filter_from_block);
@@ -944,7 +944,7 @@ XLogPrefetcherIsFiltered(XLogPrefetcher *prefetcher, RelFileLocator rlocator,
{
#ifdef XLOGPREFETCHER_DEBUG_LEVEL
elog(XLOGPREFETCHER_DEBUG_LEVEL,
- "prefetch of %u/%u/%u block %u suppressed; filtering until LSN %X/%X is replayed (whole database)",
+ "prefetch of %u/%u/%u block %u suppressed; filtering until LSN %X/%08X is replayed (whole database)",
rlocator.spcOid, rlocator.dbOid, rlocator.relNumber, blockno,
LSN_FORMAT_ARGS(filter->filter_until_replayed));
#endif
diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c
index 2790ade1f91..dcc8d4f9c1b 100644
--- a/src/backend/access/transam/xlogreader.c
+++ b/src/backend/access/transam/xlogreader.c
@@ -617,7 +617,7 @@ restart:
}
else if (targetRecOff < pageHeaderSize)
{
- report_invalid_record(state, "invalid record offset at %X/%X: expected at least %u, got %u",
+ report_invalid_record(state, "invalid record offset at %X/%08X: expected at least %u, got %u",
LSN_FORMAT_ARGS(RecPtr),
pageHeaderSize, targetRecOff);
goto err;
@@ -626,7 +626,7 @@ restart:
if ((((XLogPageHeader) state->readBuf)->xlp_info & XLP_FIRST_IS_CONTRECORD) &&
targetRecOff == pageHeaderSize)
{
- report_invalid_record(state, "contrecord is requested by %X/%X",
+ report_invalid_record(state, "contrecord is requested by %X/%08X",
LSN_FORMAT_ARGS(RecPtr));
goto err;
}
@@ -667,7 +667,7 @@ restart:
if (total_len < SizeOfXLogRecord)
{
report_invalid_record(state,
- "invalid record length at %X/%X: expected at least %u, got %u",
+ "invalid record length at %X/%08X: expected at least %u, got %u",
LSN_FORMAT_ARGS(RecPtr),
(uint32) SizeOfXLogRecord, total_len);
goto err;
@@ -723,11 +723,12 @@ restart:
/* Calculate pointer to beginning of next page */
targetPagePtr += XLOG_BLCKSZ;
- /* Wait for the next page to become available */
- readOff = ReadPageInternal(state, targetPagePtr,
- Min(total_len - gotlen + SizeOfXLogShortPHD,
- XLOG_BLCKSZ));
-
+ /*
+ * Read the page header before processing the record data, so we
+ * can handle the case where the previous record turned out to be a
+ * partial one.
+ */
+ readOff = ReadPageInternal(state, targetPagePtr, SizeOfXLogShortPHD);
if (readOff == XLREAD_WOULDBLOCK)
return XLREAD_WOULDBLOCK;
else if (readOff < 0)
@@ -756,7 +757,7 @@ restart:
if (!(pageHeader->xlp_info & XLP_FIRST_IS_CONTRECORD))
{
report_invalid_record(state,
- "there is no contrecord flag at %X/%X",
+ "there is no contrecord flag at %X/%08X",
LSN_FORMAT_ARGS(RecPtr));
goto err;
}
@@ -769,13 +770,22 @@ restart:
total_len != (pageHeader->xlp_rem_len + gotlen))
{
report_invalid_record(state,
- "invalid contrecord length %u (expected %lld) at %X/%X",
+ "invalid contrecord length %u (expected %lld) at %X/%08X",
pageHeader->xlp_rem_len,
((long long) total_len) - gotlen,
LSN_FORMAT_ARGS(RecPtr));
goto err;
}
+ /* Wait for the next page to become available */
+ readOff = ReadPageInternal(state, targetPagePtr,
+ Min(total_len - gotlen + SizeOfXLogShortPHD,
+ XLOG_BLCKSZ));
+ if (readOff == XLREAD_WOULDBLOCK)
+ return XLREAD_WOULDBLOCK;
+ else if (readOff < 0)
+ goto err;
+
/* Append the continuation from this page to the buffer */
pageHeaderSize = XLogPageHeaderSize(pageHeader);
@@ -1132,7 +1142,7 @@ ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr,
if (record->xl_tot_len < SizeOfXLogRecord)
{
report_invalid_record(state,
- "invalid record length at %X/%X: expected at least %u, got %u",
+ "invalid record length at %X/%08X: expected at least %u, got %u",
LSN_FORMAT_ARGS(RecPtr),
(uint32) SizeOfXLogRecord, record->xl_tot_len);
return false;
@@ -1140,7 +1150,7 @@ ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr,
if (!RmgrIdIsValid(record->xl_rmid))
{
report_invalid_record(state,
- "invalid resource manager ID %u at %X/%X",
+ "invalid resource manager ID %u at %X/%08X",
record->xl_rmid, LSN_FORMAT_ARGS(RecPtr));
return false;
}
@@ -1153,7 +1163,7 @@ ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr,
if (!(record->xl_prev < RecPtr))
{
report_invalid_record(state,
- "record with incorrect prev-link %X/%X at %X/%X",
+ "record with incorrect prev-link %X/%08X at %X/%08X",
LSN_FORMAT_ARGS(record->xl_prev),
LSN_FORMAT_ARGS(RecPtr));
return false;
@@ -1169,7 +1179,7 @@ ValidXLogRecordHeader(XLogReaderState *state, XLogRecPtr RecPtr,
if (record->xl_prev != PrevRecPtr)
{
report_invalid_record(state,
- "record with incorrect prev-link %X/%X at %X/%X",
+ "record with incorrect prev-link %X/%08X at %X/%08X",
LSN_FORMAT_ARGS(record->xl_prev),
LSN_FORMAT_ARGS(RecPtr));
return false;
@@ -1207,7 +1217,7 @@ ValidXLogRecord(XLogReaderState *state, XLogRecord *record, XLogRecPtr recptr)
if (!EQ_CRC32C(record->xl_crc, crc))
{
report_invalid_record(state,
- "incorrect resource manager data checksum in record at %X/%X",
+ "incorrect resource manager data checksum in record at %X/%08X",
LSN_FORMAT_ARGS(recptr));
return false;
}
@@ -1241,7 +1251,7 @@ XLogReaderValidatePageHeader(XLogReaderState *state, XLogRecPtr recptr,
XLogFileName(fname, state->seg.ws_tli, segno, state->segcxt.ws_segsize);
report_invalid_record(state,
- "invalid magic number %04X in WAL segment %s, LSN %X/%X, offset %u",
+ "invalid magic number %04X in WAL segment %s, LSN %X/%08X, offset %u",
hdr->xlp_magic,
fname,
LSN_FORMAT_ARGS(recptr),
@@ -1256,7 +1266,7 @@ XLogReaderValidatePageHeader(XLogReaderState *state, XLogRecPtr recptr,
XLogFileName(fname, state->seg.ws_tli, segno, state->segcxt.ws_segsize);
report_invalid_record(state,
- "invalid info bits %04X in WAL segment %s, LSN %X/%X, offset %u",
+ "invalid info bits %04X in WAL segment %s, LSN %X/%08X, offset %u",
hdr->xlp_info,
fname,
LSN_FORMAT_ARGS(recptr),
@@ -1298,7 +1308,7 @@ XLogReaderValidatePageHeader(XLogReaderState *state, XLogRecPtr recptr,
/* hmm, first page of file doesn't have a long header? */
report_invalid_record(state,
- "invalid info bits %04X in WAL segment %s, LSN %X/%X, offset %u",
+ "invalid info bits %04X in WAL segment %s, LSN %X/%08X, offset %u",
hdr->xlp_info,
fname,
LSN_FORMAT_ARGS(recptr),
@@ -1318,7 +1328,7 @@ XLogReaderValidatePageHeader(XLogReaderState *state, XLogRecPtr recptr,
XLogFileName(fname, state->seg.ws_tli, segno, state->segcxt.ws_segsize);
report_invalid_record(state,
- "unexpected pageaddr %X/%X in WAL segment %s, LSN %X/%X, offset %u",
+ "unexpected pageaddr %X/%08X in WAL segment %s, LSN %X/%08X, offset %u",
LSN_FORMAT_ARGS(hdr->xlp_pageaddr),
fname,
LSN_FORMAT_ARGS(recptr),
@@ -1344,7 +1354,7 @@ XLogReaderValidatePageHeader(XLogReaderState *state, XLogRecPtr recptr,
XLogFileName(fname, state->seg.ws_tli, segno, state->segcxt.ws_segsize);
report_invalid_record(state,
- "out-of-sequence timeline ID %u (after %u) in WAL segment %s, LSN %X/%X, offset %u",
+ "out-of-sequence timeline ID %u (after %u) in WAL segment %s, LSN %X/%08X, offset %u",
hdr->xlp_tli,
state->latestPageTLI,
fname,
@@ -1756,7 +1766,7 @@ DecodeXLogRecord(XLogReaderState *state,
if (block_id <= decoded->max_block_id)
{
report_invalid_record(state,
- "out-of-order block_id %u at %X/%X",
+ "out-of-order block_id %u at %X/%08X",
block_id,
LSN_FORMAT_ARGS(state->ReadRecPtr));
goto err;
@@ -1780,14 +1790,14 @@ DecodeXLogRecord(XLogReaderState *state,
if (blk->has_data && blk->data_len == 0)
{
report_invalid_record(state,
- "BKPBLOCK_HAS_DATA set, but no data included at %X/%X",
+ "BKPBLOCK_HAS_DATA set, but no data included at %X/%08X",
LSN_FORMAT_ARGS(state->ReadRecPtr));
goto err;
}
if (!blk->has_data && blk->data_len != 0)
{
report_invalid_record(state,
- "BKPBLOCK_HAS_DATA not set, but data length is %u at %X/%X",
+ "BKPBLOCK_HAS_DATA not set, but data length is %u at %X/%08X",
(unsigned int) blk->data_len,
LSN_FORMAT_ARGS(state->ReadRecPtr));
goto err;
@@ -1823,7 +1833,7 @@ DecodeXLogRecord(XLogReaderState *state,
blk->bimg_len == BLCKSZ))
{
report_invalid_record(state,
- "BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%X",
+ "BKPIMAGE_HAS_HOLE set, but hole offset %u length %u block image length %u at %X/%08X",
(unsigned int) blk->hole_offset,
(unsigned int) blk->hole_length,
(unsigned int) blk->bimg_len,
@@ -1839,7 +1849,7 @@ DecodeXLogRecord(XLogReaderState *state,
(blk->hole_offset != 0 || blk->hole_length != 0))
{
report_invalid_record(state,
- "BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%X",
+ "BKPIMAGE_HAS_HOLE not set, but hole offset %u length %u at %X/%08X",
(unsigned int) blk->hole_offset,
(unsigned int) blk->hole_length,
LSN_FORMAT_ARGS(state->ReadRecPtr));
@@ -1853,7 +1863,7 @@ DecodeXLogRecord(XLogReaderState *state,
blk->bimg_len == BLCKSZ)
{
report_invalid_record(state,
- "BKPIMAGE_COMPRESSED set, but block image length %u at %X/%X",
+ "BKPIMAGE_COMPRESSED set, but block image length %u at %X/%08X",
(unsigned int) blk->bimg_len,
LSN_FORMAT_ARGS(state->ReadRecPtr));
goto err;
@@ -1868,7 +1878,7 @@ DecodeXLogRecord(XLogReaderState *state,
blk->bimg_len != BLCKSZ)
{
report_invalid_record(state,
- "neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_COMPRESSED set, but block image length is %u at %X/%X",
+ "neither BKPIMAGE_HAS_HOLE nor BKPIMAGE_COMPRESSED set, but block image length is %u at %X/%08X",
(unsigned int) blk->data_len,
LSN_FORMAT_ARGS(state->ReadRecPtr));
goto err;
@@ -1884,7 +1894,7 @@ DecodeXLogRecord(XLogReaderState *state,
if (rlocator == NULL)
{
report_invalid_record(state,
- "BKPBLOCK_SAME_REL set but no previous rel at %X/%X",
+ "BKPBLOCK_SAME_REL set but no previous rel at %X/%08X",
LSN_FORMAT_ARGS(state->ReadRecPtr));
goto err;
}
@@ -1896,7 +1906,7 @@ DecodeXLogRecord(XLogReaderState *state,
else
{
report_invalid_record(state,
- "invalid block_id %u at %X/%X",
+ "invalid block_id %u at %X/%08X",
block_id, LSN_FORMAT_ARGS(state->ReadRecPtr));
goto err;
}
@@ -1963,7 +1973,7 @@ DecodeXLogRecord(XLogReaderState *state,
shortdata_err:
report_invalid_record(state,
- "record with invalid length at %X/%X",
+ "record with invalid length at %X/%08X",
LSN_FORMAT_ARGS(state->ReadRecPtr));
err:
*errormsg = state->errormsg_buf;
@@ -2073,14 +2083,14 @@ RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
!record->record->blocks[block_id].in_use)
{
report_invalid_record(record,
- "could not restore image at %X/%X with invalid block %d specified",
+ "could not restore image at %X/%08X with invalid block %d specified",
LSN_FORMAT_ARGS(record->ReadRecPtr),
block_id);
return false;
}
if (!record->record->blocks[block_id].has_image)
{
- report_invalid_record(record, "could not restore image at %X/%X with invalid state, block %d",
+ report_invalid_record(record, "could not restore image at %X/%08X with invalid state, block %d",
LSN_FORMAT_ARGS(record->ReadRecPtr),
block_id);
return false;
@@ -2107,7 +2117,7 @@ RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
bkpb->bimg_len, BLCKSZ - bkpb->hole_length) <= 0)
decomp_success = false;
#else
- report_invalid_record(record, "could not restore image at %X/%X compressed with %s not supported by build, block %d",
+ report_invalid_record(record, "could not restore image at %X/%08X compressed with %s not supported by build, block %d",
LSN_FORMAT_ARGS(record->ReadRecPtr),
"LZ4",
block_id);
@@ -2124,7 +2134,7 @@ RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
if (ZSTD_isError(decomp_result))
decomp_success = false;
#else
- report_invalid_record(record, "could not restore image at %X/%X compressed with %s not supported by build, block %d",
+ report_invalid_record(record, "could not restore image at %X/%08X compressed with %s not supported by build, block %d",
LSN_FORMAT_ARGS(record->ReadRecPtr),
"zstd",
block_id);
@@ -2133,7 +2143,7 @@ RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
}
else
{
- report_invalid_record(record, "could not restore image at %X/%X compressed with unknown method, block %d",
+ report_invalid_record(record, "could not restore image at %X/%08X compressed with unknown method, block %d",
LSN_FORMAT_ARGS(record->ReadRecPtr),
block_id);
return false;
@@ -2141,7 +2151,7 @@ RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
if (!decomp_success)
{
- report_invalid_record(record, "could not decompress image at %X/%X, block %d",
+ report_invalid_record(record, "could not decompress image at %X/%08X, block %d",
LSN_FORMAT_ARGS(record->ReadRecPtr),
block_id);
return false;
diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c
index 6ce979f2d8b..f23ec8969c2 100644
--- a/src/backend/access/transam/xlogrecovery.c
+++ b/src/backend/access/transam/xlogrecovery.c
@@ -620,10 +620,10 @@ InitWalRecovery(ControlFileData *ControlFile, bool *wasShutdown_ptr,
* than ControlFile->checkPoint is used.
*/
ereport(LOG,
- (errmsg("starting backup recovery with redo LSN %X/%X, checkpoint LSN %X/%X, on timeline ID %u",
- LSN_FORMAT_ARGS(RedoStartLSN),
- LSN_FORMAT_ARGS(CheckPointLoc),
- CheckPointTLI)));
+ errmsg("starting backup recovery with redo LSN %X/%08X, checkpoint LSN %X/%08X, on timeline ID %u",
+ LSN_FORMAT_ARGS(RedoStartLSN),
+ LSN_FORMAT_ARGS(CheckPointLoc),
+ CheckPointTLI));
/*
* When a backup_label file is present, we want to roll forward from
@@ -636,8 +636,8 @@ InitWalRecovery(ControlFileData *ControlFile, bool *wasShutdown_ptr,
memcpy(&checkPoint, XLogRecGetData(xlogreader), sizeof(CheckPoint));
wasShutdown = ((record->xl_info & ~XLR_INFO_MASK) == XLOG_CHECKPOINT_SHUTDOWN);
ereport(DEBUG1,
- (errmsg_internal("checkpoint record is at %X/%X",
- LSN_FORMAT_ARGS(CheckPointLoc))));
+ errmsg_internal("checkpoint record is at %X/%08X",
+ LSN_FORMAT_ARGS(CheckPointLoc)));
InRecovery = true; /* force recovery even if SHUTDOWNED */
/*
@@ -652,23 +652,23 @@ InitWalRecovery(ControlFileData *ControlFile, bool *wasShutdown_ptr,
if (!ReadRecord(xlogprefetcher, LOG, false,
checkPoint.ThisTimeLineID))
ereport(FATAL,
- (errmsg("could not find redo location %X/%X referenced by checkpoint record at %X/%X",
- LSN_FORMAT_ARGS(checkPoint.redo), LSN_FORMAT_ARGS(CheckPointLoc)),
- errhint("If you are restoring from a backup, touch \"%s/recovery.signal\" or \"%s/standby.signal\" and add required recovery options.\n"
- "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
- "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
- DataDir, DataDir, DataDir, DataDir)));
+ errmsg("could not find redo location %X/%08X referenced by checkpoint record at %X/%08X",
+ LSN_FORMAT_ARGS(checkPoint.redo), LSN_FORMAT_ARGS(CheckPointLoc)),
+ errhint("If you are restoring from a backup, touch \"%s/recovery.signal\" or \"%s/standby.signal\" and add required recovery options.\n"
+ "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
+ "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
+ DataDir, DataDir, DataDir, DataDir));
}
}
else
{
ereport(FATAL,
- (errmsg("could not locate required checkpoint record at %X/%X",
- LSN_FORMAT_ARGS(CheckPointLoc)),
- errhint("If you are restoring from a backup, touch \"%s/recovery.signal\" or \"%s/standby.signal\" and add required recovery options.\n"
- "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
- "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
- DataDir, DataDir, DataDir, DataDir)));
+ errmsg("could not locate required checkpoint record at %X/%08X",
+ LSN_FORMAT_ARGS(CheckPointLoc)),
+ errhint("If you are restoring from a backup, touch \"%s/recovery.signal\" or \"%s/standby.signal\" and add required recovery options.\n"
+ "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
+ "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
+ DataDir, DataDir, DataDir, DataDir));
wasShutdown = false; /* keep compiler quiet */
}
@@ -773,8 +773,8 @@ InitWalRecovery(ControlFileData *ControlFile, bool *wasShutdown_ptr,
*/
if (!XLogRecPtrIsInvalid(ControlFile->backupStartPoint))
ereport(LOG,
- (errmsg("restarting backup recovery with redo LSN %X/%X",
- LSN_FORMAT_ARGS(ControlFile->backupStartPoint))));
+ errmsg("restarting backup recovery with redo LSN %X/%08X",
+ LSN_FORMAT_ARGS(ControlFile->backupStartPoint)));
/* Get the last valid checkpoint record. */
CheckPointLoc = ControlFile->checkPoint;
@@ -786,8 +786,8 @@ InitWalRecovery(ControlFileData *ControlFile, bool *wasShutdown_ptr,
if (record != NULL)
{
ereport(DEBUG1,
- (errmsg_internal("checkpoint record is at %X/%X",
- LSN_FORMAT_ARGS(CheckPointLoc))));
+ errmsg_internal("checkpoint record is at %X/%08X",
+ LSN_FORMAT_ARGS(CheckPointLoc)));
}
else
{
@@ -798,8 +798,8 @@ InitWalRecovery(ControlFileData *ControlFile, bool *wasShutdown_ptr,
* simplify processing around checkpoints.
*/
ereport(PANIC,
- (errmsg("could not locate a valid checkpoint record at %X/%X",
- LSN_FORMAT_ARGS(CheckPointLoc))));
+ errmsg("could not locate a valid checkpoint record at %X/%08X",
+ LSN_FORMAT_ARGS(CheckPointLoc)));
}
memcpy(&checkPoint, XLogRecGetData(xlogreader), sizeof(CheckPoint));
wasShutdown = ((record->xl_info & ~XLR_INFO_MASK) == XLOG_CHECKPOINT_SHUTDOWN);
@@ -824,8 +824,8 @@ InitWalRecovery(ControlFileData *ControlFile, bool *wasShutdown_ptr,
recoveryTargetName)));
else if (recoveryTarget == RECOVERY_TARGET_LSN)
ereport(LOG,
- (errmsg("starting point-in-time recovery to WAL location (LSN) \"%X/%X\"",
- LSN_FORMAT_ARGS(recoveryTargetLSN))));
+ errmsg("starting point-in-time recovery to WAL location (LSN) \"%X/%08X\"",
+ LSN_FORMAT_ARGS(recoveryTargetLSN)));
else if (recoveryTarget == RECOVERY_TARGET_IMMEDIATE)
ereport(LOG,
(errmsg("starting point-in-time recovery to earliest consistent point")));
@@ -855,7 +855,7 @@ InitWalRecovery(ControlFileData *ControlFile, bool *wasShutdown_ptr,
(errmsg("requested timeline %u is not a child of this server's history",
recoveryTargetTLI),
/* translator: %s is a backup_label file or a pg_control file */
- errdetail("Latest checkpoint in file \"%s\" is at %X/%X on timeline %u, but in the history of the requested timeline, the server forked off from that timeline at %X/%X.",
+ errdetail("Latest checkpoint in file \"%s\" is at %X/%08X on timeline %u, but in the history of the requested timeline, the server forked off from that timeline at %X/%08X.",
haveBackupLabel ? "backup_label" : "pg_control",
LSN_FORMAT_ARGS(CheckPointLoc),
CheckPointTLI,
@@ -870,15 +870,15 @@ InitWalRecovery(ControlFileData *ControlFile, bool *wasShutdown_ptr,
tliOfPointInHistory(ControlFile->minRecoveryPoint - 1, expectedTLEs) !=
ControlFile->minRecoveryPointTLI)
ereport(FATAL,
- (errmsg("requested timeline %u does not contain minimum recovery point %X/%X on timeline %u",
- recoveryTargetTLI,
- LSN_FORMAT_ARGS(ControlFile->minRecoveryPoint),
- ControlFile->minRecoveryPointTLI)));
+ errmsg("requested timeline %u does not contain minimum recovery point %X/%08X on timeline %u",
+ recoveryTargetTLI,
+ LSN_FORMAT_ARGS(ControlFile->minRecoveryPoint),
+ ControlFile->minRecoveryPointTLI));
ereport(DEBUG1,
- (errmsg_internal("redo record is at %X/%X; shutdown %s",
- LSN_FORMAT_ARGS(checkPoint.redo),
- wasShutdown ? "true" : "false")));
+ errmsg_internal("redo record is at %X/%08X; shutdown %s",
+ LSN_FORMAT_ARGS(checkPoint.redo),
+ wasShutdown ? "true" : "false"));
ereport(DEBUG1,
(errmsg_internal("next transaction ID: " UINT64_FORMAT "; next OID: %u",
U64FromFullTransactionId(checkPoint.nextXid),
@@ -1253,14 +1253,14 @@ read_backup_label(XLogRecPtr *checkPointLoc, TimeLineID *backupLabelTLI,
* is pretty crude, but we are not expecting any variability in the file
* format).
*/
- if (fscanf(lfp, "START WAL LOCATION: %X/%X (file %08X%16s)%c",
+ if (fscanf(lfp, "START WAL LOCATION: %X/%08X (file %08X%16s)%c",
&hi, &lo, &tli_from_walseg, startxlogfilename, &ch) != 5 || ch != '\n')
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE)));
RedoStartLSN = ((uint64) hi) << 32 | lo;
RedoStartTLI = tli_from_walseg;
- if (fscanf(lfp, "CHECKPOINT LOCATION: %X/%X%c",
+ if (fscanf(lfp, "CHECKPOINT LOCATION: %X/%08X%c",
&hi, &lo, &ch) != 3 || ch != '\n')
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
@@ -1332,7 +1332,7 @@ read_backup_label(XLogRecPtr *checkPointLoc, TimeLineID *backupLabelTLI,
tli_from_file, BACKUP_LABEL_FILE)));
}
- if (fscanf(lfp, "INCREMENTAL FROM LSN: %X/%X\n", &hi, &lo) > 0)
+ if (fscanf(lfp, "INCREMENTAL FROM LSN: %X/%08X\n", &hi, &lo) > 0)
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("this is an incremental backup, not a data directory"),
@@ -1626,6 +1626,7 @@ ShutdownWalRecovery(void)
close(readFile);
readFile = -1;
}
+ pfree(xlogreader->private_data);
XLogReaderFree(xlogreader);
XLogPrefetcherFree(xlogprefetcher);
@@ -1722,8 +1723,8 @@ PerformWalRecovery(void)
if (record->xl_rmid != RM_XLOG_ID ||
(record->xl_info & ~XLR_INFO_MASK) != XLOG_CHECKPOINT_REDO)
ereport(FATAL,
- (errmsg("unexpected record type found at redo point %X/%X",
- LSN_FORMAT_ARGS(xlogreader->ReadRecPtr))));
+ errmsg("unexpected record type found at redo point %X/%08X",
+ LSN_FORMAT_ARGS(xlogreader->ReadRecPtr)));
}
else
{
@@ -1745,8 +1746,8 @@ PerformWalRecovery(void)
RmgrStartup();
ereport(LOG,
- (errmsg("redo starts at %X/%X",
- LSN_FORMAT_ARGS(xlogreader->ReadRecPtr))));
+ errmsg("redo starts at %X/%08X",
+ LSN_FORMAT_ARGS(xlogreader->ReadRecPtr)));
/* Prepare to report progress of the redo phase. */
if (!StandbyMode)
@@ -1758,7 +1759,7 @@ PerformWalRecovery(void)
do
{
if (!StandbyMode)
- ereport_startup_progress("redo in progress, elapsed time: %ld.%02d s, current LSN: %X/%X",
+ ereport_startup_progress("redo in progress, elapsed time: %ld.%02d s, current LSN: %X/%08X",
LSN_FORMAT_ARGS(xlogreader->ReadRecPtr));
#ifdef WAL_DEBUG
@@ -1767,7 +1768,7 @@ PerformWalRecovery(void)
StringInfoData buf;
initStringInfo(&buf);
- appendStringInfo(&buf, "REDO @ %X/%X; LSN %X/%X: ",
+ appendStringInfo(&buf, "REDO @ %X/%08X; LSN %X/%08X: ",
LSN_FORMAT_ARGS(xlogreader->ReadRecPtr),
LSN_FORMAT_ARGS(xlogreader->EndRecPtr));
xlog_outrec(&buf, xlogreader);
@@ -1880,9 +1881,9 @@ PerformWalRecovery(void)
RmgrCleanup();
ereport(LOG,
- (errmsg("redo done at %X/%X system usage: %s",
- LSN_FORMAT_ARGS(xlogreader->ReadRecPtr),
- pg_rusage_show(&ru0))));
+ errmsg("redo done at %X/%08X system usage: %s",
+ LSN_FORMAT_ARGS(xlogreader->ReadRecPtr),
+ pg_rusage_show(&ru0)));
xtime = GetLatestXTime();
if (xtime)
ereport(LOG,
@@ -2092,7 +2093,7 @@ xlogrecovery_redo(XLogReaderState *record, TimeLineID replayTLI)
memcpy(&xlrec, XLogRecGetData(record), sizeof(xl_overwrite_contrecord));
if (xlrec.overwritten_lsn != record->overwrittenRecPtr)
- elog(FATAL, "mismatching overwritten LSN %X/%X -> %X/%X",
+ elog(FATAL, "mismatching overwritten LSN %X/%08X -> %X/%08X",
LSN_FORMAT_ARGS(xlrec.overwritten_lsn),
LSN_FORMAT_ARGS(record->overwrittenRecPtr));
@@ -2101,9 +2102,9 @@ xlogrecovery_redo(XLogReaderState *record, TimeLineID replayTLI)
missingContrecPtr = InvalidXLogRecPtr;
ereport(LOG,
- (errmsg("successfully skipped missing contrecord at %X/%X, overwritten at %s",
- LSN_FORMAT_ARGS(xlrec.overwritten_lsn),
- timestamptz_to_str(xlrec.overwrite_time))));
+ errmsg("successfully skipped missing contrecord at %X/%08X, overwritten at %s",
+ LSN_FORMAT_ARGS(xlrec.overwritten_lsn),
+ timestamptz_to_str(xlrec.overwrite_time)));
/* Verifying the record should only happen once */
record->overwrittenRecPtr = InvalidXLogRecPtr;
@@ -2129,7 +2130,7 @@ xlogrecovery_redo(XLogReaderState *record, TimeLineID replayTLI)
backupEndPoint = lsn;
}
else
- elog(DEBUG1, "saw end-of-backup record for backup starting at %X/%X, waiting for %X/%X",
+ elog(DEBUG1, "saw end-of-backup record for backup starting at %X/%08X, waiting for %X/%08X",
LSN_FORMAT_ARGS(startpoint), LSN_FORMAT_ARGS(backupStartPoint));
}
}
@@ -2224,9 +2225,9 @@ CheckRecoveryConsistency(void)
backupEndRequired = false;
ereport(LOG,
- (errmsg("completed backup recovery with redo LSN %X/%X and end LSN %X/%X",
- LSN_FORMAT_ARGS(saveBackupStartPoint),
- LSN_FORMAT_ARGS(saveBackupEndPoint))));
+ errmsg("completed backup recovery with redo LSN %X/%08X and end LSN %X/%08X",
+ LSN_FORMAT_ARGS(saveBackupStartPoint),
+ LSN_FORMAT_ARGS(saveBackupEndPoint)));
}
/*
@@ -2255,8 +2256,8 @@ CheckRecoveryConsistency(void)
reachedConsistency = true;
SendPostmasterSignal(PMSIGNAL_RECOVERY_CONSISTENT);
ereport(LOG,
- (errmsg("consistent recovery state reached at %X/%X",
- LSN_FORMAT_ARGS(lastReplayedEndRecPtr))));
+ errmsg("consistent recovery state reached at %X/%08X",
+ LSN_FORMAT_ARGS(lastReplayedEndRecPtr)));
}
/*
@@ -2293,7 +2294,7 @@ rm_redo_error_callback(void *arg)
xlog_block_info(&buf, record);
/* translator: %s is a WAL record description */
- errcontext("WAL redo at %X/%X for %s",
+ errcontext("WAL redo at %X/%08X for %s",
LSN_FORMAT_ARGS(record->ReadRecPtr),
buf.data);
@@ -2328,7 +2329,7 @@ xlog_outdesc(StringInfo buf, XLogReaderState *record)
static void
xlog_outrec(StringInfo buf, XLogReaderState *record)
{
- appendStringInfo(buf, "prev %X/%X; xid %u",
+ appendStringInfo(buf, "prev %X/%08X; xid %u",
LSN_FORMAT_ARGS(XLogRecGetPrev(record)),
XLogRecGetXid(record));
@@ -2416,10 +2417,10 @@ checkTimeLineSwitch(XLogRecPtr lsn, TimeLineID newTLI, TimeLineID prevTLI,
lsn < minRecoveryPoint &&
newTLI > minRecoveryPointTLI)
ereport(PANIC,
- (errmsg("unexpected timeline ID %u in checkpoint record, before reaching minimum recovery point %X/%X on timeline %u",
- newTLI,
- LSN_FORMAT_ARGS(minRecoveryPoint),
- minRecoveryPointTLI)));
+ errmsg("unexpected timeline ID %u in checkpoint record, before reaching minimum recovery point %X/%08X on timeline %u",
+ newTLI,
+ LSN_FORMAT_ARGS(minRecoveryPoint),
+ minRecoveryPointTLI));
/* Looks good */
}
@@ -2621,8 +2622,8 @@ recoveryStopsBefore(XLogReaderState *record)
recoveryStopTime = 0;
recoveryStopName[0] = '\0';
ereport(LOG,
- (errmsg("recovery stopping before WAL location (LSN) \"%X/%X\"",
- LSN_FORMAT_ARGS(recoveryStopLSN))));
+ errmsg("recovery stopping before WAL location (LSN) \"%X/%08X\"",
+ LSN_FORMAT_ARGS(recoveryStopLSN)));
return true;
}
@@ -2789,8 +2790,8 @@ recoveryStopsAfter(XLogReaderState *record)
recoveryStopTime = 0;
recoveryStopName[0] = '\0';
ereport(LOG,
- (errmsg("recovery stopping after WAL location (LSN) \"%X/%X\"",
- LSN_FORMAT_ARGS(recoveryStopLSN))));
+ errmsg("recovery stopping after WAL location (LSN) \"%X/%08X\"",
+ LSN_FORMAT_ARGS(recoveryStopLSN)));
return true;
}
@@ -2910,7 +2911,7 @@ getRecoveryStopReason(void)
timestamptz_to_str(recoveryStopTime));
else if (recoveryTarget == RECOVERY_TARGET_LSN)
snprintf(reason, sizeof(reason),
- "%s LSN %X/%X\n",
+ "%s LSN %X/%08X\n",
recoveryStopAfter ? "after" : "before",
LSN_FORMAT_ARGS(recoveryStopLSN));
else if (recoveryTarget == RECOVERY_TARGET_NAME)
@@ -3213,11 +3214,11 @@ ReadRecord(XLogPrefetcher *xlogprefetcher, int emode,
XLogFileName(fname, xlogreader->seg.ws_tli, segno,
wal_segment_size);
ereport(emode_for_corrupt_record(emode, xlogreader->EndRecPtr),
- (errmsg("unexpected timeline ID %u in WAL segment %s, LSN %X/%X, offset %u",
- xlogreader->latestPageTLI,
- fname,
- LSN_FORMAT_ARGS(xlogreader->latestPagePtr),
- offset)));
+ errmsg("unexpected timeline ID %u in WAL segment %s, LSN %X/%08X, offset %u",
+ xlogreader->latestPageTLI,
+ fname,
+ LSN_FORMAT_ARGS(xlogreader->latestPagePtr),
+ offset));
record = NULL;
}
@@ -3429,14 +3430,14 @@ retry:
errno = save_errno;
ereport(emode_for_corrupt_record(emode, targetPagePtr + reqLen),
(errcode_for_file_access(),
- errmsg("could not read from WAL segment %s, LSN %X/%X, offset %u: %m",
+ errmsg("could not read from WAL segment %s, LSN %X/%08X, offset %u: %m",
fname, LSN_FORMAT_ARGS(targetPagePtr),
readOff)));
}
else
ereport(emode_for_corrupt_record(emode, targetPagePtr + reqLen),
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("could not read from WAL segment %s, LSN %X/%X, offset %u: read %d of %zu",
+ errmsg("could not read from WAL segment %s, LSN %X/%08X, offset %u: read %d of %zu",
fname, LSN_FORMAT_ARGS(targetPagePtr),
readOff, r, (Size) XLOG_BLCKSZ)));
goto next_record_is_invalid;
@@ -3718,7 +3719,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
wait_time = wal_retrieve_retry_interval -
TimestampDifferenceMilliseconds(last_fail_time, now);
- elog(LOG, "waiting for WAL to become available at %X/%X",
+ elog(LOG, "waiting for WAL to become available at %X/%08X",
LSN_FORMAT_ARGS(RecPtr));
/* Do background tasks that might benefit us later. */
@@ -3864,7 +3865,7 @@ WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
tli = tliOfPointInHistory(tliRecPtr, expectedTLEs);
if (curFileTLI > 0 && tli < curFileTLI)
- elog(ERROR, "according to history file, WAL location %X/%X belongs to timeline %u, but previous recovered WAL file came from timeline %u",
+ elog(ERROR, "according to history file, WAL location %X/%08X belongs to timeline %u, but previous recovered WAL file came from timeline %u",
LSN_FORMAT_ARGS(tliRecPtr),
tli, curFileTLI);
}
@@ -4177,10 +4178,10 @@ rescanLatestTimeLine(TimeLineID replayTLI, XLogRecPtr replayLSN)
if (currentTle->end < replayLSN)
{
ereport(LOG,
- (errmsg("new timeline %u forked off current database system timeline %u before current recovery point %X/%X",
- newtarget,
- replayTLI,
- LSN_FORMAT_ARGS(replayLSN))));
+ errmsg("new timeline %u forked off current database system timeline %u before current recovery point %X/%08X",
+ newtarget,
+ replayTLI,
+ LSN_FORMAT_ARGS(replayLSN)));
return false;
}
@@ -4760,7 +4761,7 @@ bool
check_primary_slot_name(char **newval, void **extra, GucSource source)
{
if (*newval && strcmp(*newval, "") != 0 &&
- !ReplicationSlotValidateName(*newval, WARNING))
+ !ReplicationSlotValidateName(*newval, false, WARNING))
return false;
return true;
@@ -4994,13 +4995,25 @@ check_recovery_target_timeline(char **newval, void **extra, GucSource source)
rttg = RECOVERY_TARGET_TIMELINE_LATEST;
else
{
+ char *endp;
+ uint64 timeline;
+
rttg = RECOVERY_TARGET_TIMELINE_NUMERIC;
errno = 0;
- strtoul(*newval, NULL, 0);
- if (errno == EINVAL || errno == ERANGE)
+ timeline = strtou64(*newval, &endp, 0);
+
+ if (*endp != '\0' || errno == EINVAL || errno == ERANGE)
+ {
+ GUC_check_errdetail("\"%s\" is not a valid number.",
+ "recovery_target_timeline");
+ return false;
+ }
+
+ if (timeline < 1 || timeline > PG_UINT32_MAX)
{
- GUC_check_errdetail("\"recovery_target_timeline\" is not a valid number.");
+ GUC_check_errdetail("\"%s\" must be between %u and %u.",
+ "recovery_target_timeline", 1, UINT_MAX);
return false;
}
}
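
The rewritten check now rejects trailing characters via endp and range-checks the value explicitly, instead of relying on strtoul()'s errno alone. A standalone sketch of the same validation, assuming strtou64() behaves like C's strtoull():

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Returns true if str is a valid numeric timeline ID (1..UINT32_MAX). */
bool
timeline_is_valid(const char *str)
{
	char	   *endp;
	unsigned long long timeline;

	errno = 0;
	timeline = strtoull(str, &endp, 0);

	if (endp == str || *endp != '\0')
		return false;			/* empty string or trailing junk */
	if (errno == ERANGE)
		return false;			/* out of range for the type */
	return timeline >= 1 && timeline <= UINT32_MAX;
}

So "2" passes, while "2abc", "0", and "99999999999" are all rejected, matching the two new GUC_check_errdetail messages.
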
diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c
index c389b27f77d..27ea52fdfee 100644
--- a/src/backend/access/transam/xlogutils.c
+++ b/src/backend/access/transam/xlogutils.c
@@ -795,7 +795,7 @@ XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage,
list_free_deep(timelineHistory);
- elog(DEBUG3, "switched to timeline %u valid until %X/%X",
+ elog(DEBUG3, "switched to timeline %u valid until %X/%08X",
state->currTLI,
LSN_FORMAT_ARGS(state->currTLIValidUntil));
}
diff --git a/src/backend/backup/backup_manifest.c b/src/backend/backup/backup_manifest.c
index 22e2be37c95..d05252f383c 100644
--- a/src/backend/backup/backup_manifest.c
+++ b/src/backend/backup/backup_manifest.c
@@ -281,7 +281,7 @@ AddWALInfoToBackupManifest(backup_manifest_info *manifest, XLogRecPtr startptr,
}
AppendToManifest(manifest,
- "%s{ \"Timeline\": %u, \"Start-LSN\": \"%X/%X\", \"End-LSN\": \"%X/%X\" }",
+ "%s{ \"Timeline\": %u, \"Start-LSN\": \"%X/%08X\", \"End-LSN\": \"%X/%08X\" }",
first_wal_range ? "" : ",\n",
entry->tli,
LSN_FORMAT_ARGS(tl_beginptr),
diff --git a/src/backend/backup/basebackup_copy.c b/src/backend/backup/basebackup_copy.c
index a284ce318ff..18b0b5a52d3 100644
--- a/src/backend/backup/basebackup_copy.c
+++ b/src/backend/backup/basebackup_copy.c
@@ -361,7 +361,7 @@ SendXlogRecPtrResult(XLogRecPtr ptr, TimeLineID tli)
tstate = begin_tup_output_tupdesc(dest, tupdesc, &TTSOpsVirtual);
/* Data row */
- values[0] = CStringGetTextDatum(psprintf("%X/%X", LSN_FORMAT_ARGS(ptr)));
+ values[0] = CStringGetTextDatum(psprintf("%X/%08X", LSN_FORMAT_ARGS(ptr)));
values[1] = Int64GetDatum(tli);
do_tup_output(tstate, values, nulls);
diff --git a/src/backend/backup/basebackup_incremental.c b/src/backend/backup/basebackup_incremental.c
index 28491b1e0ab..a0d48ff0fef 100644
--- a/src/backend/backup/basebackup_incremental.c
+++ b/src/backend/backup/basebackup_incremental.c
@@ -409,7 +409,7 @@ PrepareForIncrementalBackup(IncrementalBackupInfo *ib,
if (range->start_lsn < tlep[i]->begin)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("manifest requires WAL from initial timeline %u starting at %X/%X, but that timeline begins at %X/%X",
+ errmsg("manifest requires WAL from initial timeline %u starting at %X/%08X, but that timeline begins at %X/%08X",
range->tli,
LSN_FORMAT_ARGS(range->start_lsn),
LSN_FORMAT_ARGS(tlep[i]->begin))));
@@ -419,7 +419,7 @@ PrepareForIncrementalBackup(IncrementalBackupInfo *ib,
if (range->start_lsn != tlep[i]->begin)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("manifest requires WAL from continuation timeline %u starting at %X/%X, but that timeline begins at %X/%X",
+ errmsg("manifest requires WAL from continuation timeline %u starting at %X/%08X, but that timeline begins at %X/%08X",
range->tli,
LSN_FORMAT_ARGS(range->start_lsn),
LSN_FORMAT_ARGS(tlep[i]->begin))));
@@ -430,7 +430,7 @@ PrepareForIncrementalBackup(IncrementalBackupInfo *ib,
if (range->end_lsn > backup_state->startpoint)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("manifest requires WAL from final timeline %u ending at %X/%X, but this backup starts at %X/%X",
+ errmsg("manifest requires WAL from final timeline %u ending at %X/%08X, but this backup starts at %X/%08X",
range->tli,
LSN_FORMAT_ARGS(range->end_lsn),
LSN_FORMAT_ARGS(backup_state->startpoint)),
@@ -441,7 +441,7 @@ PrepareForIncrementalBackup(IncrementalBackupInfo *ib,
if (range->end_lsn != tlep[i]->end)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("manifest requires WAL from non-final timeline %u ending at %X/%X, but this server switched timelines at %X/%X",
+ errmsg("manifest requires WAL from non-final timeline %u ending at %X/%08X, but this server switched timelines at %X/%08X",
range->tli,
LSN_FORMAT_ARGS(range->end_lsn),
LSN_FORMAT_ARGS(tlep[i]->end))));
@@ -522,18 +522,18 @@ PrepareForIncrementalBackup(IncrementalBackupInfo *ib,
if (XLogRecPtrIsInvalid(tli_missing_lsn))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("WAL summaries are required on timeline %u from %X/%X to %X/%X, but no summaries for that timeline and LSN range exist",
+ errmsg("WAL summaries are required on timeline %u from %X/%08X to %X/%08X, but no summaries for that timeline and LSN range exist",
tle->tli,
LSN_FORMAT_ARGS(tli_start_lsn),
LSN_FORMAT_ARGS(tli_end_lsn))));
else
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("WAL summaries are required on timeline %u from %X/%X to %X/%X, but the summaries for that timeline and LSN range are incomplete",
+ errmsg("WAL summaries are required on timeline %u from %X/%08X to %X/%08X, but the summaries for that timeline and LSN range are incomplete",
tle->tli,
LSN_FORMAT_ARGS(tli_start_lsn),
LSN_FORMAT_ARGS(tli_end_lsn)),
- errdetail("The first unsummarized LSN in this range is %X/%X.",
+ errdetail("The first unsummarized LSN in this range is %X/%08X.",
LSN_FORMAT_ARGS(tli_missing_lsn))));
}
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index 6db864892d0..fc8638c1b61 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -109,6 +109,8 @@ static const struct typinfo TypInfo[] = {
F_REGROLEIN, F_REGROLEOUT},
{"regnamespace", REGNAMESPACEOID, 0, 4, true, TYPALIGN_INT, TYPSTORAGE_PLAIN, InvalidOid,
F_REGNAMESPACEIN, F_REGNAMESPACEOUT},
+ {"regdatabase", REGDATABASEOID, 0, 4, true, TYPALIGN_INT, TYPSTORAGE_PLAIN, InvalidOid,
+ F_REGDATABASEIN, F_REGDATABASEOUT},
{"text", TEXTOID, 0, -1, false, TYPALIGN_INT, TYPSTORAGE_EXTENDED, DEFAULT_COLLATION_OID,
F_TEXTIN, F_TEXTOUT},
{"oid", OIDOID, 0, 4, true, TYPALIGN_INT, TYPSTORAGE_PLAIN, InvalidOid,
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index 18316a3968b..7dded634eb8 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -1850,6 +1850,17 @@ find_expr_references_walker(Node *node,
errmsg("constant of the type %s cannot be used here",
"regrole")));
break;
+
+ /*
+ * Dependencies for regdatabase would have to be shared among all
+ * databases, so explicitly inhibit creating them.
+ */
+ case REGDATABASEOID:
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("constant of the type %s cannot be used here",
+ "regdatabase")));
+ break;
}
}
return false;
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index fbaed5359ad..fd6537567ea 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -665,6 +665,15 @@ CheckAttributeType(const char *attname,
}
/*
+ * For consistency with check_virtual_generated_security().
+ */
+ if ((flags & CHKATYPE_IS_VIRTUAL) && atttypid >= FirstUnpinnedObjectId)
+ ereport(ERROR,
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("virtual generated column \"%s\" cannot have a user-defined type", attname),
+ errdetail("Virtual generated columns that make use of user-defined types are not yet supported."));
+
+ /*
* This might not be strictly invalid per SQL standard, but it is pretty
* useless, and it cannot be dumped, so we must disallow it.
*/
@@ -1100,6 +1109,7 @@ AddNewRelationType(const char *typeName,
* if false, relacl is always set NULL
* allow_system_table_mods: true to allow creation in system namespaces
* is_internal: is this a system-generated catalog?
+ * relrewrite: link to original relation during a table rewrite
*
* Output parameters:
* typaddress: if not null, gets the object address of the new pg_type entry
@@ -2996,7 +3006,7 @@ AddRelationNotNullConstraints(Relation rel, List *constraints,
if (constr->is_no_inherit)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot define not-null constraint on column \"%s\" with NO INHERIT",
+ errmsg("cannot define not-null constraint with NO INHERIT on column \"%s\"",
strVal(linitial(constr->keys))),
errdetail("The column has an inherited not-null constraint.")));
@@ -3215,6 +3225,86 @@ check_nested_generated(ParseState *pstate, Node *node)
}
/*
+ * Check security of virtual generated column expression.
+ *
+ * Just like selecting from a view is exploitable (CVE-2024-7348), selecting
+ * from a table with virtual generated columns is exploitable. Users who are
+ * concerned about this can avoid selecting from views, but telling them to
+ * avoid selecting from tables is less practical.
+ *
+ * To address this, generation expressions of virtual generated columns are
+ * restricted to using built-in functions and types. We assume that built-in
+ * functions and types cannot be exploited for this purpose. Note that the
+ * overall security also requires that all functions in use are immutable.
+ * (For example, there are some built-in non-immutable functions
+ * that can run arbitrary SQL.) The immutability is checked elsewhere, since
+ * that is a property that needs to hold independent of security
+ * considerations.
+ *
+ * In the future, this could be expanded by some new mechanism to declare
+ * other functions and types as safe or trusted for this purpose, but that is
+ * to be designed.
+ */
+
+/*
+ * Callback for check_functions_in_node() that determines whether a function
+ * is user-defined.
+ */
+static bool
+contains_user_functions_checker(Oid func_id, void *context)
+{
+ return (func_id >= FirstUnpinnedObjectId);
+}
+
+/*
+ * Checks for all the things we don't want in the generation expressions of
+ * virtual generated columns for security reasons. Errors out if it finds
+ * one.
+ */
+static bool
+check_virtual_generated_security_walker(Node *node, void *context)
+{
+ ParseState *pstate = context;
+
+ if (node == NULL)
+ return false;
+
+ if (!IsA(node, List))
+ {
+ if (check_functions_in_node(node, contains_user_functions_checker, NULL))
+ ereport(ERROR,
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("generation expression uses user-defined function"),
+ errdetail("Virtual generated columns that make use of user-defined functions are not yet supported."),
+ parser_errposition(pstate, exprLocation(node)));
+
+ /*
+ * check_functions_in_node() doesn't check some node types (see
+ * comment there). We handle CoerceToDomain and MinMaxExpr by
+ * checking for built-in types. The other listed node types cannot
+ * call user-definable SQL-visible functions.
+ *
+ * We furthermore need this type check to handle built-in, immutable
+ * polymorphic functions such as array_eq().
+ */
+ if (exprType(node) >= FirstUnpinnedObjectId)
+ ereport(ERROR,
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("generation expression uses user-defined type"),
+ errdetail("Virtual generated columns that make use of user-defined types are not yet supported."),
+ parser_errposition(pstate, exprLocation(node)));
+ }
+
+ return expression_tree_walker(node, check_virtual_generated_security_walker, context);
+}
+
+static void
+check_virtual_generated_security(ParseState *pstate, Node *node)
+{
+ check_virtual_generated_security_walker(node, pstate);
+}
+
+/*
* Take a raw default and convert it to a cooked format ready for
* storage.
*
@@ -3253,6 +3343,10 @@ cookDefault(ParseState *pstate,
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("generation expression is not immutable")));
+
+ /* Check security of expressions for virtual generated column */
+ if (attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
+ check_virtual_generated_security(pstate, expr);
}
else
{
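The two functions above follow PostgreSQL's standard walker pattern: inspect the node at hand, then let expression_tree_walker() recurse into the children, with a "true" return aborting the walk. A toy standalone version of the same shape; the node layout and the 16384 boundary are illustrative stand-ins, not the real Node tree or FirstUnpinnedObjectId:

#include <stdbool.h>
#include <stdio.h>

/* Toy expression node; PostgreSQL's Node trees are far richer. */
typedef struct Expr
{
	unsigned int func_id;		/* stand-in for a function OID */
	struct Expr *left;
	struct Expr *right;
} Expr;

#define FIRST_UNPINNED_ID 16384	/* stand-in for FirstUnpinnedObjectId */

/*
 * Same shape as check_virtual_generated_security_walker(): check the
 * current node, then recurse; returning true reports a problem.
 */
static bool
contains_user_function(const Expr *node)
{
	if (node == NULL)
		return false;
	if (node->func_id >= FIRST_UNPINNED_ID)
		return true;
	return contains_user_function(node->left) ||
		contains_user_function(node->right);
}

int
main(void)
{
	Expr		udf = {20000, NULL, NULL};	/* "user-defined" leaf */
	Expr		root = {65, &udf, NULL};	/* "built-in" root */

	puts(contains_user_function(&root) ? "reject" : "accept");	/* reject */
	return 0;
}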
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 739a92bdcc1..c4029a4f3d3 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -800,11 +800,11 @@ index_create(Relation heapRelation,
errmsg("user-defined indexes on system catalog tables are not supported")));
/*
- * Btree text_pattern_ops uses text_eq as the equality operator, which is
- * fine as long as the collation is deterministic; text_eq then reduces to
+ * Btree text_pattern_ops uses texteq as the equality operator, which is
+ * fine as long as the collation is deterministic; texteq then reduces to
* bitwise equality and so it is semantically compatible with the other
* operators and functions in that opclass. But with a nondeterministic
- * collation, text_eq could yield results that are incompatible with the
+ * collation, texteq could yield results that are incompatible with the
* actual behavior of the index (which is determined by the opclass's
* comparison function). We prevent such problems by refusing creation of
* an index with that opclass and a nondeterministic collation.
@@ -814,7 +814,7 @@ index_create(Relation heapRelation,
* opclasses as incompatible with nondeterminism; but for now, this small
* hack suffices.
*
- * Another solution is to use a special operator, not text_eq, as the
+ * Another solution is to use a special operator, not texteq, as the
* equality opclass member; but that is undesirable because it would
* prevent index usage in many queries that work fine today.
*/
@@ -3020,7 +3020,7 @@ index_build(Relation heapRelation,
/*
* Determine worker process details for parallel CREATE INDEX. Currently,
- * only btree and BRIN have support for parallel builds.
+ * only btree, GIN, and BRIN have support for parallel builds.
*
* Note that planner considers parallel safety for us.
*/
diff --git a/src/backend/catalog/pg_subscription.c b/src/backend/catalog/pg_subscription.c
index 1395032413e..244acf52f36 100644
--- a/src/backend/catalog/pg_subscription.c
+++ b/src/backend/catalog/pg_subscription.c
@@ -103,6 +103,7 @@ GetSubscription(Oid subid, bool missing_ok)
sub->passwordrequired = subform->subpasswordrequired;
sub->runasowner = subform->subrunasowner;
sub->failover = subform->subfailover;
+ sub->retaindeadtuples = subform->subretaindeadtuples;
/* Get conninfo */
datum = SysCacheGetAttrNotNull(SUBSCRIPTIONOID,
@@ -319,7 +320,7 @@ AddSubscriptionRelState(Oid subid, Oid relid, char state,
*/
void
UpdateSubscriptionRelState(Oid subid, Oid relid, char state,
- XLogRecPtr sublsn)
+ XLogRecPtr sublsn, bool already_locked)
{
Relation rel;
HeapTuple tup;
@@ -327,9 +328,24 @@ UpdateSubscriptionRelState(Oid subid, Oid relid, char state,
Datum values[Natts_pg_subscription_rel];
bool replaces[Natts_pg_subscription_rel];
- LockSharedObject(SubscriptionRelationId, subid, 0, AccessShareLock);
+ if (already_locked)
+ {
+#ifdef USE_ASSERT_CHECKING
+ LOCKTAG tag;
- rel = table_open(SubscriptionRelRelationId, RowExclusiveLock);
+ Assert(CheckRelationOidLockedByMe(SubscriptionRelRelationId,
+ RowExclusiveLock, true));
+ SET_LOCKTAG_OBJECT(tag, InvalidOid, SubscriptionRelationId, subid, 0);
+ Assert(LockHeldByMe(&tag, AccessShareLock, true));
+#endif
+
+ rel = table_open(SubscriptionRelRelationId, NoLock);
+ }
+ else
+ {
+ LockSharedObject(SubscriptionRelationId, subid, 0, AccessShareLock);
+ rel = table_open(SubscriptionRelRelationId, RowExclusiveLock);
+ }
/* Try finding existing mapping. */
tup = SearchSysCacheCopy2(SUBSCRIPTIONRELMAP,
diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql
index 08f780a2e63..77c693f630e 100644
--- a/src/backend/catalog/system_views.sql
+++ b/src/backend/catalog/system_views.sql
@@ -666,6 +666,14 @@ GRANT SELECT ON pg_shmem_allocations_numa TO pg_read_all_stats;
REVOKE EXECUTE ON FUNCTION pg_get_shmem_allocations_numa() FROM PUBLIC;
GRANT EXECUTE ON FUNCTION pg_get_shmem_allocations_numa() TO pg_read_all_stats;
+CREATE VIEW pg_dsm_registry_allocations AS
+ SELECT * FROM pg_get_dsm_registry_allocations();
+
+REVOKE ALL ON pg_dsm_registry_allocations FROM PUBLIC;
+GRANT SELECT ON pg_dsm_registry_allocations TO pg_read_all_stats;
+REVOKE EXECUTE ON FUNCTION pg_get_dsm_registry_allocations() FROM PUBLIC;
+GRANT EXECUTE ON FUNCTION pg_get_dsm_registry_allocations() TO pg_read_all_stats;
+
CREATE VIEW pg_backend_memory_contexts AS
SELECT * FROM pg_get_backend_memory_contexts();
@@ -895,7 +903,7 @@ CREATE VIEW pg_stat_activity AS
S.wait_event,
S.state,
S.backend_xid,
- s.backend_xmin,
+ S.backend_xmin,
S.query_id,
S.query,
S.backend_type
@@ -1378,7 +1386,8 @@ REVOKE ALL ON pg_subscription FROM public;
GRANT SELECT (oid, subdbid, subskiplsn, subname, subowner, subenabled,
subbinary, substream, subtwophasestate, subdisableonerr,
subpasswordrequired, subrunasowner, subfailover,
- subslotname, subsynccommit, subpublications, suborigin)
+ subretaindeadtuples, subslotname, subsynccommit,
+ subpublications, suborigin)
ON pg_subscription TO public;
CREATE VIEW pg_stat_subscription_stats AS
@@ -1390,6 +1399,7 @@ CREATE VIEW pg_stat_subscription_stats AS
ss.confl_insert_exists,
ss.confl_update_origin_differs,
ss.confl_update_exists,
+ ss.confl_update_deleted,
ss.confl_update_missing,
ss.confl_delete_origin_differs,
ss.confl_delete_missing,
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 4fffb76e557..40d66537ad7 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -76,7 +76,7 @@ static BufferAccessStrategy vac_strategy;
static void do_analyze_rel(Relation onerel,
- VacuumParams *params, List *va_cols,
+ const VacuumParams params, List *va_cols,
AcquireSampleRowsFunc acquirefunc, BlockNumber relpages,
bool inh, bool in_outer_xact, int elevel);
static void compute_index_stats(Relation onerel, double totalrows,
@@ -107,7 +107,7 @@ static Datum ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
*/
void
analyze_rel(Oid relid, RangeVar *relation,
- VacuumParams *params, List *va_cols, bool in_outer_xact,
+ const VacuumParams params, List *va_cols, bool in_outer_xact,
BufferAccessStrategy bstrategy)
{
Relation onerel;
@@ -116,7 +116,7 @@ analyze_rel(Oid relid, RangeVar *relation,
BlockNumber relpages = 0;
/* Select logging level */
- if (params->options & VACOPT_VERBOSE)
+ if (params.options & VACOPT_VERBOSE)
elevel = INFO;
else
elevel = DEBUG2;
@@ -138,8 +138,8 @@ analyze_rel(Oid relid, RangeVar *relation,
*
* Make sure to generate only logs for ANALYZE in this case.
*/
- onerel = vacuum_open_relation(relid, relation, params->options & ~(VACOPT_VACUUM),
- params->log_min_duration >= 0,
+ onerel = vacuum_open_relation(relid, relation, params.options & ~(VACOPT_VACUUM),
+ params.log_min_duration >= 0,
ShareUpdateExclusiveLock);
/* leave if relation could not be opened or locked */
@@ -155,7 +155,7 @@ analyze_rel(Oid relid, RangeVar *relation,
*/
if (!vacuum_is_permitted_for_relation(RelationGetRelid(onerel),
onerel->rd_rel,
- params->options & ~VACOPT_VACUUM))
+ params.options & ~VACOPT_VACUUM))
{
relation_close(onerel, ShareUpdateExclusiveLock);
return;
@@ -227,7 +227,7 @@ analyze_rel(Oid relid, RangeVar *relation,
else
{
/* No need for a WARNING if we already complained during VACUUM */
- if (!(params->options & VACOPT_VACUUM))
+ if (!(params.options & VACOPT_VACUUM))
ereport(WARNING,
(errmsg("skipping \"%s\" --- cannot analyze non-tables or special system tables",
RelationGetRelationName(onerel))));
@@ -275,7 +275,7 @@ analyze_rel(Oid relid, RangeVar *relation,
* appropriate acquirefunc for each child table.
*/
static void
-do_analyze_rel(Relation onerel, VacuumParams *params,
+do_analyze_rel(Relation onerel, const VacuumParams params,
List *va_cols, AcquireSampleRowsFunc acquirefunc,
BlockNumber relpages, bool inh, bool in_outer_xact,
int elevel)
@@ -309,9 +309,9 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
PgStat_Counter startreadtime = 0;
PgStat_Counter startwritetime = 0;
- verbose = (params->options & VACOPT_VERBOSE) != 0;
+ verbose = (params.options & VACOPT_VERBOSE) != 0;
instrument = (verbose || (AmAutoVacuumWorkerProcess() &&
- params->log_min_duration >= 0));
+ params.log_min_duration >= 0));
if (inh)
ereport(elevel,
(errmsg("analyzing \"%s.%s\" inheritance tree",
@@ -690,8 +690,8 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
* only do it for inherited stats. (We're never called for not-inherited
* stats on partitioned tables anyway.)
*
- * Reset the changes_since_analyze counter only if we analyzed all
- * columns; otherwise, there is still work for auto-analyze to do.
+ * Reset the mod_since_analyze counter only if we analyzed all columns;
+ * otherwise, there is still work for auto-analyze to do.
*/
if (!inh)
pgstat_report_analyze(onerel, totalrows, totaldeadrows,
@@ -706,7 +706,7 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
* amvacuumcleanup() when called in ANALYZE-only mode. The only exception
* among core index AMs is GIN/ginvacuumcleanup().
*/
- if (!(params->options & VACOPT_VACUUM))
+ if (!(params.options & VACOPT_VACUUM))
{
for (ind = 0; ind < nindexes; ind++)
{
@@ -736,9 +736,9 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
{
TimestampTz endtime = GetCurrentTimestamp();
- if (verbose || params->log_min_duration == 0 ||
+ if (verbose || params.log_min_duration == 0 ||
TimestampDifferenceExceeds(starttime, endtime,
- params->log_min_duration))
+ params.log_min_duration))
{
long delay_in_ms;
WalUsage walusage;
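The analyze.c hunks switch VacuumParams from a pointer to const pass-by-value, so each callee reads a private copy that the compiler keeps read-only. A minimal sketch of the idiom, with illustrative struct fields:

#include <stdio.h>

typedef struct VacParams
{
	int			options;
	int			log_min_duration;
} VacParams;

/*
 * With "const struct by value" the callee works on its own copy and the
 * compiler rejects accidental writes, which a non-const pointer permits.
 */
static void
use_params(const VacParams params)
{
	/* params.options = 0;	would not compile */
	printf("options=%d\n", params.options);
}

int
main(void)
{
	VacParams	p = {1, -1};

	use_params(p);
	return 0;
}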
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 54a08e4102e..b55221d44cd 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -917,7 +917,7 @@ copy_table_data(Relation NewHeap, Relation OldHeap, Relation OldIndex, bool verb
* not to be aggressive about this.
*/
memset(&params, 0, sizeof(VacuumParams));
- vacuum_get_cutoffs(OldHeap, &params, &cutoffs);
+ vacuum_get_cutoffs(OldHeap, params, &cutoffs);
/*
* FreezeXid will become the table's new relfrozenxid, and that mustn't go
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 74ae42b19a7..fae9c41db65 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -322,11 +322,13 @@ DoCopy(ParseState *pstate, const CopyStmt *stmt,
}
/*
- * Extract a CopyHeaderChoice value from a DefElem. This is like
- * defGetBoolean() but also accepts the special value "match".
+ * Extract the CopyFormatOptions.header_line value from a DefElem.
+ *
+ * Parses the HEADER option for COPY, which can be a boolean, a non-negative
+ * integer (number of lines to skip), or the special value "match".
*/
-static CopyHeaderChoice
-defGetCopyHeaderChoice(DefElem *def, bool is_from)
+static int
+defGetCopyHeaderOption(DefElem *def, bool is_from)
{
/*
* If no parameter value given, assume "true" is meant.
@@ -335,20 +337,27 @@ defGetCopyHeaderChoice(DefElem *def, bool is_from)
return COPY_HEADER_TRUE;
/*
- * Allow 0, 1, "true", "false", "on", "off", or "match".
+ * Allow 0, 1, "true", "false", "on", "off", a non-negative integer, or
+ * "match".
*/
switch (nodeTag(def->arg))
{
case T_Integer:
- switch (intVal(def->arg))
{
- case 0:
- return COPY_HEADER_FALSE;
- case 1:
- return COPY_HEADER_TRUE;
- default:
- /* otherwise, error out below */
- break;
+ int ival = intVal(def->arg);
+
+ if (ival < 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("a negative integer value cannot be "
+ "specified for %s", def->defname)));
+
+ if (!is_from && ival > 1)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot use multi-line header in COPY TO")));
+
+ return ival;
}
break;
default:
@@ -381,7 +390,8 @@ defGetCopyHeaderChoice(DefElem *def, bool is_from)
}
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("%s requires a Boolean value or \"match\"",
+ errmsg("%s requires a Boolean value, a non-negative integer, "
+ "or the string \"match\"",
def->defname)));
return COPY_HEADER_FALSE; /* keep compiler quiet */
}
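The integer branch adds two rules: negative counts are rejected outright, and COPY TO may emit at most one header line. A toy distillation of just those rules:

#include <stdbool.h>
#include <stdio.h>

static bool
header_count_ok(int ival, bool is_from)
{
	if (ival < 0)
		return false;			/* negative counts never allowed */
	if (!is_from && ival > 1)
		return false;			/* COPY TO cannot emit a multi-line header */
	return true;
}

int
main(void)
{
	printf("%d %d %d\n",
		   header_count_ok(2, true),	/* 1: COPY FROM may skip 2 lines */
		   header_count_ok(2, false),	/* 0: multi-line header in COPY TO */
		   header_count_ok(-1, true));	/* 0: negative count */
	return 0;
}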
@@ -566,7 +576,7 @@ ProcessCopyOptions(ParseState *pstate,
if (header_specified)
errorConflictingDefElem(defel, pstate);
header_specified = true;
- opts_out->header_line = defGetCopyHeaderChoice(defel, is_from);
+ opts_out->header_line = defGetCopyHeaderOption(defel, is_from);
}
else if (strcmp(defel->defname, "quote") == 0)
{
@@ -769,7 +779,7 @@ ProcessCopyOptions(ParseState *pstate,
errmsg("COPY delimiter cannot be \"%s\"", opts_out->delim)));
/* Check header */
- if (opts_out->binary && opts_out->header_line)
+ if (opts_out->binary && opts_out->header_line != COPY_HEADER_FALSE)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
/*- translator: %s is the name of a COPY option, e.g. ON_ERROR */
diff --git a/src/backend/commands/copyfromparse.c b/src/backend/commands/copyfromparse.c
index f5fc346e201..b1ae97b833d 100644
--- a/src/backend/commands/copyfromparse.c
+++ b/src/backend/commands/copyfromparse.c
@@ -771,21 +771,30 @@ static pg_attribute_always_inline bool
NextCopyFromRawFieldsInternal(CopyFromState cstate, char ***fields, int *nfields, bool is_csv)
{
int fldct;
- bool done;
+ bool done = false;
/* only available for text or csv input */
Assert(!cstate->opts.binary);
/* on input check that the header line is correct if needed */
- if (cstate->cur_lineno == 0 && cstate->opts.header_line)
+ if (cstate->cur_lineno == 0 && cstate->opts.header_line != COPY_HEADER_FALSE)
{
ListCell *cur;
TupleDesc tupDesc;
+ int lines_to_skip = cstate->opts.header_line;
+
+ /* If set to "match", one header line is skipped */
+ if (cstate->opts.header_line == COPY_HEADER_MATCH)
+ lines_to_skip = 1;
tupDesc = RelationGetDescr(cstate->rel);
- cstate->cur_lineno++;
- done = CopyReadLine(cstate, is_csv);
+ for (int i = 0; i < lines_to_skip; i++)
+ {
+ cstate->cur_lineno++;
+ if ((done = CopyReadLine(cstate, is_csv)))
+ break;
+ }
if (cstate->opts.header_line == COPY_HEADER_MATCH)
{
@@ -1538,7 +1547,7 @@ GetDecimalFromHex(char hex)
if (isdigit((unsigned char) hex))
return hex - '0';
else
- return tolower((unsigned char) hex) - 'a' + 10;
+ return pg_ascii_tolower((unsigned char) hex) - 'a' + 10;
}
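On the COPY FROM side, the single header read becomes a loop that skips header_line lines, with "match" still consuming exactly one. A standalone sketch of that loop; the sentinel value for "match" is assumed here, the real constant being COPY_HEADER_MATCH in the COPY headers:

#include <stdbool.h>
#include <stdio.h>

#define HEADER_MATCH (-1)		/* assumed sentinel value */

static const char *input[] = {"h1", "h2", "row1", NULL};
static int	pos = 0;

/* Returns true at end of input, mirroring CopyReadLine()'s convention. */
static bool
read_line(const char **line)
{
	*line = input[pos];
	if (*line == NULL)
		return true;
	pos++;
	return false;
}

int
main(void)
{
	int			header_line = 2;	/* e.g. HEADER 2 */
	int			lines_to_skip = header_line;
	const char *line;
	bool		done = false;

	if (header_line == HEADER_MATCH)
		lines_to_skip = 1;		/* "match" still consumes one line */

	for (int i = 0; i < lines_to_skip; i++)
		if ((done = read_line(&line)))
			break;				/* ran out of input inside the header */

	while (!done && !(done = read_line(&line)))
		printf("data: %s\n", line);	/* prints: data: row1 */
	return 0;
}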
/*
diff --git a/src/backend/commands/copyto.c b/src/backend/commands/copyto.c
index ea6f18f2c80..67b94b91cae 100644
--- a/src/backend/commands/copyto.c
+++ b/src/backend/commands/copyto.c
@@ -199,7 +199,7 @@ CopyToTextLikeStart(CopyToState cstate, TupleDesc tupDesc)
cstate->file_encoding);
/* if a header has been requested send the line */
- if (cstate->opts.header_line)
+ if (cstate->opts.header_line == COPY_HEADER_TRUE)
{
ListCell *cur;
bool hdr_delim = false;
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 5fbbcdaabb1..502a45163c8 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -570,8 +570,8 @@ CreateDatabaseUsingFileCopy(Oid src_dboid, Oid dst_dboid, Oid src_tsid,
* any CREATE DATABASE commands.
*/
if (!IsBinaryUpgrade)
- RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE |
- CHECKPOINT_WAIT | CHECKPOINT_FLUSH_ALL);
+ RequestCheckpoint(CHECKPOINT_FAST | CHECKPOINT_FORCE |
+ CHECKPOINT_WAIT | CHECKPOINT_FLUSH_UNLOGGED);
/*
* Iterate through all tablespaces of the template database, and copy each
@@ -673,7 +673,7 @@ CreateDatabaseUsingFileCopy(Oid src_dboid, Oid dst_dboid, Oid src_tsid,
* strategy that avoids these problems.
*/
if (!IsBinaryUpgrade)
- RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE |
+ RequestCheckpoint(CHECKPOINT_FAST | CHECKPOINT_FORCE |
CHECKPOINT_WAIT);
}
@@ -1065,16 +1065,41 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
/* Check that the chosen locales are valid, and get canonical spellings */
if (!check_locale(LC_COLLATE, dbcollate, &canonname))
- ereport(ERROR,
- (errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("invalid LC_COLLATE locale name: \"%s\"", dbcollate),
- errhint("If the locale name is specific to ICU, use ICU_LOCALE.")));
+ {
+ if (dblocprovider == COLLPROVIDER_BUILTIN)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_COLLATE locale name: \"%s\"", dbcollate),
+ errhint("If the locale name is specific to the builtin provider, use BUILTIN_LOCALE.")));
+ else if (dblocprovider == COLLPROVIDER_ICU)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_COLLATE locale name: \"%s\"", dbcollate),
+ errhint("If the locale name is specific to the ICU provider, use ICU_LOCALE.")));
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_COLLATE locale name: \"%s\"", dbcollate)));
+ }
dbcollate = canonname;
if (!check_locale(LC_CTYPE, dbctype, &canonname))
- ereport(ERROR,
- (errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("invalid LC_CTYPE locale name: \"%s\"", dbctype),
- errhint("If the locale name is specific to ICU, use ICU_LOCALE.")));
+ {
+ if (dblocprovider == COLLPROVIDER_BUILTIN)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_CTYPE locale name: \"%s\"", dbctype),
+ errhint("If the locale name is specific to the builtin provider, use BUILTIN_LOCALE.")));
+ else if (dblocprovider == COLLPROVIDER_ICU)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_CTYPE locale name: \"%s\"", dbctype),
+ errhint("If the locale name is specific to the ICU provider, use ICU_LOCALE.")));
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_CTYPE locale name: \"%s\"", dbctype)));
+ }
+
dbctype = canonname;
check_encoding_locale_matches(encoding, dbcollate, dbctype);
@@ -1845,7 +1870,7 @@ dropdb(const char *dbname, bool missing_ok, bool force)
* Force a checkpoint to make sure the checkpointer has received the
* message sent by ForgetDatabaseSyncRequests.
*/
- RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT);
+ RequestCheckpoint(CHECKPOINT_FAST | CHECKPOINT_FORCE | CHECKPOINT_WAIT);
/* Close all smgr fds in all backends. */
WaitForProcSignalBarrier(EmitProcSignalBarrier(PROCSIGNAL_BARRIER_SMGRRELEASE));
@@ -2095,8 +2120,8 @@ movedb(const char *dbname, const char *tblspcname)
* On Windows, this also ensures that background procs don't hold any open
* files, which would cause rmdir() to fail.
*/
- RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT
- | CHECKPOINT_FLUSH_ALL);
+ RequestCheckpoint(CHECKPOINT_FAST | CHECKPOINT_FORCE | CHECKPOINT_WAIT
+ | CHECKPOINT_FLUSH_UNLOGGED);
/* Close all smgr fds in all backends. */
WaitForProcSignalBarrier(EmitProcSignalBarrier(PROCSIGNAL_BARRIER_SMGRRELEASE));
@@ -2227,7 +2252,7 @@ movedb(const char *dbname, const char *tblspcname)
* any unlogged operations done in the new DB tablespace before the
* next checkpoint.
*/
- RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT);
+ RequestCheckpoint(CHECKPOINT_FAST | CHECKPOINT_FORCE | CHECKPOINT_WAIT);
/*
* Force synchronous commit, thus minimizing the window between
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 7e2792ead71..8345bc0264b 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -3582,6 +3582,7 @@ static void
show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es)
{
Plan *plan = ((PlanState *) mstate)->plan;
+ Memoize *mplan = (Memoize *) plan;
ListCell *lc;
List *context;
StringInfoData keystr;
@@ -3602,7 +3603,7 @@ show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es)
plan,
ancestors);
- foreach(lc, ((Memoize *) plan)->param_exprs)
+ foreach(lc, mplan->param_exprs)
{
Node *expr = (Node *) lfirst(lc);
@@ -3618,6 +3619,24 @@ show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es)
pfree(keystr.data);
+ if (es->costs)
+ {
+ if (es->format == EXPLAIN_FORMAT_TEXT)
+ {
+ ExplainIndentText(es);
+ appendStringInfo(es->str, "Estimates: capacity=%u distinct keys=%.0f lookups=%.0f hit percent=%.2f%%\n",
+ mplan->est_entries, mplan->est_unique_keys,
+ mplan->est_calls, mplan->est_hit_ratio * 100.0);
+ }
+ else
+ {
+ ExplainPropertyUInteger("Estimated Capacity", NULL, mplan->est_entries, es);
+ ExplainPropertyFloat("Estimated Distinct Lookup Keys", NULL, mplan->est_unique_keys, 0, es);
+ ExplainPropertyFloat("Estimated Lookups", NULL, mplan->est_calls, 0, es);
+ ExplainPropertyFloat("Estimated Hit Percent", NULL, mplan->est_hit_ratio * 100.0, 2, es);
+ }
+ }
+
if (!es->analyze)
return;
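For the text format this prints one extra "Estimates:" line per Memoize node. A sketch reproducing the format string with made-up planner numbers:

#include <stdio.h>

int
main(void)
{
	/* Illustrative values only; the real numbers come from the planner. */
	unsigned int est_entries = 1024;
	double		est_unique_keys = 1000.0;
	double		est_calls = 10000.0;
	double		est_hit_ratio = 0.90;

	printf("Estimates: capacity=%u distinct keys=%.0f lookups=%.0f hit percent=%.2f%%\n",
		   est_entries, est_unique_keys, est_calls, est_hit_ratio * 100.0);
	/* -> Estimates: capacity=1024 distinct keys=1000 lookups=10000 hit percent=90.00% */
	return 0;
}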
diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c
index c14e038d54f..77f8461f42e 100644
--- a/src/backend/commands/foreigncmds.c
+++ b/src/backend/commands/foreigncmds.c
@@ -71,15 +71,26 @@ optionListToArray(List *options)
foreach(cell, options)
{
DefElem *def = lfirst(cell);
+ const char *name;
const char *value;
Size len;
text *t;
+ name = def->defname;
value = defGetString(def);
- len = VARHDRSZ + strlen(def->defname) + 1 + strlen(value);
+
+ /* Insist that name not contain "=", else "a=b=c" is ambiguous */
+ if (strchr(name, '=') != NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("invalid option name \"%s\": must not contain \"=\"",
+ name)));
+
+ len = VARHDRSZ + strlen(name) + 1 + strlen(value);
+ /* +1 leaves room for sprintf's trailing null */
t = palloc(len + 1);
SET_VARSIZE(t, len);
- sprintf(VARDATA(t), "%s=%s", def->defname, value);
+ sprintf(VARDATA(t), "%s=%s", name, value);
astate = accumArrayResult(astate, PointerGetDatum(t),
false, TEXTOID,
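The new "=" check exists because the options are flattened to "name=value" text, where an "=" inside a name makes decoding ambiguous. A small demonstration:

#include <stdio.h>

int
main(void)
{
	/* Two different options would flatten to identical text: */
	printf("%s=%s\n", "a=b", "c");	/* name "a=b", value "c"   -> a=b=c */
	printf("%s=%s\n", "a", "b=c");	/* name "a",   value "b=c" -> a=b=c */
	return 0;
}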
@@ -1577,6 +1588,7 @@ ImportForeignSchema(ImportForeignSchemaStmt *stmt)
pstmt->utilityStmt = (Node *) cstmt;
pstmt->stmt_location = rs->stmt_location;
pstmt->stmt_len = rs->stmt_len;
+ pstmt->planOrigin = PLAN_STMT_INTERNAL;
/* Execute statement */
ProcessUtility(pstmt, cmd, false,
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index c3ec2076a52..6f753ab6d7a 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -2469,8 +2469,8 @@ GetOperatorFromCompareType(Oid opclass, Oid rhstype, CompareType cmptype,
cmptype == COMPARE_EQ ? errmsg("could not identify an equality operator for type %s", format_type_be(opcintype)) :
cmptype == COMPARE_OVERLAP ? errmsg("could not identify an overlaps operator for type %s", format_type_be(opcintype)) :
cmptype == COMPARE_CONTAINED_BY ? errmsg("could not identify a contained-by operator for type %s", format_type_be(opcintype)) : 0,
- errdetail("Could not translate compare type %d for operator family \"%s\", input type %s, access method \"%s\".",
- cmptype, get_opfamily_name(opfamily, false), format_type_be(opcintype), get_am_name(amid)));
+ errdetail("Could not translate compare type %d for operator family \"%s\" of access method \"%s\".",
+ cmptype, get_opfamily_name(opfamily, false), get_am_name(amid)));
/*
* We parameterize rhstype so foreign keys can ask for a <@ operator
@@ -2592,7 +2592,9 @@ makeObjectName(const char *name1, const char *name2, const char *label)
* constraint names.)
*
* Note: it is theoretically possible to get a collision anyway, if someone
- * else chooses the same name concurrently. This is fairly unlikely to be
+ * else chooses the same name concurrently. We shorten the race condition
+ * window by checking for conflicting relations using SnapshotDirty, but
+ * that doesn't close the window entirely. This is fairly unlikely to be
* a problem in practice, especially if one is holding an exclusive lock on
* the relation identified by name1. However, if choosing multiple names
* within a single command, you'd better create the new object and do
@@ -2608,15 +2610,45 @@ ChooseRelationName(const char *name1, const char *name2,
int pass = 0;
char *relname = NULL;
char modlabel[NAMEDATALEN];
+ SnapshotData SnapshotDirty;
+ Relation pgclassrel;
+
+ /* prepare to search pg_class with a dirty snapshot */
+ InitDirtySnapshot(SnapshotDirty);
+ pgclassrel = table_open(RelationRelationId, AccessShareLock);
/* try the unmodified label first */
strlcpy(modlabel, label, sizeof(modlabel));
for (;;)
{
+ ScanKeyData key[2];
+ SysScanDesc scan;
+ bool collides;
+
relname = makeObjectName(name1, name2, modlabel);
- if (!OidIsValid(get_relname_relid(relname, namespaceid)))
+ /* is there any conflicting relation name? */
+ ScanKeyInit(&key[0],
+ Anum_pg_class_relname,
+ BTEqualStrategyNumber, F_NAMEEQ,
+ CStringGetDatum(relname));
+ ScanKeyInit(&key[1],
+ Anum_pg_class_relnamespace,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(namespaceid));
+
+ scan = systable_beginscan(pgclassrel, ClassNameNspIndexId,
+ true /* indexOK */ ,
+ &SnapshotDirty,
+ 2, key);
+
+ collides = HeapTupleIsValid(systable_getnext(scan));
+
+ systable_endscan(scan);
+
+ /* break out of loop if no conflict */
+ if (!collides)
{
if (!isconstraint ||
!ConstraintNameExists(relname, namespaceid))
@@ -2628,6 +2660,8 @@ ChooseRelationName(const char *name1, const char *name2,
snprintf(modlabel, sizeof(modlabel), "%s%d", label, ++pass);
}
+ table_close(pgclassrel, AccessShareLock);
+
return relname;
}
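The dirty-snapshot scan replaces get_relname_relid() so that uncommitted pg_class rows also count as collisions, narrowing (though not closing) the race window described in the comment. The surrounding retry loop itself keeps its shape; a toy standalone version, with name_taken() standing in for the catalog scan:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool
name_taken(const char *name)
{
	static const char *existing[] = {"t_a_idx", "t_a_idx1", NULL};

	for (int i = 0; existing[i] != NULL; i++)
		if (strcmp(existing[i], name) == 0)
			return true;
	return false;
}

int
main(void)
{
	char		relname[64];
	int			pass = 0;

	/* Same loop shape as ChooseRelationName(): suffix until unique. */
	snprintf(relname, sizeof(relname), "t_a_%s", "idx");
	while (name_taken(relname))
		snprintf(relname, sizeof(relname), "t_a_%s%d", "idx", ++pass);
	printf("%s\n", relname);	/* t_a_idx2 */
	return 0;
}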
diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c
index 27c2cb26ef5..188e26f0e6e 100644
--- a/src/backend/commands/matview.c
+++ b/src/backend/commands/matview.c
@@ -835,7 +835,8 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner,
if (!foundUniqueIndex)
ereport(ERROR,
errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("could not find suitable unique index on materialized view"));
+ errmsg("could not find suitable unique index on materialized view \"%s\"",
+ RelationGetRelationName(matviewRel)));
appendStringInfoString(&querybuf,
" AND newdata.* OPERATOR(pg_catalog.*=) mv.*) "
diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c
index 0b23d94c38e..803c26ab216 100644
--- a/src/backend/commands/publicationcmds.c
+++ b/src/backend/commands/publicationcmds.c
@@ -2113,25 +2113,25 @@ AlterPublicationOwner_oid(Oid pubid, Oid newOwnerId)
static char
defGetGeneratedColsOption(DefElem *def)
{
- char *sval;
+ char *sval = "";
/*
- * If no parameter value given, assume "stored" is meant.
+ * A parameter value is required.
*/
- if (!def->arg)
- return PUBLISH_GENCOLS_STORED;
-
- sval = defGetString(def);
+ if (def->arg)
+ {
+ sval = defGetString(def);
- if (pg_strcasecmp(sval, "none") == 0)
- return PUBLISH_GENCOLS_NONE;
- if (pg_strcasecmp(sval, "stored") == 0)
- return PUBLISH_GENCOLS_STORED;
+ if (pg_strcasecmp(sval, "none") == 0)
+ return PUBLISH_GENCOLS_NONE;
+ if (pg_strcasecmp(sval, "stored") == 0)
+ return PUBLISH_GENCOLS_STORED;
+ }
ereport(ERROR,
errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("%s requires a \"none\" or \"stored\" value",
- def->defname));
+ errmsg("invalid value for publication parameter \"%s\": \"%s\"", def->defname, sval),
+ errdetail("Valid values are \"%s\" and \"%s\".", "none", "stored"));
return PUBLISH_GENCOLS_NONE; /* keep compiler quiet */
}
diff --git a/src/backend/commands/schemacmds.c b/src/backend/commands/schemacmds.c
index 546160f0941..0f03d9743d2 100644
--- a/src/backend/commands/schemacmds.c
+++ b/src/backend/commands/schemacmds.c
@@ -215,6 +215,7 @@ CreateSchemaCommand(CreateSchemaStmt *stmt, const char *queryString,
wrapper->utilityStmt = stmt;
wrapper->stmt_location = stmt_location;
wrapper->stmt_len = stmt_len;
+ wrapper->planOrigin = PLAN_STMT_INTERNAL;
/* do this step */
ProcessUtility(wrapper,
diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c
index 4aec73bcc6b..cd6c3684482 100644
--- a/src/backend/commands/subscriptioncmds.c
+++ b/src/backend/commands/subscriptioncmds.c
@@ -14,6 +14,7 @@
#include "postgres.h"
+#include "access/commit_ts.h"
#include "access/htup_details.h"
#include "access/table.h"
#include "access/twophase.h"
@@ -71,8 +72,9 @@
#define SUBOPT_PASSWORD_REQUIRED 0x00000800
#define SUBOPT_RUN_AS_OWNER 0x00001000
#define SUBOPT_FAILOVER 0x00002000
-#define SUBOPT_LSN 0x00004000
-#define SUBOPT_ORIGIN 0x00008000
+#define SUBOPT_RETAIN_DEAD_TUPLES 0x00004000
+#define SUBOPT_LSN 0x00008000
+#define SUBOPT_ORIGIN 0x00010000
/* check if the 'val' has 'bits' set */
#define IsSet(val, bits) (((val) & (bits)) == (bits))
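Renumbering SUBOPT_LSN and SUBOPT_ORIGIN keeps every option bit a distinct power of two, so IsSet() can test any combination. A minimal demonstration using the values above:

#include <stdio.h>

#define SUBOPT_FAILOVER           0x00002000
#define SUBOPT_RETAIN_DEAD_TUPLES 0x00004000
#define IsSet(val, bits) (((val) & (bits)) == (bits))

int
main(void)
{
	unsigned int specified = 0;

	specified |= SUBOPT_RETAIN_DEAD_TUPLES;	/* option seen in the DDL */
	printf("%d %d\n",
		   IsSet(specified, SUBOPT_RETAIN_DEAD_TUPLES),	/* 1 */
		   IsSet(specified, SUBOPT_FAILOVER));			/* 0 */
	return 0;
}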
@@ -98,6 +100,7 @@ typedef struct SubOpts
bool passwordrequired;
bool runasowner;
bool failover;
+ bool retaindeadtuples;
char *origin;
XLogRecPtr lsn;
} SubOpts;
@@ -105,8 +108,10 @@ typedef struct SubOpts
static List *fetch_table_list(WalReceiverConn *wrconn, List *publications);
static void check_publications_origin(WalReceiverConn *wrconn,
List *publications, bool copydata,
- char *origin, Oid *subrel_local_oids,
- int subrel_count, char *subname);
+ bool retain_dead_tuples, char *origin,
+ Oid *subrel_local_oids, int subrel_count,
+ char *subname);
+static void check_pub_dead_tuple_retention(WalReceiverConn *wrconn);
static void check_duplicates_in_publist(List *publist, Datum *datums);
static List *merge_publications(List *oldpublist, List *newpublist, bool addpub, const char *subname);
static void ReportSlotConnectionError(List *rstates, Oid subid, char *slotname, char *err);
@@ -162,6 +167,8 @@ parse_subscription_options(ParseState *pstate, List *stmt_options,
opts->runasowner = false;
if (IsSet(supported_opts, SUBOPT_FAILOVER))
opts->failover = false;
+ if (IsSet(supported_opts, SUBOPT_RETAIN_DEAD_TUPLES))
+ opts->retaindeadtuples = false;
if (IsSet(supported_opts, SUBOPT_ORIGIN))
opts->origin = pstrdup(LOGICALREP_ORIGIN_ANY);
@@ -210,7 +217,7 @@ parse_subscription_options(ParseState *pstate, List *stmt_options,
if (strcmp(opts->slot_name, "none") == 0)
opts->slot_name = NULL;
else
- ReplicationSlotValidateName(opts->slot_name, ERROR);
+ ReplicationSlotValidateName(opts->slot_name, false, ERROR);
}
else if (IsSet(supported_opts, SUBOPT_COPY_DATA) &&
strcmp(defel->defname, "copy_data") == 0)
@@ -307,6 +314,15 @@ parse_subscription_options(ParseState *pstate, List *stmt_options,
opts->specified_opts |= SUBOPT_FAILOVER;
opts->failover = defGetBoolean(defel);
}
+ else if (IsSet(supported_opts, SUBOPT_RETAIN_DEAD_TUPLES) &&
+ strcmp(defel->defname, "retain_dead_tuples") == 0)
+ {
+ if (IsSet(opts->specified_opts, SUBOPT_RETAIN_DEAD_TUPLES))
+ errorConflictingDefElem(defel, pstate);
+
+ opts->specified_opts |= SUBOPT_RETAIN_DEAD_TUPLES;
+ opts->retaindeadtuples = defGetBoolean(defel);
+ }
else if (IsSet(supported_opts, SUBOPT_ORIGIN) &&
strcmp(defel->defname, "origin") == 0)
{
@@ -563,7 +579,8 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt,
SUBOPT_SYNCHRONOUS_COMMIT | SUBOPT_BINARY |
SUBOPT_STREAMING | SUBOPT_TWOPHASE_COMMIT |
SUBOPT_DISABLE_ON_ERR | SUBOPT_PASSWORD_REQUIRED |
- SUBOPT_RUN_AS_OWNER | SUBOPT_FAILOVER | SUBOPT_ORIGIN);
+ SUBOPT_RUN_AS_OWNER | SUBOPT_FAILOVER |
+ SUBOPT_RETAIN_DEAD_TUPLES | SUBOPT_ORIGIN);
parse_subscription_options(pstate, stmt->options, supported_opts, &opts);
/*
@@ -630,6 +647,10 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt,
stmt->subname)));
}
+ /* Ensure that we can enable retain_dead_tuples */
+ if (opts.retaindeadtuples)
+ CheckSubDeadTupleRetention(true, !opts.enabled, WARNING);
+
if (!IsSet(opts.specified_opts, SUBOPT_SLOT_NAME) &&
opts.slot_name == NULL)
opts.slot_name = stmt->subname;
@@ -670,6 +691,8 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt,
values[Anum_pg_subscription_subpasswordrequired - 1] = BoolGetDatum(opts.passwordrequired);
values[Anum_pg_subscription_subrunasowner - 1] = BoolGetDatum(opts.runasowner);
values[Anum_pg_subscription_subfailover - 1] = BoolGetDatum(opts.failover);
+ values[Anum_pg_subscription_subretaindeadtuples - 1] =
+ BoolGetDatum(opts.retaindeadtuples);
values[Anum_pg_subscription_subconninfo - 1] =
CStringGetTextDatum(conninfo);
if (opts.slot_name)
@@ -722,7 +745,11 @@ CreateSubscription(ParseState *pstate, CreateSubscriptionStmt *stmt,
{
check_publications(wrconn, publications);
check_publications_origin(wrconn, publications, opts.copy_data,
- opts.origin, NULL, 0, stmt->subname);
+ opts.retaindeadtuples, opts.origin,
+ NULL, 0, stmt->subname);
+
+ if (opts.retaindeadtuples)
+ check_pub_dead_tuple_retention(wrconn);
/*
* Set sync state based on if we were asked to do data copy or
@@ -881,8 +908,8 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data,
sizeof(Oid), oid_cmp);
check_publications_origin(wrconn, sub->publications, copy_data,
- sub->origin, subrel_local_oids,
- subrel_count, sub->name);
+ sub->retaindeadtuples, sub->origin,
+ subrel_local_oids, subrel_count, sub->name);
/*
* Rels that we want to remove from subscription and drop any slots
@@ -1040,18 +1067,22 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data,
}
/*
- * Common checks for altering failover and two_phase options.
+ * Common checks for altering failover, two_phase, and retain_dead_tuples
+ * options.
*/
static void
CheckAlterSubOption(Subscription *sub, const char *option,
bool slot_needs_update, bool isTopLevel)
{
+ Assert(strcmp(option, "failover") == 0 ||
+ strcmp(option, "two_phase") == 0 ||
+ strcmp(option, "retain_dead_tuples") == 0);
+
/*
- * The checks in this function are required only for failover and
- * two_phase options.
+ * Altering the retain_dead_tuples option does not update the slot on the
+ * publisher.
*/
- Assert(strcmp(option, "failover") == 0 ||
- strcmp(option, "two_phase") == 0);
+ Assert(!slot_needs_update || strcmp(option, "retain_dead_tuples") != 0);
/*
* Do not allow changing the option if the subscription is enabled. This
@@ -1063,6 +1094,39 @@ CheckAlterSubOption(Subscription *sub, const char *option,
* the publisher by the existing walsender, so we could have allowed that
* even when the subscription is enabled. But we kept this restriction for
* the sake of consistency and simplicity.
+ *
+ * Additionally, do not allow changing the retain_dead_tuples option when
+ * the subscription is enabled to prevent race conditions arising from the
+ * new option value being acknowledged asynchronously by the launcher and
+ * apply workers.
+ *
+ * Without the restriction, a race condition may arise when a user
+ * disables and immediately re-enables the retain_dead_tuples option. In
+ * this case, the launcher might drop the slot upon noticing the disabled
+ * action, while the apply worker may keep maintaining
+ * oldest_nonremovable_xid without noticing the option change. During this
+ * period, a transaction ID wraparound could falsely make this ID appear
+ * as if it originates from the future w.r.t the transaction ID stored in
+ * the slot maintained by launcher.
+ *
+ * Similarly, if the user enables retain_dead_tuples concurrently with the
+ * launcher starting the worker, the apply worker may start calculating
+ * oldest_nonremovable_xid before the launcher notices the enable action.
+ * Consequently, the launcher may update slot.xmin to a newer value than
+ * that maintained by the worker. In subsequent cycles, upon integrating
+ * the worker's oldest_nonremovable_xid, the launcher might detect a
+ * retreat in the calculated xmin, necessitating additional handling.
+ *
+ * XXX To address the above race conditions, we could define
+ * oldest_nonremovable_xid as a FullTransactionId and add a check that
+ * disallows retreating the conflict slot's xmin. For now, we keep the
+ * implementation simple by disallowing changes to retain_dead_tuples,
+ * but this could be relaxed in the future after some more analysis.
+ *
+ * Note that we could restrict only the enabling of retain_dead_tuples to
+ * avoid the race conditions described above, but we maintain the
+ * restriction for both enable and disable operations for the sake of
+ * consistency.
*/
if (sub->enabled)
ereport(ERROR,
@@ -1110,6 +1174,9 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
bool update_tuple = false;
bool update_failover = false;
bool update_two_phase = false;
+ bool check_pub_rdt = false;
+ bool retain_dead_tuples;
+ char *origin;
Subscription *sub;
Form_pg_subscription form;
bits32 supported_opts;
@@ -1137,6 +1204,9 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
sub = GetSubscription(subid, false);
+ retain_dead_tuples = sub->retaindeadtuples;
+ origin = sub->origin;
+
/*
* Don't allow non-superuser modification of a subscription with
* password_required=false.
@@ -1165,7 +1235,7 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
SUBOPT_DISABLE_ON_ERR |
SUBOPT_PASSWORD_REQUIRED |
SUBOPT_RUN_AS_OWNER | SUBOPT_FAILOVER |
- SUBOPT_ORIGIN);
+ SUBOPT_RETAIN_DEAD_TUPLES | SUBOPT_ORIGIN);
parse_subscription_options(pstate, stmt->options,
supported_opts, &opts);
@@ -1267,7 +1337,7 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
IsSet(opts.specified_opts, SUBOPT_SLOT_NAME))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("slot_name and two_phase cannot be altered at the same time")));
+ errmsg("\"slot_name\" and \"two_phase\" cannot be altered at the same time")));
/*
* Note that workers may still survive even if the
@@ -1283,7 +1353,7 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
if (logicalrep_workers_find(subid, true, true))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot alter two_phase when logical replication worker is still running"),
+ errmsg("cannot alter \"two_phase\" when logical replication worker is still running"),
errhint("Try again after some time.")));
/*
@@ -1297,7 +1367,7 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
LookupGXactBySubid(subid))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot disable two_phase when prepared transactions are present"),
+ errmsg("cannot disable \"two_phase\" when prepared transactions exist"),
errhint("Resolve these transactions and try again.")));
/* Change system catalog accordingly */
@@ -1325,11 +1395,62 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
replaces[Anum_pg_subscription_subfailover - 1] = true;
}
+ if (IsSet(opts.specified_opts, SUBOPT_RETAIN_DEAD_TUPLES))
+ {
+ values[Anum_pg_subscription_subretaindeadtuples - 1] =
+ BoolGetDatum(opts.retaindeadtuples);
+ replaces[Anum_pg_subscription_subretaindeadtuples - 1] = true;
+
+ CheckAlterSubOption(sub, "retain_dead_tuples", false, isTopLevel);
+
+ /*
+ * Workers may continue running even after the
+ * subscription has been disabled.
+ *
+ * To prevent race conditions (as described in
+ * CheckAlterSubOption()), ensure that all worker
+ * processes have already exited before proceeding.
+ */
+ if (logicalrep_workers_find(subid, true, true))
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot alter retain_dead_tuples when logical replication worker is still running"),
+ errhint("Try again after some time.")));
+
+ /*
+ * Remind the user that enabling the subscription will prevent
+ * the accumulation of dead tuples.
+ */
+ if (opts.retaindeadtuples)
+ CheckSubDeadTupleRetention(true, !sub->enabled, NOTICE);
+
+ /*
+ * Notify the launcher to manage the replication slot for
+ * conflict detection. This ensures that the replication slot
+ * is efficiently handled (created, updated, or dropped)
+ * in response to any configuration changes.
+ */
+ ApplyLauncherWakeupAtCommit();
+
+ check_pub_rdt = opts.retaindeadtuples;
+ retain_dead_tuples = opts.retaindeadtuples;
+ }
+
if (IsSet(opts.specified_opts, SUBOPT_ORIGIN))
{
values[Anum_pg_subscription_suborigin - 1] =
CStringGetTextDatum(opts.origin);
replaces[Anum_pg_subscription_suborigin - 1] = true;
+
+ /*
+ * Check if changes from different origins may be received
+ * from the publisher when the origin is changed to ANY
+ * and retain_dead_tuples is enabled.
+ */
+ check_pub_rdt = retain_dead_tuples &&
+ pg_strcasecmp(opts.origin, LOGICALREP_ORIGIN_ANY) == 0;
+
+ origin = opts.origin;
}
update_tuple = true;
@@ -1347,6 +1468,15 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("cannot enable subscription that does not have a slot name")));
+ /*
+ * Check track_commit_timestamp only when enabling the
+ * subscription in case it was disabled after creation. See
+ * comments atop CheckSubDeadTupleRetention() for details.
+ */
+ if (sub->retaindeadtuples)
+ CheckSubDeadTupleRetention(opts.enabled, !opts.enabled,
+ WARNING);
+
values[Anum_pg_subscription_subenabled - 1] =
BoolGetDatum(opts.enabled);
replaces[Anum_pg_subscription_subenabled - 1] = true;
@@ -1355,6 +1485,14 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
ApplyLauncherWakeupAtCommit();
update_tuple = true;
+
+ /*
+ * The subscription might be initially created with
+ * connect=false and retain_dead_tuples=true, meaning the
+ * remote server's status may not be checked. Ensure this
+ * check is conducted now.
+ */
+ check_pub_rdt = sub->retaindeadtuples && opts.enabled;
break;
}
@@ -1369,6 +1507,13 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
CStringGetTextDatum(stmt->conninfo);
replaces[Anum_pg_subscription_subconninfo - 1] = true;
update_tuple = true;
+
+ /*
+ * Since the remote server configuration might have changed,
+ * perform a check to ensure it permits enabling
+ * retain_dead_tuples.
+ */
+ check_pub_rdt = sub->retaindeadtuples;
break;
case ALTER_SUBSCRIPTION_SET_PUBLICATION:
@@ -1539,7 +1684,7 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
if (!XLogRecPtrIsInvalid(remote_lsn) && opts.lsn < remote_lsn)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("skip WAL location (LSN %X/%X) must be greater than origin LSN %X/%X",
+ errmsg("skip WAL location (LSN %X/%08X) must be greater than origin LSN %X/%08X",
LSN_FORMAT_ARGS(opts.lsn),
LSN_FORMAT_ARGS(remote_lsn))));
}
@@ -1568,14 +1713,15 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
}
/*
- * Try to acquire the connection necessary for altering the slot, if
- * needed.
+ * Try to acquire the connection necessary either for modifying the slot
+ * or for checking if the remote server permits enabling
+ * retain_dead_tuples.
*
* This has to be at the end because otherwise if there is an error while
* doing the database operations we won't be able to rollback altered
* slot.
*/
- if (update_failover || update_two_phase)
+ if (update_failover || update_two_phase || check_pub_rdt)
{
bool must_use_password;
char *err;
@@ -1584,10 +1730,14 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
/* Load the library providing us libpq calls. */
load_file("libpqwalreceiver", false);
- /* Try to connect to the publisher. */
+ /*
+ * Try to connect to the publisher, using the new connection string if
+ * available.
+ */
must_use_password = sub->passwordrequired && !sub->ownersuperuser;
- wrconn = walrcv_connect(sub->conninfo, true, true, must_use_password,
- sub->name, &err);
+ wrconn = walrcv_connect(stmt->conninfo ? stmt->conninfo : sub->conninfo,
+ true, true, must_use_password, sub->name,
+ &err);
if (!wrconn)
ereport(ERROR,
(errcode(ERRCODE_CONNECTION_FAILURE),
@@ -1596,9 +1746,17 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
PG_TRY();
{
- walrcv_alter_slot(wrconn, sub->slotname,
- update_failover ? &opts.failover : NULL,
- update_two_phase ? &opts.twophase : NULL);
+ if (retain_dead_tuples)
+ check_pub_dead_tuple_retention(wrconn);
+
+ check_publications_origin(wrconn, sub->publications, false,
+ retain_dead_tuples, origin, NULL, 0,
+ sub->name);
+
+ if (update_failover || update_two_phase)
+ walrcv_alter_slot(wrconn, sub->slotname,
+ update_failover ? &opts.failover : NULL,
+ update_two_phase ? &opts.twophase : NULL);
}
PG_FINALLY();
{
@@ -2086,20 +2244,29 @@ AlterSubscriptionOwner_oid(Oid subid, Oid newOwnerId)
* Check and log a warning if the publisher has subscribed to the same table,
* its partition ancestors (if it's a partition), or its partition children (if
* it's a partitioned table), from some other publishers. This check is
- * required only if "copy_data = true" and "origin = none" for CREATE
- * SUBSCRIPTION and ALTER SUBSCRIPTION ... REFRESH statements to notify the
- * user that data having origin might have been copied.
+ * required in the following scenarios:
*
- * This check need not be performed on the tables that are already added
- * because incremental sync for those tables will happen through WAL and the
- * origin of the data can be identified from the WAL records.
+ * 1) For CREATE SUBSCRIPTION and ALTER SUBSCRIPTION ... REFRESH statements
+ * with "copy_data = true" and "origin = none":
+ * - Warn the user that data with an origin might have been copied.
+ * - This check is skipped for tables already added, as incremental sync via
+ * WAL allows origin tracking. The list of such tables is in
+ * subrel_local_oids.
*
- * subrel_local_oids contains the list of relation oids that are already
- * present on the subscriber.
+ * 2) For CREATE SUBSCRIPTION and ALTER SUBSCRIPTION ... REFRESH statements
+ * with "retain_dead_tuples = true" and "origin = any", and for ALTER
+ * SUBSCRIPTION statements that modify retain_dead_tuples or origin, or
+ * when the publisher's status changes (e.g., due to a connection string
+ * update):
+ * - Warn the user that only conflict detection info for local changes on
+ * the publisher is retained. Data from other origins may lack sufficient
+ * details for reliable conflict detection.
+ * - See comments atop worker.c for more details.
*/
static void
check_publications_origin(WalReceiverConn *wrconn, List *publications,
- bool copydata, char *origin, Oid *subrel_local_oids,
+ bool copydata, bool retain_dead_tuples,
+ char *origin, Oid *subrel_local_oids,
int subrel_count, char *subname)
{
WalRcvExecResult *res;
@@ -2108,9 +2275,29 @@ check_publications_origin(WalReceiverConn *wrconn, List *publications,
Oid tableRow[1] = {TEXTOID};
List *publist = NIL;
int i;
+ bool check_rdt;
+ bool check_table_sync;
+ bool origin_none = origin &&
+ pg_strcasecmp(origin, LOGICALREP_ORIGIN_NONE) == 0;
+
+ /*
+ * Enable retain_dead_tuples checks only when origin is set to 'any',
+ * since with origin='none' only local changes are replicated to the
+ * subscriber.
+ */
+ check_rdt = retain_dead_tuples && !origin_none;
+
+ /*
+ * Enable table synchronization checks only when origin is 'none', to
+ * ensure that data from other origins is not inadvertently copied.
+ */
+ check_table_sync = copydata && origin_none;
- if (!copydata || !origin ||
- (pg_strcasecmp(origin, LOGICALREP_ORIGIN_NONE) != 0))
+ /* retain_dead_tuples and table sync checks occur separately */
+ Assert(!(check_rdt && check_table_sync));
+
+ /* Return if no checks are required */
+ if (!check_rdt && !check_table_sync)
return;
initStringInfo(&cmd);
@@ -2129,16 +2316,23 @@ check_publications_origin(WalReceiverConn *wrconn, List *publications,
/*
* In case of ALTER SUBSCRIPTION ... REFRESH, subrel_local_oids contains
* the list of relation oids that are already present on the subscriber.
- * This check should be skipped for these tables.
+ * This check should be skipped for these tables when checking the table
+ * sync scenario. However, when handling the retain_dead_tuples scenario,
+ * ensure all tables are checked, as some existing tables may now include
+ * changes from other origins due to newly created subscriptions on the
+ * publisher.
*/
- for (i = 0; i < subrel_count; i++)
+ if (check_table_sync)
{
- Oid relid = subrel_local_oids[i];
- char *schemaname = get_namespace_name(get_rel_namespace(relid));
- char *tablename = get_rel_name(relid);
+ for (i = 0; i < subrel_count; i++)
+ {
+ Oid relid = subrel_local_oids[i];
+ char *schemaname = get_namespace_name(get_rel_namespace(relid));
+ char *tablename = get_rel_name(relid);
- appendStringInfo(&cmd, "AND NOT (N.nspname = '%s' AND C.relname = '%s')\n",
- schemaname, tablename);
+ appendStringInfo(&cmd, "AND NOT (N.nspname = '%s' AND C.relname = '%s')\n",
+ schemaname, tablename);
+ }
}
res = walrcv_exec(wrconn, cmd.data, 1, tableRow);
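For illustration, if public.t1 and public.t2 (hypothetical names) are already synced, the loop above extends the remote query with exclusions of the form:

  AND NOT (N.nspname = 'public' AND C.relname = 't1')
  AND NOT (N.nspname = 'public' AND C.relname = 't2')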
@@ -2173,22 +2367,37 @@ check_publications_origin(WalReceiverConn *wrconn, List *publications,
* XXX: For simplicity, we don't check whether the table has any data or
* not. If the table doesn't have any data then we don't need to
* distinguish between data having origin and data not having origin so we
- * can avoid logging a warning in that case.
+ * can avoid logging a warning for the table sync scenario.
*/
if (publist)
{
StringInfo pubnames = makeStringInfo();
+ StringInfo err_msg = makeStringInfo();
+ StringInfo err_hint = makeStringInfo();
/* Prepare the list of publication(s) for warning message. */
GetPublicationsStr(publist, pubnames, false);
+
+ if (check_table_sync)
+ {
+ appendStringInfo(err_msg, _("subscription \"%s\" requested copy_data with origin = NONE but might copy data that had a different origin"),
+ subname);
+ appendStringInfoString(err_hint, _("Verify that initial data copied from the publisher tables did not come from other origins."));
+ }
+ else
+ {
+ appendStringInfo(err_msg, _("subscription \"%s\" enabled retain_dead_tuples but might not reliably detect conflicts for changes from different origins"),
+ subname);
+ appendStringInfoString(err_hint, _("Consider using origin = NONE or disabling retain_dead_tuples."));
+ }
+
ereport(WARNING,
errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("subscription \"%s\" requested copy_data with origin = NONE but might copy data that had a different origin",
- subname),
- errdetail_plural("The subscription being created subscribes to a publication (%s) that contains tables that are written to by other subscriptions.",
- "The subscription being created subscribes to publications (%s) that contain tables that are written to by other subscriptions.",
+ errmsg_internal("%s", err_msg->data),
+ errdetail_plural("The subscription subscribes to a publication (%s) that contains tables that are written to by other subscriptions.",
+ "The subscription subscribes to publications (%s) that contain tables that are written to by other subscriptions.",
list_length(publist), pubnames->data),
- errhint("Verify that initial data copied from the publisher tables did not come from other origins."));
+ errhint_internal("%s", err_hint->data));
}
ExecDropSingleTupleTableSlot(slot);
@@ -2197,6 +2406,101 @@ check_publications_origin(WalReceiverConn *wrconn, List *publications,
}
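For the retain_dead_tuples scenario, the emitted message looks like the following (a sketch with hypothetical subscription and publication names):

  WARNING:  subscription "s2" enabled retain_dead_tuples but might not reliably detect conflicts for changes from different origins
  DETAIL:  The subscription subscribes to a publication (pub1) that contains tables that are written to by other subscriptions.
  HINT:  Consider using origin = NONE or disabling retain_dead_tuples.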
/*
+ * Determine whether retain_dead_tuples can be enabled based on the
+ * publisher's status.
+ *
+ * This option is disallowed if the publisher is running a version earlier
+ * than PostgreSQL 19, or if the publisher is in recovery (i.e., it is a standby
+ * server).
+ *
+ * See comments atop worker.c for a detailed explanation.
+ */
+static void
+check_pub_dead_tuple_retention(WalReceiverConn *wrconn)
+{
+ WalRcvExecResult *res;
+ Oid RecoveryRow[1] = {BOOLOID};
+ TupleTableSlot *slot;
+ bool isnull;
+ bool remote_in_recovery;
+
+ if (walrcv_server_version(wrconn) < 19000)
+ ereport(ERROR,
+ errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("cannot enable retain_dead_tuples if the publisher is running a version earlier than PostgreSQL 19"));
+
+ res = walrcv_exec(wrconn, "SELECT pg_is_in_recovery()", 1, RecoveryRow);
+
+ if (res->status != WALRCV_OK_TUPLES)
+ ereport(ERROR,
+ (errcode(ERRCODE_CONNECTION_FAILURE),
+ errmsg("could not obtain recovery progress from the publisher: %s",
+ res->err)));
+
+ slot = MakeSingleTupleTableSlot(res->tupledesc, &TTSOpsMinimalTuple);
+ if (!tuplestore_gettupleslot(res->tuplestore, true, false, slot))
+ elog(ERROR, "failed to fetch tuple for the recovery progress");
+
+ remote_in_recovery = DatumGetBool(slot_getattr(slot, 1, &isnull));
+
+ if (remote_in_recovery)
+ ereport(ERROR,
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot enable retain_dead_tuples if the publisher is in recovery."));
+
+ ExecDropSingleTupleTableSlot(slot);
+
+ walrcv_clear_result(res);
+}
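A subscriber pointing at a standby therefore fails up front; a minimal sketch (names and connection string are illustrative):

  CREATE SUBSCRIPTION s3 CONNECTION 'host=standby dbname=src' PUBLICATION pub1
      WITH (retain_dead_tuples = true);
  -- ERROR:  cannot enable retain_dead_tuples if the publisher is in recovery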
+
+/*
+ * Check if the subscriber's configuration is adequate to enable the
+ * retain_dead_tuples option.
+ *
+ * Issue an ERROR if the wal_level does not support the use of replication
+ * slots when check_guc is set to true.
+ *
+ * Issue a WARNING if track_commit_timestamp is not enabled when check_guc is
+ * set to true. This is only to highlight the importance of enabling
+ * track_commit_timestamp instead of catching all the misconfigurations, as
+ * this setting can be adjusted after subscription creation. Without it, the
+ * apply worker will simply skip conflict detection.
+ *
+ * Issue a WARNING or NOTICE if the subscription is disabled. Do not raise an
+ * ERROR since users can only modify retain_dead_tuples for disabled
+ * subscriptions. As long as the subscription is enabled promptly, it will
+ * not pose issues.
+ */
+void
+CheckSubDeadTupleRetention(bool check_guc, bool sub_disabled,
+ int elevel_for_sub_disabled)
+{
+ Assert(elevel_for_sub_disabled == NOTICE ||
+ elevel_for_sub_disabled == WARNING);
+
+ if (check_guc && wal_level < WAL_LEVEL_REPLICA)
+ ereport(ERROR,
+ errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("\"wal_level\" is insufficient to create the replication slot required by retain_dead_tuples"),
+ errhint("\"wal_level\" must be set to \"replica\" or \"logical\" at server start."));
+
+ if (check_guc && !track_commit_timestamp)
+ ereport(WARNING,
+ errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("commit timestamp and origin data required for detecting conflicts won't be retained"),
+ errhint("Consider setting \"%s\" to true.",
+ "track_commit_timestamp"));
+
+ if (sub_disabled)
+ ereport(elevel_for_sub_disabled,
+ errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("deleted rows to detect conflicts would not be removed until the subscription is enabled"),
+ (elevel_for_sub_disabled > NOTICE)
+ ? errhint("Consider setting %s to false.",
+ "retain_dead_tuples") : 0);
+}
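A minimal sketch of the disabled-subscription path (whether NOTICE or WARNING is raised depends on the caller's elevel_for_sub_disabled):

  ALTER SUBSCRIPTION s2 DISABLE;
  ALTER SUBSCRIPTION s2 SET (retain_dead_tuples = true);
  -- NOTICE or WARNING:  deleted rows to detect conflicts would not be removed until the subscription is enabled
  -- (callers passing WARNING also attach the hint to set retain_dead_tuples to false)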
+
+/*
* Get the list of tables which belong to specified publications on the
* publisher connection.
*
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index acf11e83c04..cb811520c29 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -430,8 +430,8 @@ static void AlterConstrUpdateConstraintEntry(ATAlterConstraint *cmdcon, Relation
static ObjectAddress ATExecValidateConstraint(List **wqueue,
Relation rel, char *constrName,
bool recurse, bool recursing, LOCKMODE lockmode);
-static void QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
- HeapTuple contuple, LOCKMODE lockmode);
+static void QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation fkrel,
+ Oid pkrelid, HeapTuple contuple, LOCKMODE lockmode);
static void QueueCheckConstraintValidation(List **wqueue, Relation conrel, Relation rel,
char *constrName, HeapTuple contuple,
bool recurse, bool recursing, LOCKMODE lockmode);
@@ -2711,8 +2711,7 @@ MergeAttributes(List *columns, const List *supers, char relpersistence,
RelationGetRelationName(relation))));
/* If existing rel is temp, it must belong to this session */
- if (relation->rd_rel->relpersistence == RELPERSISTENCE_TEMP &&
- !relation->rd_islocaltemp)
+ if (RELATION_IS_OTHER_TEMP(relation))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg(!is_partition
@@ -7374,7 +7373,7 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel,
/* make sure datatype is legal for a column */
CheckAttributeType(NameStr(attribute->attname), attribute->atttypid, attribute->attcollation,
list_make1_oid(rel->rd_rel->reltype),
- 0);
+ (attribute->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL ? CHKATYPE_IS_VIRTUAL : 0));
InsertPgAttributeTuples(attrdesc, tupdesc, myrelid, NULL, NULL);
@@ -8609,7 +8608,7 @@ ATExecSetExpression(AlteredTableInfo *tab, Relation rel, const char *colName,
rel->rd_att->constr && rel->rd_att->constr->num_check > 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables with check constraints"),
+ errmsg("ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables with check constraints"),
errdetail("Column \"%s\" of relation \"%s\" is a virtual generated column.",
colName, RelationGetRelationName(rel))));
@@ -8627,7 +8626,7 @@ ATExecSetExpression(AlteredTableInfo *tab, Relation rel, const char *colName,
GetRelationPublications(RelationGetRelid(rel)) != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables that are part of a publication"),
+ errmsg("ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables that are part of a publication"),
errdetail("Column \"%s\" of relation \"%s\" is a virtual generated column.",
colName, RelationGetRelationName(rel))));
@@ -10189,7 +10188,7 @@ ATAddForeignKeyConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
if (pk_has_without_overlaps && !with_period)
ereport(ERROR,
errcode(ERRCODE_INVALID_FOREIGN_KEY),
- errmsg("foreign key must use PERIOD when referencing a primary using WITHOUT OVERLAPS"));
+ errmsg("foreign key must use PERIOD when referencing a primary key using WITHOUT OVERLAPS"));
/*
* Now we can check permissions.
@@ -10330,8 +10329,8 @@ ATAddForeignKeyConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
for_overlaps
? errmsg("could not identify an overlaps operator for foreign key")
: errmsg("could not identify an equality operator for foreign key"),
- errdetail("Could not translate compare type %d for operator family \"%s\", input type %s, access method \"%s\".",
- cmptype, get_opfamily_name(opfamily, false), format_type_be(opcintype), get_am_name(amid)));
+ errdetail("Could not translate compare type %d for operator family \"%s\" of access method \"%s\".",
+ cmptype, get_opfamily_name(opfamily, false), get_am_name(amid)));
/*
* There had better be a primary equality operator for the index.
@@ -11858,6 +11857,7 @@ AttachPartitionForeignKey(List **wqueue,
if (queueValidation)
{
Relation conrel;
+ Oid confrelid;
conrel = table_open(ConstraintRelationId, RowExclusiveLock);
@@ -11865,9 +11865,11 @@ AttachPartitionForeignKey(List **wqueue,
if (!HeapTupleIsValid(partcontup))
elog(ERROR, "cache lookup failed for constraint %u", partConstrOid);
+ confrelid = ((Form_pg_constraint) GETSTRUCT(partcontup))->confrelid;
+
/* Use the same lock as for AT_ValidateConstraint */
- QueueFKConstraintValidation(wqueue, conrel, partition, partcontup,
- ShareUpdateExclusiveLock);
+ QueueFKConstraintValidation(wqueue, conrel, partition, confrelid,
+ partcontup, ShareUpdateExclusiveLock);
ReleaseSysCache(partcontup);
table_close(conrel, RowExclusiveLock);
}
@@ -12463,9 +12465,12 @@ ATExecAlterConstrEnforceability(List **wqueue, ATAlterConstraint *cmdcon,
/*
* Tell Phase 3 to check that the constraint is satisfied by existing
- * rows.
+ * rows. Only applies to leaf partitions, and (for constraints that
+ * reference a partitioned table) only if this is not one of the
+ * pg_constraint rows that exist solely to support action triggers.
*/
- if (rel->rd_rel->relkind == RELKIND_RELATION)
+ if (rel->rd_rel->relkind == RELKIND_RELATION &&
+ currcon->confrelid == pkrelid)
{
AlteredTableInfo *tab;
NewConstraint *newcon;
@@ -12907,8 +12912,9 @@ ATExecValidateConstraint(List **wqueue, Relation rel, char *constrName,
con->contype != CONSTRAINT_NOTNULL)
ereport(ERROR,
errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("constraint \"%s\" of relation \"%s\" is not a foreign key, check, or not-null constraint",
- constrName, RelationGetRelationName(rel)));
+ errmsg("cannot validate constraint \"%s\" of relation \"%s\"",
+ constrName, RelationGetRelationName(rel)),
+ errdetail("This operation is not supported for this type of constraint."));
if (!con->conenforced)
ereport(ERROR,
@@ -12919,7 +12925,8 @@ ATExecValidateConstraint(List **wqueue, Relation rel, char *constrName,
{
if (con->contype == CONSTRAINT_FOREIGN)
{
- QueueFKConstraintValidation(wqueue, conrel, rel, tuple, lockmode);
+ QueueFKConstraintValidation(wqueue, conrel, rel, con->confrelid,
+ tuple, lockmode);
}
else if (con->contype == CONSTRAINT_CHECK)
{
@@ -12952,8 +12959,8 @@ ATExecValidateConstraint(List **wqueue, Relation rel, char *constrName,
* for the specified relation and all its children.
*/
static void
-QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
- HeapTuple contuple, LOCKMODE lockmode)
+QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation fkrel,
+ Oid pkrelid, HeapTuple contuple, LOCKMODE lockmode)
{
Form_pg_constraint con;
AlteredTableInfo *tab;
@@ -12964,7 +12971,17 @@ QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
Assert(con->contype == CONSTRAINT_FOREIGN);
Assert(!con->convalidated);
- if (rel->rd_rel->relkind == RELKIND_RELATION)
+ /*
+ * Add the validation to phase 3's queue; not needed for partitioned
+ * tables themselves, only for their partitions.
+ *
+ * When the referenced table (pkrelid) is partitioned, the referencing
+ * table (fkrel) has one pg_constraint row pointing to each partition
+ * thereof. These rows are there only to support action triggers and no
+ * table scan is needed; therefore, skip this for them as well.
+ */
+ if (fkrel->rd_rel->relkind == RELKIND_RELATION &&
+ con->confrelid == pkrelid)
{
NewConstraint *newcon;
Constraint *fkconstraint;
@@ -12983,15 +13000,16 @@ QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
newcon->qual = (Node *) fkconstraint;
/* Find or create work queue entry for this table */
- tab = ATGetQueueEntry(wqueue, rel);
+ tab = ATGetQueueEntry(wqueue, fkrel);
tab->constraints = lappend(tab->constraints, newcon);
}
/*
* If the table at either end of the constraint is partitioned, we need to
- * recurse and handle every constraint that is a child of this constraint.
+ * recurse and handle every unvalidated constraint that is a child of this
+ * constraint.
*/
- if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ||
+ if (fkrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ||
get_rel_relkind(con->confrelid) == RELKIND_PARTITIONED_TABLE)
{
ScanKeyData pkey;
@@ -13023,8 +13041,12 @@ QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
childrel = table_open(childcon->conrelid, lockmode);
- QueueFKConstraintValidation(wqueue, conrel, childrel, childtup,
- lockmode);
+ /*
+ * NB: Note that pkrelid should be passed as-is during recursion,
+ * as it is required to identify the root referenced table.
+ */
+ QueueFKConstraintValidation(wqueue, conrel, childrel, pkrelid,
+ childtup, lockmode);
table_close(childrel, NoLock);
}
@@ -13032,7 +13054,11 @@ QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
}
/*
- * Now update the catalog, while we have the door open.
+ * Now mark the pg_constraint row as validated (even for entries we didn't
+ * check, notably the ones for partitions on the referenced side).
+ *
+ * We rely on transaction abort to roll back this change if phase 3
+ * ultimately finds violating rows. This is a bit ugly.
*/
copyTuple = heap_copytuple(contuple);
copy_con = (Form_pg_constraint) GETSTRUCT(copyTuple);
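To see why pkrelid must be tracked, consider a sketch where the referenced table is partitioned: the referencing table carries one pg_constraint row per partition, but only the row whose confrelid equals the root (pkrelid) should drive a scan:

  CREATE TABLE pk (a int PRIMARY KEY) PARTITION BY RANGE (a);
  CREATE TABLE pk1 PARTITION OF pk FOR VALUES FROM (0) TO (1000);
  CREATE TABLE fk (a int);
  ALTER TABLE fk ADD CONSTRAINT fk_a FOREIGN KEY (a) REFERENCES pk NOT VALID;
  ALTER TABLE fk VALIDATE CONSTRAINT fk_a;  -- one scan of fk, not one per partition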
@@ -14400,7 +14426,7 @@ ATPrepAlterColumnType(List **wqueue,
/* make sure datatype is legal for a column */
CheckAttributeType(colName, targettype, targetcollid,
list_make1_oid(rel->rd_rel->reltype),
- 0);
+ (attTup->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL ? CHKATYPE_IS_VIRTUAL : 0));
if (attTup->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
{
@@ -14458,6 +14484,9 @@ ATPrepAlterColumnType(List **wqueue,
/* Fix collations after all else */
assign_expr_collations(pstate, transform);
+ /* Expand virtual generated columns in the expr. */
+ transform = expand_generated_columns_in_expr(transform, rel, 1);
+
/* Plan the expr now so we can accurately assess the need to rewrite. */
transform = (Node *) expression_planner((Expr *) transform);
@@ -15385,9 +15414,12 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode)
/*
* Re-parse the index and constraint definitions, and attach them to the
* appropriate work queue entries. We do this before dropping because in
- * the case of a FOREIGN KEY constraint, we might not yet have exclusive
- * lock on the table the constraint is attached to, and we need to get
- * that before reparsing/dropping.
+ * the case of a constraint on another table, we might not yet have
+ * exclusive lock on the table the constraint is attached to, and we need
+ * to get that before reparsing/dropping. (That's possible at least for
+ * FOREIGN KEY, CHECK, and EXCLUSION constraints; in non-FK cases it
+ * requires a dependency on the target table's composite type in the other
+ * table's constraint expressions.)
*
* We can't rely on the output of deparsing to tell us which relation to
* operate on, because concurrent activity might have made the name
@@ -15403,7 +15435,6 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode)
Form_pg_constraint con;
Oid relid;
Oid confrelid;
- char contype;
bool conislocal;
tup = SearchSysCache1(CONSTROID, ObjectIdGetDatum(oldId));
@@ -15420,7 +15451,6 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode)
elog(ERROR, "could not identify relation associated with constraint %u", oldId);
}
confrelid = con->confrelid;
- contype = con->contype;
conislocal = con->conislocal;
ReleaseSysCache(tup);
@@ -15438,12 +15468,12 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode)
continue;
/*
- * When rebuilding an FK constraint that references the table we're
- * modifying, we might not yet have any lock on the FK's table, so get
- * one now. We'll need AccessExclusiveLock for the DROP CONSTRAINT
- * step, so there's no value in asking for anything weaker.
+ * When rebuilding another table's constraint that references the
+ * table we're modifying, we might not yet have any lock on the other
+ * table, so get one now. We'll need AccessExclusiveLock for the DROP
+ * CONSTRAINT step, so there's no value in asking for anything weaker.
*/
- if (relid != tab->relid && contype == CONSTRAINT_FOREIGN)
+ if (relid != tab->relid)
LockRelationOid(relid, AccessExclusiveLock);
ATPostAlterTypeParse(oldId, relid, confrelid,
@@ -15457,6 +15487,14 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode)
Oid relid;
relid = IndexGetRelation(oldId, false);
+
+ /*
+ * As above, make sure we have lock on the index's table if it's not
+ * the same table.
+ */
+ if (relid != tab->relid)
+ LockRelationOid(relid, AccessExclusiveLock);
+
ATPostAlterTypeParse(oldId, relid, InvalidOid,
(char *) lfirst(def_item),
wqueue, lockmode, tab->rewrite);
@@ -15473,6 +15511,20 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode)
Oid relid;
relid = StatisticsGetRelation(oldId, false);
+
+ /*
+ * As above, make sure we have lock on the statistics object's table
+ * if it's not the same table. However, we take
+ * ShareUpdateExclusiveLock here, aligning with the lock level used in
+ * CreateStatistics and RemoveStatisticsById.
+ *
+ * CAUTION: this should be done after all cases that grab
+ * AccessExclusiveLock, else we risk causing deadlock due to needing
+ * to promote our table lock.
+ */
+ if (relid != tab->relid)
+ LockRelationOid(relid, ShareUpdateExclusiveLock);
+
ATPostAlterTypeParse(oldId, relid, InvalidOid,
(char *) lfirst(def_item),
wqueue, lockmode, tab->rewrite);
@@ -15696,7 +15748,7 @@ ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd,
{
AlterDomainStmt *stmt = (AlterDomainStmt *) stm;
- if (stmt->subtype == 'C') /* ADD CONSTRAINT */
+ if (stmt->subtype == AD_AddConstraint)
{
Constraint *con = castNode(Constraint, stmt->def);
AlterTableCmd *cmd = makeNode(AlterTableCmd);
@@ -17199,15 +17251,13 @@ ATExecAddInherit(Relation child_rel, RangeVar *parent, LOCKMODE lockmode)
RelationGetRelationName(parent_rel))));
/* If parent rel is temp, it must belong to this session */
- if (parent_rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP &&
- !parent_rel->rd_islocaltemp)
+ if (RELATION_IS_OTHER_TEMP(parent_rel))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot inherit from temporary relation of another session")));
/* Ditto for the child */
- if (child_rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP &&
- !child_rel->rd_islocaltemp)
+ if (RELATION_IS_OTHER_TEMP(child_rel))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot inherit to temporary relation of another session")));
@@ -20278,15 +20328,13 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd,
RelationGetRelationName(rel))));
/* If the parent is temp, it must belong to this session */
- if (rel->rd_rel->relpersistence == RELPERSISTENCE_TEMP &&
- !rel->rd_islocaltemp)
+ if (RELATION_IS_OTHER_TEMP(rel))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot attach as partition of temporary relation of another session")));
/* Ditto for the partition */
- if (attachrel->rd_rel->relpersistence == RELPERSISTENCE_TEMP &&
- !attachrel->rd_islocaltemp)
+ if (RELATION_IS_OTHER_TEMP(attachrel))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot attach temporary relation of another session as partition")));
diff --git a/src/backend/commands/tablespace.c b/src/backend/commands/tablespace.c
index a9005cc7212..df31eace47a 100644
--- a/src/backend/commands/tablespace.c
+++ b/src/backend/commands/tablespace.c
@@ -500,7 +500,7 @@ DropTableSpace(DropTableSpaceStmt *stmt)
* mustn't delete. So instead, we force a checkpoint which will clean
* out any lingering files, and try again.
*/
- RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE | CHECKPOINT_WAIT);
+ RequestCheckpoint(CHECKPOINT_FAST | CHECKPOINT_FORCE | CHECKPOINT_WAIT);
/*
* On Windows, an unlinked file persists in the directory listing
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 67f8e70f9c1..7dc121f73f1 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -80,6 +80,7 @@ static bool GetTupleForTrigger(EState *estate,
ItemPointer tid,
LockTupleMode lockmode,
TupleTableSlot *oldslot,
+ bool do_epq_recheck,
TupleTableSlot **epqslot,
TM_Result *tmresultp,
TM_FailureData *tmfdp);
@@ -2693,7 +2694,8 @@ ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
HeapTuple fdw_trigtuple,
TupleTableSlot **epqslot,
TM_Result *tmresult,
- TM_FailureData *tmfd)
+ TM_FailureData *tmfd,
+ bool is_merge_delete)
{
TupleTableSlot *slot = ExecGetTriggerOldSlot(estate, relinfo);
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
@@ -2708,9 +2710,17 @@ ExecBRDeleteTriggers(EState *estate, EPQState *epqstate,
{
TupleTableSlot *epqslot_candidate = NULL;
+ /*
+ * Get a copy of the on-disk tuple we are planning to delete. In
+ * general, if the tuple has been concurrently updated, we should
+ * recheck it using EPQ. However, if this is a MERGE DELETE action,
+ * we skip this EPQ recheck and leave it to the caller (it must do
+ * additional rechecking, and might end up executing a different
+ * action entirely).
+ */
if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
- LockTupleExclusive, slot, &epqslot_candidate,
- tmresult, tmfd))
+ LockTupleExclusive, slot, !is_merge_delete,
+ &epqslot_candidate, tmresult, tmfd))
return false;
/*
@@ -2800,6 +2810,7 @@ ExecARDeleteTriggers(EState *estate,
tupleid,
LockTupleExclusive,
slot,
+ false,
NULL,
NULL,
NULL);
@@ -2944,7 +2955,8 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
HeapTuple fdw_trigtuple,
TupleTableSlot *newslot,
TM_Result *tmresult,
- TM_FailureData *tmfd)
+ TM_FailureData *tmfd,
+ bool is_merge_update)
{
TriggerDesc *trigdesc = relinfo->ri_TrigDesc;
TupleTableSlot *oldslot = ExecGetTriggerOldSlot(estate, relinfo);
@@ -2965,10 +2977,17 @@ ExecBRUpdateTriggers(EState *estate, EPQState *epqstate,
{
TupleTableSlot *epqslot_candidate = NULL;
- /* get a copy of the on-disk tuple we are planning to update */
+ /*
+ * Get a copy of the on-disk tuple we are planning to update. In
+ * general, if the tuple has been concurrently updated, we should
+ * recheck it using EPQ. However, if this is a MERGE UPDATE action,
+ * we skip this EPQ recheck and leave it to the caller (it must do
+ * additional rechecking, and might end up executing a different
+ * action entirely).
+ */
if (!GetTupleForTrigger(estate, epqstate, relinfo, tupleid,
- lockmode, oldslot, &epqslot_candidate,
- tmresult, tmfd))
+ lockmode, oldslot, !is_merge_update,
+ &epqslot_candidate, tmresult, tmfd))
return false; /* cancel the update action */
/*
@@ -3142,6 +3161,7 @@ ExecARUpdateTriggers(EState *estate, ResultRelInfo *relinfo,
tupleid,
LockTupleExclusive,
oldslot,
+ false,
NULL,
NULL,
NULL);
@@ -3298,6 +3318,7 @@ GetTupleForTrigger(EState *estate,
ItemPointer tid,
LockTupleMode lockmode,
TupleTableSlot *oldslot,
+ bool do_epq_recheck,
TupleTableSlot **epqslot,
TM_Result *tmresultp,
TM_FailureData *tmfdp)
@@ -3357,29 +3378,30 @@ GetTupleForTrigger(EState *estate,
if (tmfd.traversed)
{
/*
- * Recheck the tuple using EPQ. For MERGE, we leave this
- * to the caller (it must do additional rechecking, and
- * might end up executing a different action entirely).
+ * Recheck the tuple using EPQ, if requested. Otherwise,
+ * just return that it was concurrently updated.
*/
- if (estate->es_plannedstmt->commandType == CMD_MERGE)
+ if (do_epq_recheck)
{
- if (tmresultp)
- *tmresultp = TM_Updated;
- return false;
+ *epqslot = EvalPlanQual(epqstate,
+ relation,
+ relinfo->ri_RangeTableIndex,
+ oldslot);
+
+ /*
+ * If PlanQual failed for updated tuple - we must not
+ * process this tuple!
+ */
+ if (TupIsNull(*epqslot))
+ {
+ *epqslot = NULL;
+ return false;
+ }
}
-
- *epqslot = EvalPlanQual(epqstate,
- relation,
- relinfo->ri_RangeTableIndex,
- oldslot);
-
- /*
- * If PlanQual failed for updated tuple - we must not
- * process this tuple!
- */
- if (TupIsNull(*epqslot))
+ else
{
- *epqslot = NULL;
+ if (tmresultp)
+ *tmresultp = TM_Updated;
return false;
}
}
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 45ae7472ab5..26d985193ae 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -939,11 +939,19 @@ DefineDomain(ParseState *pstate, CreateDomainStmt *stmt)
break;
case CONSTR_NOTNULL:
- if (nullDefined && !typNotNull)
+ if (nullDefined)
+ {
+ if (!typNotNull)
+ ereport(ERROR,
+ errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("conflicting NULL/NOT NULL constraints"),
+ parser_errposition(pstate, constr->location));
+
ereport(ERROR,
- errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("conflicting NULL/NOT NULL constraints"),
+ errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+ errmsg("redundant NOT NULL constraint definition"),
parser_errposition(pstate, constr->location));
+ }
if (constr->is_no_inherit)
ereport(ERROR,
errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
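A sketch of the two error paths now distinguished:

  CREATE DOMAIN d1 AS int NULL NOT NULL;
  -- ERROR:  conflicting NULL/NOT NULL constraints
  CREATE DOMAIN d2 AS int NOT NULL NOT NULL;
  -- ERROR:  redundant NOT NULL constraint definition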
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 33a33bf6b1c..733ef40ae7c 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -56,6 +56,7 @@
#include "utils/fmgroids.h"
#include "utils/guc.h"
#include "utils/guc_hooks.h"
+#include "utils/injection_point.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
@@ -123,7 +124,7 @@ static void vac_truncate_clog(TransactionId frozenXID,
MultiXactId minMulti,
TransactionId lastSaneFrozenXid,
MultiXactId lastSaneMinMulti);
-static bool vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
+static bool vacuum_rel(Oid relid, RangeVar *relation, VacuumParams params,
BufferAccessStrategy bstrategy);
static double compute_parallel_delay(void);
static VacOptValue get_vacoptval_from_boolean(DefElem *def);
@@ -464,7 +465,7 @@ ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel)
}
/* Now go through the common routine */
- vacuum(vacstmt->rels, &params, bstrategy, vac_context, isTopLevel);
+ vacuum(vacstmt->rels, params, bstrategy, vac_context, isTopLevel);
/* Finally, clean up the vacuum memory context */
MemoryContextDelete(vac_context);
@@ -493,7 +494,7 @@ ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel)
* memory context that will not disappear at transaction commit.
*/
void
-vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
+vacuum(List *relations, const VacuumParams params, BufferAccessStrategy bstrategy,
MemoryContext vac_context, bool isTopLevel)
{
static bool in_vacuum = false;
@@ -502,9 +503,7 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
volatile bool in_outer_xact,
use_own_xacts;
- Assert(params != NULL);
-
- stmttype = (params->options & VACOPT_VACUUM) ? "VACUUM" : "ANALYZE";
+ stmttype = (params.options & VACOPT_VACUUM) ? "VACUUM" : "ANALYZE";
/*
* We cannot run VACUUM inside a user transaction block; if we were inside
@@ -514,7 +513,7 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
*
* ANALYZE (without VACUUM) can run either way.
*/
- if (params->options & VACOPT_VACUUM)
+ if (params.options & VACOPT_VACUUM)
{
PreventInTransactionBlock(isTopLevel, stmttype);
in_outer_xact = false;
@@ -537,7 +536,7 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
* Build list of relation(s) to process, putting any new data in
* vac_context for safekeeping.
*/
- if (params->options & VACOPT_ONLY_DATABASE_STATS)
+ if (params.options & VACOPT_ONLY_DATABASE_STATS)
{
/* We don't process any tables in this case */
Assert(relations == NIL);
@@ -553,7 +552,7 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
List *sublist;
MemoryContext old_context;
- sublist = expand_vacuum_rel(vrel, vac_context, params->options);
+ sublist = expand_vacuum_rel(vrel, vac_context, params.options);
old_context = MemoryContextSwitchTo(vac_context);
newrels = list_concat(newrels, sublist);
MemoryContextSwitchTo(old_context);
@@ -561,7 +560,7 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
relations = newrels;
}
else
- relations = get_all_vacuum_rels(vac_context, params->options);
+ relations = get_all_vacuum_rels(vac_context, params.options);
/*
* Decide whether we need to start/commit our own transactions.
@@ -577,11 +576,11 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
* transaction block, and also in an autovacuum worker, use own
* transactions so we can release locks sooner.
*/
- if (params->options & VACOPT_VACUUM)
+ if (params.options & VACOPT_VACUUM)
use_own_xacts = true;
else
{
- Assert(params->options & VACOPT_ANALYZE);
+ Assert(params.options & VACOPT_ANALYZE);
if (AmAutoVacuumWorkerProcess())
use_own_xacts = true;
else if (in_outer_xact)
@@ -632,13 +631,13 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
{
VacuumRelation *vrel = lfirst_node(VacuumRelation, cur);
- if (params->options & VACOPT_VACUUM)
+ if (params.options & VACOPT_VACUUM)
{
if (!vacuum_rel(vrel->oid, vrel->relation, params, bstrategy))
continue;
}
- if (params->options & VACOPT_ANALYZE)
+ if (params.options & VACOPT_ANALYZE)
{
/*
* If using separate xacts, start one for analyze. Otherwise,
@@ -702,8 +701,8 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
StartTransactionCommand();
}
- if ((params->options & VACOPT_VACUUM) &&
- !(params->options & VACOPT_SKIP_DATABASE_STATS))
+ if ((params.options & VACOPT_VACUUM) &&
+ !(params.options & VACOPT_SKIP_DATABASE_STATS))
{
/*
* Update pg_database.datfrozenxid, and truncate pg_xact if possible.
@@ -1101,7 +1100,7 @@ get_all_vacuum_rels(MemoryContext vac_context, int options)
* minimum).
*/
bool
-vacuum_get_cutoffs(Relation rel, const VacuumParams *params,
+vacuum_get_cutoffs(Relation rel, const VacuumParams params,
struct VacuumCutoffs *cutoffs)
{
int freeze_min_age,
@@ -1117,10 +1116,10 @@ vacuum_get_cutoffs(Relation rel, const VacuumParams *params,
aggressiveMXIDCutoff;
/* Use mutable copies of freeze age parameters */
- freeze_min_age = params->freeze_min_age;
- multixact_freeze_min_age = params->multixact_freeze_min_age;
- freeze_table_age = params->freeze_table_age;
- multixact_freeze_table_age = params->multixact_freeze_table_age;
+ freeze_min_age = params.freeze_min_age;
+ multixact_freeze_min_age = params.multixact_freeze_min_age;
+ freeze_table_age = params.freeze_table_age;
+ multixact_freeze_table_age = params.multixact_freeze_table_age;
/* Set pg_class fields in cutoffs */
cutoffs->relfrozenxid = rel->rd_rel->relfrozenxid;
@@ -1997,7 +1996,7 @@ vac_truncate_clog(TransactionId frozenXID,
* At entry and exit, we are not inside a transaction.
*/
static bool
-vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
+vacuum_rel(Oid relid, RangeVar *relation, VacuumParams params,
BufferAccessStrategy bstrategy)
{
LOCKMODE lmode;
@@ -2008,13 +2007,18 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
Oid save_userid;
int save_sec_context;
int save_nestlevel;
+ VacuumParams toast_vacuum_params;
- Assert(params != NULL);
+ /*
+ * This function scribbles on the parameters, so make a copy early to
+ * avoid affecting the TOAST table (if we do end up recursing to it).
+ */
+ memcpy(&toast_vacuum_params, &params, sizeof(VacuumParams));
/* Begin a transaction for vacuuming this relation */
StartTransactionCommand();
- if (!(params->options & VACOPT_FULL))
+ if (!(params.options & VACOPT_FULL))
{
/*
* In lazy vacuum, we can set the PROC_IN_VACUUM flag, which lets
@@ -2040,7 +2044,7 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
*/
LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
MyProc->statusFlags |= PROC_IN_VACUUM;
- if (params->is_wraparound)
+ if (params.is_wraparound)
MyProc->statusFlags |= PROC_VACUUM_FOR_WRAPAROUND;
ProcGlobal->statusFlags[MyProc->pgxactoff] = MyProc->statusFlags;
LWLockRelease(ProcArrayLock);
@@ -2064,12 +2068,12 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
* vacuum, but just ShareUpdateExclusiveLock for concurrent vacuum. Either
* way, we can be sure that no other backend is vacuuming the same table.
*/
- lmode = (params->options & VACOPT_FULL) ?
+ lmode = (params.options & VACOPT_FULL) ?
AccessExclusiveLock : ShareUpdateExclusiveLock;
/* open the relation and get the appropriate lock on it */
- rel = vacuum_open_relation(relid, relation, params->options,
- params->log_min_duration >= 0, lmode);
+ rel = vacuum_open_relation(relid, relation, params.options,
+ params.log_min_duration >= 0, lmode);
/* leave if relation could not be opened or locked */
if (!rel)
@@ -2084,8 +2088,8 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
* This is only safe to do because we hold a session lock on the main
* relation that prevents concurrent deletion.
*/
- if (OidIsValid(params->toast_parent))
- priv_relid = params->toast_parent;
+ if (OidIsValid(params.toast_parent))
+ priv_relid = params.toast_parent;
else
priv_relid = RelationGetRelid(rel);
@@ -2098,7 +2102,7 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
*/
if (!vacuum_is_permitted_for_relation(priv_relid,
rel->rd_rel,
- params->options & ~VACOPT_ANALYZE))
+ params.options & ~VACOPT_ANALYZE))
{
relation_close(rel, lmode);
PopActiveSnapshot();
@@ -2169,7 +2173,7 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
* Set index_cleanup option based on index_cleanup reloption if it wasn't
* specified in VACUUM command, or when running in an autovacuum worker
*/
- if (params->index_cleanup == VACOPTVALUE_UNSPECIFIED)
+ if (params.index_cleanup == VACOPTVALUE_UNSPECIFIED)
{
StdRdOptIndexCleanup vacuum_index_cleanup;
@@ -2180,56 +2184,74 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
((StdRdOptions *) rel->rd_options)->vacuum_index_cleanup;
if (vacuum_index_cleanup == STDRD_OPTION_VACUUM_INDEX_CLEANUP_AUTO)
- params->index_cleanup = VACOPTVALUE_AUTO;
+ params.index_cleanup = VACOPTVALUE_AUTO;
else if (vacuum_index_cleanup == STDRD_OPTION_VACUUM_INDEX_CLEANUP_ON)
- params->index_cleanup = VACOPTVALUE_ENABLED;
+ params.index_cleanup = VACOPTVALUE_ENABLED;
else
{
Assert(vacuum_index_cleanup ==
STDRD_OPTION_VACUUM_INDEX_CLEANUP_OFF);
- params->index_cleanup = VACOPTVALUE_DISABLED;
+ params.index_cleanup = VACOPTVALUE_DISABLED;
}
}
+#ifdef USE_INJECTION_POINTS
+ if (params.index_cleanup == VACOPTVALUE_AUTO)
+ INJECTION_POINT("vacuum-index-cleanup-auto", NULL);
+ else if (params.index_cleanup == VACOPTVALUE_DISABLED)
+ INJECTION_POINT("vacuum-index-cleanup-disabled", NULL);
+ else if (params.index_cleanup == VACOPTVALUE_ENABLED)
+ INJECTION_POINT("vacuum-index-cleanup-enabled", NULL);
+#endif
+
/*
* Check if the vacuum_max_eager_freeze_failure_rate table storage
* parameter was specified. This overrides the GUC value.
*/
if (rel->rd_options != NULL &&
((StdRdOptions *) rel->rd_options)->vacuum_max_eager_freeze_failure_rate >= 0)
- params->max_eager_freeze_failure_rate =
+ params.max_eager_freeze_failure_rate =
((StdRdOptions *) rel->rd_options)->vacuum_max_eager_freeze_failure_rate;
/*
* Set truncate option based on truncate reloption or GUC if it wasn't
* specified in VACUUM command, or when running in an autovacuum worker
*/
- if (params->truncate == VACOPTVALUE_UNSPECIFIED)
+ if (params.truncate == VACOPTVALUE_UNSPECIFIED)
{
StdRdOptions *opts = (StdRdOptions *) rel->rd_options;
if (opts && opts->vacuum_truncate_set)
{
if (opts->vacuum_truncate)
- params->truncate = VACOPTVALUE_ENABLED;
+ params.truncate = VACOPTVALUE_ENABLED;
else
- params->truncate = VACOPTVALUE_DISABLED;
+ params.truncate = VACOPTVALUE_DISABLED;
}
else if (vacuum_truncate)
- params->truncate = VACOPTVALUE_ENABLED;
+ params.truncate = VACOPTVALUE_ENABLED;
else
- params->truncate = VACOPTVALUE_DISABLED;
+ params.truncate = VACOPTVALUE_DISABLED;
}
+#ifdef USE_INJECTION_POINTS
+ if (params.truncate == VACOPTVALUE_AUTO)
+ INJECTION_POINT("vacuum-truncate-auto", NULL);
+ else if (params.truncate == VACOPTVALUE_DISABLED)
+ INJECTION_POINT("vacuum-truncate-disabled", NULL);
+ else if (params.truncate == VACOPTVALUE_ENABLED)
+ INJECTION_POINT("vacuum-truncate-enabled", NULL);
+#endif
+
/*
* Remember the relation's TOAST relation for later, if the caller asked
* us to process it. In VACUUM FULL, though, the toast table is
* automatically rebuilt by cluster_rel so we shouldn't recurse to it,
* unless PROCESS_MAIN is disabled.
*/
- if ((params->options & VACOPT_PROCESS_TOAST) != 0 &&
- ((params->options & VACOPT_FULL) == 0 ||
- (params->options & VACOPT_PROCESS_MAIN) == 0))
+ if ((params.options & VACOPT_PROCESS_TOAST) != 0 &&
+ ((params.options & VACOPT_FULL) == 0 ||
+ (params.options & VACOPT_PROCESS_MAIN) == 0))
toast_relid = rel->rd_rel->reltoastrelid;
else
toast_relid = InvalidOid;
@@ -2252,16 +2274,16 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
* table is required (e.g., PROCESS_TOAST is set), we force PROCESS_MAIN
* to be set when we recurse to the TOAST table.
*/
- if (params->options & VACOPT_PROCESS_MAIN)
+ if (params.options & VACOPT_PROCESS_MAIN)
{
/*
* Do the actual work --- either FULL or "lazy" vacuum
*/
- if (params->options & VACOPT_FULL)
+ if (params.options & VACOPT_FULL)
{
ClusterParams cluster_params = {0};
- if ((params->options & VACOPT_VERBOSE) != 0)
+ if ((params.options & VACOPT_VERBOSE) != 0)
cluster_params.options |= CLUOPT_VERBOSE;
/* VACUUM FULL is now a variant of CLUSTER; see cluster.c */
@@ -2299,19 +2321,16 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
*/
if (toast_relid != InvalidOid)
{
- VacuumParams toast_vacuum_params;
-
/*
* Force VACOPT_PROCESS_MAIN so vacuum_rel() processes it. Likewise,
* set toast_parent so that the privilege checks are done on the main
* relation. NB: This is only safe to do because we hold a session
* lock on the main relation that prevents concurrent deletion.
*/
- memcpy(&toast_vacuum_params, params, sizeof(VacuumParams));
toast_vacuum_params.options |= VACOPT_PROCESS_MAIN;
toast_vacuum_params.toast_parent = relid;
- vacuum_rel(toast_relid, NULL, &toast_vacuum_params, bstrategy);
+ vacuum_rel(toast_relid, NULL, toast_vacuum_params, bstrategy);
}
/*
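The option combinations distinguished by the checks above can be exercised directly; a sketch with a hypothetical table t:

  VACUUM (PROCESS_TOAST false) t;       -- main relation only
  VACUUM (PROCESS_MAIN false) t;        -- TOAST table only
  VACUUM (FULL, PROCESS_MAIN false) t;  -- VACUUM FULL that still recurses to TOAST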
diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c
index 8a72b5e70a4..1a37737d4a2 100644
--- a/src/backend/executor/execExprInterp.c
+++ b/src/backend/executor/execExprInterp.c
@@ -5228,7 +5228,6 @@ ExecEvalJsonCoercionFinish(ExprState *state, ExprEvalStep *op)
* JsonBehavior expression.
*/
jsestate->escontext.error_occurred = false;
- jsestate->escontext.error_occurred = false;
jsestate->escontext.details_wanted = true;
}
}
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index 255bd795361..b5400749353 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -144,7 +144,7 @@ execTuplesHashPrepare(int numCols,
* hashfunctions: FmgrInfos of datatype-specific hashing functions to use
* collations: collations to use in comparisons
* nbuckets: initial estimate of hashtable size
- * additionalsize: size of data stored in ->additional
+ * additionalsize: size of data that may be stored along with the hash entry
* metacxt: memory context for long-lived allocation, but not per-entry data
* tablecxt: memory context in which to store table entries
* tempcxt: short-lived context for evaluation hash and comparison functions
@@ -288,7 +288,7 @@ ResetTupleHashTable(TupleHashTable hashtable)
*
* If isnew isn't NULL, then a new entry is created if no existing entry
* matches. On return, *isnew is true if the entry is newly created,
- * false if it existed already. ->additional_data in the new entry has
+ * false if it existed already. The additional data in the new entry has
* been zeroed.
*/
TupleHashEntry
diff --git a/src/backend/executor/execIndexing.c b/src/backend/executor/execIndexing.c
index bdf862b2406..ca33a854278 100644
--- a/src/backend/executor/execIndexing.c
+++ b/src/backend/executor/execIndexing.c
@@ -279,7 +279,7 @@ ExecCloseIndices(ResultRelInfo *resultRelInfo)
* executor is performing an UPDATE that could not use an
* optimization like heapam's HOT (in more general terms a
* call to table_tuple_update() took place and set
- * 'update_indexes' to TUUI_All). Receiving this hint makes
+ * 'update_indexes' to TU_All). Receiving this hint makes
* us consider if we should pass down the 'indexUnchanged'
* hint in turn. That's something that we figure out for
* each index_insert() call iff 'update' is true.
@@ -290,7 +290,7 @@ ExecCloseIndices(ResultRelInfo *resultRelInfo)
* HOT has been applied and any updated columns are indexed
* only by summarizing indexes (or in more general terms a
* call to table_tuple_update() took place and set
- * 'update_indexes' to TUUI_Summarizing). We can (and must)
+ * 'update_indexes' to TU_Summarizing). We can (and must)
* therefore only update the indexes that have
* 'amsummarizing' = true.
*
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index f3e77bda279..f098a5557cf 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -189,6 +189,7 @@ ExecSerializePlan(Plan *plan, EState *estate)
pstmt->permInfos = estate->es_rteperminfos;
pstmt->resultRelations = NIL;
pstmt->appendRelations = NIL;
+ pstmt->planOrigin = PLAN_STMT_INTERNAL;
/*
* Transfer only parallel-safe subplans, leaving a NULL "hole" in the list
diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c
index 53ddd25c42d..68184f5d671 100644
--- a/src/backend/executor/execReplication.c
+++ b/src/backend/executor/execReplication.c
@@ -14,12 +14,14 @@
#include "postgres.h"
+#include "access/commit_ts.h"
#include "access/genam.h"
#include "access/gist.h"
#include "access/relscan.h"
#include "access/tableam.h"
#include "access/transam.h"
#include "access/xact.h"
+#include "access/heapam.h"
#include "catalog/pg_am_d.h"
#include "commands/trigger.h"
#include "executor/executor.h"
@@ -36,7 +38,7 @@
static bool tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2,
- TypeCacheEntry **eq);
+ TypeCacheEntry **eq, Bitmapset *columns);
/*
* Setup a ScanKey for a search in the relation 'rel' for a tuple 'key' that
@@ -221,7 +223,7 @@ retry:
if (eq == NULL)
eq = palloc0(sizeof(*eq) * outslot->tts_tupleDescriptor->natts);
- if (!tuples_equal(outslot, searchslot, eq))
+ if (!tuples_equal(outslot, searchslot, eq, NULL))
continue;
}
@@ -277,10 +279,13 @@ retry:
/*
* Compare the tuples in the slots by checking if they have equal values.
+ *
+ * If 'columns' is not null, only the columns specified within it will be
+ * considered for the equality check, ignoring all other columns.
*/
static bool
tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2,
- TypeCacheEntry **eq)
+ TypeCacheEntry **eq, Bitmapset *columns)
{
int attrnum;
@@ -306,6 +311,14 @@ tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2,
continue;
/*
+ * Ignore columns that are not listed for checking.
+ */
+ if (columns &&
+ !bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
+ columns))
+ continue;
+
+ /*
* If one value is NULL and other is not, then they are certainly not
* equal
*/
@@ -380,7 +393,7 @@ retry:
/* Try to find the tuple */
while (table_scan_getnextslot(scan, ForwardScanDirection, scanslot))
{
- if (!tuples_equal(scanslot, searchslot, eq))
+ if (!tuples_equal(scanslot, searchslot, eq, NULL))
continue;
found = true;
@@ -456,6 +469,236 @@ BuildConflictIndexInfo(ResultRelInfo *resultRelInfo, Oid conflictindex)
}
/*
+ * If the tuple is recently dead and was deleted by a transaction with a newer
+ * commit timestamp than previously recorded, update the associated transaction
+ * ID, commit time, and origin. This helps ensure that conflict detection uses
+ * the most recent and relevant deletion metadata.
+ */
+static void
+update_most_recent_deletion_info(TupleTableSlot *scanslot,
+ TransactionId oldestxmin,
+ TransactionId *delete_xid,
+ TimestampTz *delete_time,
+ RepOriginId *delete_origin)
+{
+ BufferHeapTupleTableSlot *hslot;
+ HeapTuple tuple;
+ Buffer buf;
+ bool recently_dead = false;
+ TransactionId xmax;
+ TimestampTz localts;
+ RepOriginId localorigin;
+
+ hslot = (BufferHeapTupleTableSlot *) scanslot;
+
+ tuple = ExecFetchSlotHeapTuple(scanslot, false, NULL);
+ buf = hslot->buffer;
+
+ LockBuffer(buf, BUFFER_LOCK_SHARE);
+
+ /*
+ * We do not consider HEAPTUPLE_DEAD status because it indicates either
+ * tuples whose inserting transaction was aborted (meaning there is no
+ * commit timestamp or origin), or tuples deleted by a transaction older
+ * than oldestxmin, making it safe to ignore them during conflict
+ * detection (See comments atop worker.c for details).
+ */
+ if (HeapTupleSatisfiesVacuum(tuple, oldestxmin, buf) == HEAPTUPLE_RECENTLY_DEAD)
+ recently_dead = true;
+
+ LockBuffer(buf, BUFFER_LOCK_UNLOCK);
+
+ if (!recently_dead)
+ return;
+
+ xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
+ if (!TransactionIdIsValid(xmax))
+ return;
+
+ /* Select the dead tuple with the most recent commit timestamp */
+ if (TransactionIdGetCommitTsData(xmax, &localts, &localorigin) &&
+ TimestampDifferenceExceeds(*delete_time, localts, 0))
+ {
+ *delete_xid = xmax;
+ *delete_time = localts;
+ *delete_origin = localorigin;
+ }
+}
+
+/*
+ * Searches the relation 'rel' for the most recently deleted tuple that matches
+ * the values in 'searchslot' and is not yet removable by VACUUM. The function
+ * returns the transaction ID, origin, and commit timestamp of the transaction
+ * that deleted this tuple.
+ *
+ * 'oldestxmin' acts as a cutoff transaction ID. Tuples deleted by transactions
+ * with IDs >= 'oldestxmin' are considered recently dead and are eligible for
+ * conflict detection.
+ *
+ * Instead of stopping at the first match, we scan all matching dead tuples to
+ * identify most recent deletion. This is crucial because only the latest
+ * deletion is relevant for resolving conflicts.
+ *
+ * For example, consider a scenario on the subscriber where a row is deleted,
+ * re-inserted, and then deleted again only on the subscriber:
+ *
+ * - (pk, 1) - deleted at 9:00,
+ * - (pk, 1) - deleted at 9:02,
+ *
+ * Now, a remote update arrives: (pk, 1) -> (pk, 2), timestamped at 9:01.
+ *
+ * If we mistakenly return the older deletion (9:00), the system may wrongly
+ * apply the remote update using a last-update-wins strategy. Instead, we must
+ * recognize the more recent deletion at 9:02 and skip the update. See
+ * comments atop worker.c for details. Note, as of now, conflict resolution
+ * is not implemented. Consequently, the system may incorrectly report the
+ * older tuple as the conflicted one, leading to misleading results.
+ *
+ * The commit timestamp of the deleting transaction is used to determine which
+ * tuple was deleted most recently.
+ */
+bool
+RelationFindDeletedTupleInfoSeq(Relation rel, TupleTableSlot *searchslot,
+ TransactionId oldestxmin,
+ TransactionId *delete_xid,
+ RepOriginId *delete_origin,
+ TimestampTz *delete_time)
+{
+ TupleTableSlot *scanslot;
+ TableScanDesc scan;
+ TypeCacheEntry **eq;
+ Bitmapset *indexbitmap;
+ TupleDesc desc PG_USED_FOR_ASSERTS_ONLY = RelationGetDescr(rel);
+
+ Assert(equalTupleDescs(desc, searchslot->tts_tupleDescriptor));
+
+ *delete_xid = InvalidTransactionId;
+ *delete_origin = InvalidRepOriginId;
+ *delete_time = 0;
+
+ /*
+ * If the relation has a replica identity key or a primary key that is
+ * unusable for locating deleted tuples (see
+ * IsIndexUsableForFindingDeletedTuple), a full table scan becomes
+ * necessary. In such cases, comparing the entire tuple is not required,
+ * since the remote tuple might not include all column values. Instead,
+ * the indexed columns alone are suffcient to identify the target tuple
+ * (see logicalrep_rel_mark_updatable).
+ */
+ indexbitmap = RelationGetIndexAttrBitmap(rel,
+ INDEX_ATTR_BITMAP_IDENTITY_KEY);
+
+ /* fallback to PK if no replica identity */
+ if (!indexbitmap)
+ indexbitmap = RelationGetIndexAttrBitmap(rel,
+ INDEX_ATTR_BITMAP_PRIMARY_KEY);
+
+ eq = palloc0(sizeof(*eq) * searchslot->tts_tupleDescriptor->natts);
+
+ /*
+ * Start a heap scan using SnapshotAny to identify dead tuples that are
+ * not visible under a standard MVCC snapshot. Tuples from transactions
+ * not yet committed or those just committed prior to the scan are
+ * excluded in update_most_recent_deletion_info().
+ */
+ scan = table_beginscan(rel, SnapshotAny, 0, NULL);
+ scanslot = table_slot_create(rel, NULL);
+
+ table_rescan(scan, NULL);
+
+ /* Try to find the tuple */
+ while (table_scan_getnextslot(scan, ForwardScanDirection, scanslot))
+ {
+ if (!tuples_equal(scanslot, searchslot, eq, indexbitmap))
+ continue;
+
+ update_most_recent_deletion_info(scanslot, oldestxmin, delete_xid,
+ delete_time, delete_origin);
+ }
+
+ table_endscan(scan);
+ ExecDropSingleTupleTableSlot(scanslot);
+
+ return *delete_time != 0;
+}
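The timeline from the comment above, as subscriber-side statements (hypothetical table tab(pk, val)):

  DELETE FROM tab WHERE pk = 1;   -- deleted at 9:00
  INSERT INTO tab VALUES (1, 1);  -- re-inserted
  DELETE FROM tab WHERE pk = 1;   -- deleted again at 9:02
  -- a remote update (pk, 1) -> (pk, 2) committed at 9:01 must be matched
  -- against the 9:02 deletion, not the 9:00 one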
+
+/*
+ * Similar to RelationFindDeletedTupleInfoSeq() but using index scan to locate
+ * the deleted tuple.
+ */
+bool
+RelationFindDeletedTupleInfoByIndex(Relation rel, Oid idxoid,
+ TupleTableSlot *searchslot,
+ TransactionId oldestxmin,
+ TransactionId *delete_xid,
+ RepOriginId *delete_origin,
+ TimestampTz *delete_time)
+{
+ Relation idxrel;
+ ScanKeyData skey[INDEX_MAX_KEYS];
+ int skey_attoff;
+ IndexScanDesc scan;
+ TupleTableSlot *scanslot;
+ TypeCacheEntry **eq = NULL;
+ bool isIdxSafeToSkipDuplicates;
+ TupleDesc desc PG_USED_FOR_ASSERTS_ONLY = RelationGetDescr(rel);
+
+ Assert(equalTupleDescs(desc, searchslot->tts_tupleDescriptor));
+ Assert(OidIsValid(idxoid));
+
+ *delete_xid = InvalidTransactionId;
+ *delete_time = 0;
+ *delete_origin = InvalidRepOriginId;
+
+ isIdxSafeToSkipDuplicates = (GetRelationIdentityOrPK(rel) == idxoid);
+
+ scanslot = table_slot_create(rel, NULL);
+
+ idxrel = index_open(idxoid, RowExclusiveLock);
+
+ /* Build scan key. */
+ skey_attoff = build_replindex_scan_key(skey, rel, idxrel, searchslot);
+
+ /*
+ * Start an index scan using SnapshotAny to identify dead tuples that are
+ * not visible under a standard MVCC snapshot. Tuples from transactions
+ * not yet committed or those just committed prior to the scan are
+ * excluded in update_most_recent_deletion_info().
+ */
+ scan = index_beginscan(rel, idxrel, SnapshotAny, NULL, skey_attoff, 0);
+
+ index_rescan(scan, skey, skey_attoff, NULL, 0);
+
+ /* Try to find the tuple */
+ while (index_getnext_slot(scan, ForwardScanDirection, scanslot))
+ {
+ /*
+ * Avoid the expensive equality check if the index is the primary key
+ * or replica identity index.
+ */
+ if (!isIdxSafeToSkipDuplicates)
+ {
+ if (eq == NULL)
+ eq = palloc0(sizeof(*eq) * scanslot->tts_tupleDescriptor->natts);
+
+ if (!tuples_equal(scanslot, searchslot, eq, NULL))
+ continue;
+ }
+
+ update_most_recent_deletion_info(scanslot, oldestxmin, delete_xid,
+ delete_time, delete_origin);
+ }
+
+ index_endscan(scan);
+
+ index_close(idxrel, NoLock);
+
+ ExecDropSingleTupleTableSlot(scanslot);
+
+ return *delete_time != 0;
+}
+
+/*
* Find the tuple that violates the passed unique index (conflictindex).
*
* If the conflicting tuple is found return true, otherwise false.
@@ -670,7 +913,7 @@ ExecSimpleRelationUpdate(ResultRelInfo *resultRelInfo,
resultRelInfo->ri_TrigDesc->trig_update_before_row)
{
if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
- tid, NULL, slot, NULL, NULL))
+ tid, NULL, slot, NULL, NULL, false))
skip_tuple = true; /* "do nothing" */
}
@@ -746,7 +989,7 @@ ExecSimpleRelationDelete(ResultRelInfo *resultRelInfo,
resultRelInfo->ri_TrigDesc->trig_delete_before_row)
{
skip_tuple = !ExecBRDeleteTriggers(estate, epqstate, resultRelInfo,
- tid, NULL, NULL, NULL, NULL);
+ tid, NULL, NULL, NULL, NULL, false);
}
if (!skip_tuple)
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 2bc89bf84dc..7c6c2c1f6e4 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -64,6 +64,7 @@
#include "nodes/nodeFuncs.h"
#include "optimizer/optimizer.h"
#include "rewrite/rewriteHandler.h"
+#include "rewrite/rewriteManip.h"
#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/datum.h"
@@ -1473,7 +1474,8 @@ ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
return ExecBRDeleteTriggers(context->estate, context->epqstate,
resultRelInfo, tupleid, oldtuple,
- epqreturnslot, result, &context->tmfd);
+ epqreturnslot, result, &context->tmfd,
+ context->mtstate->operation == CMD_MERGE);
}
return true;
@@ -2116,7 +2118,8 @@ ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
return ExecBRUpdateTriggers(context->estate, context->epqstate,
resultRelInfo, tupleid, oldtuple, slot,
- result, &context->tmfd);
+ result, &context->tmfd,
+ context->mtstate->operation == CMD_MERGE);
}
return true;
@@ -3735,6 +3738,7 @@ ExecInitMerge(ModifyTableState *mtstate, EState *estate)
switch (action->commandType)
{
case CMD_INSERT:
+ /* INSERT actions always use rootRelInfo */
ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
action->targetList);
@@ -3774,9 +3778,23 @@ ExecInitMerge(ModifyTableState *mtstate, EState *estate)
}
else
{
- /* not partitioned? use the stock relation and slot */
- tgtslot = resultRelInfo->ri_newTupleSlot;
- tgtdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
+ /*
+ * If the MERGE targets an inherited table, we insert
+ * into the root table, so we must initialize its
+ * "new" tuple slot, if not already done, and use its
+ * relation descriptor for the projection.
+ *
+ * For non-inherited tables, rootRelInfo and
+ * resultRelInfo are the same, and the "new" tuple
+ * slot will already have been initialized.
+ */
+ if (rootRelInfo->ri_newTupleSlot == NULL)
+ rootRelInfo->ri_newTupleSlot =
+ table_slot_create(rootRelInfo->ri_RelationDesc,
+ &estate->es_tupleTable);
+
+ tgtslot = rootRelInfo->ri_newTupleSlot;
+ tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
}
action_state->mas_proj =
@@ -3809,6 +3827,114 @@ ExecInitMerge(ModifyTableState *mtstate, EState *estate)
}
}
}
+
+ /*
+ * If the MERGE targets an inherited table, any INSERT actions will use
+ * rootRelInfo, and rootRelInfo will not be in the resultRelInfo array.
+ * Therefore we must initialize its WITH CHECK OPTION constraints and
+ * RETURNING projection, as ExecInitModifyTable did for the resultRelInfo
+ * entries.
+ *
+ * Note that the planner does not build a withCheckOptionList or
+ * returningList for the root relation, but as in ExecInitPartitionInfo,
+ * we can use the first resultRelInfo entry as a reference to calculate
+ * the attno's for the root table.
+ */
+ if (rootRelInfo != mtstate->resultRelInfo &&
+ rootRelInfo->ri_RelationDesc->rd_rel->relkind != RELKIND_PARTITIONED_TABLE &&
+ (mtstate->mt_merge_subcommands & MERGE_INSERT) != 0)
+ {
+ ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
+ Relation rootRelation = rootRelInfo->ri_RelationDesc;
+ Relation firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
+ int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
+ AttrMap *part_attmap = NULL;
+ bool found_whole_row;
+
+ if (node->withCheckOptionLists != NIL)
+ {
+ List *wcoList;
+ List *wcoExprs = NIL;
+
+ /* There should be as many WCO lists as result rels */
+ Assert(list_length(node->withCheckOptionLists) ==
+ list_length(node->resultRelations));
+
+ /*
+ * Use the first WCO list as a reference. In the most common case,
+ * this will be for the same relation as rootRelInfo, and so there
+ * will be no need to adjust its attno's.
+ */
+ wcoList = linitial(node->withCheckOptionLists);
+ if (rootRelation != firstResultRel)
+ {
+ /* Convert any Vars in it to contain the root's attno's */
+ part_attmap =
+ build_attrmap_by_name(RelationGetDescr(rootRelation),
+ RelationGetDescr(firstResultRel),
+ false);
+
+ wcoList = (List *)
+ map_variable_attnos((Node *) wcoList,
+ firstVarno, 0,
+ part_attmap,
+ RelationGetForm(rootRelation)->reltype,
+ &found_whole_row);
+ }
+
+ foreach(lc, wcoList)
+ {
+ WithCheckOption *wco = lfirst_node(WithCheckOption, lc);
+ ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual),
+ &mtstate->ps);
+
+ wcoExprs = lappend(wcoExprs, wcoExpr);
+ }
+
+ rootRelInfo->ri_WithCheckOptions = wcoList;
+ rootRelInfo->ri_WithCheckOptionExprs = wcoExprs;
+ }
+
+ if (node->returningLists != NIL)
+ {
+ List *returningList;
+
+ /* There should be as many returning lists as result rels */
+ Assert(list_length(node->returningLists) ==
+ list_length(node->resultRelations));
+
+ /*
+ * Use the first returning list as a reference. In the most common
+ * case, this will be for the same relation as rootRelInfo, and so
+ * there will be no need to adjust its attno's.
+ */
+ returningList = linitial(node->returningLists);
+ if (rootRelation != firstResultRel)
+ {
+ /* Convert any Vars in it to contain the root's attno's */
+ if (part_attmap == NULL)
+ part_attmap =
+ build_attrmap_by_name(RelationGetDescr(rootRelation),
+ RelationGetDescr(firstResultRel),
+ false);
+
+ returningList = (List *)
+ map_variable_attnos((Node *) returningList,
+ firstVarno, 0,
+ part_attmap,
+ RelationGetForm(rootRelation)->reltype,
+ &found_whole_row);
+ }
+ rootRelInfo->ri_returningList = returningList;
+
+ /* Initialize the RETURNING projection */
+ rootRelInfo->ri_projectReturning =
+ ExecBuildProjectionInfo(returningList, econtext,
+ mtstate->ps.ps_ResultTupleSlot,
+ &mtstate->ps,
+ RelationGetDescr(rootRelation));
+ }
+ }
}
/*
diff --git a/src/backend/executor/nodeTidrangescan.c b/src/backend/executor/nodeTidrangescan.c
index ab2eab9596e..26f7420b64b 100644
--- a/src/backend/executor/nodeTidrangescan.c
+++ b/src/backend/executor/nodeTidrangescan.c
@@ -128,9 +128,11 @@ TidExprListCreate(TidRangeScanState *tidrangestate)
* TidRangeEval
*
* Compute and set node's block and offset range to scan by evaluating
- * the trss_tidexprs. Returns false if we detect the range cannot
+ * node->trss_tidexprs. Returns false if we detect the range cannot
* contain any tuples. Returns true if it's possible for the range to
- * contain tuples.
+ * contain tuples. We don't bother validating that trss_mintid is less
+ * than or equal to trss_maxtid, as the scan_set_tidrange() table AM
+ * function will handle that.
* ----------------------------------------------------------------
*/
static bool
diff --git a/src/backend/jit/llvm/Makefile b/src/backend/jit/llvm/Makefile
index e8c12060b93..68677ba42e1 100644
--- a/src/backend/jit/llvm/Makefile
+++ b/src/backend/jit/llvm/Makefile
@@ -31,7 +31,7 @@ endif
# All files in this directory use LLVM.
CFLAGS += $(LLVM_CFLAGS)
CXXFLAGS += $(LLVM_CXXFLAGS)
-override CPPFLAGS := $(LLVM_CPPFLAGS) $(CPPFLAGS)
+override CPPFLAGS += $(LLVM_CPPFLAGS)
SHLIB_LINK += $(LLVM_LIBS)
# Because this module includes C++ files, we need to use a C++
diff --git a/src/backend/jit/llvm/meson.build b/src/backend/jit/llvm/meson.build
index c8e06dfbe35..805fbd69006 100644
--- a/src/backend/jit/llvm/meson.build
+++ b/src/backend/jit/llvm/meson.build
@@ -53,7 +53,7 @@ llvm_irgen_args = [
if ccache.found()
llvm_irgen_command = ccache
- llvm_irgen_args = [clang.path()] + llvm_irgen_args
+ llvm_irgen_args = [clang.full_path()] + llvm_irgen_args
else
llvm_irgen_command = clang
endif
diff --git a/src/backend/lib/README b/src/backend/lib/README
index f2fb591237d..c28cbe356f0 100644
--- a/src/backend/lib/README
+++ b/src/backend/lib/README
@@ -1,8 +1,6 @@
This directory contains general-purpose data structures, for use anywhere
in the backend:
-binaryheap.c - a binary heap
-
bipartite_match.c - Hopcroft-Karp maximum cardinality algorithm for bipartite graphs
bloomfilter.c - probabilistic, space-efficient set membership testing
@@ -21,8 +19,6 @@ pairingheap.c - a pairing heap
rbtree.c - a red-black tree
-stringinfo.c - an extensible string type
-
Aside from the inherent characteristics of the data structures, there are a
few practical differences between the binary heap and the pairing heap. The
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index 9f4d05ffbd4..4da46666439 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -94,8 +94,16 @@ static int auth_peer(hbaPort *port);
#define PGSQL_PAM_SERVICE "postgresql" /* Service name passed to PAM */
+/* Work around original Solaris' lack of "const" in the conv_proc signature */
+#ifdef _PAM_LEGACY_NONCONST
+#define PG_PAM_CONST
+#else
+#define PG_PAM_CONST const
+#endif
+
static int CheckPAMAuth(Port *port, const char *user, const char *password);
-static int pam_passwd_conv_proc(int num_msg, const struct pam_message **msg,
+static int pam_passwd_conv_proc(int num_msg,
+ PG_PAM_CONST struct pam_message **msg,
struct pam_response **resp, void *appdata_ptr);
static struct pam_conv pam_passw_conv = {
@@ -1917,7 +1925,7 @@ auth_peer(hbaPort *port)
*/
static int
-pam_passwd_conv_proc(int num_msg, const struct pam_message **msg,
+pam_passwd_conv_proc(int num_msg, PG_PAM_CONST struct pam_message **msg,
struct pam_response **resp, void *appdata_ptr)
{
const char *passwd;
diff --git a/src/backend/libpq/be-secure-gssapi.c b/src/backend/libpq/be-secure-gssapi.c
index 3534f0b8111..5d98c58ffa8 100644
--- a/src/backend/libpq/be-secure-gssapi.c
+++ b/src/backend/libpq/be-secure-gssapi.c
@@ -121,9 +121,9 @@ be_gssapi_write(Port *port, const void *ptr, size_t len)
* again, so if it offers a len less than that, something is wrong.
*
* Note: it may seem attractive to report partial write completion once
- * we've successfully sent any encrypted packets. However, that can cause
- * problems for callers; notably, pqPutMsgEnd's heuristic to send only
- * full 8K blocks interacts badly with such a hack. We won't save much,
+ * we've successfully sent any encrypted packets. However, doing that
+ * expands the state space of this processing and has been responsible for
+ * bugs in the past (cf. commit d053a879b). We won't save much,
* typically, by letting callers discard data early, so don't risk it.
*/
if (len < PqGSSSendConsumed)
diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c
index 64ff3ce3d6a..c8b63ef8249 100644
--- a/src/backend/libpq/be-secure-openssl.c
+++ b/src/backend/libpq/be-secure-openssl.c
@@ -1436,10 +1436,10 @@ initialize_ecdh(SSL_CTX *context, bool isServerStart)
*/
ereport(isServerStart ? FATAL : LOG,
errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("failed to set group names specified in ssl_groups: %s",
+ errmsg("could not set group names specified in ssl_groups: %s",
SSLerrmessageExt(ERR_get_error(),
_("No valid groups found"))),
- errhint("Ensure that each group name is spelled correctly and supported by the installed version of OpenSSL"));
+ errhint("Ensure that each group name is spelled correctly and supported by the installed version of OpenSSL."));
return false;
}
#endif
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index 332fad27835..fecee8224d0 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -2873,8 +2873,11 @@ check_ident_usermap(IdentLine *identLine, const char *usermap_name,
!token_has_regexp(identLine->pg_user) &&
(ofs = strstr(identLine->pg_user->string, "\\1")) != NULL)
{
+ const char *repl_str;
+ size_t repl_len;
+ char *old_pg_user;
char *expanded_pg_user;
- int offset;
+ size_t offset;
/* substitution of the first argument requested */
if (matches[1].rm_so < 0)
@@ -2886,18 +2889,33 @@ check_ident_usermap(IdentLine *identLine, const char *usermap_name,
*error_p = true;
return;
}
+ repl_str = system_user + matches[1].rm_so;
+ repl_len = matches[1].rm_eo - matches[1].rm_so;
/*
- * length: original length minus length of \1 plus length of match
- * plus null terminator
+ * It's allowed to have more than one \1 in the string, and we'll
+ * replace them all. But that's pretty unusual so we optimize on
+ * the assumption of only one occurrence, which motivates doing
+ * repeated replacements instead of making two passes over the
+ * string to determine the final length right away.
*/
- expanded_pg_user = palloc0(strlen(identLine->pg_user->string) - 2 + (matches[1].rm_eo - matches[1].rm_so) + 1);
- offset = ofs - identLine->pg_user->string;
- memcpy(expanded_pg_user, identLine->pg_user->string, offset);
- memcpy(expanded_pg_user + offset,
- system_user + matches[1].rm_so,
- matches[1].rm_eo - matches[1].rm_so);
- strcat(expanded_pg_user, ofs + 2);
+ old_pg_user = identLine->pg_user->string;
+ do
+ {
+ /*
+ * length: current length minus length of \1 plus length of
+ * replacement plus null terminator
+ */
+ expanded_pg_user = palloc(strlen(old_pg_user) - 2 + repl_len + 1);
+ /* ofs points into the old_pg_user string at this point */
+ offset = ofs - old_pg_user;
+ memcpy(expanded_pg_user, old_pg_user, offset);
+ memcpy(expanded_pg_user + offset, repl_str, repl_len);
+ strcpy(expanded_pg_user + offset + repl_len, ofs + 2);
+ if (old_pg_user != identLine->pg_user->string)
+ pfree(old_pg_user);
+ old_pg_user = expanded_pg_user;
+ } while ((ofs = strstr(old_pg_user + offset + repl_len, "\\1")) != NULL);
/*
* Mark the token as quoted, so it will only be compared literally
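
To see the replacement loop above in isolation, here is a minimal standalone sketch, assuming only plain libc (strdup/malloc stand in for palloc, error handling is elided, and expand_backref is a hypothetical helper, not a PostgreSQL function):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Replace every "\1" in tmpl with repl; the caller frees the result. */
static char *
expand_backref(const char *tmpl, const char *repl)
{
	char	   *result = strdup(tmpl);
	size_t		repl_len = strlen(repl);
	size_t		offset = 0;
	char	   *ofs;

	while ((ofs = strstr(result + offset, "\\1")) != NULL)
	{
		/* current length minus the 2-byte "\1" plus replacement plus NUL */
		char	   *expanded = malloc(strlen(result) - 2 + repl_len + 1);

		offset = ofs - result;
		memcpy(expanded, result, offset);
		memcpy(expanded + offset, repl, repl_len);
		strcpy(expanded + offset + repl_len, ofs + 2);
		free(result);
		result = expanded;
		offset += repl_len;		/* resume the scan after the replacement */
	}
	return result;
}

int
main(void)
{
	char	   *s = expand_backref("dev_\\1_\\1", "alice");

	puts(s);					/* prints: dev_alice_alice */
	free(s);
	return 0;
}

As in the patched code, each pass makes a single replacement and the scan resumes just past the inserted text, so the common one-occurrence case needs exactly one pass.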
diff --git a/src/backend/libpq/pg_ident.conf.sample b/src/backend/libpq/pg_ident.conf.sample
index f5225f26cdf..8ee6c0ba315 100644
--- a/src/backend/libpq/pg_ident.conf.sample
+++ b/src/backend/libpq/pg_ident.conf.sample
@@ -13,25 +13,25 @@
# user names to their corresponding PostgreSQL user names. Records
# are of the form:
#
-# MAPNAME SYSTEM-USERNAME PG-USERNAME
+# MAPNAME SYSTEM-USERNAME DATABASE-USERNAME
#
# (The uppercase quantities must be replaced by actual values.)
#
# MAPNAME is the (otherwise freely chosen) map name that was used in
# pg_hba.conf. SYSTEM-USERNAME is the detected user name of the
-# client. PG-USERNAME is the requested PostgreSQL user name. The
-# existence of a record specifies that SYSTEM-USERNAME may connect as
-# PG-USERNAME.
+# client. DATABASE-USERNAME is the requested PostgreSQL user name.
+# The existence of a record specifies that SYSTEM-USERNAME may connect
+# as DATABASE-USERNAME.
#
-# If SYSTEM-USERNAME starts with a slash (/), it will be treated as a
-# regular expression. Optionally this can contain a capture (a
-# parenthesized subexpression). The substring matching the capture
-# will be substituted for \1 (backslash-one) if present in
-# PG-USERNAME.
+# If SYSTEM-USERNAME starts with a slash (/), the rest of it will be
+# treated as a regular expression. Optionally this can contain a capture
+# (a parenthesized subexpression). The substring matching the capture
+# will be substituted for \1 (backslash-one) if that appears in
+# DATABASE-USERNAME.
#
-# PG-USERNAME can be "all", a user name, a group name prefixed with "+", or
-# a regular expression (if it starts with a slash (/)). If it is a regular
-# expression, the substring matching with \1 has no effect.
+# DATABASE-USERNAME can be "all", a user name, a group name prefixed with "+",
+# or a regular expression (if it starts with a slash (/)). If it is a regular
+# expression, no substitution for \1 will occur.
#
# Multiple maps may be specified in this file and used by pg_hba.conf.
#
@@ -69,4 +69,4 @@
# Put your actual configuration here
# ----------------------------------
-# MAPNAME SYSTEM-USERNAME PG-USERNAME
+# MAPNAME SYSTEM-USERNAME DATABASE-USERNAME
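
For example (hypothetical map name and domain), a regexp entry that uses the capture could look like this:

# MAPNAME       SYSTEM-USERNAME                 DATABASE-USERNAME
# devmap        /^(.*)@dev\.example\.com$       \1

With such a map, the detected system user "alice@dev.example.com" would be allowed to connect as the PostgreSQL user "alice".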
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index e5171467de1..25f739a6a17 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -858,7 +858,6 @@ RemoveSocketFiles(void)
(void) unlink(sock_path);
}
/* Since we're about to exit, no need to reclaim storage */
- sock_paths = NIL;
}
diff --git a/src/backend/libpq/pqmq.c b/src/backend/libpq/pqmq.c
index f1a08bc32ca..5f39949a367 100644
--- a/src/backend/libpq/pqmq.c
+++ b/src/backend/libpq/pqmq.c
@@ -23,7 +23,7 @@
#include "tcop/tcopprot.h"
#include "utils/builtins.h"
-static shm_mq_handle *pq_mq_handle;
+static shm_mq_handle *pq_mq_handle = NULL;
static bool pq_mq_busy = false;
static pid_t pq_mq_parallel_leader_pid = 0;
static ProcNumber pq_mq_parallel_leader_proc_number = INVALID_PROC_NUMBER;
@@ -66,7 +66,11 @@ pq_redirect_to_shm_mq(dsm_segment *seg, shm_mq_handle *mqh)
static void
pq_cleanup_redirect_to_shm_mq(dsm_segment *seg, Datum arg)
{
- pq_mq_handle = NULL;
+ if (pq_mq_handle != NULL)
+ {
+ pfree(pq_mq_handle);
+ pq_mq_handle = NULL;
+ }
whereToSendOutput = DestNone;
}
@@ -131,8 +135,11 @@ mq_putmessage(char msgtype, const char *s, size_t len)
if (pq_mq_busy)
{
if (pq_mq_handle != NULL)
+ {
shm_mq_detach(pq_mq_handle);
- pq_mq_handle = NULL;
+ pfree(pq_mq_handle);
+ pq_mq_handle = NULL;
+ }
return EOF;
}
@@ -152,8 +159,6 @@ mq_putmessage(char msgtype, const char *s, size_t len)
iov[1].data = s;
iov[1].len = len;
- Assert(pq_mq_handle != NULL);
-
for (;;)
{
/*
@@ -161,6 +166,7 @@ mq_putmessage(char msgtype, const char *s, size_t len)
* that the shared memory value is updated before we send the parallel
* message signal right after this.
*/
+ Assert(pq_mq_handle != NULL);
result = shm_mq_sendv(pq_mq_handle, iov, 2, true, true);
if (pq_mq_parallel_leader_pid != 0)
diff --git a/src/backend/main/main.c b/src/backend/main/main.c
index 7d63cf94a6b..bdcb5e4f261 100644
--- a/src/backend/main/main.c
+++ b/src/backend/main/main.c
@@ -125,13 +125,17 @@ main(int argc, char *argv[])
set_pglocale_pgservice(argv[0], PG_TEXTDOMAIN("postgres"));
/*
- * In the postmaster, absorb the environment values for LC_COLLATE and
- * LC_CTYPE. Individual backends will change these later to settings
- * taken from pg_database, but the postmaster cannot do that. If we leave
- * these set to "C" then message localization might not work well in the
- * postmaster.
+ * Collation is handled by pg_locale.c, and the behavior is dependent on
+ * the provider. strcoll(), etc., should not be called directly.
+ */
+ init_locale("LC_COLLATE", LC_COLLATE, "C");
+
+ /*
+ * In the postmaster, absorb the environment value for LC_CTYPE.
+ * Individual backends will change it later to pg_database.datctype, but
+ * the postmaster cannot do that. If we leave it set to "C" then message
+ * localization might not work well in the postmaster.
*/
- init_locale("LC_COLLATE", LC_COLLATE, "");
init_locale("LC_CTYPE", LC_CTYPE, "");
/*
diff --git a/src/backend/nodes/gen_node_support.pl b/src/backend/nodes/gen_node_support.pl
index c8595109b0e..9ecddb14231 100644
--- a/src/backend/nodes/gen_node_support.pl
+++ b/src/backend/nodes/gen_node_support.pl
@@ -1329,7 +1329,7 @@ _jumble${n}(JumbleState *jstate, Node *node)
# Node type. Squash constants if requested.
if ($query_jumble_squash)
{
- print $jff "\tJUMBLE_ELEMENTS($f);\n"
+ print $jff "\tJUMBLE_ELEMENTS($f, node);\n"
unless $query_jumble_ignore;
}
else
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index 25e08ba3426..eaf391fc2ab 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -653,6 +653,8 @@ _outA_Expr(StringInfo str, const A_Expr *node)
WRITE_NODE_FIELD(lexpr);
WRITE_NODE_FIELD(rexpr);
+ WRITE_LOCATION_FIELD(rexpr_list_start);
+ WRITE_LOCATION_FIELD(rexpr_list_end);
WRITE_LOCATION_FIELD(location);
}
diff --git a/src/backend/nodes/queryjumblefuncs.c b/src/backend/nodes/queryjumblefuncs.c
index ac3cb3d9caf..31f97151977 100644
--- a/src/backend/nodes/queryjumblefuncs.c
+++ b/src/backend/nodes/queryjumblefuncs.c
@@ -21,6 +21,11 @@
* tree(s) generated from the query. The executor can then use this value
* to blame query costs on the proper queryId.
*
+ * Arrays of two or more constants and PARAM_EXTERN parameters are "squashed"
+ * and contribute only once to the jumble. This has the effect that queries
+ * that differ only in the length of such lists have the same queryId.
+ *
* Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
@@ -61,11 +66,13 @@ static void AppendJumble(JumbleState *jstate,
const unsigned char *value, Size size);
static void FlushPendingNulls(JumbleState *jstate);
static void RecordConstLocation(JumbleState *jstate,
- int location, bool squashed);
+ bool extern_param,
+ int location, int len);
static void _jumbleNode(JumbleState *jstate, Node *node);
-static void _jumbleElements(JumbleState *jstate, List *elements);
-static void _jumbleA_Const(JumbleState *jstate, Node *node);
static void _jumbleList(JumbleState *jstate, Node *node);
+static void _jumbleElements(JumbleState *jstate, List *elements, Node *node);
+static void _jumbleParam(JumbleState *jstate, Node *node);
+static void _jumbleA_Const(JumbleState *jstate, Node *node);
static void _jumbleVariableSetStmt(JumbleState *jstate, Node *node);
static void _jumbleRangeTblEntry_eref(JumbleState *jstate,
RangeTblEntry *rte,
@@ -185,6 +192,7 @@ InitJumble(void)
jstate->clocations_count = 0;
jstate->highest_extern_param_id = 0;
jstate->pending_nulls = 0;
+ jstate->has_squashed_lists = false;
#ifdef USE_ASSERT_CHECKING
jstate->total_jumble_len = 0;
#endif
@@ -207,6 +215,10 @@ DoJumble(JumbleState *jstate, Node *node)
if (jstate->pending_nulls > 0)
FlushPendingNulls(jstate);
+ /* If we found a squashed list, reset highest_extern_param_id */
+ if (jstate->has_squashed_lists)
+ jstate->highest_extern_param_id = 0;
+
/* Process the jumble buffer and produce the hash value */
return DatumGetInt64(hash_any_extended(jstate->jumble,
jstate->jumble_len,
@@ -373,15 +385,17 @@ FlushPendingNulls(JumbleState *jstate)
/*
- * Record location of constant within query string of query tree that is
- * currently being walked.
+ * Record the location of some kind of constant within a query string.
+ * These are not only bare constants but also expressions that ultimately
+ * constitute a constant, such as those inside casts and simple function
+ * calls; if extern_param, then it corresponds to a PARAM_EXTERN Param.
*
- * 'squashed' signals that the constant represents the first or the last
- * element in a series of merged constants, and everything but the first/last
- * element contributes nothing to the jumble hash.
+ * If length is -1, it indicates a single such constant element. If
+ * it's a positive integer, it indicates the length of a squashable
+ * list of them.
*/
static void
-RecordConstLocation(JumbleState *jstate, int location, bool squashed)
+RecordConstLocation(JumbleState *jstate, bool extern_param, int location, int len)
{
/* -1 indicates unknown or undefined location */
if (location >= 0)
@@ -396,9 +410,15 @@ RecordConstLocation(JumbleState *jstate, int location, bool squashed)
sizeof(LocationLen));
}
jstate->clocations[jstate->clocations_count].location = location;
- /* initialize lengths to -1 to simplify third-party module usage */
- jstate->clocations[jstate->clocations_count].squashed = squashed;
- jstate->clocations[jstate->clocations_count].length = -1;
+
+ /*
+ * Lengths are either positive integers (indicating a squashable
+ * list), or -1.
+ */
+ Assert(len > 0 || len == -1);
+ jstate->clocations[jstate->clocations_count].length = len;
+ jstate->clocations[jstate->clocations_count].squashed = (len > -1);
+ jstate->clocations[jstate->clocations_count].extern_param = extern_param;
jstate->clocations_count++;
}
}
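
To illustrate how a reader of the locations array might use these fields, here is a minimal standalone sketch with a stand-in struct and made-up offsets (this is not the real pg_stat_statements code):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the per-location fields recorded above */
typedef struct LocationLen
{
	int		location;		/* start offset in the query text */
	int		length;			/* -1, or length of a squashable list */
	bool	squashed;
	bool	extern_param;
} LocationLen;

int
main(void)
{
	/* e.g. "WHERE x IN (1, 2, 3) AND y = 5": one squashed list, one const */
	LocationLen locs[] = {
		{.location = 12, .length = 7, .squashed = true},
		{.location = 29, .length = -1, .squashed = false},
	};

	for (int i = 0; i < 2; i++)
	{
		if (locs[i].squashed)
			printf("bytes %d..%d: replace the whole list with one placeholder\n",
				   locs[i].location, locs[i].location + locs[i].length);
		else
			printf("byte %d: replace a single constant/parameter\n",
				   locs[i].location);
	}
	return 0;
}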
@@ -407,47 +427,74 @@ RecordConstLocation(JumbleState *jstate, int location, bool squashed)
* Subroutine for _jumbleElements: Verify a few simple cases where we can
* deduce that the expression is a constant:
*
- * - Ignore a possible wrapping RelabelType and CoerceViaIO.
- * - If it's a FuncExpr, check that the function is an implicit
+ * - See through any wrapping RelabelType and CoerceViaIO layers.
+ * - If it's a FuncExpr, check that the function is a builtin
* cast and its arguments are Const.
- * - Otherwise test if the expression is a simple Const.
+ * - Otherwise test if the expression is a simple Const or a
+ * PARAM_EXTERN param.
*/
static bool
-IsSquashableConst(Node *element)
+IsSquashableConstant(Node *element)
{
- if (IsA(element, RelabelType))
- element = (Node *) ((RelabelType *) element)->arg;
-
- if (IsA(element, CoerceViaIO))
- element = (Node *) ((CoerceViaIO *) element)->arg;
-
- if (IsA(element, FuncExpr))
+restart:
+ switch (nodeTag(element))
{
- FuncExpr *func = (FuncExpr *) element;
- ListCell *temp;
+ case T_RelabelType:
+ /* Unwrap RelabelType */
+ element = (Node *) ((RelabelType *) element)->arg;
+ goto restart;
- if (func->funcformat != COERCE_IMPLICIT_CAST &&
- func->funcformat != COERCE_EXPLICIT_CAST)
- return false;
+ case T_CoerceViaIO:
+ /* Unwrap CoerceViaIO */
+ element = (Node *) ((CoerceViaIO *) element)->arg;
+ goto restart;
- if (func->funcid > FirstGenbkiObjectId)
- return false;
+ case T_Const:
+ return true;
- foreach(temp, func->args)
- {
- Node *arg = lfirst(temp);
+ case T_Param:
+ return castNode(Param, element)->paramkind == PARAM_EXTERN;
- if (!IsA(arg, Const)) /* XXX we could recurse here instead */
- return false;
- }
+ case T_FuncExpr:
+ {
+ FuncExpr *func = (FuncExpr *) element;
+ ListCell *temp;
- return true;
- }
+ if (func->funcformat != COERCE_IMPLICIT_CAST &&
+ func->funcformat != COERCE_EXPLICIT_CAST)
+ return false;
- if (!IsA(element, Const))
- return false;
+ if (func->funcid > FirstGenbkiObjectId)
+ return false;
- return true;
+ /*
+ * We can check function arguments recursively, being careful
+ * about recursing too deep. At each recursion level it's
+ * enough to test the stack on the first element. (Note that
+ * I wasn't able to hit this without bloating the stack
+ * artificially in this function: the parser errors out before
+ * stack size becomes a problem here.)
+ */
+ foreach(temp, func->args)
+ {
+ Node *arg = lfirst(temp);
+
+ if (!IsA(arg, Const))
+ {
+ if (foreach_current_index(temp) == 0 &&
+ stack_is_too_deep())
+ return false;
+ else if (!IsSquashableConstant(arg))
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ default:
+ return false;
+ }
}
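
Under these rules, a list such as IN (1, $1, 2::bigint) is squashable: a Const, a PARAM_EXTERN Param, and (assuming 2::bigint is represented as a builtin-cast FuncExpr over a Const, as int4-to-int8 casts are) a constant-yielding cast. A list such as IN (1, x) is not, since a plain Var matches none of the accepted cases.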
/*
@@ -457,39 +504,33 @@ IsSquashableConst(Node *element)
* Return value indicates if squashing is possible.
*
* Note that this function searches only for explicit Const nodes with
- * possibly very simple decorations on top, and does not try to simplify
- * expressions.
+ * possibly very simple decorations on top and PARAM_EXTERN parameters,
+ * and does not try to simplify expressions.
*/
static bool
-IsSquashableConstList(List *elements, Node **firstExpr, Node **lastExpr)
+IsSquashableConstantList(List *elements)
{
ListCell *temp;
- /*
- * If squashing is disabled, or the list is too short, we don't try to
- * squash it.
- */
+ /* If the list is too short, we don't try to squash it. */
if (list_length(elements) < 2)
return false;
foreach(temp, elements)
{
- if (!IsSquashableConst(lfirst(temp)))
+ if (!IsSquashableConstant(lfirst(temp)))
return false;
}
- *firstExpr = linitial(elements);
- *lastExpr = llast(elements);
-
return true;
}
#define JUMBLE_NODE(item) \
_jumbleNode(jstate, (Node *) expr->item)
-#define JUMBLE_ELEMENTS(list) \
- _jumbleElements(jstate, (List *) expr->list)
+#define JUMBLE_ELEMENTS(list, node) \
+ _jumbleElements(jstate, (List *) expr->list, node)
#define JUMBLE_LOCATION(location) \
- RecordConstLocation(jstate, expr->location, false)
+ RecordConstLocation(jstate, false, expr->location, -1)
#define JUMBLE_FIELD(item) \
do { \
if (sizeof(expr->item) == 8) \
@@ -516,42 +557,6 @@ do { \
#include "queryjumblefuncs.funcs.c"
-/*
- * We jumble lists of constant elements as one individual item regardless
- * of how many elements are in the list. This means different queries
- * jumble to the same query_id, if the only difference is the number of
- * elements in the list.
- */
-static void
-_jumbleElements(JumbleState *jstate, List *elements)
-{
- Node *first,
- *last;
-
- if (IsSquashableConstList(elements, &first, &last))
- {
- /*
- * If this list of elements is squashable, keep track of the location
- * of its first and last elements. When reading back the locations
- * array, we'll see two consecutive locations with ->squashed set to
- * true, indicating the location of initial and final elements of this
- * list.
- *
- * For the limited set of cases we support now (implicit coerce via
- * FuncExpr, Const) it's fine to use exprLocation of the 'last'
- * expression, but if more complex composite expressions are to be
- * supported (e.g., OpExpr or FuncExpr as an explicit call), more
- * sophisticated tracking will be needed.
- */
- RecordConstLocation(jstate, exprLocation(first), true);
- RecordConstLocation(jstate, exprLocation(last), true);
- }
- else
- {
- _jumbleNode(jstate, (Node *) elements);
- }
-}
-
static void
_jumbleNode(JumbleState *jstate, Node *node)
{
@@ -593,26 +598,6 @@ _jumbleNode(JumbleState *jstate, Node *node)
break;
}
- /* Special cases to handle outside the automated code */
- switch (nodeTag(expr))
- {
- case T_Param:
- {
- Param *p = (Param *) node;
-
- /*
- * Update the highest Param id seen, in order to start
- * normalization correctly.
- */
- if (p->paramkind == PARAM_EXTERN &&
- p->paramid > jstate->highest_extern_param_id)
- jstate->highest_extern_param_id = p->paramid;
- }
- break;
- default:
- break;
- }
-
/* Ensure we added something to the jumble buffer */
Assert(jstate->total_jumble_len > prev_jumble_len);
}
@@ -648,6 +633,79 @@ _jumbleList(JumbleState *jstate, Node *node)
}
}
+/*
+ * We try to jumble lists of expressions as one individual item regardless
+ * of how many elements are in the list. This is known as squashing, which
+ * results in different queries jumbling to the same query_id if the only
+ * difference is the number of elements in the list.
+ *
+ * We allow constants and PARAM_EXTERN parameters to be squashed. To normalize
+ * such queries, we use the start and end locations of the list of elements.
+ */
+static void
+_jumbleElements(JumbleState *jstate, List *elements, Node *node)
+{
+ bool normalize_list = false;
+
+ if (IsSquashableConstantList(elements))
+ {
+ if (IsA(node, ArrayExpr))
+ {
+ ArrayExpr *aexpr = (ArrayExpr *) node;
+
+ if (aexpr->list_start > 0 && aexpr->list_end > 0)
+ {
+ RecordConstLocation(jstate,
+ false,
+ aexpr->list_start + 1,
+ (aexpr->list_end - aexpr->list_start) - 1);
+ normalize_list = true;
+ jstate->has_squashed_lists = true;
+ }
+ }
+ }
+
+ if (!normalize_list)
+ {
+ _jumbleNode(jstate, (Node *) elements);
+ }
+}
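
For example, given ARRAY[1, 2, 3], and assuming list_start and list_end record the offsets of the opening and closing brackets, the recorded span starts one byte past the '[' and its length, (list_end - list_start) - 1, covers exactly "1, 2, 3" with neither bracket included.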
+
+/*
+ * We store the highest param ID of extern params. This can later be used
+ * to start the numbering of the placeholder for squashed lists.
+ */
+static void
+_jumbleParam(JumbleState *jstate, Node *node)
+{
+ Param *expr = (Param *) node;
+
+ JUMBLE_FIELD(paramkind);
+ JUMBLE_FIELD(paramid);
+ JUMBLE_FIELD(paramtype);
+ /* paramtypmode and paramcollid are ignored */
+
+ if (expr->paramkind == PARAM_EXTERN)
+ {
+ /*
+ * At this point, only external parameter locations outside of
+ * squashable lists will be recorded.
+ */
+ RecordConstLocation(jstate, true, expr->location, -1);
+
+ /*
+ * Update the highest Param id seen, in order to start normalization
+ * correctly.
+ *
+ * Note: This value is reset at the end of jumbling if there exists a
+ * squashable list. See the comment in the definition of JumbleState.
+ */
+ if (expr->paramid > jstate->highest_extern_param_id)
+ jstate->highest_extern_param_id = expr->paramid;
+ }
+}
+
static void
_jumbleA_Const(JumbleState *jstate, Node *node)
{
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index 8c90ab54af8..48b5d13b9b6 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -526,6 +526,8 @@ _readA_Expr(void)
READ_NODE_FIELD(lexpr);
READ_NODE_FIELD(rexpr);
+ READ_LOCATION_FIELD(rexpr_list_start);
+ READ_LOCATION_FIELD(rexpr_list_end);
READ_LOCATION_FIELD(location);
READ_DONE();
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 3d44815ed5a..344a3188317 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -2247,7 +2247,7 @@ append_nonpartial_cost(List *subpaths, int numpaths, int parallel_workers)
* Determines and returns the cost of an Append node.
*/
void
-cost_append(AppendPath *apath)
+cost_append(AppendPath *apath, PlannerInfo *root)
{
ListCell *l;
@@ -2309,26 +2309,52 @@ cost_append(AppendPath *apath)
foreach(l, apath->subpaths)
{
Path *subpath = (Path *) lfirst(l);
- Path sort_path; /* dummy for result of cost_sort */
+ int presorted_keys;
+ Path sort_path; /* dummy for result of
+ * cost_sort/cost_incremental_sort */
- if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
+ if (!pathkeys_count_contained_in(pathkeys, subpath->pathkeys,
+ &presorted_keys))
{
/*
* We'll need to insert a Sort node, so include costs for
- * that. We can use the parent's LIMIT if any, since we
+ * that. We choose to use incremental sort if it is
+ * enabled and there are presorted keys; otherwise we use
+ * full sort.
+ *
+ * We can use the parent's LIMIT if any, since we
* certainly won't pull more than that many tuples from
* any child.
*/
- cost_sort(&sort_path,
- NULL, /* doesn't currently need root */
- pathkeys,
- subpath->disabled_nodes,
- subpath->total_cost,
- subpath->rows,
- subpath->pathtarget->width,
- 0.0,
- work_mem,
- apath->limit_tuples);
+ if (enable_incremental_sort && presorted_keys > 0)
+ {
+ cost_incremental_sort(&sort_path,
+ root,
+ pathkeys,
+ presorted_keys,
+ subpath->disabled_nodes,
+ subpath->startup_cost,
+ subpath->total_cost,
+ subpath->rows,
+ subpath->pathtarget->width,
+ 0.0,
+ work_mem,
+ apath->limit_tuples);
+ }
+ else
+ {
+ cost_sort(&sort_path,
+ root,
+ pathkeys,
+ subpath->disabled_nodes,
+ subpath->total_cost,
+ subpath->rows,
+ subpath->pathtarget->width,
+ 0.0,
+ work_mem,
+ apath->limit_tuples);
+ }
+
subpath = &sort_path;
}
@@ -2546,13 +2572,13 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
Cost input_startup_cost = mpath->subpath->startup_cost;
Cost input_total_cost = mpath->subpath->total_cost;
double tuples = mpath->subpath->rows;
- double calls = mpath->calls;
+ Cardinality est_calls = mpath->est_calls;
int width = mpath->subpath->pathtarget->width;
double hash_mem_bytes;
double est_entry_bytes;
- double est_cache_entries;
- double ndistinct;
+ Cardinality est_cache_entries;
+ Cardinality ndistinct;
double evict_ratio;
double hit_ratio;
Cost startup_cost;
@@ -2578,7 +2604,7 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
/* estimate on the distinct number of parameter values */
- ndistinct = estimate_num_groups(root, mpath->param_exprs, calls, NULL,
+ ndistinct = estimate_num_groups(root, mpath->param_exprs, est_calls, NULL,
&estinfo);
/*
@@ -2590,7 +2616,10 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
* certainly mean a MemoizePath will never survive add_path().
*/
if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
- ndistinct = calls;
+ ndistinct = est_calls;
+
+ /* Remember the ndistinct estimate for EXPLAIN */
+ mpath->est_unique_keys = ndistinct;
/*
* Since we've already estimated the maximum number of entries we can
@@ -2618,9 +2647,12 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
* must look at how many scans are estimated in total for this node and
* how many of those scans we expect to get a cache hit.
*/
- hit_ratio = ((calls - ndistinct) / calls) *
+ hit_ratio = ((est_calls - ndistinct) / est_calls) *
(est_cache_entries / Max(ndistinct, est_cache_entries));
+ /* Remember the hit ratio estimate for EXPLAIN */
+ mpath->est_hit_ratio = hit_ratio;
+
Assert(hit_ratio >= 0 && hit_ratio <= 1.0);
/*
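
To make the estimate concrete: with est_calls = 1000, ndistinct = 100, and est_cache_entries = 50, the first factor is (1000 - 100) / 1000 = 0.9 and the second is 50 / 100 = 0.5, giving an estimated cache hit ratio of 0.45.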
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index 26f0336f1e4..ebedc5574ca 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -154,13 +154,17 @@ add_paths_to_joinrel(PlannerInfo *root,
/*
* See if the inner relation is provably unique for this outer rel.
*
- * We have some special cases: for JOIN_SEMI and JOIN_ANTI, it doesn't
- * matter since the executor can make the equivalent optimization anyway;
- * we need not expend planner cycles on proofs. For JOIN_UNIQUE_INNER, we
- * must be considering a semijoin whose inner side is not provably unique
- * (else reduce_unique_semijoins would've simplified it), so there's no
- * point in calling innerrel_is_unique. However, if the LHS covers all of
- * the semijoin's min_lefthand, then it's appropriate to set inner_unique
+ * We have some special cases: for JOIN_SEMI, it doesn't matter since the
+ * executor can make the equivalent optimization anyway. It also doesn't
+ * help enable use of Memoize, since a semijoin with a provably unique
+ * inner side should have been reduced to an inner join in that case.
+ * Therefore, we need not expend planner cycles on proofs. (For
+ * JOIN_ANTI, although it doesn't help the executor for the same reason,
+ * it can benefit Memoize paths.) For JOIN_UNIQUE_INNER, we must be
+ * considering a semijoin whose inner side is not provably unique (else
+ * reduce_unique_semijoins would've simplified it), so there's no point in
+ * calling innerrel_is_unique. However, if the LHS covers all of the
+ * semijoin's min_lefthand, then it's appropriate to set inner_unique
* because the path produced by create_unique_path will be unique relative
* to the LHS. (If we have an LHS that's only part of the min_lefthand,
* that is *not* true.) For JOIN_UNIQUE_OUTER, pass JOIN_INNER to avoid
@@ -169,12 +173,6 @@ add_paths_to_joinrel(PlannerInfo *root,
switch (jointype)
{
case JOIN_SEMI:
- case JOIN_ANTI:
-
- /*
- * XXX it may be worth proving this to allow a Memoize to be
- * considered for Nested Loop Semi/Anti Joins.
- */
extra.inner_unique = false; /* well, unproven */
break;
case JOIN_UNIQUE_INNER:
@@ -715,16 +713,21 @@ get_memoize_path(PlannerInfo *root, RelOptInfo *innerrel,
return NULL;
/*
- * Currently we don't do this for SEMI and ANTI joins unless they're
- * marked as inner_unique. This is because nested loop SEMI/ANTI joins
- * don't scan the inner node to completion, which will mean memoize cannot
- * mark the cache entry as complete.
- *
- * XXX Currently we don't attempt to mark SEMI/ANTI joins as inner_unique
- * = true. Should we? See add_paths_to_joinrel()
+ * Currently we don't do this for SEMI and ANTI joins, because nested loop
+ * SEMI/ANTI joins don't scan the inner node to completion, which means
+ * memoize cannot mark the cache entry as complete. Nor can we mark the
+ * cache entry as complete after fetching the first inner tuple, because
+ * if that tuple and the current outer tuple don't satisfy the join
+ * clauses, a second inner tuple that satisfies the parameters would find
+ * the cache entry already marked as complete. The only exception is when
+ * the inner relation is provably unique, as in that case, there won't be
+ * a second matching tuple and we can safely mark the cache entry as
+ * complete after fetching the first inner tuple. Note that in such
+ * cases, the SEMI join should have been reduced to an inner join by
+ * reduce_unique_semijoins.
*/
- if (!extra->inner_unique && (jointype == JOIN_SEMI ||
- jointype == JOIN_ANTI))
+ if ((jointype == JOIN_SEMI || jointype == JOIN_ANTI) &&
+ !extra->inner_unique)
return NULL;
/*
@@ -876,16 +879,13 @@ try_nestloop_path(PlannerInfo *root,
/*
* Check to see if proposed path is still parameterized, and reject if the
* parameterization wouldn't be sensible --- unless allow_star_schema_join
- * says to allow it anyway. Also, we must reject if have_dangerous_phv
- * doesn't like the look of it, which could only happen if the nestloop is
- * still parameterized.
+ * says to allow it anyway.
*/
required_outer = calc_nestloop_required_outer(outerrelids, outer_paramrels,
innerrelids, inner_paramrels);
if (required_outer &&
- ((!bms_overlap(required_outer, extra->param_source_rels) &&
- !allow_star_schema_join(root, outerrelids, inner_paramrels)) ||
- have_dangerous_phv(root, outerrelids, inner_paramrels)))
+ !bms_overlap(required_outer, extra->param_source_rels) &&
+ !allow_star_schema_join(root, outerrelids, inner_paramrels))
{
/* Waste no memory when we reject a path here */
bms_free(required_outer);
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index 60d65762b5d..aad41b94009 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -565,9 +565,6 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
* Also, if the lateral reference is only indirect, we should reject
* the join; whatever rel(s) the reference chain goes through must be
* joined to first.
- *
- * Another case that might keep us from building a valid plan is the
- * implementation restriction described by have_dangerous_phv().
*/
lateral_fwd = bms_overlap(rel1->relids, rel2->lateral_relids);
lateral_rev = bms_overlap(rel2->relids, rel1->lateral_relids);
@@ -584,9 +581,6 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
/* check there is a direct reference from rel2 to rel1 */
if (!bms_overlap(rel1->relids, rel2->direct_lateral_relids))
return false; /* only indirect refs, so reject */
- /* check we won't have a dangerous PHV */
- if (have_dangerous_phv(root, rel1->relids, rel2->lateral_relids))
- return false; /* might be unable to handle required PHV */
}
else if (lateral_rev)
{
@@ -599,9 +593,6 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
/* check there is a direct reference from rel1 to rel2 */
if (!bms_overlap(rel2->relids, rel1->direct_lateral_relids))
return false; /* only indirect refs, so reject */
- /* check we won't have a dangerous PHV */
- if (have_dangerous_phv(root, rel2->relids, rel1->lateral_relids))
- return false; /* might be unable to handle required PHV */
}
/*
@@ -1279,57 +1270,6 @@ has_legal_joinclause(PlannerInfo *root, RelOptInfo *rel)
/*
- * There's a pitfall for creating parameterized nestloops: suppose the inner
- * rel (call it A) has a parameter that is a PlaceHolderVar, and that PHV's
- * minimum eval_at set includes the outer rel (B) and some third rel (C).
- * We might think we could create a B/A nestloop join that's parameterized by
- * C. But we would end up with a plan in which the PHV's expression has to be
- * evaluated as a nestloop parameter at the B/A join; and the executor is only
- * set up to handle simple Vars as NestLoopParams. Rather than add complexity
- * and overhead to the executor for such corner cases, it seems better to
- * forbid the join. (Note that we can still make use of A's parameterized
- * path with pre-joined B+C as the outer rel. have_join_order_restriction()
- * ensures that we will consider making such a join even if there are not
- * other reasons to do so.)
- *
- * So we check whether any PHVs used in the query could pose such a hazard.
- * We don't have any simple way of checking whether a risky PHV would actually
- * be used in the inner plan, and the case is so unusual that it doesn't seem
- * worth working very hard on it.
- *
- * This needs to be checked in two places. If the inner rel's minimum
- * parameterization would trigger the restriction, then join_is_legal() should
- * reject the join altogether, because there will be no workable paths for it.
- * But joinpath.c has to check again for every proposed nestloop path, because
- * the inner path might have more than the minimum parameterization, causing
- * some PHV to be dangerous for it that otherwise wouldn't be.
- */
-bool
-have_dangerous_phv(PlannerInfo *root,
- Relids outer_relids, Relids inner_params)
-{
- ListCell *lc;
-
- foreach(lc, root->placeholder_list)
- {
- PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(lc);
-
- if (!bms_is_subset(phinfo->ph_eval_at, inner_params))
- continue; /* ignore, could not be a nestloop param */
- if (!bms_overlap(phinfo->ph_eval_at, outer_relids))
- continue; /* ignore, not relevant to this join */
- if (bms_is_subset(phinfo->ph_eval_at, outer_relids))
- continue; /* safe, it can be eval'd within outerrel */
- /* Otherwise, it's potentially unsafe, so reject the join */
- return true;
- }
-
- /* OK to perform the join */
- return false;
-}
-
-
-/*
* is_dummy_rel --- has relation been proven empty?
*/
bool
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 4ad30b7627e..bfefc7dbea1 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -284,7 +284,10 @@ static Material *make_material(Plan *lefttree);
static Memoize *make_memoize(Plan *lefttree, Oid *hashoperators,
Oid *collations, List *param_exprs,
bool singlerow, bool binary_mode,
- uint32 est_entries, Bitmapset *keyparamids);
+ uint32 est_entries, Bitmapset *keyparamids,
+ Cardinality est_calls,
+ Cardinality est_unique_keys,
+ double est_hit_ratio);
static WindowAgg *make_windowagg(List *tlist, WindowClause *wc,
int partNumCols, AttrNumber *partColIdx, Oid *partOperators, Oid *partCollations,
int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, Oid *ordCollations,
@@ -1318,6 +1321,7 @@ create_append_plan(PlannerInfo *root, AppendPath *best_path, int flags)
Oid *sortOperators;
Oid *collations;
bool *nullsFirst;
+ int presorted_keys;
/*
* Compute sort column info, and adjust subplan's tlist as needed.
@@ -1353,14 +1357,38 @@ create_append_plan(PlannerInfo *root, AppendPath *best_path, int flags)
numsortkeys * sizeof(bool)) == 0);
/* Now, insert a Sort node if subplan isn't sufficiently ordered */
- if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
+ if (!pathkeys_count_contained_in(pathkeys, subpath->pathkeys,
+ &presorted_keys))
{
- Sort *sort = make_sort(subplan, numsortkeys,
+ Plan *sort_plan;
+
+ /*
+ * We choose to use incremental sort if it is enabled and
+ * there are presorted keys; otherwise we use full sort.
+ */
+ if (enable_incremental_sort && presorted_keys > 0)
+ {
+ sort_plan = (Plan *)
+ make_incrementalsort(subplan, numsortkeys, presorted_keys,
sortColIdx, sortOperators,
collations, nullsFirst);
- label_sort_with_costsize(root, sort, best_path->limit_tuples);
- subplan = (Plan *) sort;
+ label_incrementalsort_with_costsize(root,
+ (IncrementalSort *) sort_plan,
+ pathkeys,
+ best_path->limit_tuples);
+ }
+ else
+ {
+ sort_plan = (Plan *) make_sort(subplan, numsortkeys,
+ sortColIdx, sortOperators,
+ collations, nullsFirst);
+
+ label_sort_with_costsize(root, (Sort *) sort_plan,
+ best_path->limit_tuples);
+ }
+
+ subplan = sort_plan;
}
}
@@ -1491,6 +1519,7 @@ create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path,
Oid *sortOperators;
Oid *collations;
bool *nullsFirst;
+ int presorted_keys;
/* Build the child plan */
/* Must insist that all children return the same tlist */
@@ -1525,14 +1554,38 @@ create_merge_append_plan(PlannerInfo *root, MergeAppendPath *best_path,
numsortkeys * sizeof(bool)) == 0);
/* Now, insert a Sort node if subplan isn't sufficiently ordered */
- if (!pathkeys_contained_in(pathkeys, subpath->pathkeys))
+ if (!pathkeys_count_contained_in(pathkeys, subpath->pathkeys,
+ &presorted_keys))
{
- Sort *sort = make_sort(subplan, numsortkeys,
+ Plan *sort_plan;
+
+ /*
+ * We choose to use incremental sort if it is enabled and there
+ * are presorted keys; otherwise we use full sort.
+ */
+ if (enable_incremental_sort && presorted_keys > 0)
+ {
+ sort_plan = (Plan *)
+ make_incrementalsort(subplan, numsortkeys, presorted_keys,
sortColIdx, sortOperators,
collations, nullsFirst);
- label_sort_with_costsize(root, sort, best_path->limit_tuples);
- subplan = (Plan *) sort;
+ label_incrementalsort_with_costsize(root,
+ (IncrementalSort *) sort_plan,
+ pathkeys,
+ best_path->limit_tuples);
+ }
+ else
+ {
+ sort_plan = (Plan *) make_sort(subplan, numsortkeys,
+ sortColIdx, sortOperators,
+ collations, nullsFirst);
+
+ label_sort_with_costsize(root, (Sort *) sort_plan,
+ best_path->limit_tuples);
+ }
+
+ subplan = sort_plan;
}
subplans = lappend(subplans, subplan);
@@ -1703,7 +1756,8 @@ create_memoize_plan(PlannerInfo *root, MemoizePath *best_path, int flags)
plan = make_memoize(subplan, operators, collations, param_exprs,
best_path->singlerow, best_path->binary_mode,
- best_path->est_entries, keyparamids);
+ best_path->est_entries, keyparamids, best_path->est_calls,
+ best_path->est_unique_keys, best_path->est_hit_ratio);
copy_generic_path_info(&plan->plan, (Path *) best_path);
@@ -4344,13 +4398,16 @@ create_nestloop_plan(PlannerInfo *root,
NestLoop *join_plan;
Plan *outer_plan;
Plan *inner_plan;
+ Relids outerrelids;
List *tlist = build_path_tlist(root, &best_path->jpath.path);
List *joinrestrictclauses = best_path->jpath.joinrestrictinfo;
List *joinclauses;
List *otherclauses;
- Relids outerrelids;
List *nestParams;
+ List *outer_tlist;
+ bool outer_parallel_safe;
Relids saveOuterRels = root->curOuterRels;
+ ListCell *lc;
/*
* If the inner path is parameterized by the topmost parent of the outer
@@ -4372,8 +4429,8 @@ create_nestloop_plan(PlannerInfo *root,
outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath, 0);
/* For a nestloop, include outer relids in curOuterRels for inner side */
- root->curOuterRels = bms_union(root->curOuterRels,
- best_path->jpath.outerjoinpath->parent->relids);
+ outerrelids = best_path->jpath.outerjoinpath->parent->relids;
+ root->curOuterRels = bms_union(root->curOuterRels, outerrelids);
inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath, 0);
@@ -4412,9 +4469,66 @@ create_nestloop_plan(PlannerInfo *root,
* Identify any nestloop parameters that should be supplied by this join
* node, and remove them from root->curOuterParams.
*/
- outerrelids = best_path->jpath.outerjoinpath->parent->relids;
- nestParams = identify_current_nestloop_params(root, outerrelids);
+ nestParams = identify_current_nestloop_params(root,
+ outerrelids,
+ PATH_REQ_OUTER((Path *) best_path));
+
+ /*
+ * While nestloop parameters that are Vars had better be available from
+ * the outer_plan already, there are edge cases where nestloop parameters
+ * that are PHVs won't be. In such cases we must add them to the
+ * outer_plan's tlist, since the executor's NestLoopParam machinery
+ * requires the params to be simple outer-Var references to that tlist.
+ * (This is cheating a little bit, because the outer path's required-outer
+ * relids might not be enough to allow evaluating such a PHV. But in
+ * practice, if we could have evaluated the PHV at the nestloop node, we
+ * can do so in the outer plan too.)
+ */
+ outer_tlist = outer_plan->targetlist;
+ outer_parallel_safe = outer_plan->parallel_safe;
+ foreach(lc, nestParams)
+ {
+ NestLoopParam *nlp = (NestLoopParam *) lfirst(lc);
+ PlaceHolderVar *phv;
+ TargetEntry *tle;
+
+ if (IsA(nlp->paramval, Var))
+ continue; /* nothing to do for simple Vars */
+ /* Otherwise it must be a PHV */
+ phv = castNode(PlaceHolderVar, nlp->paramval);
+
+ if (tlist_member((Expr *) phv, outer_tlist))
+ continue; /* already available */
+
+ /*
+ * It's possible that nestloop parameter PHVs selected to evaluate
+ * here contain references to surviving root->curOuterParams items
+ * (that is, they reference values that will be supplied by some
+ * higher-level nestloop). Those need to be converted to Params now.
+ * Note: it's safe to do this after the tlist_member() check, because
+ * equal() won't pay attention to phv->phexpr.
+ */
+ phv->phexpr = (Expr *) replace_nestloop_params(root,
+ (Node *) phv->phexpr);
+
+ /* Make a shallow copy of outer_tlist, if we didn't already */
+ if (outer_tlist == outer_plan->targetlist)
+ outer_tlist = list_copy(outer_tlist);
+ /* ... and add the needed expression */
+ tle = makeTargetEntry((Expr *) copyObject(phv),
+ list_length(outer_tlist) + 1,
+ NULL,
+ true);
+ outer_tlist = lappend(outer_tlist, tle);
+ /* ... and track whether tlist is (still) parallel-safe */
+ if (outer_parallel_safe)
+ outer_parallel_safe = is_parallel_safe(root, (Node *) phv);
+ }
+ if (outer_tlist != outer_plan->targetlist)
+ outer_plan = change_plan_targetlist(outer_plan, outer_tlist,
+ outer_parallel_safe);
+ /* And finally, we can build the join plan node */
join_plan = make_nestloop(tlist,
joinclauses,
otherclauses,
@@ -6639,7 +6753,9 @@ materialize_finished_plan(Plan *subplan)
static Memoize *
make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations,
List *param_exprs, bool singlerow, bool binary_mode,
- uint32 est_entries, Bitmapset *keyparamids)
+ uint32 est_entries, Bitmapset *keyparamids,
+ Cardinality est_calls, Cardinality est_unique_keys,
+ double est_hit_ratio)
{
Memoize *node = makeNode(Memoize);
Plan *plan = &node->plan;
@@ -6657,6 +6773,9 @@ make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations,
node->binary_mode = binary_mode;
node->est_entries = est_entries;
node->keyparamids = keyparamids;
+ node->est_calls = est_calls;
+ node->est_unique_keys = est_unique_keys;
+ node->est_hit_ratio = est_hit_ratio;
return node;
}
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index 01804b085b3..3e3fec89252 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -3048,36 +3048,16 @@ add_base_clause_to_rel(PlannerInfo *root, Index relid,
* expr_is_nonnullable
* Check to see if the Expr cannot be NULL
*
- * If the Expr is a simple Var that is defined NOT NULL and meanwhile is not
- * nulled by any outer joins, then we can know that it cannot be NULL.
+ * Currently we only support simple Vars.
*/
static bool
expr_is_nonnullable(PlannerInfo *root, Expr *expr)
{
- RelOptInfo *rel;
- Var *var;
-
/* For now only check simple Vars */
if (!IsA(expr, Var))
return false;
- var = (Var *) expr;
-
- /* could the Var be nulled by any outer joins? */
- if (!bms_is_empty(var->varnullingrels))
- return false;
-
- /* system columns cannot be NULL */
- if (var->varattno < 0)
- return true;
-
- /* is the column defined NOT NULL? */
- rel = find_base_rel(root, var->varno);
- if (var->varattno > 0 &&
- bms_is_member(var->varattno, rel->notnullattnums))
- return true;
-
- return false;
+ return var_is_nonnullable(root, (Var *) expr, true);
}
/*
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index ff65867eebe..d59d6e4c6a0 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -342,6 +342,7 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
glob->transientPlan = false;
glob->dependsOnRole = false;
glob->partition_directory = NULL;
+ glob->rel_notnullatts_hash = NULL;
/*
* Assess whether it's feasible to use parallel mode for this query. We
@@ -557,6 +558,7 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
result->commandType = parse->commandType;
result->queryId = parse->queryId;
+ result->planOrigin = PLAN_STMT_STANDARD;
result->hasReturning = (parse->returningList != NIL);
result->hasModifyingCTE = parse->hasModifyingCTE;
result->canSetTag = parse->canSetTag;
@@ -721,6 +723,18 @@ subquery_planner(PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root,
transform_MERGE_to_join(parse);
/*
+ * Scan the rangetable for relation RTEs and retrieve the necessary
+ * catalog information for each relation. Using this information, clear
+ * the inh flag for any relation that has no children, collect not-null
+ * attribute numbers for any relation that has column not-null
+ * constraints, and expand virtual generated columns for any relation that
+ * contains them. Note that this step does not descend into sublinks and
+ * subqueries; if we pull up any sublinks or subqueries below, their
+ * relation RTEs are processed just before pulling them up.
+ */
+ parse = root->parse = preprocess_relation_rtes(root);
+
+ /*
* If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
* that we don't need so many special cases to deal with that situation.
*/
@@ -744,14 +758,6 @@ subquery_planner(PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root,
preprocess_function_rtes(root);
/*
- * Scan the rangetable for relations with virtual generated columns, and
- * replace all Var nodes in the query that reference these columns with
- * the generation expressions. Recursion issues here are handled in the
- * same way as for SubLinks.
- */
- parse = root->parse = expand_virtual_generated_columns(root);
-
- /*
* Check to see if any subqueries in the jointree can be merged into this
* query.
*/
@@ -787,23 +793,6 @@ subquery_planner(PlannerGlobal *glob, Query *parse, PlannerInfo *parent_root,
switch (rte->rtekind)
{
- case RTE_RELATION:
- if (rte->inh)
- {
- /*
- * Check to see if the relation actually has any children;
- * if not, clear the inh flag so we can treat it as a
- * plain base relation.
- *
- * Note: this could give a false-positive result, if the
- * rel once had children but no longer does. We used to
- * be able to clear rte->inh later on when we discovered
- * that, but no more; we have to handle such cases as
- * full-fledged inheritance.
- */
- rte->inh = has_subclass(rte->relid);
- }
- break;
case RTE_JOIN:
root->hasJoinRTEs = true;
if (IS_OUTER_JOIN(rte->jointype))
@@ -6879,7 +6868,7 @@ plan_cluster_use_sort(Oid tableOid, Oid indexOid)
*
* tableOid is the table on which the index is to be built. indexOid is the
* OID of an index to be created or reindexed (which must be an index with
- * support for parallel builds - currently btree or BRIN).
+ * support for parallel builds - currently btree, GIN, or BRIN).
*
* Return value is the number of parallel worker processes to request. It
* may be unsafe to proceed if this is 0. Note that this does not include the
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index e7cb3fede66..d71ed958e31 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -1454,6 +1454,7 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink,
Query *parse = root->parse;
Query *subselect = (Query *) sublink->subselect;
Node *whereClause;
+ PlannerInfo subroot;
int rtoffset;
int varno;
Relids clause_varnos;
@@ -1516,6 +1517,35 @@ convert_EXISTS_sublink_to_join(PlannerInfo *root, SubLink *sublink,
return NULL;
/*
+ * Scan the rangetable for relation RTEs and retrieve the necessary
+ * catalog information for each relation. Using this information, clear
+ * the inh flag for any relation that has no children, collect not-null
+ * attribute numbers for any relation that has column not-null
+ * constraints, and expand virtual generated columns for any relation that
+ * contains them.
+ *
+ * Note: we construct an entirely dummy PlannerInfo for use here. This
+ * is fine because only the "glob" and "parse" links will be used in this
+ * case.
+ *
+ * Note: we temporarily assign back the WHERE clause so that any virtual
+ * generated column references within it can be expanded. It should be
+ * separated out again afterward.
+ */
+ MemSet(&subroot, 0, sizeof(subroot));
+ subroot.type = T_PlannerInfo;
+ subroot.glob = root->glob;
+ subroot.parse = subselect;
+ subselect->jointree->quals = whereClause;
+ subselect = preprocess_relation_rtes(&subroot);
+
+ /*
+ * Now separate out the WHERE clause again.
+ */
+ whereClause = subselect->jointree->quals;
+ subselect->jointree->quals = NULL;
+
+ /*
* The subquery must have a nonempty jointree, but we can make it so.
*/
replace_empty_jointree(subselect);
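A minimal sketch of the dummy-subroot pattern this hunk introduces, folded into a hypothetical helper (not part of the patch); only the fields that preprocess_relation_rtes() actually reads are filled in:

    static Query *
    preprocess_exists_subselect(PlannerInfo *root, Query *subselect,
                                Node **whereClause)
    {
        PlannerInfo subroot;

        MemSet(&subroot, 0, sizeof(subroot));
        subroot.type = T_PlannerInfo;
        subroot.glob = root->glob;      /* shares rel_notnullatts_hash */
        subroot.parse = subselect;

        /* temporarily reattach the WHERE clause so its Vars get expanded */
        subselect->jointree->quals = *whereClause;
        subselect = preprocess_relation_rtes(&subroot);

        /* ... and detach it again */
        *whereClause = subselect->jointree->quals;
        subselect->jointree->quals = NULL;

        return subselect;
    }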
@@ -1732,6 +1762,7 @@ convert_EXISTS_to_ANY(PlannerInfo *root, Query *subselect,
Node **testexpr, List **paramIds)
{
Node *whereClause;
+ PlannerInfo subroot;
List *leftargs,
*rightargs,
*opids,
@@ -1791,12 +1822,15 @@ convert_EXISTS_to_ANY(PlannerInfo *root, Query *subselect,
* parent aliases were flattened already, and we're not going to pull any
* child Vars (of any description) into the parent.
*
- * Note: passing the parent's root to eval_const_expressions is
- * technically wrong, but we can get away with it since only the
- * boundParams (if any) are used, and those would be the same in a
- * subroot.
- */
- whereClause = eval_const_expressions(root, whereClause);
+ * Note: we construct an entirely dummy PlannerInfo to pass to
+ * eval_const_expressions. This is fine because only the "glob" and
+ * "parse" links are used by eval_const_expressions.
+ */
+ MemSet(&subroot, 0, sizeof(subroot));
+ subroot.type = T_PlannerInfo;
+ subroot.glob = root->glob;
+ subroot.parse = subselect;
+ whereClause = eval_const_expressions(&subroot, whereClause);
whereClause = (Node *) canonicalize_qual((Expr *) whereClause, false);
whereClause = (Node *) make_ands_implicit((Expr *) whereClause);
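The parse link of the dummy subroot is no longer a mere formality: with the NullTest reduction added to clauses.c below, eval_const_expressions() can resolve Vars through root->parse->rtable, and the WHERE clause's Vars are numbered against the subselect's rangetable, not the parent's. A sketch of the lookup that makes this matter, mirroring the hash-table branch of var_is_nonnullable() below (hypothetical helper name):

    static bool
    var_has_valid_notnull(PlannerInfo *root, Var *var)
    {
        /* var->varno indexes root->parse->rtable */
        RangeTblEntry *rte = planner_rt_fetch(var->varno, root);

        if (rte->inh && rte->relkind != RELKIND_PARTITIONED_TABLE)
            return false;
        return bms_is_member(var->varattno,
                             find_relation_notnullatts(root, rte->relid));
    }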
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index 87dc6f56b57..35e8d3c183b 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -4,10 +4,10 @@
* Planner preprocessing for subqueries and join tree manipulation.
*
* NOTE: the intended sequence for invoking these operations is
+ * preprocess_relation_rtes
* replace_empty_jointree
* pull_up_sublinks
* preprocess_function_rtes
- * expand_virtual_generated_columns
* pull_up_subqueries
* flatten_simple_union_all
* do expression preprocessing (including flattening JOIN alias vars)
@@ -36,6 +36,7 @@
#include "optimizer/clauses.h"
#include "optimizer/optimizer.h"
#include "optimizer/placeholder.h"
+#include "optimizer/plancat.h"
#include "optimizer/prep.h"
#include "optimizer/subselect.h"
#include "optimizer/tlist.h"
@@ -102,6 +103,9 @@ typedef struct reduce_outer_joins_partial_state
Relids unreduced_side; /* relids in its still-nullable side */
} reduce_outer_joins_partial_state;
+static Query *expand_virtual_generated_columns(PlannerInfo *root, Query *parse,
+ RangeTblEntry *rte, int rt_index,
+ Relation relation);
static Node *pull_up_sublinks_jointree_recurse(PlannerInfo *root, Node *jtnode,
Relids *relids);
static Node *pull_up_sublinks_qual_recurse(PlannerInfo *root, Node *node,
@@ -393,6 +397,181 @@ transform_MERGE_to_join(Query *parse)
}
/*
+ * preprocess_relation_rtes
+ * Do the preprocessing work for any relation RTEs in the FROM clause.
+ *
+ * This scans the rangetable for relation RTEs and retrieves the necessary
+ * catalog information for each relation. Using this information, it clears
+ * the inh flag for any relation that has no children, collects not-null
+ * attribute numbers for any relation that has column not-null constraints, and
+ * expands virtual generated columns for any relation that contains them.
+ *
+ * Note that expanding virtual generated columns may cause the query tree to
+ * have new copies of rangetable entries. Therefore, we have to use list_nth
+ * instead of foreach when iterating over the query's rangetable.
+ *
+ * Returns a modified copy of the query tree, if any relations with virtual
+ * generated columns are present.
+ */
+Query *
+preprocess_relation_rtes(PlannerInfo *root)
+{
+ Query *parse = root->parse;
+ int rtable_size;
+ int rt_index;
+
+ rtable_size = list_length(parse->rtable);
+
+ for (rt_index = 0; rt_index < rtable_size; rt_index++)
+ {
+ RangeTblEntry *rte = rt_fetch(rt_index + 1, parse->rtable);
+ Relation relation;
+
+ /* We only care about relation RTEs. */
+ if (rte->rtekind != RTE_RELATION)
+ continue;
+
+ /*
+ * We need not lock the relation since it was already locked by the
+ * rewriter.
+ */
+ relation = table_open(rte->relid, NoLock);
+
+ /*
+ * Check to see if the relation actually has any children; if not,
+ * clear the inh flag so we can treat it as a plain base relation.
+ *
+ * Note: this could give a false-positive result, if the rel once had
+ * children but no longer does. We used to be able to clear rte->inh
+ * later on when we discovered that, but no more; we have to handle
+ * such cases as full-fledged inheritance.
+ */
+ if (rte->inh)
+ rte->inh = relation->rd_rel->relhassubclass;
+
+ /*
+ * Check to see if the relation has any column not-null constraints;
+ * if so, retrieve the constraint information and store it in a
+ * relation OID based hash table.
+ */
+ get_relation_notnullatts(root, relation);
+
+ /*
+ * Check to see if the relation has any virtual generated columns; if
+ * so, replace all Var nodes in the query that reference these columns
+ * with the generation expressions.
+ */
+ parse = expand_virtual_generated_columns(root, parse,
+ rte, rt_index + 1,
+ relation);
+
+ table_close(relation, NoLock);
+ }
+
+ return parse;
+}
+
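To see why the function iterates by index rather than with foreach, consider the unsafe variant (a sketch, deliberately wrong): expand_virtual_generated_columns() can return a modified copy of the query, so a cached ListCell would keep pointing into the old tree.

    foreach(lc, parse->rtable)      /* WRONG: lc is pinned to the old rtable */
    {
        RangeTblEntry *rte = lfirst_node(RangeTblEntry, lc);

        parse = expand_virtual_generated_columns(root, parse, rte,
                                                 foreach_current_index(lc) + 1,
                                                 relation);
    }

Re-fetching with rt_fetch() against the current parse tree on each iteration, as the code above does, avoids the stale pointer.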
+/*
+ * expand_virtual_generated_columns
+ * Expand virtual generated columns for the given relation.
+ *
+ * This checks whether the given relation has any virtual generated columns,
+ * and if so, replaces all Var nodes in the query that reference those columns
+ * with their generation expressions.
+ *
+ * Returns a modified copy of the query tree if the relation contains virtual
+ * generated columns.
+ */
+static Query *
+expand_virtual_generated_columns(PlannerInfo *root, Query *parse,
+ RangeTblEntry *rte, int rt_index,
+ Relation relation)
+{
+ TupleDesc tupdesc;
+
+ /* Only normal relations can have virtual generated columns */
+ Assert(rte->rtekind == RTE_RELATION);
+
+ tupdesc = RelationGetDescr(relation);
+ if (tupdesc->constr && tupdesc->constr->has_generated_virtual)
+ {
+ List *tlist = NIL;
+ pullup_replace_vars_context rvcontext;
+
+ for (int i = 0; i < tupdesc->natts; i++)
+ {
+ Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
+ TargetEntry *tle;
+
+ if (attr->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
+ {
+ Node *defexpr;
+
+ defexpr = build_generation_expression(relation, i + 1);
+ ChangeVarNodes(defexpr, 1, rt_index, 0);
+
+ tle = makeTargetEntry((Expr *) defexpr, i + 1, 0, false);
+ tlist = lappend(tlist, tle);
+ }
+ else
+ {
+ Var *var;
+
+ var = makeVar(rt_index,
+ i + 1,
+ attr->atttypid,
+ attr->atttypmod,
+ attr->attcollation,
+ 0);
+
+ tle = makeTargetEntry((Expr *) var, i + 1, 0, false);
+ tlist = lappend(tlist, tle);
+ }
+ }
+
+ Assert(list_length(tlist) > 0);
+ Assert(!rte->lateral);
+
+ /*
+ * The relation's targetlist items are now in the appropriate form to
+ * insert into the query, except that we may need to wrap them in
+ * PlaceHolderVars. Set up required context data for
+ * pullup_replace_vars.
+ */
+ rvcontext.root = root;
+ rvcontext.targetlist = tlist;
+ rvcontext.target_rte = rte;
+ rvcontext.result_relation = parse->resultRelation;
+ /* won't need these values */
+ rvcontext.relids = NULL;
+ rvcontext.nullinfo = NULL;
+ /* pass NULL for outer_hasSubLinks */
+ rvcontext.outer_hasSubLinks = NULL;
+ rvcontext.varno = rt_index;
+ /* this flag will be set below, if needed */
+ rvcontext.wrap_option = REPLACE_WRAP_NONE;
+ /* initialize cache array with indexes 0 .. length(tlist) */
+ rvcontext.rv_cache = palloc0((list_length(tlist) + 1) *
+ sizeof(Node *));
+
+ /*
+ * If the query uses grouping sets, we need a PlaceHolderVar for each
+ * expression of the relation's targetlist items. (See comments in
+ * pull_up_simple_subquery().)
+ */
+ if (parse->groupingSets)
+ rvcontext.wrap_option = REPLACE_WRAP_ALL;
+
+ /*
+ * Apply pullup variable replacement throughout the query tree.
+ */
+ parse = (Query *) pullup_replace_vars((Node *) parse, &rvcontext);
+ }
+
+ return parse;
+}
+
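A concrete example of the substitution: for a relation t(a int, b int GENERATED ALWAYS AS (a * 2) VIRTUAL) sitting at rangetable index 3, the tlist built above is, conceptually, resno 1 = Var(3,1) for the plain column and resno 2 = the expression a * 2 (its Var renumbered to varno 3) for the generated column. pullup_replace_vars() then rewrites every reference to t.b anywhere in the query into that expression, wrapping it in a PlaceHolderVar when the grouping-sets rule above demands it.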
+/*
* replace_empty_jointree
* If the Query's jointree is empty, replace it with a dummy RTE_RESULT
* relation.
@@ -950,128 +1129,6 @@ preprocess_function_rtes(PlannerInfo *root)
}
/*
- * expand_virtual_generated_columns
- * Expand all virtual generated column references in a query.
- *
- * This scans the rangetable for relations with virtual generated columns, and
- * replaces all Var nodes in the query that reference these columns with the
- * generation expressions. Note that we do not descend into subqueries; that
- * is taken care of when the subqueries are planned.
- *
- * This has to be done after we have pulled up any SubLinks within the query's
- * quals; otherwise any virtual generated column references within the SubLinks
- * that should be transformed into joins wouldn't get expanded.
- *
- * Returns a modified copy of the query tree, if any relations with virtual
- * generated columns are present.
- */
-Query *
-expand_virtual_generated_columns(PlannerInfo *root)
-{
- Query *parse = root->parse;
- int rt_index;
- ListCell *lc;
-
- rt_index = 0;
- foreach(lc, parse->rtable)
- {
- RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc);
- Relation rel;
- TupleDesc tupdesc;
-
- ++rt_index;
-
- /*
- * Only normal relations can have virtual generated columns.
- */
- if (rte->rtekind != RTE_RELATION)
- continue;
-
- rel = table_open(rte->relid, NoLock);
-
- tupdesc = RelationGetDescr(rel);
- if (tupdesc->constr && tupdesc->constr->has_generated_virtual)
- {
- List *tlist = NIL;
- pullup_replace_vars_context rvcontext;
-
- for (int i = 0; i < tupdesc->natts; i++)
- {
- Form_pg_attribute attr = TupleDescAttr(tupdesc, i);
- TargetEntry *tle;
-
- if (attr->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
- {
- Node *defexpr;
-
- defexpr = build_generation_expression(rel, i + 1);
- ChangeVarNodes(defexpr, 1, rt_index, 0);
-
- tle = makeTargetEntry((Expr *) defexpr, i + 1, 0, false);
- tlist = lappend(tlist, tle);
- }
- else
- {
- Var *var;
-
- var = makeVar(rt_index,
- i + 1,
- attr->atttypid,
- attr->atttypmod,
- attr->attcollation,
- 0);
-
- tle = makeTargetEntry((Expr *) var, i + 1, 0, false);
- tlist = lappend(tlist, tle);
- }
- }
-
- Assert(list_length(tlist) > 0);
- Assert(!rte->lateral);
-
- /*
- * The relation's targetlist items are now in the appropriate form
- * to insert into the query, except that we may need to wrap them
- * in PlaceHolderVars. Set up required context data for
- * pullup_replace_vars.
- */
- rvcontext.root = root;
- rvcontext.targetlist = tlist;
- rvcontext.target_rte = rte;
- rvcontext.result_relation = parse->resultRelation;
- /* won't need these values */
- rvcontext.relids = NULL;
- rvcontext.nullinfo = NULL;
- /* pass NULL for outer_hasSubLinks */
- rvcontext.outer_hasSubLinks = NULL;
- rvcontext.varno = rt_index;
- /* this flag will be set below, if needed */
- rvcontext.wrap_option = REPLACE_WRAP_NONE;
- /* initialize cache array with indexes 0 .. length(tlist) */
- rvcontext.rv_cache = palloc0((list_length(tlist) + 1) *
- sizeof(Node *));
-
- /*
- * If the query uses grouping sets, we need a PlaceHolderVar for
- * each expression of the relation's targetlist items. (See
- * comments in pull_up_simple_subquery().)
- */
- if (parse->groupingSets)
- rvcontext.wrap_option = REPLACE_WRAP_ALL;
-
- /*
- * Apply pullup variable replacement throughout the query tree.
- */
- parse = (Query *) pullup_replace_vars((Node *) parse, &rvcontext);
- }
-
- table_close(rel, NoLock);
- }
-
- return parse;
-}
-
-/*
* pull_up_subqueries
* Look for subqueries in the rangetable that can be pulled up into
* the parent query. If the subquery has no special features like
@@ -1334,6 +1391,16 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
Assert(subquery->cteList == NIL);
/*
+ * Scan the rangetable for relation RTEs and retrieve the necessary
+ * catalog information for each relation. Using this information, clear
+ * the inh flag for any relation that has no children, collect not-null
+ * attribute numbers for any relation that has column not-null
+ * constraints, and expand virtual generated columns for any relation that
+ * contains them.
+ */
+ subquery = subroot->parse = preprocess_relation_rtes(subroot);
+
+ /*
* If the FROM clause is empty, replace it with a dummy RTE_RESULT RTE, so
* that we don't need so many special cases to deal with that situation.
*/
@@ -1353,13 +1420,6 @@ pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte,
preprocess_function_rtes(subroot);
/*
- * Scan the rangetable for relations with virtual generated columns, and
- * replace all Var nodes in the query that reference these columns with
- * the generation expressions.
- */
- subquery = subroot->parse = expand_virtual_generated_columns(subroot);
-
- /*
* Recursively pull up the subquery's subqueries, so that
* pull_up_subqueries' processing is complete for its jointree and
* rangetable.
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 26a3e050086..6f0b338d2cd 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -20,6 +20,7 @@
#include "postgres.h"
#include "access/htup_details.h"
+#include "catalog/pg_class.h"
#include "catalog/pg_language.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_proc.h"
@@ -36,6 +37,7 @@
#include "optimizer/clauses.h"
#include "optimizer/cost.h"
#include "optimizer/optimizer.h"
+#include "optimizer/pathnode.h"
#include "optimizer/plancat.h"
#include "optimizer/planmain.h"
#include "parser/analyze.h"
@@ -43,6 +45,7 @@
#include "parser/parse_collate.h"
#include "parser/parse_func.h"
#include "parser/parse_oper.h"
+#include "parser/parsetree.h"
#include "rewrite/rewriteHandler.h"
#include "rewrite/rewriteManip.h"
#include "tcop/tcopprot.h"
@@ -2242,7 +2245,8 @@ rowtype_field_matches(Oid rowtypeid, int fieldnum,
* only operators and functions that are reasonable to try to execute.
*
* NOTE: "root" can be passed as NULL if the caller never wants to do any
- * Param substitutions nor receive info about inlined functions.
+ * Param substitutions nor receive info about inlined functions nor reduce
+ * NullTest for Vars to constant true or constant false.
*
* NOTE: the planner assumes that this will always flatten nested AND and
* OR clauses into N-argument form. See comments in prepqual.c.
@@ -3333,6 +3337,13 @@ eval_const_expressions_mutator(Node *node,
-1,
coalesceexpr->coalescecollid);
+ /*
+ * If there's exactly one surviving argument, we no longer
+ * need COALESCE at all: the result is that argument.
+ */
+ if (list_length(newargs) == 1)
+ return (Node *) linitial(newargs);
+
newcoalesce = makeNode(CoalesceExpr);
newcoalesce->coalescetype = coalesceexpr->coalescetype;
newcoalesce->coalescecollid = coalesceexpr->coalescecollid;
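For example, once constant-folding has dropped the null constant, COALESCE(NULL, x) now simplifies all the way down to x instead of surviving as a one-argument CoalesceExpr.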
@@ -3537,6 +3548,31 @@ eval_const_expressions_mutator(Node *node,
return makeBoolConst(result, false);
}
+ if (!ntest->argisrow && arg && IsA(arg, Var) && context->root)
+ {
+ Var *varg = (Var *) arg;
+ bool result;
+
+ if (var_is_nonnullable(context->root, varg, false))
+ {
+ switch (ntest->nulltesttype)
+ {
+ case IS_NULL:
+ result = false;
+ break;
+ case IS_NOT_NULL:
+ result = true;
+ break;
+ default:
+ elog(ERROR, "unrecognized nulltesttype: %d",
+ (int) ntest->nulltesttype);
+ result = false; /* keep compiler quiet */
+ break;
+ }
+
+ return makeBoolConst(result, false);
+ }
+ }
newntest = makeNode(NullTest);
newntest->arg = (Expr *) arg;
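Concretely: for a column a declared NOT NULL, a qual such as a IS NOT NULL now folds to constant true and a IS NULL to constant false, provided the Var is not nulled by an outer join or grouping set; that proviso is exactly what var_is_nonnullable() below checks.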
@@ -4156,6 +4192,67 @@ simplify_function(Oid funcid, Oid result_type, int32 result_typmod,
}
/*
+ * var_is_nonnullable: check to see if the Var cannot be NULL
+ *
+ * If the Var is defined NOT NULL and is not nulled by any outer joins or
+ * grouping sets, then we know it cannot be NULL.
+ *
+ * use_rel_info indicates whether the corresponding RelOptInfo is available for
+ * use.
+ */
+bool
+var_is_nonnullable(PlannerInfo *root, Var *var, bool use_rel_info)
+{
+ Relids notnullattnums = NULL;
+
+ Assert(IsA(var, Var));
+
+ /* skip upper-level Vars */
+ if (var->varlevelsup != 0)
+ return false;
+
+ /* could the Var be nulled by any outer joins or grouping sets? */
+ if (!bms_is_empty(var->varnullingrels))
+ return false;
+
+ /* system columns cannot be NULL */
+ if (var->varattno < 0)
+ return true;
+
+ /*
+ * Check if the Var is defined as NOT NULL. We retrieve the column NOT
+ * NULL constraint information from the corresponding RelOptInfo if it is
+ * available; otherwise, we search the hash table for this information.
+ */
+ if (use_rel_info)
+ {
+ RelOptInfo *rel = find_base_rel(root, var->varno);
+
+ notnullattnums = rel->notnullattnums;
+ }
+ else
+ {
+ RangeTblEntry *rte = planner_rt_fetch(var->varno, root);
+
+ /*
+ * We must skip inheritance parent tables, as some child tables may
+ * have a NOT NULL constraint for a column while others may not. This
+ * cannot happen with partitioned tables, though.
+ */
+ if (rte->inh && rte->relkind != RELKIND_PARTITIONED_TABLE)
+ return false;
+
+ notnullattnums = find_relation_notnullatts(root, rte->relid);
+ }
+
+ if (var->varattno > 0 &&
+ bms_is_member(var->varattno, notnullattnums))
+ return true;
+
+ return false;
+}
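A usage sketch for the new entry point (hypothetical caller; the patch's own caller is the NullTest branch above). During preprocessing, before RelOptInfos exist, pass use_rel_info = false so the lookup goes through the hash table that get_relation_notnullatts() populated:

    if (IsA(node, Var) &&
        var_is_nonnullable(root, (Var *) node, false))
    {
        /* the Var is provably non-NULL at this query level */
    }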
+
+/*
* expand_function_arguments: convert named-notation args to positional args
* and/or insert default args, as needed
*
diff --git a/src/backend/optimizer/util/inherit.c b/src/backend/optimizer/util/inherit.c
index 17e51cd75d7..30d158069e3 100644
--- a/src/backend/optimizer/util/inherit.c
+++ b/src/backend/optimizer/util/inherit.c
@@ -466,8 +466,7 @@ expand_single_inheritance_child(PlannerInfo *root, RangeTblEntry *parentrte,
Index *childRTindex_p)
{
Query *parse = root->parse;
- Oid parentOID PG_USED_FOR_ASSERTS_ONLY =
- RelationGetRelid(parentrel);
+ Oid parentOID = RelationGetRelid(parentrel);
Oid childOID = RelationGetRelid(childrel);
RangeTblEntry *childrte;
Index childRTindex;
@@ -514,6 +513,13 @@ expand_single_inheritance_child(PlannerInfo *root, RangeTblEntry *parentrte,
*childRTindex_p = childRTindex;
/*
+ * Retrieve column not-null constraint information for the child relation
+ * if its relation OID is different from the parent's.
+ */
+ if (childOID != parentOID)
+ get_relation_notnullatts(root, childrel);
+
+ /*
* Build an AppendRelInfo struct for each parent/child pair.
*/
appinfo = make_append_rel_info(parentrel, childrel,
diff --git a/src/backend/optimizer/util/paramassign.c b/src/backend/optimizer/util/paramassign.c
index 3bd3ce37c8f..4c13c5931b4 100644
--- a/src/backend/optimizer/util/paramassign.c
+++ b/src/backend/optimizer/util/paramassign.c
@@ -599,38 +599,46 @@ process_subquery_nestloop_params(PlannerInfo *root, List *subplan_params)
}
/*
- * Identify any NestLoopParams that should be supplied by a NestLoop plan
- * node with the specified lefthand rels. Remove them from the active
- * root->curOuterParams list and return them as the result list.
+ * Identify any NestLoopParams that should be supplied by a NestLoop
+ * plan node with the specified lefthand rels and required-outer rels.
+ * Remove them from the active root->curOuterParams list and return
+ * them as the result list.
*
- * XXX Here we also hack up the returned Vars and PHVs so that they do not
- * contain nullingrel sets exceeding what is available from the outer side.
- * This is needed if we have applied outer join identity 3,
- * (A leftjoin B on (Pab)) leftjoin C on (Pb*c)
- * = A leftjoin (B leftjoin C on (Pbc)) on (Pab)
- * and C contains lateral references to B. It's still safe to apply the
- * identity, but the parser will have created those references in the form
- * "b*" (i.e., with varnullingrels listing the A/B join), while what we will
- * have available from the nestloop's outer side is just "b". We deal with
- * that here by stripping the nullingrels down to what is available from the
- * outer side according to leftrelids.
- *
- * That fixes matters for the case of forward application of identity 3.
- * If the identity was applied in the reverse direction, we will have
- * parameter Vars containing too few nullingrel bits rather than too many.
- * Currently, that causes no problems because setrefs.c applies only a
- * subset check to nullingrels in NestLoopParams, but we'd have to work
- * harder if we ever want to tighten that check. This is all pretty annoying
- * because it greatly weakens setrefs.c's cross-check, but the alternative
+ * Vars and PHVs appearing in the result list must have nullingrel sets
+ * that could validly appear in the lefthand rel's output. Ordinarily that
+ * would be true already, but if we have applied outer join identity 3,
+ * there could be more or fewer nullingrel bits in the nodes appearing in
+ * curOuterParams than are in the nominal leftrelids. We deal with that by
+ * forcing their nullingrel sets to include exactly the outer-join relids
+ * that appear in leftrelids and can null the respective Var or PHV.
+ * This fix is a bit ad-hoc and intellectually unsatisfactory, because it's
+ * essentially jumping to the conclusion that we've placed evaluation of
+ * the nestloop parameters correctly, and thus it defeats the intent of the
+ * subsequent nullingrel cross-checks in setrefs.c. But the alternative
* seems to be to generate multiple versions of each laterally-parameterized
* subquery, which'd be unduly expensive.
*/
List *
-identify_current_nestloop_params(PlannerInfo *root, Relids leftrelids)
+identify_current_nestloop_params(PlannerInfo *root,
+ Relids leftrelids,
+ Relids outerrelids)
{
List *result;
+ Relids allleftrelids;
ListCell *cell;
+ /*
+ * We'll be able to evaluate a PHV in the lefthand path if it uses the
+ * lefthand rels plus any available required-outer rels. But don't do so
+ * if it uses *only* required-outer rels; in that case it should be
+ * evaluated higher in the tree. For Vars, no such hair-splitting is
+ * necessary since they depend on only one relid.
+ */
+ if (outerrelids)
+ allleftrelids = bms_union(leftrelids, outerrelids);
+ else
+ allleftrelids = leftrelids;
+
result = NIL;
foreach(cell, root->curOuterParams)
{
@@ -646,25 +654,60 @@ identify_current_nestloop_params(PlannerInfo *root, Relids leftrelids)
bms_is_member(nlp->paramval->varno, leftrelids))
{
Var *var = (Var *) nlp->paramval;
+ RelOptInfo *rel = root->simple_rel_array[var->varno];
root->curOuterParams = foreach_delete_current(root->curOuterParams,
cell);
- var->varnullingrels = bms_intersect(var->varnullingrels,
+ var->varnullingrels = bms_intersect(rel->nulling_relids,
leftrelids);
result = lappend(result, nlp);
}
- else if (IsA(nlp->paramval, PlaceHolderVar) &&
- bms_is_subset(find_placeholder_info(root,
- (PlaceHolderVar *) nlp->paramval)->ph_eval_at,
- leftrelids))
+ else if (IsA(nlp->paramval, PlaceHolderVar))
{
PlaceHolderVar *phv = (PlaceHolderVar *) nlp->paramval;
+ PlaceHolderInfo *phinfo = find_placeholder_info(root, phv);
+ Relids eval_at = phinfo->ph_eval_at;
- root->curOuterParams = foreach_delete_current(root->curOuterParams,
- cell);
- phv->phnullingrels = bms_intersect(phv->phnullingrels,
- leftrelids);
- result = lappend(result, nlp);
+ if (bms_is_subset(eval_at, allleftrelids) &&
+ bms_overlap(eval_at, leftrelids))
+ {
+ root->curOuterParams = foreach_delete_current(root->curOuterParams,
+ cell);
+
+ /*
+ * Deal with an edge case: if the PHV was pulled up out of a
+ * subquery and it contains a subquery that was originally
+ * pushed down from this query level, then that will still be
+ * represented as a SubLink, because SS_process_sublinks won't
+ * recurse into outer PHVs, so it didn't get transformed
+ * during expression preprocessing in the subquery. We need a
+ * version of the PHV that has a SubPlan, which we can get
+ * from the current query level's placeholder_list. This is
+ * quite grotty of course, but dealing with it earlier in the
+ * handling of subplan params would be just as grotty, and it
+ * might end up being a waste of cycles if we don't decide to
+ * treat the PHV as a NestLoopParam. (Perhaps that whole
+ * mechanism should be redesigned someday, but today is not
+ * that day.)
+ */
+ if (root->parse->hasSubLinks)
+ {
+ phv = copyObject(phinfo->ph_var);
+
+ /*
+ * The ph_var will have empty nullingrels, but that
+ * doesn't matter since we're about to overwrite
+ * phv->phnullingrels. Other fields should be OK already.
+ */
+ nlp->paramval = (Var *) phv;
+ }
+
+ phv->phnullingrels =
+ bms_intersect(get_placeholder_nulling_relids(root, phinfo),
+ leftrelids);
+
+ result = lappend(result, nlp);
+ }
}
}
return result;
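For reference, the identity-3 example that the rewritten header comment alludes to, carried over from the removed text: (A leftjoin B on (Pab)) leftjoin C on (Pb*c) = A leftjoin (B leftjoin C on (Pbc)) on (Pab). When C contains lateral references to B, the parser creates them as "b*" (varnullingrels naming the A/B join), while the nestloop's outer side can supply only plain "b"; that mismatch, in either direction, is why the nullingrel sets are recomputed here from rel->nulling_relids and leftrelids instead of being trusted as-is.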
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index e0192d4a491..a4c5867cdcb 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -1404,12 +1404,12 @@ create_append_path(PlannerInfo *root,
pathnode->path.total_cost = child->total_cost;
}
else
- cost_append(pathnode);
+ cost_append(pathnode, root);
/* Must do this last, else cost_append complains */
pathnode->path.pathkeys = child->pathkeys;
}
else
- cost_append(pathnode);
+ cost_append(pathnode, root);
/* If the caller provided a row estimate, override the computed value. */
if (rows >= 0)
@@ -1515,6 +1515,9 @@ create_merge_append_path(PlannerInfo *root,
foreach(l, subpaths)
{
Path *subpath = (Path *) lfirst(l);
+ int presorted_keys;
+ Path sort_path; /* dummy for result of
+ * cost_sort/cost_incremental_sort */
/* All child paths should be unparameterized */
Assert(bms_is_empty(PATH_REQ_OUTER(subpath)));
@@ -1523,32 +1526,52 @@ create_merge_append_path(PlannerInfo *root,
pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
subpath->parallel_safe;
- if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
+ if (!pathkeys_count_contained_in(pathkeys, subpath->pathkeys,
+ &presorted_keys))
{
- /* Subpath is adequately ordered, we won't need to sort it */
- input_disabled_nodes += subpath->disabled_nodes;
- input_startup_cost += subpath->startup_cost;
- input_total_cost += subpath->total_cost;
- }
- else
- {
- /* We'll need to insert a Sort node, so include cost for that */
- Path sort_path; /* dummy for result of cost_sort */
+ /*
+ * We'll need to insert a Sort node, so include costs for that. We
+ * choose to use incremental sort if it is enabled and there are
+ * presorted keys; otherwise we use full sort.
+ *
+ * We can use the parent's LIMIT if any, since we certainly won't
+ * pull more than that many tuples from any child.
+ */
+ if (enable_incremental_sort && presorted_keys > 0)
+ {
+ cost_incremental_sort(&sort_path,
+ root,
+ pathkeys,
+ presorted_keys,
+ subpath->disabled_nodes,
+ subpath->startup_cost,
+ subpath->total_cost,
+ subpath->rows,
+ subpath->pathtarget->width,
+ 0.0,
+ work_mem,
+ pathnode->limit_tuples);
+ }
+ else
+ {
+ cost_sort(&sort_path,
+ root,
+ pathkeys,
+ subpath->disabled_nodes,
+ subpath->total_cost,
+ subpath->rows,
+ subpath->pathtarget->width,
+ 0.0,
+ work_mem,
+ pathnode->limit_tuples);
+ }
- cost_sort(&sort_path,
- root,
- pathkeys,
- subpath->disabled_nodes,
- subpath->total_cost,
- subpath->rows,
- subpath->pathtarget->width,
- 0.0,
- work_mem,
- pathnode->limit_tuples);
- input_disabled_nodes += sort_path.disabled_nodes;
- input_startup_cost += sort_path.startup_cost;
- input_total_cost += sort_path.total_cost;
+ subpath = &sort_path;
}
+
+ input_disabled_nodes += subpath->disabled_nodes;
+ input_startup_cost += subpath->startup_cost;
+ input_total_cost += subpath->total_cost;
}
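As a worked example of the new branch: a child path sorted on (a) feeding a MergeAppend that wants pathkeys (a, b) gives presorted_keys = 1, so with enable_incremental_sort on, the usually cheaper incremental-sort costing is charged. A child with no usable sorted prefix falls through to full-sort costing, and a child already sorted on (a, b) makes pathkeys_count_contained_in() return true, skipping the Sort entirely.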
/*
@@ -1666,7 +1689,7 @@ create_material_path(RelOptInfo *rel, Path *subpath)
MemoizePath *
create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
List *param_exprs, List *hash_operators,
- bool singlerow, bool binary_mode, double calls)
+ bool singlerow, bool binary_mode, Cardinality est_calls)
{
MemoizePath *pathnode = makeNode(MemoizePath);
@@ -1687,7 +1710,6 @@ create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
pathnode->param_exprs = param_exprs;
pathnode->singlerow = singlerow;
pathnode->binary_mode = binary_mode;
- pathnode->calls = clamp_row_est(calls);
/*
* For now we set est_entries to 0. cost_memoize_rescan() does all the
@@ -1697,6 +1719,12 @@ create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
*/
pathnode->est_entries = 0;
+ pathnode->est_calls = clamp_row_est(est_calls);
+
+ /* These will also be set later in cost_memoize_rescan() */
+ pathnode->est_unique_keys = 0.0;
+ pathnode->est_hit_ratio = 0.0;
+
/* we should not generate this path type when enable_memoize=false */
Assert(enable_memoize);
pathnode->path.disabled_nodes = subpath->disabled_nodes;
@@ -4236,7 +4264,7 @@ reparameterize_path(PlannerInfo *root, Path *path,
mpath->hash_operators,
mpath->singlerow,
mpath->binary_mode,
- mpath->calls);
+ mpath->est_calls);
}
default:
break;
diff --git a/src/backend/optimizer/util/placeholder.c b/src/backend/optimizer/util/placeholder.c
index 41a4c81e94a..e1cd00a72fb 100644
--- a/src/backend/optimizer/util/placeholder.c
+++ b/src/backend/optimizer/util/placeholder.c
@@ -545,3 +545,43 @@ contain_placeholder_references_walker(Node *node,
return expression_tree_walker(node, contain_placeholder_references_walker,
context);
}
+
+/*
+ * Compute the set of outer-join relids that can null a placeholder.
+ *
+ * This is analogous to RelOptInfo.nulling_relids for Vars, but we compute it
+ * on-the-fly rather than saving it somewhere. Currently the value is needed
+ * at most once per query, so there's little value in doing otherwise. If it
+ * ever gains more widespread use, perhaps we should cache the result in
+ * PlaceHolderInfo.
+ */
+Relids
+get_placeholder_nulling_relids(PlannerInfo *root, PlaceHolderInfo *phinfo)
+{
+ Relids result = NULL;
+ int relid = -1;
+
+ /*
+ * Form the union of all potential nulling OJs for each baserel included
+ * in ph_eval_at.
+ */
+ while ((relid = bms_next_member(phinfo->ph_eval_at, relid)) > 0)
+ {
+ RelOptInfo *rel = root->simple_rel_array[relid];
+
+ /* ignore the RTE_GROUP RTE */
+ if (relid == root->group_rtindex)
+ continue;
+
+ if (rel == NULL) /* must be an outer join */
+ {
+ Assert(bms_is_member(relid, root->outer_join_rels));
+ continue;
+ }
+ result = bms_add_members(result, rel->nulling_relids);
+ }
+
+ /* Now remove any OJs already included in ph_eval_at, and we're done. */
+ result = bms_del_members(result, phinfo->ph_eval_at);
+ return result;
+}
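A worked example: suppose ph_eval_at = {B, A/B join}. The loop unions the nulling_relids of baserel B, say {A/B join, B/C join}; the A/B join relid itself has no RelOptInfo and is skipped (asserted to be an outer join); bms_del_members() then removes A/B join because it already lies inside ph_eval_at, leaving {B/C join}, exactly the joins above the PHV's evaluation point that can still null it.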
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index 59233b64730..c6a58afc5e5 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -59,6 +59,12 @@ int constraint_exclusion = CONSTRAINT_EXCLUSION_PARTITION;
/* Hook for plugins to get control in get_relation_info() */
get_relation_info_hook_type get_relation_info_hook = NULL;
+typedef struct NotnullHashEntry
+{
+ Oid relid; /* OID of the relation */
+ Relids notnullattnums; /* attnums of NOT NULL columns */
+} NotnullHashEntry;
+
static void get_relation_foreign_keys(PlannerInfo *root, RelOptInfo *rel,
Relation relation, bool inhparent);
@@ -172,27 +178,7 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
* RangeTblEntry does get populated.
*/
if (!inhparent || relation->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
- {
- for (int i = 0; i < relation->rd_att->natts; i++)
- {
- CompactAttribute *attr = TupleDescCompactAttr(relation->rd_att, i);
-
- Assert(attr->attnullability != ATTNULLABLE_UNKNOWN);
-
- if (attr->attnullability == ATTNULLABLE_VALID)
- {
- rel->notnullattnums = bms_add_member(rel->notnullattnums,
- i + 1);
-
- /*
- * Per RemoveAttributeById(), dropped columns will have their
- * attnotnull unset, so we needn't check for dropped columns
- * in the above condition.
- */
- Assert(!attr->attisdropped);
- }
- }
- }
+ rel->notnullattnums = find_relation_notnullatts(root, relationObjectId);
/*
* Estimate relation size --- unless it's an inheritance parent, in which
@@ -684,6 +670,105 @@ get_relation_foreign_keys(PlannerInfo *root, RelOptInfo *rel,
}
/*
+ * get_relation_notnullatts -
+ * Retrieves column not-null constraint information for a given relation.
+ *
+ * We do this while we have the relcache entry open, and store the column
+ * not-null constraint information in a hash table based on the relation OID.
+ */
+void
+get_relation_notnullatts(PlannerInfo *root, Relation relation)
+{
+ Oid relid = RelationGetRelid(relation);
+ NotnullHashEntry *hentry;
+ bool found;
+ Relids notnullattnums = NULL;
+
+ /* bail out if the relation has no not-null constraints */
+ if (relation->rd_att->constr == NULL ||
+ !relation->rd_att->constr->has_not_null)
+ return;
+
+ /* create the hash table if it hasn't been created yet */
+ if (root->glob->rel_notnullatts_hash == NULL)
+ {
+ HTAB *hashtab;
+ HASHCTL hash_ctl;
+
+ hash_ctl.keysize = sizeof(Oid);
+ hash_ctl.entrysize = sizeof(NotnullHashEntry);
+ hash_ctl.hcxt = CurrentMemoryContext;
+
+ hashtab = hash_create("Relation NOT NULL attnums",
+ 64L, /* arbitrary initial size */
+ &hash_ctl,
+ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+
+ root->glob->rel_notnullatts_hash = hashtab;
+ }
+
+ /*
+ * Create a hash entry for this relation OID, if we don't have one
+ * already.
+ */
+ hentry = (NotnullHashEntry *) hash_search(root->glob->rel_notnullatts_hash,
+ &relid,
+ HASH_ENTER,
+ &found);
+
+ /* bail out if a hash entry already exists for this relation OID */
+ if (found)
+ return;
+
+ /* collect the column not-null constraint information for this relation */
+ for (int i = 0; i < relation->rd_att->natts; i++)
+ {
+ CompactAttribute *attr = TupleDescCompactAttr(relation->rd_att, i);
+
+ Assert(attr->attnullability != ATTNULLABLE_UNKNOWN);
+
+ if (attr->attnullability == ATTNULLABLE_VALID)
+ {
+ notnullattnums = bms_add_member(notnullattnums, i + 1);
+
+ /*
+ * Per RemoveAttributeById(), dropped columns will have their
+ * attnotnull unset, so we needn't check for dropped columns in
+ * the above condition.
+ */
+ Assert(!attr->attisdropped);
+ }
+ }
+
+ /* ... and initialize the new hash entry */
+ hentry->notnullattnums = notnullattnums;
+}
+
+/*
+ * find_relation_notnullatts -
+ * Searches the hash table and returns the column not-null constraint
+ * information for a given relation.
+ */
+Relids
+find_relation_notnullatts(PlannerInfo *root, Oid relid)
+{
+ NotnullHashEntry *hentry;
+ bool found;
+
+ if (root->glob->rel_notnullatts_hash == NULL)
+ return NULL;
+
+ hentry = (NotnullHashEntry *) hash_search(root->glob->rel_notnullatts_hash,
+ &relid,
+ HASH_FIND,
+ &found);
+ if (!found)
+ return NULL;
+
+ return hentry->notnullattnums;
+}
+
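A usage sketch of the cache's two halves (hypothetical caller): populate while the relcache entry is open, then look up later by OID alone; hash_search with HASH_ENTER makes the population step idempotent.

    /* during preprocessing, with the relation open */
    get_relation_notnullatts(root, relation);

    /* later, with only the OID at hand; a NULL result means no constraints */
    Relids notnull = find_relation_notnullatts(root, rte->relid);

    if (bms_is_member(attno, notnull))
    {
        /* column attno carries a valid NOT NULL constraint */
    }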
+/*
* infer_arbiter_indexes -
* Determine the unique indexes used to arbitrate speculative insertion.
*
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index a16fdd65601..34f7c17f576 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -239,102 +239,23 @@ parse_sub_analyze(Node *parseTree, ParseState *parentParseState,
}
/*
- * setQueryLocationAndLength
- * Set query's location and length from statement and ParseState
- *
- * Some statements, like PreparableStmt, can be located within parentheses.
- * For example "(SELECT 1)" or "COPY (UPDATE ...) to x;". For those, we
- * cannot use the whole string from the statement's location or the SQL
- * string would yield incorrectly. The parser will set stmt_len, reflecting
- * the size of the statement within the parentheses. Thus, when stmt_len is
- * available, we need to use it for the Query's stmt_len.
- *
- * For other cases, the parser can't provide the length of individual
- * statements. However, we have the statement's location plus the length
- * (p_stmt_len) and location (p_stmt_location) of the top level RawStmt,
- * stored in pstate. Thus, the statement's length is the RawStmt's length
- * minus how much we've advanced in the RawStmt's string. If p_stmt_len
- * is 0, the SQL string is used up to its end.
- */
-static void
-setQueryLocationAndLength(ParseState *pstate, Query *qry, Node *parseTree)
-{
- ParseLoc stmt_len = 0;
-
- switch (nodeTag(parseTree))
- {
- case T_InsertStmt:
- qry->stmt_location = ((InsertStmt *) parseTree)->stmt_location;
- stmt_len = ((InsertStmt *) parseTree)->stmt_len;
- break;
-
- case T_DeleteStmt:
- qry->stmt_location = ((DeleteStmt *) parseTree)->stmt_location;
- stmt_len = ((DeleteStmt *) parseTree)->stmt_len;
- break;
-
- case T_UpdateStmt:
- qry->stmt_location = ((UpdateStmt *) parseTree)->stmt_location;
- stmt_len = ((UpdateStmt *) parseTree)->stmt_len;
- break;
-
- case T_MergeStmt:
- qry->stmt_location = ((MergeStmt *) parseTree)->stmt_location;
- stmt_len = ((MergeStmt *) parseTree)->stmt_len;
- break;
-
- case T_SelectStmt:
- qry->stmt_location = ((SelectStmt *) parseTree)->stmt_location;
- stmt_len = ((SelectStmt *) parseTree)->stmt_len;
- break;
-
- case T_PLAssignStmt:
- qry->stmt_location = ((PLAssignStmt *) parseTree)->location;
- break;
-
- default:
- qry->stmt_location = pstate->p_stmt_location;
- break;
- }
-
- if (stmt_len > 0)
- {
- /* Statement's length is known, use it */
- qry->stmt_len = stmt_len;
- }
- else if (pstate->p_stmt_len > 0)
- {
- /*
- * The top RawStmt's length is known, so calculate the statement's
- * length from the statement's location and the RawStmt's length and
- * location.
- */
- qry->stmt_len = pstate->p_stmt_len - (qry->stmt_location - pstate->p_stmt_location);
- }
-
- /* The calculated statement length should be calculated as positive. */
- Assert(qry->stmt_len >= 0);
-}
-
-/*
* transformTopLevelStmt -
* transform a Parse tree into a Query tree.
*
- * This function is just responsible for storing location data
- * from the RawStmt into the ParseState.
+ * This function is just responsible for transferring statement location data
+ * from the RawStmt into the finished Query.
*/
Query *
transformTopLevelStmt(ParseState *pstate, RawStmt *parseTree)
{
Query *result;
- /* Store RawStmt's length and location in pstate */
- pstate->p_stmt_len = parseTree->stmt_len;
- pstate->p_stmt_location = parseTree->stmt_location;
-
/* We're at top level, so allow SELECT INTO */
result = transformOptionalSelectInto(pstate, parseTree->stmt);
+ result->stmt_location = parseTree->stmt_location;
+ result->stmt_len = parseTree->stmt_len;
+
return result;
}
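A minimal sketch of what the relocated fields buy (assuming sourceText is the original query string and stmt_location is known, i.e. >= 0; real consumers also handle the -1 case): the text of one statement inside a multi-statement string becomes a simple substring.

    int     loc = query->stmt_location;
    int     len = (query->stmt_len > 0)
                  ? query->stmt_len
                  : (int) strlen(sourceText) - loc;
    char   *stmt_text = pnstrdup(sourceText + loc, len);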
@@ -503,7 +424,6 @@ transformStmt(ParseState *pstate, Node *parseTree)
/* Mark as original query until we learn differently */
result->querySource = QSRC_ORIGINAL;
result->canSetTag = true;
- setQueryLocationAndLength(pstate, result, parseTree);
return result;
}
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 0b5652071d1..db43034b9db 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -154,7 +154,6 @@ static void base_yyerror(YYLTYPE *yylloc, core_yyscan_t yyscanner,
const char *msg);
static RawStmt *makeRawStmt(Node *stmt, int stmt_location);
static void updateRawStmtEnd(RawStmt *rs, int end_location);
-static void updatePreparableStmtEnd(Node *n, int end_location);
static Node *makeColumnRef(char *colname, List *indirection,
int location, core_yyscan_t yyscanner);
static Node *makeTypeCast(Node *arg, TypeName *typename, int location);
@@ -178,13 +177,13 @@ static void insertSelectOptions(SelectStmt *stmt,
SelectLimit *limitClause,
WithClause *withClause,
core_yyscan_t yyscanner);
-static Node *makeSetOp(SetOperation op, bool all, Node *larg, Node *rarg, int location);
+static Node *makeSetOp(SetOperation op, bool all, Node *larg, Node *rarg);
static Node *doNegate(Node *n, int location);
static void doNegateFloat(Float *v);
static Node *makeAndExpr(Node *lexpr, Node *rexpr, int location);
static Node *makeOrExpr(Node *lexpr, Node *rexpr, int location);
static Node *makeNotExpr(Node *expr, int location);
-static Node *makeAArrayExpr(List *elements, int location);
+static Node *makeAArrayExpr(List *elements, int location, int end_location);
static Node *makeSQLValueFunction(SQLValueFunctionOp op, int32 typmod,
int location);
static Node *makeXmlExpr(XmlExprOp op, char *name, List *named_args,
@@ -319,6 +318,11 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
%type <list> opt_qualified_name
%type <boolean> opt_concurrently
%type <dbehavior> opt_drop_behavior
+%type <list> opt_utility_option_list
+%type <list> utility_option_list
+%type <defelt> utility_option_elem
+%type <str> utility_option_name
+%type <node> utility_option_arg
%type <node> alter_column_default opclass_item opclass_drop alter_using
%type <ival> add_drop opt_asc_desc opt_nulls_order
@@ -339,10 +343,6 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
create_extension_opt_item alter_extension_opt_item
%type <ival> opt_lock lock_type cast_context
-%type <str> utility_option_name
-%type <defelt> utility_option_elem
-%type <list> utility_option_list
-%type <node> utility_option_arg
%type <defelt> drop_option
%type <boolean> opt_or_replace opt_no
opt_grant_grant_option
@@ -523,7 +523,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
%type <defelt> def_elem reloption_elem old_aggr_elem operator_def_elem
%type <node> def_arg columnElem where_clause where_or_current_clause
a_expr b_expr c_expr AexprConst indirection_el opt_slice_bound
- columnref in_expr having_clause func_table xmltable array_expr
+ columnref having_clause func_table xmltable array_expr
OptWhereClause operator_def_arg
%type <list> opt_column_and_period_list
%type <list> rowsfrom_item rowsfrom_list opt_col_def_list
@@ -557,7 +557,6 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
%type <list> generic_option_list alter_generic_option_list
%type <ival> reindex_target_relation reindex_target_all
-%type <list> opt_reindex_option_list
%type <node> copy_generic_opt_arg copy_generic_opt_arg_list_item
%type <defelt> copy_generic_opt_elem
@@ -1142,6 +1141,41 @@ opt_drop_behavior:
| /* EMPTY */ { $$ = DROP_RESTRICT; /* default */ }
;
+opt_utility_option_list:
+ '(' utility_option_list ')' { $$ = $2; }
+ | /* EMPTY */ { $$ = NULL; }
+ ;
+
+utility_option_list:
+ utility_option_elem
+ {
+ $$ = list_make1($1);
+ }
+ | utility_option_list ',' utility_option_elem
+ {
+ $$ = lappend($1, $3);
+ }
+ ;
+
+utility_option_elem:
+ utility_option_name utility_option_arg
+ {
+ $$ = makeDefElem($1, $2, @1);
+ }
+ ;
+
+utility_option_name:
+ NonReservedWord { $$ = $1; }
+ | analyze_keyword { $$ = "analyze"; }
+ | FORMAT_LA { $$ = "format"; }
+ ;
+
+utility_option_arg:
+ opt_boolean_or_string { $$ = (Node *) makeString($1); }
+ | NumericOnly { $$ = (Node *) $1; }
+ | /* EMPTY */ { $$ = NULL; }
+ ;
+
/*****************************************************************************
*
* CALL statement
@@ -2029,11 +2063,12 @@ constraints_set_mode:
* Checkpoint statement
*/
CheckPointStmt:
- CHECKPOINT
+ CHECKPOINT opt_utility_option_list
{
CheckPointStmt *n = makeNode(CheckPointStmt);
$$ = (Node *) n;
+ n->options = $2;
}
;
@@ -2669,6 +2704,12 @@ alter_table_cmd:
c->alterDeferrability = true;
if ($4 & CAS_NO_INHERIT)
c->alterInheritability = true;
+ /* handle unsupported case with specific error message */
+ if ($4 & CAS_NOT_VALID)
+ ereport(ERROR,
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("constraints cannot be altered to be NOT VALID"),
+ parser_errposition(@4));
processCASbits($4, @4, "FOREIGN KEY",
&c->deferrable,
&c->initdeferred,
@@ -3417,7 +3458,6 @@ CopyStmt: COPY opt_binary qualified_name opt_column_list
{
CopyStmt *n = makeNode(CopyStmt);
- updatePreparableStmtEnd($3, @4);
n->relation = NULL;
n->query = $3;
n->attlist = NIL;
@@ -6037,6 +6077,26 @@ CreateTrigStmt:
EXECUTE FUNCTION_or_PROCEDURE func_name '(' TriggerFuncArgs ')'
{
CreateTrigStmt *n = makeNode(CreateTrigStmt);
+ bool dummy;
+
+ if (($11 & CAS_NOT_VALID) != 0)
+ ereport(ERROR,
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("constraint triggers cannot be marked %s",
+ "NOT VALID"),
+ parser_errposition(@11));
+ if (($11 & CAS_NO_INHERIT) != 0)
+ ereport(ERROR,
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("constraint triggers cannot be marked %s",
+ "NO INHERIT"),
+ parser_errposition(@11));
+ if (($11 & CAS_NOT_ENFORCED) != 0)
+ ereport(ERROR,
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("constraint triggers cannot be marked %s",
+ "NOT ENFORCED"),
+ parser_errposition(@11));
n->replace = $2;
if (n->replace) /* not supported, see CreateTrigger */
@@ -6056,7 +6116,7 @@ CreateTrigStmt:
n->whenClause = $15;
n->transitionRels = NIL;
processCASbits($11, @11, "TRIGGER",
- &n->deferrable, &n->initdeferred, NULL,
+ &n->deferrable, &n->initdeferred, &dummy,
NULL, NULL, yyscanner);
n->constrrel = $10;
$$ = (Node *) n;
@@ -7479,6 +7539,8 @@ fetch_args: cursor_name
n->portalname = $1;
n->direction = FETCH_FORWARD;
n->howMany = 1;
+ n->location = -1;
+ n->direction_keyword = FETCH_KEYWORD_NONE;
$$ = (Node *) n;
}
| from_in cursor_name
@@ -7488,6 +7550,19 @@ fetch_args: cursor_name
n->portalname = $2;
n->direction = FETCH_FORWARD;
n->howMany = 1;
+ n->location = -1;
+ n->direction_keyword = FETCH_KEYWORD_NONE;
+ $$ = (Node *) n;
+ }
+ | SignedIconst opt_from_in cursor_name
+ {
+ FetchStmt *n = makeNode(FetchStmt);
+
+ n->portalname = $3;
+ n->direction = FETCH_FORWARD;
+ n->howMany = $1;
+ n->location = @1;
+ n->direction_keyword = FETCH_KEYWORD_NONE;
$$ = (Node *) n;
}
| NEXT opt_from_in cursor_name
@@ -7497,6 +7572,8 @@ fetch_args: cursor_name
n->portalname = $3;
n->direction = FETCH_FORWARD;
n->howMany = 1;
+ n->location = -1;
+ n->direction_keyword = FETCH_KEYWORD_NEXT;
$$ = (Node *) n;
}
| PRIOR opt_from_in cursor_name
@@ -7506,6 +7583,8 @@ fetch_args: cursor_name
n->portalname = $3;
n->direction = FETCH_BACKWARD;
n->howMany = 1;
+ n->location = -1;
+ n->direction_keyword = FETCH_KEYWORD_PRIOR;
$$ = (Node *) n;
}
| FIRST_P opt_from_in cursor_name
@@ -7515,6 +7594,8 @@ fetch_args: cursor_name
n->portalname = $3;
n->direction = FETCH_ABSOLUTE;
n->howMany = 1;
+ n->location = -1;
+ n->direction_keyword = FETCH_KEYWORD_FIRST;
$$ = (Node *) n;
}
| LAST_P opt_from_in cursor_name
@@ -7524,6 +7605,8 @@ fetch_args: cursor_name
n->portalname = $3;
n->direction = FETCH_ABSOLUTE;
n->howMany = -1;
+ n->location = -1;
+ n->direction_keyword = FETCH_KEYWORD_LAST;
$$ = (Node *) n;
}
| ABSOLUTE_P SignedIconst opt_from_in cursor_name
@@ -7533,6 +7616,8 @@ fetch_args: cursor_name
n->portalname = $4;
n->direction = FETCH_ABSOLUTE;
n->howMany = $2;
+ n->location = @2;
+ n->direction_keyword = FETCH_KEYWORD_ABSOLUTE;
$$ = (Node *) n;
}
| RELATIVE_P SignedIconst opt_from_in cursor_name
@@ -7542,15 +7627,8 @@ fetch_args: cursor_name
n->portalname = $4;
n->direction = FETCH_RELATIVE;
n->howMany = $2;
- $$ = (Node *) n;
- }
- | SignedIconst opt_from_in cursor_name
- {
- FetchStmt *n = makeNode(FetchStmt);
-
- n->portalname = $3;
- n->direction = FETCH_FORWARD;
- n->howMany = $1;
+ n->location = @2;
+ n->direction_keyword = FETCH_KEYWORD_RELATIVE;
$$ = (Node *) n;
}
| ALL opt_from_in cursor_name
@@ -7560,6 +7638,8 @@ fetch_args: cursor_name
n->portalname = $3;
n->direction = FETCH_FORWARD;
n->howMany = FETCH_ALL;
+ n->location = -1;
+ n->direction_keyword = FETCH_KEYWORD_ALL;
$$ = (Node *) n;
}
| FORWARD opt_from_in cursor_name
@@ -7569,6 +7649,8 @@ fetch_args: cursor_name
n->portalname = $3;
n->direction = FETCH_FORWARD;
n->howMany = 1;
+ n->location = -1;
+ n->direction_keyword = FETCH_KEYWORD_FORWARD;
$$ = (Node *) n;
}
| FORWARD SignedIconst opt_from_in cursor_name
@@ -7578,6 +7660,8 @@ fetch_args: cursor_name
n->portalname = $4;
n->direction = FETCH_FORWARD;
n->howMany = $2;
+ n->location = @2;
+ n->direction_keyword = FETCH_KEYWORD_FORWARD;
$$ = (Node *) n;
}
| FORWARD ALL opt_from_in cursor_name
@@ -7587,6 +7671,8 @@ fetch_args: cursor_name
n->portalname = $4;
n->direction = FETCH_FORWARD;
n->howMany = FETCH_ALL;
+ n->location = -1;
+ n->direction_keyword = FETCH_KEYWORD_FORWARD_ALL;
$$ = (Node *) n;
}
| BACKWARD opt_from_in cursor_name
@@ -7596,6 +7682,8 @@ fetch_args: cursor_name
n->portalname = $3;
n->direction = FETCH_BACKWARD;
n->howMany = 1;
+ n->location = -1;
+ n->direction_keyword = FETCH_KEYWORD_BACKWARD;
$$ = (Node *) n;
}
| BACKWARD SignedIconst opt_from_in cursor_name
@@ -7605,6 +7693,8 @@ fetch_args: cursor_name
n->portalname = $4;
n->direction = FETCH_BACKWARD;
n->howMany = $2;
+ n->location = @2;
+ n->direction_keyword = FETCH_KEYWORD_BACKWARD;
$$ = (Node *) n;
}
| BACKWARD ALL opt_from_in cursor_name
@@ -7614,6 +7704,8 @@ fetch_args: cursor_name
n->portalname = $4;
n->direction = FETCH_BACKWARD;
n->howMany = FETCH_ALL;
+ n->location = -1;
+ n->direction_keyword = FETCH_KEYWORD_BACKWARD_ALL;
$$ = (Node *) n;
}
;
@@ -9291,7 +9383,7 @@ DropTransformStmt: DROP TRANSFORM opt_if_exists FOR Typename LANGUAGE name opt_d
*****************************************************************************/
ReindexStmt:
- REINDEX opt_reindex_option_list reindex_target_relation opt_concurrently qualified_name
+ REINDEX opt_utility_option_list reindex_target_relation opt_concurrently qualified_name
{
ReindexStmt *n = makeNode(ReindexStmt);
@@ -9304,7 +9396,7 @@ ReindexStmt:
makeDefElem("concurrently", NULL, @4));
$$ = (Node *) n;
}
- | REINDEX opt_reindex_option_list SCHEMA opt_concurrently name
+ | REINDEX opt_utility_option_list SCHEMA opt_concurrently name
{
ReindexStmt *n = makeNode(ReindexStmt);
@@ -9317,7 +9409,7 @@ ReindexStmt:
makeDefElem("concurrently", NULL, @4));
$$ = (Node *) n;
}
- | REINDEX opt_reindex_option_list reindex_target_all opt_concurrently opt_single_name
+ | REINDEX opt_utility_option_list reindex_target_all opt_concurrently opt_single_name
{
ReindexStmt *n = makeNode(ReindexStmt);
@@ -9339,10 +9431,6 @@ reindex_target_all:
SYSTEM_P { $$ = REINDEX_OBJECT_SYSTEM; }
| DATABASE { $$ = REINDEX_OBJECT_DATABASE; }
;
-opt_reindex_option_list:
- '(' utility_option_list ')' { $$ = $2; }
- | /* EMPTY */ { $$ = NULL; }
- ;
/*****************************************************************************
*
@@ -11629,7 +11717,7 @@ AlterDomainStmt:
{
AlterDomainStmt *n = makeNode(AlterDomainStmt);
- n->subtype = 'T';
+ n->subtype = AD_AlterDefault;
n->typeName = $3;
n->def = $4;
$$ = (Node *) n;
@@ -11639,7 +11727,7 @@ AlterDomainStmt:
{
AlterDomainStmt *n = makeNode(AlterDomainStmt);
- n->subtype = 'N';
+ n->subtype = AD_DropNotNull;
n->typeName = $3;
$$ = (Node *) n;
}
@@ -11648,7 +11736,7 @@ AlterDomainStmt:
{
AlterDomainStmt *n = makeNode(AlterDomainStmt);
- n->subtype = 'O';
+ n->subtype = AD_SetNotNull;
n->typeName = $3;
$$ = (Node *) n;
}
@@ -11657,7 +11745,7 @@ AlterDomainStmt:
{
AlterDomainStmt *n = makeNode(AlterDomainStmt);
- n->subtype = 'C';
+ n->subtype = AD_AddConstraint;
n->typeName = $3;
n->def = $5;
$$ = (Node *) n;
@@ -11667,7 +11755,7 @@ AlterDomainStmt:
{
AlterDomainStmt *n = makeNode(AlterDomainStmt);
- n->subtype = 'X';
+ n->subtype = AD_DropConstraint;
n->typeName = $3;
n->name = $6;
n->behavior = $7;
@@ -11679,7 +11767,7 @@ AlterDomainStmt:
{
AlterDomainStmt *n = makeNode(AlterDomainStmt);
- n->subtype = 'X';
+ n->subtype = AD_DropConstraint;
n->typeName = $3;
n->name = $8;
n->behavior = $9;
@@ -11691,7 +11779,7 @@ AlterDomainStmt:
{
AlterDomainStmt *n = makeNode(AlterDomainStmt);
- n->subtype = 'V';
+ n->subtype = AD_ValidateConstraint;
n->typeName = $3;
n->name = $6;
$$ = (Node *) n;
@@ -11840,13 +11928,13 @@ ClusterStmt:
n->params = $3;
$$ = (Node *) n;
}
- | CLUSTER '(' utility_option_list ')'
+ | CLUSTER opt_utility_option_list
{
ClusterStmt *n = makeNode(ClusterStmt);
n->relation = NULL;
n->indexname = NULL;
- n->params = $3;
+ n->params = $2;
$$ = (Node *) n;
}
/* unparenthesized VERBOSE kept for pre-14 compatibility */
@@ -11856,21 +11944,18 @@ ClusterStmt:
n->relation = $3;
n->indexname = $4;
- n->params = NIL;
if ($2)
- n->params = lappend(n->params, makeDefElem("verbose", NULL, @2));
+ n->params = list_make1(makeDefElem("verbose", NULL, @2));
$$ = (Node *) n;
}
/* unparenthesized VERBOSE kept for pre-17 compatibility */
- | CLUSTER opt_verbose
+ | CLUSTER VERBOSE
{
ClusterStmt *n = makeNode(ClusterStmt);
n->relation = NULL;
n->indexname = NULL;
- n->params = NIL;
- if ($2)
- n->params = lappend(n->params, makeDefElem("verbose", NULL, @2));
+ n->params = list_make1(makeDefElem("verbose", NULL, @2));
$$ = (Node *) n;
}
/* kept for pre-8.3 compatibility */
@@ -11880,9 +11965,8 @@ ClusterStmt:
n->relation = $5;
n->indexname = $3;
- n->params = NIL;
if ($2)
- n->params = lappend(n->params, makeDefElem("verbose", NULL, @2));
+ n->params = list_make1(makeDefElem("verbose", NULL, @2));
$$ = (Node *) n;
}
;
@@ -11933,64 +12017,31 @@ VacuumStmt: VACUUM opt_full opt_freeze opt_verbose opt_analyze opt_vacuum_relati
}
;
-AnalyzeStmt: analyze_keyword opt_verbose opt_vacuum_relation_list
+AnalyzeStmt: analyze_keyword opt_utility_option_list opt_vacuum_relation_list
{
VacuumStmt *n = makeNode(VacuumStmt);
- n->options = NIL;
- if ($2)
- n->options = lappend(n->options,
- makeDefElem("verbose", NULL, @2));
+ n->options = $2;
n->rels = $3;
n->is_vacuumcmd = false;
$$ = (Node *) n;
}
- | analyze_keyword '(' utility_option_list ')' opt_vacuum_relation_list
+ | analyze_keyword VERBOSE opt_vacuum_relation_list
{
VacuumStmt *n = makeNode(VacuumStmt);
- n->options = $3;
- n->rels = $5;
+ n->options = list_make1(makeDefElem("verbose", NULL, @2));
+ n->rels = $3;
n->is_vacuumcmd = false;
$$ = (Node *) n;
}
;
-utility_option_list:
- utility_option_elem
- {
- $$ = list_make1($1);
- }
- | utility_option_list ',' utility_option_elem
- {
- $$ = lappend($1, $3);
- }
- ;
-
analyze_keyword:
ANALYZE
| ANALYSE /* British */
;
-utility_option_elem:
- utility_option_name utility_option_arg
- {
- $$ = makeDefElem($1, $2, @1);
- }
- ;
-
-utility_option_name:
- NonReservedWord { $$ = $1; }
- | analyze_keyword { $$ = "analyze"; }
- | FORMAT_LA { $$ = "format"; }
- ;
-
-utility_option_arg:
- opt_boolean_or_string { $$ = (Node *) makeString($1); }
- | NumericOnly { $$ = (Node *) $1; }
- | /* EMPTY */ { $$ = NULL; }
- ;
-
opt_analyze:
analyze_keyword { $$ = true; }
| /*EMPTY*/ { $$ = false; }
@@ -12240,7 +12291,6 @@ InsertStmt:
$5->onConflictClause = $6;
$5->returningClause = $7;
$5->withClause = $1;
- $5->stmt_location = @$;
$$ = (Node *) $5;
}
;
@@ -12431,7 +12481,6 @@ DeleteStmt: opt_with_clause DELETE_P FROM relation_expr_opt_alias
n->whereClause = $6;
n->returningClause = $7;
n->withClause = $1;
- n->stmt_location = @$;
$$ = (Node *) n;
}
;
@@ -12506,7 +12555,6 @@ UpdateStmt: opt_with_clause UPDATE relation_expr_opt_alias
n->whereClause = $7;
n->returningClause = $8;
n->withClause = $1;
- n->stmt_location = @$;
$$ = (Node *) n;
}
;
@@ -12584,7 +12632,6 @@ MergeStmt:
m->joinCondition = $8;
m->mergeWhenClauses = $9;
m->returningClause = $10;
- m->stmt_location = @$;
$$ = (Node *) m;
}
@@ -12825,20 +12872,7 @@ SelectStmt: select_no_parens %prec UMINUS
;
select_with_parens:
- '(' select_no_parens ')'
- {
- SelectStmt *n = (SelectStmt *) $2;
-
- /*
- * As SelectStmt's location starts at the SELECT keyword,
- * we need to track the length of the SelectStmt within
- * parentheses to be able to extract the relevant part
- * of the query. Without this, the RawStmt's length would
- * be used and would include the closing parenthesis.
- */
- n->stmt_len = @3 - @2;
- $$ = $2;
- }
+ '(' select_no_parens ')' { $$ = $2; }
| '(' select_with_parens ')' { $$ = $2; }
;
@@ -12960,7 +12994,6 @@ simple_select:
n->groupDistinct = ($7)->distinct;
n->havingClause = $8;
n->windowClause = $9;
- n->stmt_location = @1;
$$ = (Node *) n;
}
| SELECT distinct_clause target_list
@@ -12978,7 +13011,6 @@ simple_select:
n->groupDistinct = ($7)->distinct;
n->havingClause = $8;
n->windowClause = $9;
- n->stmt_location = @1;
$$ = (Node *) n;
}
| values_clause { $$ = $1; }
@@ -12999,20 +13031,19 @@ simple_select:
n->targetList = list_make1(rt);
n->fromClause = list_make1($2);
- n->stmt_location = @1;
$$ = (Node *) n;
}
| select_clause UNION set_quantifier select_clause
{
- $$ = makeSetOp(SETOP_UNION, $3 == SET_QUANTIFIER_ALL, $1, $4, @1);
+ $$ = makeSetOp(SETOP_UNION, $3 == SET_QUANTIFIER_ALL, $1, $4);
}
| select_clause INTERSECT set_quantifier select_clause
{
- $$ = makeSetOp(SETOP_INTERSECT, $3 == SET_QUANTIFIER_ALL, $1, $4, @1);
+ $$ = makeSetOp(SETOP_INTERSECT, $3 == SET_QUANTIFIER_ALL, $1, $4);
}
| select_clause EXCEPT set_quantifier select_clause
{
- $$ = makeSetOp(SETOP_EXCEPT, $3 == SET_QUANTIFIER_ALL, $1, $4, @1);
+ $$ = makeSetOp(SETOP_EXCEPT, $3 == SET_QUANTIFIER_ALL, $1, $4);
}
;
@@ -13590,7 +13621,6 @@ values_clause:
{
SelectStmt *n = makeNode(SelectStmt);
- n->stmt_location = @1;
n->valuesLists = list_make1($3);
$$ = (Node *) n;
}
@@ -15287,49 +15317,50 @@ a_expr: c_expr { $$ = $1; }
(Node *) list_make2($5, $7),
@2);
}
- | a_expr IN_P in_expr
+ | a_expr IN_P select_with_parens
{
- /* in_expr returns a SubLink or a list of a_exprs */
- if (IsA($3, SubLink))
- {
- /* generate foo = ANY (subquery) */
- SubLink *n = (SubLink *) $3;
+ /* generate foo = ANY (subquery) */
+ SubLink *n = makeNode(SubLink);
- n->subLinkType = ANY_SUBLINK;
- n->subLinkId = 0;
- n->testexpr = $1;
- n->operName = NIL; /* show it's IN not = ANY */
- n->location = @2;
- $$ = (Node *) n;
- }
- else
- {
- /* generate scalar IN expression */
- $$ = (Node *) makeSimpleA_Expr(AEXPR_IN, "=", $1, $3, @2);
- }
+ n->subselect = $3;
+ n->subLinkType = ANY_SUBLINK;
+ n->subLinkId = 0;
+ n->testexpr = $1;
+ n->operName = NIL; /* show it's IN not = ANY */
+ n->location = @2;
+ $$ = (Node *) n;
}
- | a_expr NOT_LA IN_P in_expr %prec NOT_LA
+ | a_expr IN_P '(' expr_list ')'
{
- /* in_expr returns a SubLink or a list of a_exprs */
- if (IsA($4, SubLink))
- {
- /* generate NOT (foo = ANY (subquery)) */
- /* Make an = ANY node */
- SubLink *n = (SubLink *) $4;
-
- n->subLinkType = ANY_SUBLINK;
- n->subLinkId = 0;
- n->testexpr = $1;
- n->operName = NIL; /* show it's IN not = ANY */
- n->location = @2;
- /* Stick a NOT on top; must have same parse location */
- $$ = makeNotExpr((Node *) n, @2);
- }
- else
- {
- /* generate scalar NOT IN expression */
- $$ = (Node *) makeSimpleA_Expr(AEXPR_IN, "<>", $1, $4, @2);
- }
+ /* generate scalar IN expression */
+ A_Expr *n = makeSimpleA_Expr(AEXPR_IN, "=", $1, (Node *) $4, @2);
+
+ n->rexpr_list_start = @3;
+ n->rexpr_list_end = @5;
+ $$ = (Node *) n;
+ }
+ | a_expr NOT_LA IN_P select_with_parens %prec NOT_LA
+ {
+ /* generate NOT (foo = ANY (subquery)) */
+ SubLink *n = makeNode(SubLink);
+
+ n->subselect = $4;
+ n->subLinkType = ANY_SUBLINK;
+ n->subLinkId = 0;
+ n->testexpr = $1;
+ n->operName = NIL; /* show it's IN not = ANY */
+ n->location = @2;
+ /* Stick a NOT on top; must have same parse location */
+ $$ = makeNotExpr((Node *) n, @2);
+ }
+ | a_expr NOT_LA IN_P '(' expr_list ')'
+ {
+ /* generate scalar NOT IN expression */
+ A_Expr *n = makeSimpleA_Expr(AEXPR_IN, "<>", $1, (Node *) $5, @2);
+
+ n->rexpr_list_start = @4;
+ n->rexpr_list_end = @6;
+ $$ = (Node *) n;
}
| a_expr subquery_Op sub_type select_with_parens %prec Op
{
@@ -16764,15 +16795,15 @@ type_list: Typename { $$ = list_make1($1); }
array_expr: '[' expr_list ']'
{
- $$ = makeAArrayExpr($2, @1);
+ $$ = makeAArrayExpr($2, @1, @3);
}
| '[' array_expr_list ']'
{
- $$ = makeAArrayExpr($2, @1);
+ $$ = makeAArrayExpr($2, @1, @3);
}
| '[' ']'
{
- $$ = makeAArrayExpr(NIL, @1);
+ $$ = makeAArrayExpr(NIL, @1, @2);
}
;
@@ -16894,17 +16925,6 @@ trim_list: a_expr FROM expr_list { $$ = lappend($3, $1); }
| expr_list { $$ = $1; }
;
-in_expr: select_with_parens
- {
- SubLink *n = makeNode(SubLink);
-
- n->subselect = $1;
- /* other fields will be filled later */
- $$ = (Node *) n;
- }
- | '(' expr_list ')' { $$ = (Node *) $2; }
- ;
-
/*
* Define SQL-style CASE clause.
* - Full specification
@@ -18748,47 +18768,6 @@ updateRawStmtEnd(RawStmt *rs, int end_location)
rs->stmt_len = end_location - rs->stmt_location;
}
-/*
- * Adjust a PreparableStmt to reflect that it doesn't run to the end of the
- * string.
- */
-static void
-updatePreparableStmtEnd(Node *n, int end_location)
-{
- if (IsA(n, SelectStmt))
- {
- SelectStmt *stmt = (SelectStmt *) n;
-
- stmt->stmt_len = end_location - stmt->stmt_location;
- }
- else if (IsA(n, InsertStmt))
- {
- InsertStmt *stmt = (InsertStmt *) n;
-
- stmt->stmt_len = end_location - stmt->stmt_location;
- }
- else if (IsA(n, UpdateStmt))
- {
- UpdateStmt *stmt = (UpdateStmt *) n;
-
- stmt->stmt_len = end_location - stmt->stmt_location;
- }
- else if (IsA(n, DeleteStmt))
- {
- DeleteStmt *stmt = (DeleteStmt *) n;
-
- stmt->stmt_len = end_location - stmt->stmt_location;
- }
- else if (IsA(n, MergeStmt))
- {
- MergeStmt *stmt = (MergeStmt *) n;
-
- stmt->stmt_len = end_location - stmt->stmt_location;
- }
- else
- elog(ERROR, "unexpected node type %d", (int) n->type);
-}
-
static Node *
makeColumnRef(char *colname, List *indirection,
int location, core_yyscan_t yyscanner)
@@ -19167,14 +19146,11 @@ insertSelectOptions(SelectStmt *stmt,
errmsg("multiple WITH clauses not allowed"),
parser_errposition(exprLocation((Node *) withClause))));
stmt->withClause = withClause;
-
- /* Update SelectStmt's location to the start of the WITH clause */
- stmt->stmt_location = withClause->location;
}
}
static Node *
-makeSetOp(SetOperation op, bool all, Node *larg, Node *rarg, int location)
+makeSetOp(SetOperation op, bool all, Node *larg, Node *rarg)
{
SelectStmt *n = makeNode(SelectStmt);
@@ -19182,7 +19158,6 @@ makeSetOp(SetOperation op, bool all, Node *larg, Node *rarg, int location)
n->all = all;
n->larg = (SelectStmt *) larg;
n->rarg = (SelectStmt *) rarg;
- n->stmt_location = location;
return (Node *) n;
}
@@ -19300,12 +19275,14 @@ makeNotExpr(Node *expr, int location)
}
static Node *
-makeAArrayExpr(List *elements, int location)
+makeAArrayExpr(List *elements, int location, int location_end)
{
A_ArrayExpr *n = makeNode(A_ArrayExpr);
n->elements = elements;
n->location = location;
+ n->list_start = location;
+ n->list_end = location_end;
return (Node *) n;
}
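
The net effect of the gram.y changes above is that per-statement location bookkeeping (stmt_location/stmt_len) moves out of the grammar, while bracketed lists gain explicit bounds: rexpr_list_start/rexpr_list_end on A_Expr and list_start/list_end on A_ArrayExpr. A minimal sketch of what those bounds enable; the helper below is hypothetical, not part of the patch, and assumes both fields hold valid byte offsets with rexpr_list_end pointing at the closing parenthesis token:

static char *
in_list_source_text(const char *query_string, const A_Expr *a)
{
	/* span from '(' through ')', inclusive */
	int			len = a->rexpr_list_end - a->rexpr_list_start + 1;
	char	   *result = palloc(len + 1);

	memcpy(result, query_string + a->rexpr_list_start, len);
	result[len] = '\0';
	return result;
}

For "a IN (1, 2, 3)" this would return the text "(1, 2, 3)".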
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index 1f8e2d54673..d66276801c6 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -1223,6 +1223,8 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
newa->element_typeid = scalar_type;
newa->elements = aexprs;
newa->multidims = false;
+ newa->list_start = a->rexpr_list_start;
+ newa->list_end = a->rexpr_list_end;
newa->location = -1;
result = (Node *) make_scalar_array_op(pstate,
@@ -2165,6 +2167,8 @@ transformArrayExpr(ParseState *pstate, A_ArrayExpr *a,
/* array_collid will be set by parse_collate.c */
newa->element_typeid = element_type;
newa->elements = newcoercedelems;
+ newa->list_start = a->list_start;
+ newa->list_end = a->list_end;
newa->location = a->location;
return (Node *) newa;
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 62015431fdf..afcf54169c3 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -1279,6 +1279,28 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
lst = RelationGetNotNullConstraints(RelationGetRelid(relation), false,
true);
cxt->nnconstraints = list_concat(cxt->nnconstraints, lst);
+
+ /* Copy comments on not-null constraints */
+ if (table_like_clause->options & CREATE_TABLE_LIKE_COMMENTS)
+ {
+ foreach_node(Constraint, nnconstr, lst)
+ {
+ if ((comment = GetComment(get_relation_constraint_oid(RelationGetRelid(relation),
+ nnconstr->conname, false),
+ ConstraintRelationId,
+ 0)) != NULL)
+ {
+ CommentStmt *stmt = makeNode(CommentStmt);
+
+ stmt->objtype = OBJECT_TABCONSTRAINT;
+ stmt->object = (Node *) list_make3(makeString(cxt->relation->schemaname),
+ makeString(cxt->relation->relname),
+ makeString(nnconstr->conname));
+ stmt->comment = comment;
+ cxt->alist = lappend(cxt->alist, stmt);
+ }
+ }
+ }
}
/*
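
The comments copied above are attached as CommentStmt nodes on cxt->alist, which the caller executes after the new table exists. For a table constraint the object address is the three-part name list (schema, table, constraint); a hedged sketch with hypothetical names, mirroring the construction in the hunk:

	CommentStmt *stmt = makeNode(CommentStmt);

	stmt->objtype = OBJECT_TABCONSTRAINT;
	/* object address: (schema, table, constraint) */
	stmt->object = (Node *) list_make3(makeString("public"),
									   makeString("t"),
									   makeString("t_a_not_null"));
	stmt->comment = "copied by LIKE ... INCLUDING COMMENTS";

Because cxt->alist runs after the CREATE TABLE itself, the constraint named here is resolvable by the time the comment is applied.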
diff --git a/src/backend/partitioning/partbounds.c b/src/backend/partitioning/partbounds.c
index 4bdc2941efb..822cf4ec451 100644
--- a/src/backend/partitioning/partbounds.c
+++ b/src/backend/partitioning/partbounds.c
@@ -1007,9 +1007,6 @@ partition_bounds_copy(PartitionBoundInfo src,
int ndatums;
int nindexes;
int partnatts;
- bool hash_part;
- int natts;
- Datum *boundDatums;
dest = (PartitionBoundInfo) palloc(sizeof(PartitionBoundInfoData));
@@ -1023,7 +1020,7 @@ partition_bounds_copy(PartitionBoundInfo src,
dest->datums = (Datum **) palloc(sizeof(Datum *) * ndatums);
- if (src->kind != NULL)
+ if (src->kind != NULL && ndatums > 0)
{
PartitionRangeDatumKind *boundKinds;
@@ -1058,36 +1055,40 @@ partition_bounds_copy(PartitionBoundInfo src,
* For hash partitioning, datums array will have two elements - modulus
* and remainder.
*/
- hash_part = (key->strategy == PARTITION_STRATEGY_HASH);
- natts = hash_part ? 2 : partnatts;
- boundDatums = palloc(ndatums * natts * sizeof(Datum));
-
- for (i = 0; i < ndatums; i++)
+ if (ndatums > 0)
{
- int j;
-
- dest->datums[i] = &boundDatums[i * natts];
+ bool hash_part = (key->strategy == PARTITION_STRATEGY_HASH);
+ int natts = hash_part ? 2 : partnatts;
+ Datum *boundDatums = palloc(ndatums * natts * sizeof(Datum));
- for (j = 0; j < natts; j++)
+ for (i = 0; i < ndatums; i++)
{
- bool byval;
- int typlen;
+ int j;
- if (hash_part)
- {
- typlen = sizeof(int32); /* Always int4 */
- byval = true; /* int4 is pass-by-value */
- }
- else
+ dest->datums[i] = &boundDatums[i * natts];
+
+ for (j = 0; j < natts; j++)
{
- byval = key->parttypbyval[j];
- typlen = key->parttyplen[j];
- }
+ if (dest->kind == NULL ||
+ dest->kind[i][j] == PARTITION_RANGE_DATUM_VALUE)
+ {
+ bool byval;
+ int typlen;
- if (dest->kind == NULL ||
- dest->kind[i][j] == PARTITION_RANGE_DATUM_VALUE)
- dest->datums[i][j] = datumCopy(src->datums[i][j],
- byval, typlen);
+ if (hash_part)
+ {
+ typlen = sizeof(int32); /* Always int4 */
+ byval = true; /* int4 is pass-by-value */
+ }
+ else
+ {
+ byval = key->parttypbyval[j];
+ typlen = key->parttyplen[j];
+ }
+ dest->datums[i][j] = datumCopy(src->datums[i][j],
+ byval, typlen);
+ }
+ }
}
}
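
The restructured copy only allocates boundDatums when ndatums > 0, avoiding a zero-size palloc for an empty bound collection, and keeps the hash-vs-range distinction described in the comment above: for hash partitioning each bounds row is the (modulus, remainder) pair of int4 datums. A minimal sketch of copying one such row, as a hypothetical helper under those assumptions:

static void
copy_hash_bound_row(Datum *dst, const Datum *src)
{
	/* modulus and remainder are always int4: pass-by-value, 4 bytes */
	dst[0] = datumCopy(src[0], true, sizeof(int32));	/* modulus */
	dst[1] = datumCopy(src[1], true, sizeof(int32));	/* remainder */
}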
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 451fb90a610..ff96b36d710 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -310,6 +310,16 @@ static AutoVacuumShmemStruct *AutoVacuumShmem;
static dlist_head DatabaseList = DLIST_STATIC_INIT(DatabaseList);
static MemoryContext DatabaseListCxt = NULL;
+/*
+ * Dummy pointer to persuade Valgrind that we've not leaked the array of
+ * avl_dbase structs. Make it global to ensure the compiler doesn't
+ * optimize it away.
+ */
+#ifdef USE_VALGRIND
+extern avl_dbase *avl_dbase_array;
+avl_dbase *avl_dbase_array;
+#endif
+
/* Pointer to my own WorkerInfo, valid on each worker */
static WorkerInfo MyWorkerInfo = NULL;
@@ -562,10 +572,10 @@ AutoVacLauncherMain(const void *startup_data, size_t startup_data_len)
/*
* Create the initial database list. The invariant we want this list to
- * keep is that it's ordered by decreasing next_time. As soon as an entry
- * is updated to a higher time, it will be moved to the front (which is
- * correct because the only operation is to add autovacuum_naptime to the
- * entry, and time always increases).
+ * keep is that it's ordered by decreasing next_worker. As soon as an
+ * entry is updated to a higher time, it will be moved to the front (which
+ * is correct because the only operation is to add autovacuum_naptime to
+ * the entry, and time always increases).
*/
rebuild_database_list(InvalidOid);
@@ -1020,6 +1030,10 @@ rebuild_database_list(Oid newdb)
/* put all the hash elements into an array */
dbary = palloc(nelems * sizeof(avl_dbase));
+ /* keep Valgrind quiet */
+#ifdef USE_VALGRIND
+ avl_dbase_array = dbary;
+#endif
i = 0;
hash_seq_init(&seq, dbhash);
@@ -2565,8 +2579,18 @@ deleted:
/*
* We leak table_toast_map here (among other things), but since we're
- * going away soon, it's not a problem.
+ * going away soon, it's not a problem normally. But when using Valgrind,
+ * release the larger allocations to reduce complaints about leaked storage.
*/
+#ifdef USE_VALGRIND
+ hash_destroy(table_toast_map);
+ FreeTupleDesc(pg_class_desc);
+ if (bstrategy)
+ pfree(bstrategy);
+#endif
+
+ /* Run the rest in xact context, mainly to avoid Valgrind leak warnings */
+ MemoryContextSwitchTo(TopTransactionContext);
/*
* Update pg_database.datfrozenxid, and truncate pg_xact if possible. We
@@ -3190,7 +3214,7 @@ autovacuum_do_vac_analyze(autovac_table *tab, BufferAccessStrategy bstrategy)
rel_list = list_make1(rel);
MemoryContextSwitchTo(old_context);
- vacuum(rel_list, &tab->at_params, bstrategy, vac_context, true);
+ vacuum(rel_list, tab->at_params, bstrategy, vac_context, true);
MemoryContextDelete(vac_context);
}
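
The USE_VALGRIND hunks here and in pmchild.c below use the same idiom: keep a global pointer to an allocation that is deliberately never freed, so Valgrind reports it as still reachable instead of leaked, and the extern declaration stops the compiler from discarding the otherwise-dead store. A standalone sketch of the pattern in plain C, with hypothetical names:

#include <stdlib.h>

#ifdef USE_VALGRIND
extern int *keep_reachable;
int		   *keep_reachable;
#endif

static int *
alloc_for_process_lifetime(size_t n)
{
	int		   *arr = malloc(n * sizeof(int));

#ifdef USE_VALGRIND
	keep_reachable = arr;		/* still reachable, so no leak report */
#endif
	return arr;					/* never freed; reclaimed at process exit */
}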
diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c
index 116ddf7b835..1ad65c237c3 100644
--- a/src/backend/postmaster/bgworker.c
+++ b/src/backend/postmaster/bgworker.c
@@ -613,6 +613,7 @@ ResetBackgroundWorkerCrashTimes(void)
* resetting.
*/
rw->rw_crashed_at = 0;
+ rw->rw_pid = 0;
/*
* If there was anyone waiting for it, they're history.
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index fda91ffd1ce..8490148a47d 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -42,6 +42,8 @@
#include "access/xlog.h"
#include "access/xlog_internal.h"
#include "access/xlogrecovery.h"
+#include "catalog/pg_authid.h"
+#include "commands/defrem.h"
#include "libpq/pqsignal.h"
#include "miscadmin.h"
#include "pgstat.h"
@@ -61,6 +63,7 @@
#include "storage/shmem.h"
#include "storage/smgr.h"
#include "storage/spin.h"
+#include "utils/acl.h"
#include "utils/guc.h"
#include "utils/memutils.h"
#include "utils/resowner.h"
@@ -127,6 +130,13 @@ typedef struct
int num_requests; /* current # of requests */
int max_requests; /* allocated array size */
+
+ int head; /* Index of the first request in the ring
+ * buffer */
+ int tail; /* Index of the last request in the ring
+ * buffer */
+
+ /* The ring buffer of pending checkpointer requests */
CheckpointerRequest requests[FLEXIBLE_ARRAY_MEMBER];
} CheckpointerShmemStruct;
@@ -135,6 +145,12 @@ static CheckpointerShmemStruct *CheckpointerShmem;
/* interval for calling AbsorbSyncRequests in CheckpointWriteDelay */
#define WRITES_PER_ABSORB 1000
+/* Maximum number of checkpointer requests to process in one batch */
+#define CKPT_REQ_BATCH_SIZE 10000
+
+/* Max number of requests the checkpointer request queue can hold */
+#define MAX_CHECKPOINT_REQUESTS 10000000
+
/*
* GUC parameters
*/
@@ -161,7 +177,7 @@ static pg_time_t last_xlog_switch_time;
static void ProcessCheckpointerInterrupts(void);
static void CheckArchiveTimeout(void);
static bool IsCheckpointOnSchedule(double progress);
-static bool ImmediateCheckpointRequested(void);
+static bool FastCheckpointRequested(void);
static bool CompactCheckpointerRequestQueue(void);
static void UpdateSharedMemoryConfig(void);
@@ -734,12 +750,12 @@ CheckArchiveTimeout(void)
}
/*
- * Returns true if an immediate checkpoint request is pending. (Note that
- * this does not check the *current* checkpoint's IMMEDIATE flag, but whether
- * there is one pending behind it.)
+ * Returns true if a fast checkpoint request is pending. (Note that this does
+ * not check the *current* checkpoint's FAST flag, but whether there is one
+ * pending behind it.)
*/
static bool
-ImmediateCheckpointRequested(void)
+FastCheckpointRequested(void)
{
volatile CheckpointerShmemStruct *cps = CheckpointerShmem;
@@ -747,7 +763,7 @@ ImmediateCheckpointRequested(void)
* We don't need to acquire the ckpt_lck in this case because we're only
* looking at a single flag bit.
*/
- if (cps->ckpt_flags & CHECKPOINT_IMMEDIATE)
+ if (cps->ckpt_flags & CHECKPOINT_FAST)
return true;
return false;
}
@@ -760,7 +776,7 @@ ImmediateCheckpointRequested(void)
* checkpoint_completion_target.
*
* The checkpoint request flags should be passed in; currently the only one
- * examined is CHECKPOINT_IMMEDIATE, which disables delays between writes.
+ * examined is CHECKPOINT_FAST, which disables delays between writes.
*
* 'progress' is an estimate of how much of the work has been done, as a
* fraction between 0.0 meaning none, and 1.0 meaning all done.
@@ -778,10 +794,10 @@ CheckpointWriteDelay(int flags, double progress)
* Perform the usual duties and take a nap, unless we're behind schedule,
* in which case we just try to catch up as quickly as possible.
*/
- if (!(flags & CHECKPOINT_IMMEDIATE) &&
+ if (!(flags & CHECKPOINT_FAST) &&
!ShutdownXLOGPending &&
!ShutdownRequestPending &&
- !ImmediateCheckpointRequested() &&
+ !FastCheckpointRequested() &&
IsCheckpointOnSchedule(progress))
{
if (ConfigReloadPending)
@@ -970,24 +986,75 @@ CheckpointerShmemInit(void)
*/
MemSet(CheckpointerShmem, 0, size);
SpinLockInit(&CheckpointerShmem->ckpt_lck);
- CheckpointerShmem->max_requests = NBuffers;
+ CheckpointerShmem->max_requests = Min(NBuffers, MAX_CHECKPOINT_REQUESTS);
+ CheckpointerShmem->head = CheckpointerShmem->tail = 0;
ConditionVariableInit(&CheckpointerShmem->start_cv);
ConditionVariableInit(&CheckpointerShmem->done_cv);
}
}
/*
+ * ExecCheckpoint
+ * Primary entry point for manual CHECKPOINT commands
+ *
+ * This is mainly a wrapper for RequestCheckpoint().
+ */
+void
+ExecCheckpoint(ParseState *pstate, CheckPointStmt *stmt)
+{
+ bool fast = true;
+ bool unlogged = false;
+
+ foreach_ptr(DefElem, opt, stmt->options)
+ {
+ if (strcmp(opt->defname, "mode") == 0)
+ {
+ char *mode = defGetString(opt);
+
+ if (strcmp(mode, "spread") == 0)
+ fast = false;
+ else if (strcmp(mode, "fast") != 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("unrecognized MODE option \"%s\"", mode),
+ parser_errposition(pstate, opt->location)));
+ }
+ else if (strcmp(opt->defname, "flush_unlogged") == 0)
+ unlogged = defGetBoolean(opt);
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("unrecognized CHECKPOINT option \"%s\"", opt->defname),
+ parser_errposition(pstate, opt->location)));
+ }
+
+ if (!has_privs_of_role(GetUserId(), ROLE_PG_CHECKPOINT))
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ /* translator: %s is name of an SQL command (e.g., CHECKPOINT) */
+ errmsg("permission denied to execute %s command",
+ "CHECKPOINT"),
+ errdetail("Only roles with privileges of the \"%s\" role may execute this command.",
+ "pg_checkpoint")));
+
+ RequestCheckpoint(CHECKPOINT_WAIT |
+ (fast ? CHECKPOINT_FAST : 0) |
+ (unlogged ? CHECKPOINT_FLUSH_UNLOGGED : 0) |
+ (RecoveryInProgress() ? 0 : CHECKPOINT_FORCE));
+}
+
+/*
* RequestCheckpoint
* Called in backend processes to request a checkpoint
*
* flags is a bitwise OR of the following:
* CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
* CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
- * CHECKPOINT_IMMEDIATE: finish the checkpoint ASAP,
+ * CHECKPOINT_FAST: finish the checkpoint ASAP,
* ignoring checkpoint_completion_target parameter.
* CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
* since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
- * CHECKPOINT_END_OF_RECOVERY).
+ * CHECKPOINT_END_OF_RECOVERY, and by the CHECKPOINT command).
* CHECKPOINT_WAIT: wait for completion before returning (otherwise,
* just signal checkpointer to do it, and return).
* CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling.
@@ -1009,7 +1076,7 @@ RequestCheckpoint(int flags)
* There's no point in doing slow checkpoints in a standalone backend,
* because there are no other backends the checkpoint could disrupt.
*/
- CreateCheckPoint(flags | CHECKPOINT_IMMEDIATE);
+ CreateCheckPoint(flags | CHECKPOINT_FAST);
/* Free all smgr objects, as CheckpointerMain() normally would. */
smgrdestroyall();
@@ -1148,6 +1215,7 @@ ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
{
CheckpointerRequest *request;
bool too_full;
+ int insert_pos;
if (!IsUnderPostmaster)
return false; /* probably shouldn't even get here */
@@ -1171,10 +1239,14 @@ ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
}
/* OK, insert request */
- request = &CheckpointerShmem->requests[CheckpointerShmem->num_requests++];
+ insert_pos = CheckpointerShmem->tail;
+ request = &CheckpointerShmem->requests[insert_pos];
request->ftag = *ftag;
request->type = type;
+ CheckpointerShmem->tail = (CheckpointerShmem->tail + 1) % CheckpointerShmem->max_requests;
+ CheckpointerShmem->num_requests++;
+
/* If queue is more than half full, nudge the checkpointer to empty it */
too_full = (CheckpointerShmem->num_requests >=
CheckpointerShmem->max_requests / 2);
@@ -1216,12 +1288,16 @@ CompactCheckpointerRequestQueue(void)
struct CheckpointerSlotMapping
{
CheckpointerRequest request;
- int slot;
+ int ring_idx;
};
- int n,
- preserve_count;
+ int n;
int num_skipped = 0;
+ int head;
+ int max_requests;
+ int num_requests;
+ int read_idx,
+ write_idx;
HASHCTL ctl;
HTAB *htab;
bool *skip_slot;
@@ -1233,8 +1309,13 @@ CompactCheckpointerRequestQueue(void)
if (CritSectionCount > 0)
return false;
+ max_requests = CheckpointerShmem->max_requests;
+ num_requests = CheckpointerShmem->num_requests;
+
/* Initialize skip_slot array */
- skip_slot = palloc0(sizeof(bool) * CheckpointerShmem->num_requests);
+ skip_slot = palloc0(sizeof(bool) * max_requests);
+
+ head = CheckpointerShmem->head;
/* Initialize temporary hash table */
ctl.keysize = sizeof(CheckpointerRequest);
@@ -1258,7 +1339,8 @@ CompactCheckpointerRequestQueue(void)
* away preceding entries that would end up being canceled anyhow), but
* it's not clear that the extra complexity would buy us anything.
*/
- for (n = 0; n < CheckpointerShmem->num_requests; n++)
+ read_idx = head;
+ for (n = 0; n < num_requests; n++)
{
CheckpointerRequest *request;
struct CheckpointerSlotMapping *slotmap;
@@ -1271,16 +1353,19 @@ CompactCheckpointerRequestQueue(void)
* CheckpointerShmemInit. Note also that RelFileLocator had better
* contain no pad bytes.
*/
- request = &CheckpointerShmem->requests[n];
+ request = &CheckpointerShmem->requests[read_idx];
slotmap = hash_search(htab, request, HASH_ENTER, &found);
if (found)
{
/* Duplicate, so mark the previous occurrence as skippable */
- skip_slot[slotmap->slot] = true;
+ skip_slot[slotmap->ring_idx] = true;
num_skipped++;
}
/* Remember slot containing latest occurrence of this request value */
- slotmap->slot = n;
+ slotmap->ring_idx = read_idx;
+
+ /* Move to the next request in the ring buffer */
+ read_idx = (read_idx + 1) % max_requests;
}
/* Done with the hash table. */
@@ -1294,17 +1379,34 @@ CompactCheckpointerRequestQueue(void)
}
/* We found some duplicates; remove them. */
- preserve_count = 0;
- for (n = 0; n < CheckpointerShmem->num_requests; n++)
+ read_idx = write_idx = head;
+ for (n = 0; n < num_requests; n++)
{
- if (skip_slot[n])
- continue;
- CheckpointerShmem->requests[preserve_count++] = CheckpointerShmem->requests[n];
+ /* If this slot is NOT skipped, keep it */
+ if (!skip_slot[read_idx])
+ {
+ /* If the read and write positions are different, copy the request */
+ if (write_idx != read_idx)
+ CheckpointerShmem->requests[write_idx] =
+ CheckpointerShmem->requests[read_idx];
+
+ /* Advance the write position */
+ write_idx = (write_idx + 1) % max_requests;
+ }
+
+ read_idx = (read_idx + 1) % max_requests;
}
+
+ /*
+ * Update ring buffer state: head remains the same, tail moves, count
+ * decreases
+ */
+ CheckpointerShmem->tail = write_idx;
+ CheckpointerShmem->num_requests -= num_skipped;
+
ereport(DEBUG1,
(errmsg_internal("compacted fsync request queue from %d entries to %d entries",
- CheckpointerShmem->num_requests, preserve_count)));
- CheckpointerShmem->num_requests = preserve_count;
+ num_requests, CheckpointerShmem->num_requests)));
/* Cleanup. */
pfree(skip_slot);
@@ -1325,40 +1427,64 @@ AbsorbSyncRequests(void)
{
CheckpointerRequest *requests = NULL;
CheckpointerRequest *request;
- int n;
+ int n,
+ i;
+ bool loop;
if (!AmCheckpointerProcess())
return;
- LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
-
- /*
- * We try to avoid holding the lock for a long time by copying the request
- * array, and processing the requests after releasing the lock.
- *
- * Once we have cleared the requests from shared memory, we have to PANIC
- * if we then fail to absorb them (eg, because our hashtable runs out of
- * memory). This is because the system cannot run safely if we are unable
- * to fsync what we have been told to fsync. Fortunately, the hashtable
- * is so small that the problem is quite unlikely to arise in practice.
- */
- n = CheckpointerShmem->num_requests;
- if (n > 0)
+ do
{
- requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest));
- memcpy(requests, CheckpointerShmem->requests, n * sizeof(CheckpointerRequest));
- }
+ LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
+
+ /*---
+ * We try to avoid holding the lock for a long time by:
+ * 1. Copying the request array and processing the requests after
+ * releasing the lock;
+ * 2. Processing not the whole queue, but only batches of
+ * CKPT_REQ_BATCH_SIZE at once.
+ *
+ * Once we have cleared the requests from shared memory, we must
+ * PANIC if we then fail to absorb them (e.g., because our hashtable
+ * runs out of memory). This is because the system cannot run safely
+ * if we are unable to fsync what we have been told to fsync.
+ * Fortunately, the hashtable is so small that the problem is quite
+ * unlikely to arise in practice.
+ *
+ * Note: The maximum possible size of the ring buffer is
+ * MAX_CHECKPOINT_REQUESTS entries, which fit within the maximum palloc
+ * allocation size of 1 GB. Our maximum batch size,
+ * CKPT_REQ_BATCH_SIZE, is even smaller.
+ */
+ n = Min(CheckpointerShmem->num_requests, CKPT_REQ_BATCH_SIZE);
+ if (n > 0)
+ {
+ if (!requests)
+ requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest));
- START_CRIT_SECTION();
+ for (i = 0; i < n; i++)
+ {
+ requests[i] = CheckpointerShmem->requests[CheckpointerShmem->head];
+ CheckpointerShmem->head = (CheckpointerShmem->head + 1) % CheckpointerShmem->max_requests;
+ }
- CheckpointerShmem->num_requests = 0;
+ CheckpointerShmem->num_requests -= n;
- LWLockRelease(CheckpointerCommLock);
+ }
+
+ START_CRIT_SECTION();
+
+ /* Are there any requests in the queue? If so, keep going. */
+ loop = CheckpointerShmem->num_requests != 0;
+
+ LWLockRelease(CheckpointerCommLock);
- for (request = requests; n > 0; request++, n--)
- RememberSyncRequest(&request->ftag, request->type);
+ for (request = requests; n > 0; request++, n--)
+ RememberSyncRequest(&request->ftag, request->type);
- END_CRIT_SECTION();
+ END_CRIT_SECTION();
+ } while (loop);
if (requests)
pfree(requests);
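
Taken together, the checkpointer changes turn the request array into a fixed-capacity ring buffer: ForwardSyncRequest appends at tail, AbsorbSyncRequests drains batches from head, num_requests tracks the fill level, and compaction rewrites the live region in place. A self-contained sketch of that discipline in plain C (not the PostgreSQL code; locking omitted):

#include <stdbool.h>

#define RING_MAX 8

typedef struct Ring
{
	int			head;			/* next slot to consume */
	int			tail;			/* next free slot */
	int			count;			/* current number of entries */
	int			items[RING_MAX];
} Ring;

static bool
ring_put(Ring *r, int v)
{
	if (r->count == RING_MAX)
		return false;			/* full: caller compacts or waits */
	r->items[r->tail] = v;
	r->tail = (r->tail + 1) % RING_MAX;
	r->count++;
	return true;
}

static bool
ring_get(Ring *r, int *v)
{
	if (r->count == 0)
		return false;			/* empty */
	*v = r->items[r->head];
	r->head = (r->head + 1) % RING_MAX;
	r->count--;
	return true;
}

CompactCheckpointerRequestQueue walks the buffer the same way, from head for num_requests entries wrapping modulo max_requests, which is why only tail and num_requests change once the duplicates are squeezed out.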
diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c
index 7e622ae4bd2..78e39e5f866 100644
--- a/src/backend/postmaster/pgarch.c
+++ b/src/backend/postmaster/pgarch.c
@@ -718,15 +718,15 @@ pgarch_readyXlog(char *xlog)
/*
* Store the file in our max-heap if it has a high enough priority.
*/
- if (arch_files->arch_heap->bh_size < NUM_FILES_PER_DIRECTORY_SCAN)
+ if (binaryheap_size(arch_files->arch_heap) < NUM_FILES_PER_DIRECTORY_SCAN)
{
/* If the heap isn't full yet, quickly add it. */
- arch_file = arch_files->arch_filenames[arch_files->arch_heap->bh_size];
+ arch_file = arch_files->arch_filenames[binaryheap_size(arch_files->arch_heap)];
strcpy(arch_file, basename);
binaryheap_add_unordered(arch_files->arch_heap, CStringGetDatum(arch_file));
/* If we just filled the heap, make it a valid one. */
- if (arch_files->arch_heap->bh_size == NUM_FILES_PER_DIRECTORY_SCAN)
+ if (binaryheap_size(arch_files->arch_heap) == NUM_FILES_PER_DIRECTORY_SCAN)
binaryheap_build(arch_files->arch_heap);
}
else if (ready_file_comparator(binaryheap_first(arch_files->arch_heap),
@@ -744,21 +744,21 @@ pgarch_readyXlog(char *xlog)
FreeDir(rldir);
/* If no files were found, simply return. */
- if (arch_files->arch_heap->bh_size == 0)
+ if (binaryheap_empty(arch_files->arch_heap))
return false;
/*
* If we didn't fill the heap, we didn't make it a valid one. Do that
* now.
*/
- if (arch_files->arch_heap->bh_size < NUM_FILES_PER_DIRECTORY_SCAN)
+ if (binaryheap_size(arch_files->arch_heap) < NUM_FILES_PER_DIRECTORY_SCAN)
binaryheap_build(arch_files->arch_heap);
/*
* Fill arch_files array with the files to archive in ascending order of
* priority.
*/
- arch_files->arch_files_size = arch_files->arch_heap->bh_size;
+ arch_files->arch_files_size = binaryheap_size(arch_files->arch_heap);
for (int i = 0; i < arch_files->arch_files_size; i++)
arch_files->arch_files[i] = DatumGetCString(binaryheap_remove_first(arch_files->arch_heap));
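
These hunks replace direct reads of bh_size with the binaryheap accessors. A hedged usage sketch; the comparator and values are hypothetical, and the calls are the ones visible above plus binaryheap_allocate/binaryheap_add_unordered from lib/binaryheap.h:

static int
compare_int32(Datum a, Datum b, void *arg)
{
	/* yields a max-heap; safe for small non-negative values */
	return DatumGetInt32(a) - DatumGetInt32(b);
}

static void
heap_demo(void)
{
	binaryheap *h = binaryheap_allocate(4, compare_int32, NULL);

	binaryheap_add_unordered(h, Int32GetDatum(7));
	binaryheap_add_unordered(h, Int32GetDatum(3));
	binaryheap_build(h);		/* required once after unordered adds */

	if (!binaryheap_empty(h))
		elog(DEBUG1, "heap holds %d entries", binaryheap_size(h));
}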
diff --git a/src/backend/postmaster/pmchild.c b/src/backend/postmaster/pmchild.c
index cde1d23a4ca..584bb58c8ab 100644
--- a/src/backend/postmaster/pmchild.c
+++ b/src/backend/postmaster/pmchild.c
@@ -60,6 +60,17 @@ NON_EXEC_STATIC int num_pmchild_slots = 0;
dlist_head ActiveChildList;
/*
+ * Dummy pointer to persuade Valgrind that we've not leaked the array of
+ * PMChild structs. Make it global to ensure the compiler doesn't
+ * optimize it away.
+ */
+#ifdef USE_VALGRIND
+extern PMChild *pmchild_array;
+PMChild *pmchild_array;
+#endif
+
+
+/*
* MaxLivePostmasterChildren
*
* This reports the number of postmaster child processes that can be active.
@@ -125,8 +136,13 @@ InitPostmasterChildSlots(void)
for (int i = 0; i < BACKEND_NUM_TYPES; i++)
num_pmchild_slots += pmchild_pools[i].size;
- /* Initialize them */
+ /* Allocate enough slots, and make sure Valgrind doesn't complain */
slots = palloc(num_pmchild_slots * sizeof(PMChild));
+#ifdef USE_VALGRIND
+ pmchild_array = slots;
+#endif
+
+ /* Initialize them */
slotno = 0;
for (int btype = 0; btype < BACKEND_NUM_TYPES; btype++)
{
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 490f7ce3664..e01d9f0cfe8 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -2630,6 +2630,13 @@ CleanupBackend(PMChild *bp,
}
bp = NULL;
+ /*
+ * In a crash case, exit immediately without resetting background worker
+ * state. However, if restart_after_crash is enabled, the background
+ * worker state (e.g., rw_pid) still needs to be reset so the worker can
+ * restart after crash recovery. This reset is handled in
+ * ResetBackgroundWorkerCrashTimes(), not here.
+ */
if (crashed)
{
HandleChildCrash(bp_pid, exitstatus, procname);
@@ -4337,15 +4344,15 @@ maybe_start_bgworkers(void)
static bool
maybe_reap_io_worker(int pid)
{
- for (int id = 0; id < MAX_IO_WORKERS; ++id)
+ for (int i = 0; i < MAX_IO_WORKERS; ++i)
{
- if (io_worker_children[id] &&
- io_worker_children[id]->pid == pid)
+ if (io_worker_children[i] &&
+ io_worker_children[i]->pid == pid)
{
- ReleasePostmasterChildSlot(io_worker_children[id]);
+ ReleasePostmasterChildSlot(io_worker_children[i]);
--io_worker_count;
- io_worker_children[id] = NULL;
+ io_worker_children[i] = NULL;
return true;
}
}
@@ -4389,22 +4396,22 @@ maybe_adjust_io_workers(void)
while (io_worker_count < io_workers)
{
PMChild *child;
- int id;
+ int i;
/* find unused entry in io_worker_children array */
- for (id = 0; id < MAX_IO_WORKERS; ++id)
+ for (i = 0; i < MAX_IO_WORKERS; ++i)
{
- if (io_worker_children[id] == NULL)
+ if (io_worker_children[i] == NULL)
break;
}
- if (id == MAX_IO_WORKERS)
- elog(ERROR, "could not find a free IO worker ID");
+ if (i == MAX_IO_WORKERS)
+ elog(ERROR, "could not find a free IO worker slot");
/* Try to launch one. */
child = StartChildProcess(B_IO_WORKER);
if (child != NULL)
{
- io_worker_children[id] = child;
+ io_worker_children[i] = child;
++io_worker_count;
}
else
@@ -4415,11 +4422,11 @@ maybe_adjust_io_workers(void)
if (io_worker_count > io_workers)
{
/* ask the IO worker in the highest slot to exit */
- for (int id = MAX_IO_WORKERS - 1; id >= 0; --id)
+ for (int i = MAX_IO_WORKERS - 1; i >= 0; --i)
{
- if (io_worker_children[id] != NULL)
+ if (io_worker_children[i] != NULL)
{
- kill(io_worker_children[id]->pid, SIGUSR2);
+ kill(io_worker_children[i]->pid, SIGUSR2);
break;
}
}
diff --git a/src/backend/postmaster/walsummarizer.c b/src/backend/postmaster/walsummarizer.c
index 0fec4f1f871..777c9a8d555 100644
--- a/src/backend/postmaster/walsummarizer.c
+++ b/src/backend/postmaster/walsummarizer.c
@@ -385,7 +385,7 @@ WalSummarizerMain(const void *startup_data, size_t startup_data_len)
switch_lsn = tliSwitchPoint(current_tli, tles, &switch_tli);
ereport(DEBUG1,
- errmsg_internal("switch point from TLI %u to TLI %u is at %X/%X",
+ errmsg_internal("switch point from TLI %u to TLI %u is at %X/%08X",
current_tli, switch_tli, LSN_FORMAT_ARGS(switch_lsn)));
}
@@ -741,7 +741,7 @@ WaitForWalSummarization(XLogRecPtr lsn)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL summarization is not progressing"),
- errdetail("Summarization is needed through %X/%X, but is stuck at %X/%X on disk and %X/%X in memory.",
+ errdetail("Summarization is needed through %X/%08X, but is stuck at %X/%08X on disk and %X/%08X in memory.",
LSN_FORMAT_ARGS(lsn),
LSN_FORMAT_ARGS(summarized_lsn),
LSN_FORMAT_ARGS(pending_lsn))));
@@ -755,12 +755,12 @@ WaitForWalSummarization(XLogRecPtr lsn)
current_time) / 1000;
ereport(WARNING,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg_plural("still waiting for WAL summarization through %X/%X after %ld second",
- "still waiting for WAL summarization through %X/%X after %ld seconds",
+ errmsg_plural("still waiting for WAL summarization through %X/%08X after %ld second",
+ "still waiting for WAL summarization through %X/%08X after %ld seconds",
elapsed_seconds,
LSN_FORMAT_ARGS(lsn),
elapsed_seconds),
- errdetail("Summarization has reached %X/%X on disk and %X/%X in memory.",
+ errdetail("Summarization has reached %X/%08X on disk and %X/%08X in memory.",
LSN_FORMAT_ARGS(summarized_lsn),
LSN_FORMAT_ARGS(pending_lsn))));
}
@@ -981,7 +981,7 @@ SummarizeWAL(TimeLineID tli, XLogRecPtr start_lsn, bool exact,
if (private_data->end_of_wal)
{
ereport(DEBUG1,
- errmsg_internal("could not read WAL from timeline %u at %X/%X: end of WAL at %X/%X",
+ errmsg_internal("could not read WAL from timeline %u at %X/%08X: end of WAL at %X/%08X",
tli,
LSN_FORMAT_ARGS(start_lsn),
LSN_FORMAT_ARGS(private_data->read_upto)));
@@ -1000,8 +1000,8 @@ SummarizeWAL(TimeLineID tli, XLogRecPtr start_lsn, bool exact,
}
else
ereport(ERROR,
- (errmsg("could not find a valid record after %X/%X",
- LSN_FORMAT_ARGS(start_lsn))));
+ errmsg("could not find a valid record after %X/%08X",
+ LSN_FORMAT_ARGS(start_lsn)));
}
/* We shouldn't go backward. */
@@ -1034,7 +1034,7 @@ SummarizeWAL(TimeLineID tli, XLogRecPtr start_lsn, bool exact,
* able to read a complete record.
*/
ereport(DEBUG1,
- errmsg_internal("could not read WAL from timeline %u at %X/%X: end of WAL at %X/%X",
+ errmsg_internal("could not read WAL from timeline %u at %X/%08X: end of WAL at %X/%08X",
tli,
LSN_FORMAT_ARGS(xlogreader->EndRecPtr),
LSN_FORMAT_ARGS(private_data->read_upto)));
@@ -1045,13 +1045,13 @@ SummarizeWAL(TimeLineID tli, XLogRecPtr start_lsn, bool exact,
if (errormsg)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read WAL from timeline %u at %X/%X: %s",
+ errmsg("could not read WAL from timeline %u at %X/%08X: %s",
tli, LSN_FORMAT_ARGS(xlogreader->EndRecPtr),
errormsg)));
else
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read WAL from timeline %u at %X/%X",
+ errmsg("could not read WAL from timeline %u at %X/%08X",
tli, LSN_FORMAT_ARGS(xlogreader->EndRecPtr))));
}
@@ -1222,7 +1222,7 @@ SummarizeWAL(TimeLineID tli, XLogRecPtr start_lsn, bool exact,
/* Tell the user what we did. */
ereport(DEBUG1,
- errmsg_internal("summarized WAL on TLI %u from %X/%X to %X/%X",
+ errmsg_internal("summarized WAL on TLI %u from %X/%08X to %X/%08X",
tli,
LSN_FORMAT_ARGS(summary_start_lsn),
LSN_FORMAT_ARGS(summary_end_lsn)));
@@ -1234,7 +1234,7 @@ SummarizeWAL(TimeLineID tli, XLogRecPtr start_lsn, bool exact,
/* If we skipped a non-zero amount of WAL, log a debug message. */
if (summary_end_lsn > summary_start_lsn && fast_forward)
ereport(DEBUG1,
- errmsg_internal("skipped summarizing WAL on TLI %u from %X/%X to %X/%X",
+ errmsg_internal("skipped summarizing WAL on TLI %u from %X/%08X to %X/%08X",
tli,
LSN_FORMAT_ARGS(summary_start_lsn),
LSN_FORMAT_ARGS(summary_end_lsn)));
@@ -1580,7 +1580,7 @@ summarizer_read_local_xlog_page(XLogReaderState *state,
/* Debugging output. */
ereport(DEBUG1,
- errmsg_internal("timeline %u became historic, can read up to %X/%X",
+ errmsg_internal("timeline %u became historic, can read up to %X/%08X",
private_data->tli, LSN_FORMAT_ARGS(private_data->read_upto)));
}
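
Every message above swaps %X/%X for %X/%08X, so the low half of an LSN always prints as eight hex digits and values align and sort lexically in logs. A tiny sketch with a hypothetical LSN value:

static void
show_lsn_format(void)
{
	XLogRecPtr	lsn = (XLogRecPtr) 0x160000F2A8;

	elog(DEBUG1, "old: %X/%X", LSN_FORMAT_ARGS(lsn));	/* 16/F2A8 */
	elog(DEBUG1, "new: %X/%08X", LSN_FORMAT_ARGS(lsn));	/* 16/0000F2A8 */
}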
diff --git a/src/backend/regex/regc_pg_locale.c b/src/backend/regex/regc_pg_locale.c
index 78193cfb964..d9eab5357bc 100644
--- a/src/backend/regex/regc_pg_locale.c
+++ b/src/backend/regex/regc_pg_locale.c
@@ -20,58 +20,13 @@
#include "common/unicode_category.h"
#include "utils/pg_locale.h"
-/*
- * For the libc provider, to provide as much functionality as possible on a
- * variety of platforms without going so far as to implement everything from
- * scratch, we use several implementation strategies depending on the
- * situation:
- *
- * 1. In C/POSIX collations, we use hard-wired code. We can't depend on
- * the <ctype.h> functions since those will obey LC_CTYPE. Note that these
- * collations don't give a fig about multibyte characters.
- *
- * 2. When working in UTF8 encoding, we use the <wctype.h> functions.
- * This assumes that every platform uses Unicode codepoints directly
- * as the wchar_t representation of Unicode. (XXX: ICU makes this assumption
- * even for non-UTF8 encodings, which may be a problem.) On some platforms
- * wchar_t is only 16 bits wide, so we have to punt for codepoints > 0xFFFF.
- *
- * 3. In all other encodings, we use the <ctype.h> functions for pg_wchar
- * values up to 255, and punt for values above that. This is 100% correct
- * only in single-byte encodings such as LATINn. However, non-Unicode
- * multibyte encodings are mostly Far Eastern character sets for which the
- * properties being tested here aren't very relevant for higher code values
- * anyway. The difficulty with using the <wctype.h> functions with
- * non-Unicode multibyte encodings is that we can have no certainty that
- * the platform's wchar_t representation matches what we do in pg_wchar
- * conversions.
- *
- * As a special case, in the "default" collation, (2) and (3) force ASCII
- * letters to follow ASCII upcase/downcase rules, while in a non-default
- * collation we just let the library functions do what they will. The case
- * where this matters is treatment of I/i in Turkish, and the behavior is
- * meant to match the upper()/lower() SQL functions.
- *
- * We store the active collation setting in static variables. In principle
- * it could be passed down to here via the regex library's "struct vars" data
- * structure; but that would require somewhat invasive changes in the regex
- * library, and right now there's no real benefit to be gained from that.
- *
- * NB: the coding here assumes pg_wchar is an unsigned type.
- */
-
-typedef enum
-{
- PG_REGEX_STRATEGY_C, /* C locale (encoding independent) */
- PG_REGEX_STRATEGY_BUILTIN, /* built-in Unicode semantics */
- PG_REGEX_STRATEGY_LIBC_WIDE, /* Use locale_t <wctype.h> functions */
- PG_REGEX_STRATEGY_LIBC_1BYTE, /* Use locale_t <ctype.h> functions */
- PG_REGEX_STRATEGY_ICU, /* Use ICU uchar.h functions */
-} PG_Locale_Strategy;
-
-static PG_Locale_Strategy pg_regex_strategy;
static pg_locale_t pg_regex_locale;
+static struct pg_locale_struct dummy_c_locale = {
+ .collate_is_c = true,
+ .ctype_is_c = true,
+};
+
/*
* Hard-wired character properties for C locale
*/
@@ -228,7 +183,6 @@ void
pg_set_regex_collation(Oid collation)
{
pg_locale_t locale = 0;
- PG_Locale_Strategy strategy;
if (!OidIsValid(collation))
{
@@ -249,8 +203,7 @@ pg_set_regex_collation(Oid collation)
* catalog access is available, so we can't call
* pg_newlocale_from_collation().
*/
- strategy = PG_REGEX_STRATEGY_C;
- locale = 0;
+ locale = &dummy_c_locale;
}
else
{
@@ -267,113 +220,41 @@ pg_set_regex_collation(Oid collation)
* C/POSIX collations use this path regardless of database
* encoding
*/
- strategy = PG_REGEX_STRATEGY_C;
- locale = 0;
- }
- else if (locale->provider == COLLPROVIDER_BUILTIN)
- {
- Assert(GetDatabaseEncoding() == PG_UTF8);
- strategy = PG_REGEX_STRATEGY_BUILTIN;
- }
-#ifdef USE_ICU
- else if (locale->provider == COLLPROVIDER_ICU)
- {
- strategy = PG_REGEX_STRATEGY_ICU;
- }
-#endif
- else
- {
- Assert(locale->provider == COLLPROVIDER_LIBC);
- if (GetDatabaseEncoding() == PG_UTF8)
- strategy = PG_REGEX_STRATEGY_LIBC_WIDE;
- else
- strategy = PG_REGEX_STRATEGY_LIBC_1BYTE;
+ locale = &dummy_c_locale;
}
}
- pg_regex_strategy = strategy;
pg_regex_locale = locale;
}
static int
pg_wc_isdigit(pg_wchar c)
{
- switch (pg_regex_strategy)
- {
- case PG_REGEX_STRATEGY_C:
- return (c <= (pg_wchar) 127 &&
- (pg_char_properties[c] & PG_ISDIGIT));
- case PG_REGEX_STRATEGY_BUILTIN:
- return pg_u_isdigit(c, !pg_regex_locale->info.builtin.casemap_full);
- case PG_REGEX_STRATEGY_LIBC_WIDE:
- if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF)
- return iswdigit_l((wint_t) c, pg_regex_locale->info.lt);
- /* FALL THRU */
- case PG_REGEX_STRATEGY_LIBC_1BYTE:
- return (c <= (pg_wchar) UCHAR_MAX &&
- isdigit_l((unsigned char) c, pg_regex_locale->info.lt));
- break;
- case PG_REGEX_STRATEGY_ICU:
-#ifdef USE_ICU
- return u_isdigit(c);
-#endif
- break;
- }
- return 0; /* can't get here, but keep compiler quiet */
+ if (pg_regex_locale->ctype_is_c)
+ return (c <= (pg_wchar) 127 &&
+ (pg_char_properties[c] & PG_ISDIGIT));
+ else
+ return pg_regex_locale->ctype->wc_isdigit(c, pg_regex_locale);
}
static int
pg_wc_isalpha(pg_wchar c)
{
- switch (pg_regex_strategy)
- {
- case PG_REGEX_STRATEGY_C:
- return (c <= (pg_wchar) 127 &&
- (pg_char_properties[c] & PG_ISALPHA));
- case PG_REGEX_STRATEGY_BUILTIN:
- return pg_u_isalpha(c);
- case PG_REGEX_STRATEGY_LIBC_WIDE:
- if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF)
- return iswalpha_l((wint_t) c, pg_regex_locale->info.lt);
- /* FALL THRU */
- case PG_REGEX_STRATEGY_LIBC_1BYTE:
- return (c <= (pg_wchar) UCHAR_MAX &&
- isalpha_l((unsigned char) c, pg_regex_locale->info.lt));
- break;
- case PG_REGEX_STRATEGY_ICU:
-#ifdef USE_ICU
- return u_isalpha(c);
-#endif
- break;
- }
- return 0; /* can't get here, but keep compiler quiet */
+ if (pg_regex_locale->ctype_is_c)
+ return (c <= (pg_wchar) 127 &&
+ (pg_char_properties[c] & PG_ISALPHA));
+ else
+ return pg_regex_locale->ctype->wc_isalpha(c, pg_regex_locale);
}
static int
pg_wc_isalnum(pg_wchar c)
{
- switch (pg_regex_strategy)
- {
- case PG_REGEX_STRATEGY_C:
- return (c <= (pg_wchar) 127 &&
- (pg_char_properties[c] & PG_ISALNUM));
- case PG_REGEX_STRATEGY_BUILTIN:
- return pg_u_isalnum(c, !pg_regex_locale->info.builtin.casemap_full);
- case PG_REGEX_STRATEGY_LIBC_WIDE:
- if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF)
- return iswalnum_l((wint_t) c, pg_regex_locale->info.lt);
- /* FALL THRU */
- case PG_REGEX_STRATEGY_LIBC_1BYTE:
- return (c <= (pg_wchar) UCHAR_MAX &&
- isalnum_l((unsigned char) c, pg_regex_locale->info.lt));
- break;
- case PG_REGEX_STRATEGY_ICU:
-#ifdef USE_ICU
- return u_isalnum(c);
-#endif
- break;
- }
- return 0; /* can't get here, but keep compiler quiet */
+ if (pg_regex_locale->ctype_is_c)
+ return (c <= (pg_wchar) 127 &&
+ (pg_char_properties[c] & PG_ISALNUM));
+ else
+ return pg_regex_locale->ctype->wc_isalnum(c, pg_regex_locale);
}
static int
@@ -388,231 +269,87 @@ pg_wc_isword(pg_wchar c)
static int
pg_wc_isupper(pg_wchar c)
{
- switch (pg_regex_strategy)
- {
- case PG_REGEX_STRATEGY_C:
- return (c <= (pg_wchar) 127 &&
- (pg_char_properties[c] & PG_ISUPPER));
- case PG_REGEX_STRATEGY_BUILTIN:
- return pg_u_isupper(c);
- case PG_REGEX_STRATEGY_LIBC_WIDE:
- if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF)
- return iswupper_l((wint_t) c, pg_regex_locale->info.lt);
- /* FALL THRU */
- case PG_REGEX_STRATEGY_LIBC_1BYTE:
- return (c <= (pg_wchar) UCHAR_MAX &&
- isupper_l((unsigned char) c, pg_regex_locale->info.lt));
- break;
- case PG_REGEX_STRATEGY_ICU:
-#ifdef USE_ICU
- return u_isupper(c);
-#endif
- break;
- }
- return 0; /* can't get here, but keep compiler quiet */
+ if (pg_regex_locale->ctype_is_c)
+ return (c <= (pg_wchar) 127 &&
+ (pg_char_properties[c] & PG_ISUPPER));
+ else
+ return pg_regex_locale->ctype->wc_isupper(c, pg_regex_locale);
}
static int
pg_wc_islower(pg_wchar c)
{
- switch (pg_regex_strategy)
- {
- case PG_REGEX_STRATEGY_C:
- return (c <= (pg_wchar) 127 &&
- (pg_char_properties[c] & PG_ISLOWER));
- case PG_REGEX_STRATEGY_BUILTIN:
- return pg_u_islower(c);
- case PG_REGEX_STRATEGY_LIBC_WIDE:
- if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF)
- return iswlower_l((wint_t) c, pg_regex_locale->info.lt);
- /* FALL THRU */
- case PG_REGEX_STRATEGY_LIBC_1BYTE:
- return (c <= (pg_wchar) UCHAR_MAX &&
- islower_l((unsigned char) c, pg_regex_locale->info.lt));
- break;
- case PG_REGEX_STRATEGY_ICU:
-#ifdef USE_ICU
- return u_islower(c);
-#endif
- break;
- }
- return 0; /* can't get here, but keep compiler quiet */
+ if (pg_regex_locale->ctype_is_c)
+ return (c <= (pg_wchar) 127 &&
+ (pg_char_properties[c] & PG_ISLOWER));
+ else
+ return pg_regex_locale->ctype->wc_islower(c, pg_regex_locale);
}
static int
pg_wc_isgraph(pg_wchar c)
{
- switch (pg_regex_strategy)
- {
- case PG_REGEX_STRATEGY_C:
- return (c <= (pg_wchar) 127 &&
- (pg_char_properties[c] & PG_ISGRAPH));
- case PG_REGEX_STRATEGY_BUILTIN:
- return pg_u_isgraph(c);
- case PG_REGEX_STRATEGY_LIBC_WIDE:
- if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF)
- return iswgraph_l((wint_t) c, pg_regex_locale->info.lt);
- /* FALL THRU */
- case PG_REGEX_STRATEGY_LIBC_1BYTE:
- return (c <= (pg_wchar) UCHAR_MAX &&
- isgraph_l((unsigned char) c, pg_regex_locale->info.lt));
- break;
- case PG_REGEX_STRATEGY_ICU:
-#ifdef USE_ICU
- return u_isgraph(c);
-#endif
- break;
- }
- return 0; /* can't get here, but keep compiler quiet */
+ if (pg_regex_locale->ctype_is_c)
+ return (c <= (pg_wchar) 127 &&
+ (pg_char_properties[c] & PG_ISGRAPH));
+ else
+ return pg_regex_locale->ctype->wc_isgraph(c, pg_regex_locale);
}
static int
pg_wc_isprint(pg_wchar c)
{
- switch (pg_regex_strategy)
- {
- case PG_REGEX_STRATEGY_C:
- return (c <= (pg_wchar) 127 &&
- (pg_char_properties[c] & PG_ISPRINT));
- case PG_REGEX_STRATEGY_BUILTIN:
- return pg_u_isprint(c);
- case PG_REGEX_STRATEGY_LIBC_WIDE:
- if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF)
- return iswprint_l((wint_t) c, pg_regex_locale->info.lt);
- /* FALL THRU */
- case PG_REGEX_STRATEGY_LIBC_1BYTE:
- return (c <= (pg_wchar) UCHAR_MAX &&
- isprint_l((unsigned char) c, pg_regex_locale->info.lt));
- break;
- case PG_REGEX_STRATEGY_ICU:
-#ifdef USE_ICU
- return u_isprint(c);
-#endif
- break;
- }
- return 0; /* can't get here, but keep compiler quiet */
+ if (pg_regex_locale->ctype_is_c)
+ return (c <= (pg_wchar) 127 &&
+ (pg_char_properties[c] & PG_ISPRINT));
+ else
+ return pg_regex_locale->ctype->wc_isprint(c, pg_regex_locale);
}
static int
pg_wc_ispunct(pg_wchar c)
{
- switch (pg_regex_strategy)
- {
- case PG_REGEX_STRATEGY_C:
- return (c <= (pg_wchar) 127 &&
- (pg_char_properties[c] & PG_ISPUNCT));
- case PG_REGEX_STRATEGY_BUILTIN:
- return pg_u_ispunct(c, !pg_regex_locale->info.builtin.casemap_full);
- case PG_REGEX_STRATEGY_LIBC_WIDE:
- if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF)
- return iswpunct_l((wint_t) c, pg_regex_locale->info.lt);
- /* FALL THRU */
- case PG_REGEX_STRATEGY_LIBC_1BYTE:
- return (c <= (pg_wchar) UCHAR_MAX &&
- ispunct_l((unsigned char) c, pg_regex_locale->info.lt));
- break;
- case PG_REGEX_STRATEGY_ICU:
-#ifdef USE_ICU
- return u_ispunct(c);
-#endif
- break;
- }
- return 0; /* can't get here, but keep compiler quiet */
+ if (pg_regex_locale->ctype_is_c)
+ return (c <= (pg_wchar) 127 &&
+ (pg_char_properties[c] & PG_ISPUNCT));
+ else
+ return pg_regex_locale->ctype->wc_ispunct(c, pg_regex_locale);
}
static int
pg_wc_isspace(pg_wchar c)
{
- switch (pg_regex_strategy)
- {
- case PG_REGEX_STRATEGY_C:
- return (c <= (pg_wchar) 127 &&
- (pg_char_properties[c] & PG_ISSPACE));
- case PG_REGEX_STRATEGY_BUILTIN:
- return pg_u_isspace(c);
- case PG_REGEX_STRATEGY_LIBC_WIDE:
- if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF)
- return iswspace_l((wint_t) c, pg_regex_locale->info.lt);
- /* FALL THRU */
- case PG_REGEX_STRATEGY_LIBC_1BYTE:
- return (c <= (pg_wchar) UCHAR_MAX &&
- isspace_l((unsigned char) c, pg_regex_locale->info.lt));
- break;
- case PG_REGEX_STRATEGY_ICU:
-#ifdef USE_ICU
- return u_isspace(c);
-#endif
- break;
- }
- return 0; /* can't get here, but keep compiler quiet */
+ if (pg_regex_locale->ctype_is_c)
+ return (c <= (pg_wchar) 127 &&
+ (pg_char_properties[c] & PG_ISSPACE));
+ else
+ return pg_regex_locale->ctype->wc_isspace(c, pg_regex_locale);
}
static pg_wchar
pg_wc_toupper(pg_wchar c)
{
- switch (pg_regex_strategy)
+ if (pg_regex_locale->ctype_is_c)
{
- case PG_REGEX_STRATEGY_C:
- if (c <= (pg_wchar) 127)
- return pg_ascii_toupper((unsigned char) c);
- return c;
- case PG_REGEX_STRATEGY_BUILTIN:
- return unicode_uppercase_simple(c);
- case PG_REGEX_STRATEGY_LIBC_WIDE:
- /* force C behavior for ASCII characters, per comments above */
- if (pg_regex_locale->is_default && c <= (pg_wchar) 127)
- return pg_ascii_toupper((unsigned char) c);
- if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF)
- return towupper_l((wint_t) c, pg_regex_locale->info.lt);
- /* FALL THRU */
- case PG_REGEX_STRATEGY_LIBC_1BYTE:
- /* force C behavior for ASCII characters, per comments above */
- if (pg_regex_locale->is_default && c <= (pg_wchar) 127)
- return pg_ascii_toupper((unsigned char) c);
- if (c <= (pg_wchar) UCHAR_MAX)
- return toupper_l((unsigned char) c, pg_regex_locale->info.lt);
- return c;
- case PG_REGEX_STRATEGY_ICU:
-#ifdef USE_ICU
- return u_toupper(c);
-#endif
- break;
+ if (c <= (pg_wchar) 127)
+ return pg_ascii_toupper((unsigned char) c);
+ return c;
}
- return 0; /* can't get here, but keep compiler quiet */
+ else
+ return pg_regex_locale->ctype->wc_toupper(c, pg_regex_locale);
}
static pg_wchar
pg_wc_tolower(pg_wchar c)
{
- switch (pg_regex_strategy)
+ if (pg_regex_locale->ctype_is_c)
{
- case PG_REGEX_STRATEGY_C:
- if (c <= (pg_wchar) 127)
- return pg_ascii_tolower((unsigned char) c);
- return c;
- case PG_REGEX_STRATEGY_BUILTIN:
- return unicode_lowercase_simple(c);
- case PG_REGEX_STRATEGY_LIBC_WIDE:
- /* force C behavior for ASCII characters, per comments above */
- if (pg_regex_locale->is_default && c <= (pg_wchar) 127)
- return pg_ascii_tolower((unsigned char) c);
- if (sizeof(wchar_t) >= 4 || c <= (pg_wchar) 0xFFFF)
- return towlower_l((wint_t) c, pg_regex_locale->info.lt);
- /* FALL THRU */
- case PG_REGEX_STRATEGY_LIBC_1BYTE:
- /* force C behavior for ASCII characters, per comments above */
- if (pg_regex_locale->is_default && c <= (pg_wchar) 127)
- return pg_ascii_tolower((unsigned char) c);
- if (c <= (pg_wchar) UCHAR_MAX)
- return tolower_l((unsigned char) c, pg_regex_locale->info.lt);
- return c;
- case PG_REGEX_STRATEGY_ICU:
-#ifdef USE_ICU
- return u_tolower(c);
-#endif
- break;
+ if (c <= (pg_wchar) 127)
+ return pg_ascii_tolower((unsigned char) c);
+ return c;
}
- return 0; /* can't get here, but keep compiler quiet */
+ else
+ return pg_regex_locale->ctype->wc_tolower(c, pg_regex_locale);
}
@@ -738,37 +475,25 @@ pg_ctype_get_cache(pg_wc_probefunc probefunc, int cclasscode)
* would always be true for production values of MAX_SIMPLE_CHR, but it's
* useful to allow it to be small for testing purposes.)
*/
- switch (pg_regex_strategy)
+ if (pg_regex_locale->ctype_is_c)
{
- case PG_REGEX_STRATEGY_C:
#if MAX_SIMPLE_CHR >= 127
- max_chr = (pg_wchar) 127;
- pcc->cv.cclasscode = -1;
+ max_chr = (pg_wchar) 127;
+ pcc->cv.cclasscode = -1;
#else
- max_chr = (pg_wchar) MAX_SIMPLE_CHR;
+ max_chr = (pg_wchar) MAX_SIMPLE_CHR;
#endif
- break;
- case PG_REGEX_STRATEGY_BUILTIN:
- max_chr = (pg_wchar) MAX_SIMPLE_CHR;
- break;
- case PG_REGEX_STRATEGY_LIBC_WIDE:
- max_chr = (pg_wchar) MAX_SIMPLE_CHR;
- break;
- case PG_REGEX_STRATEGY_LIBC_1BYTE:
-#if MAX_SIMPLE_CHR >= UCHAR_MAX
- max_chr = (pg_wchar) UCHAR_MAX;
+ }
+ else
+ {
+ if (pg_regex_locale->ctype->max_chr != 0 &&
+ pg_regex_locale->ctype->max_chr <= MAX_SIMPLE_CHR)
+ {
+ max_chr = pg_regex_locale->ctype->max_chr;
pcc->cv.cclasscode = -1;
-#else
- max_chr = (pg_wchar) MAX_SIMPLE_CHR;
-#endif
- break;
- case PG_REGEX_STRATEGY_ICU:
+ }
+ else
max_chr = (pg_wchar) MAX_SIMPLE_CHR;
- break;
- default:
- Assert(false);
- max_chr = 0; /* can't get here, but keep compiler quiet */
- break;
}
/*
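
Each classifier above collapses the old five-way strategy switch into the same two-branch shape: hard-wired ASCII tables when ctype_is_c, otherwise a call through the locale's ctype method table. A condensed sketch of that shape; the generic wrapper is illustrative only, while ctype_is_c, pg_char_properties, and the wc_* methods are the names introduced by this patch:

static int
pg_wc_classify(pg_wchar c, int prop_mask,
			   int (*method) (pg_wchar, pg_locale_t))
{
	if (pg_regex_locale->ctype_is_c)
		return (c <= (pg_wchar) 127 &&
				(pg_char_properties[c] & prop_mask));
	return method(c, pg_regex_locale);
}

Under that shape, pg_wc_isdigit(c) is pg_wc_classify(c, PG_ISDIGIT, pg_regex_locale->ctype->wc_isdigit).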
diff --git a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
index 7b4ddf7a8f5..239641bfbb6 100644
--- a/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
+++ b/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
@@ -232,6 +232,9 @@ libpqrcv_connect(const char *conninfo, bool replication, bool logical,
errhint("Target server's authentication method must be changed, or set password_required=false in the subscription parameters.")));
}
+ PQsetNoticeReceiver(conn->streamConn, libpqsrv_notice_receiver,
+ "received message via replication");
+
/*
* Set always-secure search path for the cases where the connection is
* used to run SQL queries, so malicious users can't get control.
@@ -418,31 +421,22 @@ libpqrcv_identify_system(WalReceiverConn *conn, TimeLineID *primary_tli)
"IDENTIFY_SYSTEM",
WAIT_EVENT_LIBPQWALRECEIVER_RECEIVE);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- {
- PQclear(res);
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("could not receive database system identifier and timeline ID from "
"the primary server: %s",
pchomp(PQerrorMessage(conn->streamConn)))));
- }
/*
* IDENTIFY_SYSTEM returns 3 columns in 9.3 and earlier, and 4 columns in
* 9.4 and onwards.
*/
if (PQnfields(res) < 3 || PQntuples(res) != 1)
- {
- int ntuples = PQntuples(res);
- int nfields = PQnfields(res);
-
- PQclear(res);
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("invalid response from primary server"),
errdetail("Could not identify system: got %d rows and %d fields, expected %d rows and %d or more fields.",
- ntuples, nfields, 1, 3)));
- }
+ PQntuples(res), PQnfields(res), 1, 3)));
primary_sysid = pstrdup(PQgetvalue(res, 0, 0));
*primary_tli = pg_strtoint32(PQgetvalue(res, 0, 1));
PQclear(res);
@@ -534,7 +528,7 @@ libpqrcv_startstreaming(WalReceiverConn *conn,
if (options->logical)
appendStringInfoString(&cmd, " LOGICAL");
- appendStringInfo(&cmd, " %X/%X", LSN_FORMAT_ARGS(options->startpoint));
+ appendStringInfo(&cmd, " %X/%08X", LSN_FORMAT_ARGS(options->startpoint));
/*
* Additional options are different depending on if we are doing logical
@@ -604,13 +598,10 @@ libpqrcv_startstreaming(WalReceiverConn *conn,
return false;
}
else if (PQresultStatus(res) != PGRES_COPY_BOTH)
- {
- PQclear(res);
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("could not start WAL streaming: %s",
pchomp(PQerrorMessage(conn->streamConn)))));
- }
PQclear(res);
return true;
}
@@ -718,26 +709,17 @@ libpqrcv_readtimelinehistoryfile(WalReceiverConn *conn,
cmd,
WAIT_EVENT_LIBPQWALRECEIVER_RECEIVE);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- {
- PQclear(res);
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("could not receive timeline history file from "
"the primary server: %s",
pchomp(PQerrorMessage(conn->streamConn)))));
- }
if (PQnfields(res) != 2 || PQntuples(res) != 1)
- {
- int ntuples = PQntuples(res);
- int nfields = PQnfields(res);
-
- PQclear(res);
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("invalid response from primary server"),
errdetail("Expected 1 tuple with 2 fields, got %d tuples with %d fields.",
- ntuples, nfields)));
- }
+ PQntuples(res), PQnfields(res))));
*filename = pstrdup(PQgetvalue(res, 0, 0));
*len = PQgetlength(res, 0, 1);
@@ -841,13 +823,10 @@ libpqrcv_receive(WalReceiverConn *conn, char **buffer,
return -1;
}
else
- {
- PQclear(res);
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("could not receive data from WAL stream: %s",
pchomp(PQerrorMessage(conn->streamConn)))));
- }
}
if (rawlen < -1)
ereport(ERROR,
@@ -971,13 +950,10 @@ libpqrcv_create_slot(WalReceiverConn *conn, const char *slotname,
pfree(cmd.data);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- {
- PQclear(res);
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("could not create replication slot \"%s\": %s",
slotname, pchomp(PQerrorMessage(conn->streamConn)))));
- }
if (lsn)
*lsn = DatumGetLSN(DirectFunctionCall1Coll(pg_lsn_in, InvalidOid,
diff --git a/src/backend/replication/logical/applyparallelworker.c b/src/backend/replication/logical/applyparallelworker.c
index d25085d3515..1fa931a7422 100644
--- a/src/backend/replication/logical/applyparallelworker.c
+++ b/src/backend/replication/logical/applyparallelworker.c
@@ -441,7 +441,8 @@ pa_launch_parallel_worker(void)
MySubscription->name,
MyLogicalRepWorker->userid,
InvalidOid,
- dsm_segment_handle(winfo->dsm_seg));
+ dsm_segment_handle(winfo->dsm_seg),
+ false);
if (launched)
{
diff --git a/src/backend/replication/logical/conflict.c b/src/backend/replication/logical/conflict.c
index 97c4e26b586..2fd3e8bbda5 100644
--- a/src/backend/replication/logical/conflict.c
+++ b/src/backend/replication/logical/conflict.c
@@ -29,6 +29,7 @@ static const char *const ConflictTypeNames[] = {
[CT_UPDATE_EXISTS] = "update_exists",
[CT_UPDATE_MISSING] = "update_missing",
[CT_DELETE_ORIGIN_DIFFERS] = "delete_origin_differs",
+ [CT_UPDATE_DELETED] = "update_deleted",
[CT_DELETE_MISSING] = "delete_missing",
[CT_MULTIPLE_UNIQUE_CONFLICTS] = "multiple_unique_conflicts"
};
@@ -176,6 +177,7 @@ errcode_apply_conflict(ConflictType type)
case CT_UPDATE_ORIGIN_DIFFERS:
case CT_UPDATE_MISSING:
case CT_DELETE_ORIGIN_DIFFERS:
+ case CT_UPDATE_DELETED:
case CT_DELETE_MISSING:
return errcode(ERRCODE_T_R_SERIALIZATION_FAILURE);
}
@@ -261,6 +263,26 @@ errdetail_apply_conflict(EState *estate, ResultRelInfo *relinfo,
break;
+ case CT_UPDATE_DELETED:
+ if (localts)
+ {
+ if (localorigin == InvalidRepOriginId)
+ appendStringInfo(&err_detail, _("The row to be updated was deleted locally in transaction %u at %s."),
+ localxmin, timestamptz_to_str(localts));
+ else if (replorigin_by_oid(localorigin, true, &origin_name))
+ appendStringInfo(&err_detail, _("The row to be updated was deleted by a different origin \"%s\" in transaction %u at %s."),
+ origin_name, localxmin, timestamptz_to_str(localts));
+
+ /* The origin that modified this row has been removed. */
+ else
+ appendStringInfo(&err_detail, _("The row to be updated was deleted by a non-existent origin in transaction %u at %s."),
+ localxmin, timestamptz_to_str(localts));
+ }
+ else
+ appendStringInfo(&err_detail, _("The row to be updated was deleted."));
+
+ break;
+
case CT_UPDATE_MISSING:
appendStringInfoString(&err_detail, _("Could not find the row to be updated."));
break;
diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c
index 10677da56b2..37377f7eb63 100644
--- a/src/backend/replication/logical/launcher.c
+++ b/src/backend/replication/logical/launcher.c
@@ -32,6 +32,7 @@
#include "postmaster/interrupt.h"
#include "replication/logicallauncher.h"
#include "replication/origin.h"
+#include "replication/slot.h"
#include "replication/walreceiver.h"
#include "replication/worker_internal.h"
#include "storage/ipc.h"
@@ -91,7 +92,6 @@ static dshash_table *last_start_times = NULL;
static bool on_commit_launcher_wakeup = false;
-static void ApplyLauncherWakeup(void);
static void logicalrep_launcher_onexit(int code, Datum arg);
static void logicalrep_worker_onexit(int code, Datum arg);
static void logicalrep_worker_detach(void);
@@ -100,6 +100,9 @@ static int logicalrep_pa_worker_count(Oid subid);
static void logicalrep_launcher_attach_dshmem(void);
static void ApplyLauncherSetWorkerStartTime(Oid subid, TimestampTz start_time);
static TimestampTz ApplyLauncherGetWorkerStartTime(Oid subid);
+static void compute_min_nonremovable_xid(LogicalRepWorker *worker, TransactionId *xmin);
+static bool acquire_conflict_slot_if_exists(void);
+static void advance_conflict_slot_xmin(TransactionId new_xmin);
/*
@@ -148,6 +151,7 @@ get_subscription_list(void)
sub->owner = subform->subowner;
sub->enabled = subform->subenabled;
sub->name = pstrdup(NameStr(subform->subname));
+ sub->retaindeadtuples = subform->subretaindeadtuples;
/* We don't fill fields we are not interested in. */
res = lappend(res, sub);
@@ -175,12 +179,14 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
uint16 generation,
BackgroundWorkerHandle *handle)
{
- BgwHandleStatus status;
- int rc;
+ bool result = false;
+ bool dropped_latch = false;
for (;;)
{
+ BgwHandleStatus status;
pid_t pid;
+ int rc;
CHECK_FOR_INTERRUPTS();
@@ -189,8 +195,9 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
/* Worker either died or has started. Return false if died. */
if (!worker->in_use || worker->proc)
{
+ result = worker->in_use;
LWLockRelease(LogicalRepWorkerLock);
- return worker->in_use;
+ break;
}
LWLockRelease(LogicalRepWorkerLock);
@@ -205,7 +212,7 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
if (generation == worker->generation)
logicalrep_worker_cleanup(worker);
LWLockRelease(LogicalRepWorkerLock);
- return false;
+ break; /* result is already false */
}
/*
@@ -220,8 +227,18 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
{
ResetLatch(MyLatch);
CHECK_FOR_INTERRUPTS();
+ dropped_latch = true;
}
}
+
+ /*
+ * If we had to clear a latch event in order to wait, be sure to restore
+ * it before exiting. Otherwise the caller may miss events.
+ */
+ if (dropped_latch)
+ SetLatch(MyLatch);
+
+ return result;
}
/*
@@ -296,7 +313,8 @@ logicalrep_workers_find(Oid subid, bool only_running, bool acquire_lock)
bool
logicalrep_worker_launch(LogicalRepWorkerType wtype,
Oid dbid, Oid subid, const char *subname, Oid userid,
- Oid relid, dsm_handle subworker_dsm)
+ Oid relid, dsm_handle subworker_dsm,
+ bool retain_dead_tuples)
{
BackgroundWorker bgw;
BackgroundWorkerHandle *bgw_handle;
@@ -315,10 +333,13 @@ logicalrep_worker_launch(LogicalRepWorkerType wtype,
* - must be valid worker type
* - tablesync workers are only ones to have relid
* - parallel apply worker is the only kind of subworker
+ * - the replication slot used in conflict detection is created when
+ * retain_dead_tuples is enabled
*/
Assert(wtype != WORKERTYPE_UNKNOWN);
Assert(is_tablesync_worker == OidIsValid(relid));
Assert(is_parallel_apply_worker == (subworker_dsm != DSM_HANDLE_INVALID));
+ Assert(!retain_dead_tuples || MyReplicationSlot);
ereport(DEBUG1,
(errmsg_internal("starting logical replication worker for subscription \"%s\"",
@@ -328,7 +349,7 @@ logicalrep_worker_launch(LogicalRepWorkerType wtype,
if (max_active_replication_origins == 0)
ereport(ERROR,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
- errmsg("cannot start logical replication workers when \"max_active_replication_origins\"=0")));
+ errmsg("cannot start logical replication workers when \"max_active_replication_origins\" is 0")));
/*
* We need to do the modification of the shared memory under lock so that
@@ -441,6 +462,9 @@ retry:
worker->stream_fileset = NULL;
worker->leader_pid = is_parallel_apply_worker ? MyProcPid : InvalidPid;
worker->parallel_apply = is_parallel_apply_worker;
+ worker->oldest_nonremovable_xid = retain_dead_tuples
+ ? MyReplicationSlot->data.xmin
+ : InvalidTransactionId;
worker->last_lsn = InvalidXLogRecPtr;
TIMESTAMP_NOBEGIN(worker->last_send_time);
TIMESTAMP_NOBEGIN(worker->last_recv_time);
@@ -766,6 +790,8 @@ logicalrep_worker_detach(void)
}
LWLockRelease(LogicalRepWorkerLock);
+
+ list_free(workers);
}
/* Block concurrent access. */
@@ -1016,7 +1042,7 @@ logicalrep_launcher_attach_dshmem(void)
last_start_times_dsa = dsa_attach(LogicalRepCtx->last_start_dsa);
dsa_pin_mapping(last_start_times_dsa);
last_start_times = dshash_attach(last_start_times_dsa, &dsh_params,
- LogicalRepCtx->last_start_dsh, 0);
+ LogicalRepCtx->last_start_dsh, NULL);
}
MemoryContextSwitchTo(oldcontext);
@@ -1105,7 +1131,10 @@ ApplyLauncherWakeupAtCommit(void)
on_commit_launcher_wakeup = true;
}
-static void
+/*
+ * Wakeup the launcher immediately.
+ */
+void
ApplyLauncherWakeup(void)
{
if (LogicalRepCtx->launcher_pid != 0)
@@ -1137,6 +1166,12 @@ ApplyLauncherMain(Datum main_arg)
*/
BackgroundWorkerInitializeConnection(NULL, NULL, 0);
+ /*
+ * Acquire the conflict detection slot at startup to ensure it can be
+ * dropped if no longer needed after a restart.
+ */
+ acquire_conflict_slot_if_exists();
+
/* Enter main loop */
for (;;)
{
@@ -1146,6 +1181,9 @@ ApplyLauncherMain(Datum main_arg)
MemoryContext subctx;
MemoryContext oldctx;
long wait_time = DEFAULT_NAPTIME_PER_CYCLE;
+ bool can_advance_xmin = true;
+ bool retain_dead_tuples = false;
+ TransactionId xmin = InvalidTransactionId;
CHECK_FOR_INTERRUPTS();
@@ -1155,7 +1193,14 @@ ApplyLauncherMain(Datum main_arg)
ALLOCSET_DEFAULT_SIZES);
oldctx = MemoryContextSwitchTo(subctx);
- /* Start any missing workers for enabled subscriptions. */
+ /*
+ * Start any missing workers for enabled subscriptions.
+ *
+ * Also, during the iteration through all subscriptions, we compute
+ * the minimum XID required to protect deleted tuples for conflict
+ * detection if any of the subscriptions enables the
+ * retain_dead_tuples option.
+ */
sublist = get_subscription_list();
foreach(lc, sublist)
{
@@ -1165,6 +1210,38 @@ ApplyLauncherMain(Datum main_arg)
TimestampTz now;
long elapsed;
+ if (sub->retaindeadtuples)
+ {
+ retain_dead_tuples = true;
+
+ /*
+ * Can't advance xmin of the slot unless all the subscriptions
+ * with retain_dead_tuples are enabled. This is required to
+ * ensure that we don't advance the xmin of
+ * CONFLICT_DETECTION_SLOT if one of the subscriptions is not
+ * enabled. Otherwise, we won't be able to detect conflicts
+ * reliably for such a subscription even though it has set the
+ * retain_dead_tuples option.
+ */
+ can_advance_xmin &= sub->enabled;
+
+ /*
+ * Create a replication slot to retain information necessary
+ * for conflict detection such as dead tuples, commit
+ * timestamps, and origins.
+ *
+ * The slot is created before starting the apply worker to
+ * prevent it from unnecessarily maintaining its
+ * oldest_nonremovable_xid.
+ *
+ * The slot is created even for a disabled subscription to
+ * ensure that conflict-related information is available when
+ * applying remote changes that occurred before the
+ * subscription was enabled.
+ */
+ CreateConflictDetectionSlot();
+ }
+
if (!sub->enabled)
continue;
@@ -1173,7 +1250,27 @@ ApplyLauncherMain(Datum main_arg)
LWLockRelease(LogicalRepWorkerLock);
if (w != NULL)
- continue; /* worker is running already */
+ {
+ /*
+ * Compute the minimum xmin needed to protect the dead tuples
+ * required for conflict detection among all running apply
+ * workers that enable retain_dead_tuples.
+ */
+ if (sub->retaindeadtuples && can_advance_xmin)
+ compute_min_nonremovable_xid(w, &xmin);
+
+ /* worker is running already */
+ continue;
+ }
+
+ /*
+ * Can't advance the xmin of the slot unless all the workers
+ * corresponding to subscriptions with retain_dead_tuples are
+ * running, so disable any further computation of the minimum
+ * nonremovable xid.
+ */
+ if (sub->retaindeadtuples)
+ can_advance_xmin = false;
/*
* If the worker is eligible to start now, launch it. Otherwise,
@@ -1194,10 +1291,22 @@ ApplyLauncherMain(Datum main_arg)
(elapsed = TimestampDifferenceMilliseconds(last_start, now)) >= wal_retrieve_retry_interval)
{
ApplyLauncherSetWorkerStartTime(sub->oid, now);
- logicalrep_worker_launch(WORKERTYPE_APPLY,
- sub->dbid, sub->oid, sub->name,
- sub->owner, InvalidOid,
- DSM_HANDLE_INVALID);
+ if (!logicalrep_worker_launch(WORKERTYPE_APPLY,
+ sub->dbid, sub->oid, sub->name,
+ sub->owner, InvalidOid,
+ DSM_HANDLE_INVALID,
+ sub->retaindeadtuples))
+ {
+ /*
+ * We get here either if we failed to launch a worker
+ * (perhaps for resource-exhaustion reasons) or if we
+ * launched one but it immediately quit. Either way, it
+ * seems appropriate to try again after
+ * wal_retrieve_retry_interval.
+ */
+ wait_time = Min(wait_time,
+ wal_retrieve_retry_interval);
+ }
}
else
{
@@ -1206,6 +1315,20 @@ ApplyLauncherMain(Datum main_arg)
}
}
+ /*
+ * Drop the CONFLICT_DETECTION_SLOT slot if there is no subscription
+ * that requires us to retain dead tuples. Otherwise, if required,
+ * advance the slot's xmin to protect dead tuples required for the
+ * conflict detection.
+ */
+ if (MyReplicationSlot)
+ {
+ if (!retain_dead_tuples)
+ ReplicationSlotDropAcquired();
+ else if (can_advance_xmin)
+ advance_conflict_slot_xmin(xmin);
+ }
+
/* Switch back to original memory context. */
MemoryContextSwitchTo(oldctx);
/* Clean the temporary memory. */
@@ -1234,6 +1357,125 @@ ApplyLauncherMain(Datum main_arg)
}
/*
+ * Determine the minimum non-removable transaction ID across all apply workers
+ * for subscriptions that have retain_dead_tuples enabled. Store the result
+ * in *xmin.
+ */
+static void
+compute_min_nonremovable_xid(LogicalRepWorker *worker, TransactionId *xmin)
+{
+ TransactionId nonremovable_xid;
+
+ Assert(worker != NULL);
+
+ /*
+ * The replication slot for conflict detection must be created before the
+ * worker starts.
+ */
+ Assert(MyReplicationSlot);
+
+ SpinLockAcquire(&worker->relmutex);
+ nonremovable_xid = worker->oldest_nonremovable_xid;
+ SpinLockRelease(&worker->relmutex);
+
+ Assert(TransactionIdIsValid(nonremovable_xid));
+
+ if (!TransactionIdIsValid(*xmin) ||
+ TransactionIdPrecedes(nonremovable_xid, *xmin))
+ *xmin = nonremovable_xid;
+}
+
+/*
+ * Acquire the replication slot used to retain information for conflict
+ * detection, if it exists.
+ *
+ * Return true if successfully acquired, otherwise return false.
+ */
+static bool
+acquire_conflict_slot_if_exists(void)
+{
+ if (!SearchNamedReplicationSlot(CONFLICT_DETECTION_SLOT, true))
+ return false;
+
+ ReplicationSlotAcquire(CONFLICT_DETECTION_SLOT, true, false);
+ return true;
+}
+
+/*
+ * Advance the xmin of the replication slot used to retain information required
+ * for conflict detection.
+ */
+static void
+advance_conflict_slot_xmin(TransactionId new_xmin)
+{
+ Assert(MyReplicationSlot);
+ Assert(TransactionIdIsValid(new_xmin));
+ Assert(TransactionIdPrecedesOrEquals(MyReplicationSlot->data.xmin, new_xmin));
+
+ /* Return if the xmin value of the slot cannot be advanced */
+ if (TransactionIdEquals(MyReplicationSlot->data.xmin, new_xmin))
+ return;
+
+ SpinLockAcquire(&MyReplicationSlot->mutex);
+ MyReplicationSlot->effective_xmin = new_xmin;
+ MyReplicationSlot->data.xmin = new_xmin;
+ SpinLockRelease(&MyReplicationSlot->mutex);
+
+ elog(DEBUG1, "updated xmin: %u", MyReplicationSlot->data.xmin);
+
+ ReplicationSlotMarkDirty();
+ ReplicationSlotsComputeRequiredXmin(false);
+
+ /*
+ * Like PhysicalConfirmReceivedLocation(), do not save slot information
+ * each time. This is acceptable because all concurrent transactions on
+ * the publisher that require the data preceding the slot's xmin should
+ * have already been applied and flushed on the subscriber before the xmin
+ * is advanced. So, even if the slot's xmin regresses after a restart, it
+ * will be advanced again in the next cycle. Therefore, no data required
+ * for conflict detection will be prematurely removed.
+ */
+ return;
+}
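The TransactionIdPrecedes()/TransactionIdEquals() calls above are the
wraparound-safe way to compare the XIDs involved; a plain "<" would misorder
values once the XID counter wraps. For normal XIDs the comparison reduces to
signed 32-bit distance, roughly as follows (an illustrative restatement of
the logic in access/transam/transam.c):

	#include <stdint.h>
	#include <stdbool.h>

	typedef uint32_t TransactionId;

	static bool
	xid_precedes(TransactionId id1, TransactionId id2)
	{
		/* "older" means a negative signed distance on the modulo-2^32 circle */
		return (int32_t) (id1 - id2) < 0;
	}

	/*
	 * e.g. xid_precedes(UINT32_MAX - 1, 3) is true: 3 was assigned later,
	 * even though it is numerically smaller after wraparound.
	 */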
+
+/*
+ * Create and acquire the replication slot used to retain information for
+ * conflict detection, if it has not been created yet.
+ */
+void
+CreateConflictDetectionSlot(void)
+{
+ TransactionId xmin_horizon;
+
+ /* Exit early if the replication slot has already been created and acquired */
+ if (MyReplicationSlot)
+ return;
+
+ ereport(LOG,
+ errmsg("creating replication conflict detection slot"));
+
+ ReplicationSlotCreate(CONFLICT_DETECTION_SLOT, false, RS_PERSISTENT, false,
+ false, false);
+
+ LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
+
+ xmin_horizon = GetOldestSafeDecodingTransactionId(false);
+
+ SpinLockAcquire(&MyReplicationSlot->mutex);
+ MyReplicationSlot->effective_xmin = xmin_horizon;
+ MyReplicationSlot->data.xmin = xmin_horizon;
+ SpinLockRelease(&MyReplicationSlot->mutex);
+
+ ReplicationSlotsComputeRequiredXmin(true);
+
+ LWLockRelease(ProcArrayLock);
+
+ /* Write this slot to disk */
+ ReplicationSlotMarkDirty();
+ ReplicationSlotSave();
+}
+
+/*
* Is current process the logical replication launcher?
*/
bool
diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c
index 1d56d0c4ef3..7e363a7c05b 100644
--- a/src/backend/replication/logical/logical.c
+++ b/src/backend/replication/logical/logical.c
@@ -29,6 +29,7 @@
#include "postgres.h"
#include "access/xact.h"
+#include "access/xlog_internal.h"
#include "access/xlogutils.h"
#include "fmgr.h"
#include "miscadmin.h"
@@ -41,6 +42,7 @@
#include "storage/proc.h"
#include "storage/procarray.h"
#include "utils/builtins.h"
+#include "utils/injection_point.h"
#include "utils/inval.h"
#include "utils/memutils.h"
@@ -565,7 +567,7 @@ CreateDecodingContext(XLogRecPtr start_lsn,
* kinds of client errors; so the client may wish to check that
* confirmed_flush_lsn matches its expectations.
*/
- elog(LOG, "%X/%X has been already streamed, forwarding to %X/%X",
+ elog(LOG, "%X/%08X has been already streamed, forwarding to %X/%08X",
LSN_FORMAT_ARGS(start_lsn),
LSN_FORMAT_ARGS(slot->data.confirmed_flush));
@@ -608,7 +610,7 @@ CreateDecodingContext(XLogRecPtr start_lsn,
ereport(LOG,
(errmsg("starting logical decoding for slot \"%s\"",
NameStr(slot->data.name)),
- errdetail("Streaming transactions committing after %X/%X, reading WAL from %X/%X.",
+ errdetail("Streaming transactions committing after %X/%08X, reading WAL from %X/%08X.",
LSN_FORMAT_ARGS(slot->data.confirmed_flush),
LSN_FORMAT_ARGS(slot->data.restart_lsn))));
@@ -635,7 +637,7 @@ DecodingContextFindStartpoint(LogicalDecodingContext *ctx)
/* Initialize from where to start reading WAL. */
XLogBeginRead(ctx->reader, slot->data.restart_lsn);
- elog(DEBUG1, "searching for logical decoding starting point, starting at %X/%X",
+ elog(DEBUG1, "searching for logical decoding starting point, starting at %X/%08X",
LSN_FORMAT_ARGS(slot->data.restart_lsn));
/* Wait for a consistent starting point */
@@ -756,7 +758,7 @@ output_plugin_error_callback(void *arg)
/* not all callbacks have an associated LSN */
if (state->report_location != InvalidXLogRecPtr)
- errcontext("slot \"%s\", output plugin \"%s\", in the %s callback, associated LSN %X/%X",
+ errcontext("slot \"%s\", output plugin \"%s\", in the %s callback, associated LSN %X/%08X",
NameStr(state->ctx->slot->data.name),
NameStr(state->ctx->slot->data.plugin),
state->callback_name,
@@ -1723,7 +1725,7 @@ LogicalIncreaseXminForSlot(XLogRecPtr current_lsn, TransactionId xmin)
SpinLockRelease(&slot->mutex);
if (got_new_xmin)
- elog(DEBUG1, "got new catalog xmin %u at %X/%X", xmin,
+ elog(DEBUG1, "got new catalog xmin %u at %X/%08X", xmin,
LSN_FORMAT_ARGS(current_lsn));
/* candidate already valid with the current flush position, apply */
@@ -1783,7 +1785,7 @@ LogicalIncreaseRestartDecodingForSlot(XLogRecPtr current_lsn, XLogRecPtr restart
slot->candidate_restart_lsn = restart_lsn;
SpinLockRelease(&slot->mutex);
- elog(DEBUG1, "got new restart lsn %X/%X at %X/%X",
+ elog(DEBUG1, "got new restart lsn %X/%08X at %X/%08X",
LSN_FORMAT_ARGS(restart_lsn),
LSN_FORMAT_ARGS(current_lsn));
}
@@ -1798,7 +1800,7 @@ LogicalIncreaseRestartDecodingForSlot(XLogRecPtr current_lsn, XLogRecPtr restart
confirmed_flush = slot->data.confirmed_flush;
SpinLockRelease(&slot->mutex);
- elog(DEBUG1, "failed to increase restart lsn: proposed %X/%X, after %X/%X, current candidate %X/%X, current after %X/%X, flushed up to %X/%X",
+ elog(DEBUG1, "failed to increase restart lsn: proposed %X/%08X, after %X/%08X, current candidate %X/%08X, current after %X/%08X, flushed up to %X/%08X",
LSN_FORMAT_ARGS(restart_lsn),
LSN_FORMAT_ARGS(current_lsn),
LSN_FORMAT_ARGS(candidate_restart_lsn),
@@ -1825,9 +1827,13 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
{
bool updated_xmin = false;
bool updated_restart = false;
+ XLogRecPtr restart_lsn pg_attribute_unused();
SpinLockAcquire(&MyReplicationSlot->mutex);
+ /* remember the old restart lsn */
+ restart_lsn = MyReplicationSlot->data.restart_lsn;
+
/*
* Prevent moving the confirmed_flush backwards, as this could lead to
* data duplication issues caused by replicating already replicated
@@ -1881,6 +1887,18 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
/* first write new xmin to disk, so we know what's up after a crash */
if (updated_xmin || updated_restart)
{
+#ifdef USE_INJECTION_POINTS
+ XLogSegNo seg1,
+ seg2;
+
+ XLByteToSeg(restart_lsn, seg1, wal_segment_size);
+ XLByteToSeg(MyReplicationSlot->data.restart_lsn, seg2, wal_segment_size);
+
+ /* trigger injection point, but only if segment changes */
+ if (seg1 != seg2)
+ INJECTION_POINT("logical-replication-slot-advance-segment", NULL);
+#endif
+
ReplicationSlotMarkDirty();
ReplicationSlotSave();
elog(DEBUG1, "updated xmin: %u restart: %u", updated_xmin, updated_restart);
diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c
index a17bacf88e7..87f10e50dcc 100644
--- a/src/backend/replication/logical/origin.c
+++ b/src/backend/replication/logical/origin.c
@@ -826,9 +826,9 @@ StartupReplicationOrigin(void)
last_state++;
ereport(LOG,
- (errmsg("recovered replication state of node %d to %X/%X",
- disk_state.roident,
- LSN_FORMAT_ARGS(disk_state.remote_lsn))));
+ errmsg("recovered replication state of node %d to %X/%08X",
+ disk_state.roident,
+ LSN_FORMAT_ARGS(disk_state.remote_lsn)));
}
/* now check checksum */
diff --git a/src/backend/replication/logical/proto.c b/src/backend/replication/logical/proto.c
index 1a352b542dc..1b3d9eb49dd 100644
--- a/src/backend/replication/logical/proto.c
+++ b/src/backend/replication/logical/proto.c
@@ -809,7 +809,7 @@ logicalrep_write_tuple(StringInfo out, Relation rel, TupleTableSlot *slot,
continue;
}
- if (att->attlen == -1 && VARATT_IS_EXTERNAL_ONDISK(values[i]))
+ if (att->attlen == -1 && VARATT_IS_EXTERNAL_ONDISK(DatumGetPointer(values[i])))
{
/*
* Unchanged toasted datum. (Note that we don't promise to detect
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 67655111875..34cf05668ae 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -109,10 +109,22 @@
#include "storage/procarray.h"
#include "storage/sinval.h"
#include "utils/builtins.h"
+#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/relfilenumbermap.h"
+/*
+ * Each transaction has an 8MB limit for invalidation messages distributed from
+ * other transactions. This limit is set considering scenarios with many
+ * concurrent logical decoding operations. When the distributed invalidation
+ * messages reach this threshold, the transaction is marked as
+ * RBTXN_DISTR_INVAL_OVERFLOWED to invalidate the complete cache as we have lost
+ * some inval messages and hence don't know what needs to be invalidated.
+ */
+#define MAX_DISTR_INVAL_MSG_PER_TXN \
+ ((8 * 1024 * 1024) / sizeof(SharedInvalidationMessage))
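Assuming the typical 16-byte SharedInvalidationMessage, the 8MB budget works
out to (8 * 1024 * 1024) / 16 = 524288 distributed messages per transaction
before the overflow fallback takes over. A compile-time restatement of that
arithmetic, illustrative and valid only under the 16-byte assumption:

	_Static_assert((8 * 1024 * 1024) / 16 == 524288,
				   "an 8MB budget holds 524288 16-byte inval messages");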
+
/* entry for a hash table we use to map from xid to our transaction state */
typedef struct ReorderBufferTXNByIdEnt
{
@@ -472,6 +484,12 @@ ReorderBufferFreeTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
txn->invalidations = NULL;
}
+ if (txn->invalidations_distributed)
+ {
+ pfree(txn->invalidations_distributed);
+ txn->invalidations_distributed = NULL;
+ }
+
/* Reset the toast hash */
ReorderBufferToastReset(rb, txn);
@@ -1397,7 +1415,7 @@ ReorderBufferIterTXNNext(ReorderBuffer *rb, ReorderBufferIterTXNState *state)
int32 off;
/* nothing there anymore */
- if (state->heap->bh_size == 0)
+ if (binaryheap_empty(state->heap))
return NULL;
off = DatumGetInt32(binaryheap_first(state->heap));
@@ -2581,7 +2599,7 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn,
if (++changes_count >= CHANGES_THRESHOLD)
{
- rb->update_progress_txn(rb, txn, change->lsn);
+ rb->update_progress_txn(rb, txn, prev_lsn);
changes_count = 0;
}
}
@@ -2661,7 +2679,17 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn,
AbortCurrentTransaction();
/* make sure there's no cache pollution */
- ReorderBufferExecuteInvalidations(txn->ninvalidations, txn->invalidations);
+ if (rbtxn_distr_inval_overflowed(txn))
+ {
+ Assert(txn->ninvalidations_distributed == 0);
+ InvalidateSystemCaches();
+ }
+ else
+ {
+ ReorderBufferExecuteInvalidations(txn->ninvalidations, txn->invalidations);
+ ReorderBufferExecuteInvalidations(txn->ninvalidations_distributed,
+ txn->invalidations_distributed);
+ }
if (using_subtxn)
RollbackAndReleaseCurrentSubTransaction();
@@ -2710,8 +2738,17 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn,
AbortCurrentTransaction();
/* make sure there's no cache pollution */
- ReorderBufferExecuteInvalidations(txn->ninvalidations,
- txn->invalidations);
+ if (rbtxn_distr_inval_overflowed(txn))
+ {
+ Assert(txn->ninvalidations_distributed == 0);
+ InvalidateSystemCaches();
+ }
+ else
+ {
+ ReorderBufferExecuteInvalidations(txn->ninvalidations, txn->invalidations);
+ ReorderBufferExecuteInvalidations(txn->ninvalidations_distributed,
+ txn->invalidations_distributed);
+ }
if (using_subtxn)
RollbackAndReleaseCurrentSubTransaction();
@@ -3060,7 +3097,8 @@ ReorderBufferAbort(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn,
* We might have decoded changes for this transaction that could load
* the cache as per the current transaction's view (consider DDL's
* happened in this transaction). We don't want the decoding of future
- * transactions to use those cache entries so execute invalidations.
+ * transactions to use those cache entries so execute only the inval
+ * messages in this transaction.
*/
if (txn->ninvalidations > 0)
ReorderBufferImmediateInvalidation(rb, txn->ninvalidations,
@@ -3147,9 +3185,10 @@ ReorderBufferForget(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn)
txn->final_lsn = lsn;
/*
- * Process cache invalidation messages if there are any. Even if we're not
- * interested in the transaction's contents, it could have manipulated the
- * catalog and we need to update the caches according to that.
+ * Process only cache invalidation messages in this transaction if there
+ * are any. Even if we're not interested in the transaction's contents, it
+ * could have manipulated the catalog and we need to update the caches
+ * according to that.
*/
if (txn->base_snapshot != NULL && txn->ninvalidations > 0)
ReorderBufferImmediateInvalidation(rb, txn->ninvalidations,
@@ -3422,6 +3461,57 @@ ReorderBufferAddNewTupleCids(ReorderBuffer *rb, TransactionId xid,
}
/*
+ * Add new invalidation messages to the reorder buffer queue.
+ */
+static void
+ReorderBufferQueueInvalidations(ReorderBuffer *rb, TransactionId xid,
+ XLogRecPtr lsn, Size nmsgs,
+ SharedInvalidationMessage *msgs)
+{
+ ReorderBufferChange *change;
+
+ change = ReorderBufferAllocChange(rb);
+ change->action = REORDER_BUFFER_CHANGE_INVALIDATION;
+ change->data.inval.ninvalidations = nmsgs;
+ change->data.inval.invalidations = (SharedInvalidationMessage *)
+ palloc(sizeof(SharedInvalidationMessage) * nmsgs);
+ memcpy(change->data.inval.invalidations, msgs,
+ sizeof(SharedInvalidationMessage) * nmsgs);
+
+ ReorderBufferQueueChange(rb, xid, lsn, change, false);
+}
+
+/*
+ * A helper function for ReorderBufferAddInvalidations() and
+ * ReorderBufferAddDistributedInvalidations() to accumulate the invalidation
+ * messages into *invals_out.
+ */
+static void
+ReorderBufferAccumulateInvalidations(SharedInvalidationMessage **invals_out,
+ uint32 *ninvals_out,
+ SharedInvalidationMessage *msgs_new,
+ Size nmsgs_new)
+{
+ if (*ninvals_out == 0)
+ {
+ *ninvals_out = nmsgs_new;
+ *invals_out = (SharedInvalidationMessage *)
+ palloc(sizeof(SharedInvalidationMessage) * nmsgs_new);
+ memcpy(*invals_out, msgs_new, sizeof(SharedInvalidationMessage) * nmsgs_new);
+ }
+ else
+ {
+ /* Enlarge the array of inval messages */
+ *invals_out = (SharedInvalidationMessage *)
+ repalloc(*invals_out, sizeof(SharedInvalidationMessage) *
+ (*ninvals_out + nmsgs_new));
+ memcpy(*invals_out + *ninvals_out, msgs_new,
+ nmsgs_new * sizeof(SharedInvalidationMessage));
+ *ninvals_out += nmsgs_new;
+ }
+}
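Usage-wise the helper simply appends. For example, with two hypothetical
message batches batch_a and batch_b:

	SharedInvalidationMessage *invals = NULL;
	uint32		ninvals = 0;

	ReorderBufferAccumulateInvalidations(&invals, &ninvals, batch_a, 3);
	ReorderBufferAccumulateInvalidations(&invals, &ninvals, batch_b, 5);

Afterwards ninvals is 8 and invals holds batch_a's messages followed by
batch_b's in a single palloc'd array.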
+
+/*
* Accumulate the invalidations for executing them later.
*
* This needs to be called for each XLOG_XACT_INVALIDATIONS message and
@@ -3441,7 +3531,6 @@ ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid,
{
ReorderBufferTXN *txn;
MemoryContext oldcontext;
- ReorderBufferChange *change;
txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
@@ -3456,35 +3545,76 @@ ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid,
Assert(nmsgs > 0);
- /* Accumulate invalidations. */
- if (txn->ninvalidations == 0)
- {
- txn->ninvalidations = nmsgs;
- txn->invalidations = (SharedInvalidationMessage *)
- palloc(sizeof(SharedInvalidationMessage) * nmsgs);
- memcpy(txn->invalidations, msgs,
- sizeof(SharedInvalidationMessage) * nmsgs);
- }
- else
+ ReorderBufferAccumulateInvalidations(&txn->invalidations,
+ &txn->ninvalidations,
+ msgs, nmsgs);
+
+ ReorderBufferQueueInvalidations(rb, xid, lsn, nmsgs, msgs);
+
+ MemoryContextSwitchTo(oldcontext);
+}
+
+/*
+ * Accumulate the invalidations distributed by other committed transactions
+ * for executing them later.
+ *
+ * This function is similar to ReorderBufferAddInvalidations() but stores
+ * the given inval messages in txn->invalidations_distributed, with an
+ * overflow check.
+ *
+ * This needs to be called by committed transactions to distribute their
+ * inval messages to in-progress transactions.
+ */
+void
+ReorderBufferAddDistributedInvalidations(ReorderBuffer *rb, TransactionId xid,
+ XLogRecPtr lsn, Size nmsgs,
+ SharedInvalidationMessage *msgs)
+{
+ ReorderBufferTXN *txn;
+ MemoryContext oldcontext;
+
+ txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
+
+ oldcontext = MemoryContextSwitchTo(rb->context);
+
+ /*
+ * Collect all the invalidations under the top transaction, if available,
+ * so that we can execute them all together. See the comments in
+ * ReorderBufferAddInvalidations().
+ */
+ txn = rbtxn_get_toptxn(txn);
+
+ Assert(nmsgs > 0);
+
+ if (!rbtxn_distr_inval_overflowed(txn))
{
- txn->invalidations = (SharedInvalidationMessage *)
- repalloc(txn->invalidations, sizeof(SharedInvalidationMessage) *
- (txn->ninvalidations + nmsgs));
+ /*
+ * Check whether the transaction has enough space to store the
+ * distributed invalidation messages.
+ */
+ if (txn->ninvalidations_distributed + nmsgs >= MAX_DISTR_INVAL_MSG_PER_TXN)
+ {
+ /*
+ * Mark the transaction's distributed invalidations as overflowed
+ * and free the messages accumulated so far.
+ */
+ txn->txn_flags |= RBTXN_DISTR_INVAL_OVERFLOWED;
- memcpy(txn->invalidations + txn->ninvalidations, msgs,
- nmsgs * sizeof(SharedInvalidationMessage));
- txn->ninvalidations += nmsgs;
+ if (txn->invalidations_distributed)
+ {
+ pfree(txn->invalidations_distributed);
+ txn->invalidations_distributed = NULL;
+ txn->ninvalidations_distributed = 0;
+ }
+ }
+ else
+ ReorderBufferAccumulateInvalidations(&txn->invalidations_distributed,
+ &txn->ninvalidations_distributed,
+ msgs, nmsgs);
}
- change = ReorderBufferAllocChange(rb);
- change->action = REORDER_BUFFER_CHANGE_INVALIDATION;
- change->data.inval.ninvalidations = nmsgs;
- change->data.inval.invalidations = (SharedInvalidationMessage *)
- palloc(sizeof(SharedInvalidationMessage) * nmsgs);
- memcpy(change->data.inval.invalidations, msgs,
- sizeof(SharedInvalidationMessage) * nmsgs);
-
- ReorderBufferQueueChange(rb, xid, lsn, change, false);
+ /* Queue the invalidation messages into the transaction */
+ ReorderBufferQueueInvalidations(rb, xid, lsn, nmsgs, msgs);
MemoryContextSwitchTo(oldcontext);
}
@@ -4787,7 +4917,7 @@ StartupReorderBuffer(void)
continue;
/* if it cannot be a slot, skip the directory */
- if (!ReplicationSlotValidateName(logical_de->d_name, DEBUG2))
+ if (!ReplicationSlotValidateName(logical_de->d_name, true, DEBUG2))
continue;
/*
diff --git a/src/backend/replication/logical/slotsync.c b/src/backend/replication/logical/slotsync.c
index 656e66e0ae0..37738440113 100644
--- a/src/backend/replication/logical/slotsync.c
+++ b/src/backend/replication/logical/slotsync.c
@@ -211,9 +211,9 @@ update_local_synced_slot(RemoteSlot *remote_slot, Oid remote_dbid,
* impact the users, so we used DEBUG1 level to log the message.
*/
ereport(slot->data.persistency == RS_TEMPORARY ? LOG : DEBUG1,
- errmsg("could not synchronize replication slot \"%s\" because remote slot precedes local slot",
+ errmsg("could not synchronize replication slot \"%s\"",
remote_slot->name),
- errdetail("The remote slot has LSN %X/%X and catalog xmin %u, but the local slot has LSN %X/%X and catalog xmin %u.",
+ errdetail("Synchronization could lead to data loss, because the remote slot needs WAL at LSN %X/%08X and catalog xmin %u, but the standby has LSN %X/%08X and catalog xmin %u.",
LSN_FORMAT_ARGS(remote_slot->restart_lsn),
remote_slot->catalog_xmin,
LSN_FORMAT_ARGS(slot->data.restart_lsn),
@@ -275,7 +275,7 @@ update_local_synced_slot(RemoteSlot *remote_slot, Oid remote_dbid,
ereport(ERROR,
errmsg_internal("synchronized confirmed_flush for slot \"%s\" differs from remote slot",
remote_slot->name),
- errdetail_internal("Remote slot has LSN %X/%X but local slot has LSN %X/%X.",
+ errdetail_internal("Remote slot has LSN %X/%08X but local slot has LSN %X/%08X.",
LSN_FORMAT_ARGS(remote_slot->confirmed_lsn),
LSN_FORMAT_ARGS(slot->data.confirmed_flush)));
}
@@ -593,7 +593,7 @@ update_and_persist_local_synced_slot(RemoteSlot *remote_slot, Oid remote_dbid)
{
ereport(LOG,
errmsg("could not synchronize replication slot \"%s\"", remote_slot->name),
- errdetail("Logical decoding could not find consistent point from local slot's LSN %X/%X.",
+ errdetail("Synchronization could lead to data loss, because the standby could not build a consistent snapshot to decode WALs at LSN %X/%08X.",
LSN_FORMAT_ARGS(slot->data.restart_lsn)));
return false;
@@ -642,7 +642,7 @@ synchronize_one_slot(RemoteSlot *remote_slot, Oid remote_dbid)
ereport(AmLogicalSlotSyncWorkerProcess() ? LOG : ERROR,
errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("skipping slot synchronization because the received slot sync"
- " LSN %X/%X for slot \"%s\" is ahead of the standby position %X/%X",
+ " LSN %X/%08X for slot \"%s\" is ahead of the standby position %X/%08X",
LSN_FORMAT_ARGS(remote_slot->confirmed_lsn),
remote_slot->name,
LSN_FORMAT_ARGS(latestFlushPtr)));
@@ -733,7 +733,7 @@ synchronize_one_slot(RemoteSlot *remote_slot, Oid remote_dbid)
ereport(ERROR,
errmsg_internal("cannot synchronize local slot \"%s\"",
remote_slot->name),
- errdetail_internal("Local slot's start streaming location LSN(%X/%X) is ahead of remote slot's LSN(%X/%X).",
+ errdetail_internal("Local slot's start streaming location LSN(%X/%08X) is ahead of remote slot's LSN(%X/%08X).",
LSN_FORMAT_ARGS(slot->data.confirmed_flush),
LSN_FORMAT_ARGS(remote_slot->confirmed_lsn)));
@@ -1059,14 +1059,14 @@ ValidateSlotSyncParams(int elevel)
{
/*
* Logical slot sync/creation requires wal_level >= logical.
- *
- * Since altering the wal_level requires a server restart, so error out in
- * this case regardless of elevel provided by caller.
*/
if (wal_level < WAL_LEVEL_LOGICAL)
- ereport(ERROR,
+ {
+ ereport(elevel,
errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("replication slot synchronization requires \"wal_level\" >= \"logical\""));
+ return false;
+ }
/*
* A physical replication slot(primary_slot_name) is required on the
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
index 0d7bddbe4ed..8532bfd27e5 100644
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -774,7 +774,7 @@ SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, Transact
if (rbtxn_is_prepared(txn))
continue;
- elog(DEBUG2, "adding a new snapshot and invalidations to %u at %X/%X",
+ elog(DEBUG2, "adding a new snapshot and invalidations to %u at %X/%08X",
txn->xid, LSN_FORMAT_ARGS(lsn));
/*
@@ -794,6 +794,13 @@ SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, Transact
* contents built by the current transaction even after its decoding,
* which should have been invalidated due to concurrent catalog
* changing transaction.
+ *
+ * Distribute only the invalidation messages generated by the current
+ * committed transaction. Invalidation messages received from other
+ * transactions would have already been propagated to the relevant
+ * in-progress transactions. This transaction would have processed
+ * those invalidations, ensuring that subsequent transactions observe
+ * a consistent cache state.
*/
if (txn->xid != xid)
{
@@ -807,8 +814,9 @@ SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, Transact
{
Assert(msgs != NULL);
- ReorderBufferAddInvalidations(builder->reorder, txn->xid, lsn,
- ninvalidations, msgs);
+ ReorderBufferAddDistributedInvalidations(builder->reorder,
+ txn->xid, lsn,
+ ninvalidations, msgs);
}
}
}
@@ -1263,10 +1271,10 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
builder->initial_xmin_horizon))
{
ereport(DEBUG1,
- (errmsg_internal("skipping snapshot at %X/%X while building logical decoding snapshot, xmin horizon too low",
- LSN_FORMAT_ARGS(lsn)),
- errdetail_internal("initial xmin horizon of %u vs the snapshot's %u",
- builder->initial_xmin_horizon, running->oldestRunningXid)));
+ errmsg_internal("skipping snapshot at %X/%08X while building logical decoding snapshot, xmin horizon too low",
+ LSN_FORMAT_ARGS(lsn)),
+ errdetail_internal("initial xmin horizon of %u vs the snapshot's %u",
+ builder->initial_xmin_horizon, running->oldestRunningXid));
SnapBuildWaitSnapshot(running, builder->initial_xmin_horizon);
@@ -1302,9 +1310,9 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
builder->next_phase_at = InvalidTransactionId;
ereport(LOG,
- (errmsg("logical decoding found consistent point at %X/%X",
- LSN_FORMAT_ARGS(lsn)),
- errdetail("There are no running transactions.")));
+ errmsg("logical decoding found consistent point at %X/%08X",
+ LSN_FORMAT_ARGS(lsn)),
+ errdetail("There are no running transactions."));
return false;
}
@@ -1351,10 +1359,10 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
Assert(TransactionIdIsNormal(builder->xmax));
ereport(LOG,
- (errmsg("logical decoding found initial starting point at %X/%X",
- LSN_FORMAT_ARGS(lsn)),
- errdetail("Waiting for transactions (approximately %d) older than %u to end.",
- running->xcnt, running->nextXid)));
+ errmsg("logical decoding found initial starting point at %X/%08X",
+ LSN_FORMAT_ARGS(lsn)),
+ errdetail("Waiting for transactions (approximately %d) older than %u to end.",
+ running->xcnt, running->nextXid));
SnapBuildWaitSnapshot(running, running->nextXid);
}
@@ -1375,10 +1383,10 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
builder->next_phase_at = running->nextXid;
ereport(LOG,
- (errmsg("logical decoding found initial consistent point at %X/%X",
- LSN_FORMAT_ARGS(lsn)),
- errdetail("Waiting for transactions (approximately %d) older than %u to end.",
- running->xcnt, running->nextXid)));
+ errmsg("logical decoding found initial consistent point at %X/%08X",
+ LSN_FORMAT_ARGS(lsn)),
+ errdetail("Waiting for transactions (approximately %d) older than %u to end.",
+ running->xcnt, running->nextXid));
SnapBuildWaitSnapshot(running, running->nextXid);
}
@@ -1399,9 +1407,9 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
builder->next_phase_at = InvalidTransactionId;
ereport(LOG,
- (errmsg("logical decoding found consistent point at %X/%X",
- LSN_FORMAT_ARGS(lsn)),
- errdetail("There are no old transactions anymore.")));
+ errmsg("logical decoding found consistent point at %X/%08X",
+ LSN_FORMAT_ARGS(lsn)),
+ errdetail("There are no old transactions anymore."));
}
/*
@@ -1905,9 +1913,9 @@ SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn)
Assert(builder->state == SNAPBUILD_CONSISTENT);
ereport(LOG,
- (errmsg("logical decoding found consistent point at %X/%X",
- LSN_FORMAT_ARGS(lsn)),
- errdetail("Logical decoding will begin using saved snapshot.")));
+ errmsg("logical decoding found consistent point at %X/%08X",
+ LSN_FORMAT_ARGS(lsn)),
+ errdetail("Logical decoding will begin using saved snapshot."));
return true;
snapshot_not_interesting:
@@ -2053,7 +2061,7 @@ SnapBuildSnapshotExists(XLogRecPtr lsn)
int ret;
struct stat stat_buf;
- sprintf(path, "%s/%X-%X.snap",
+ sprintf(path, "%s/%08X-%08X.snap",
PG_LOGICAL_SNAPSHOTS_DIR,
LSN_FORMAT_ARGS(lsn));
diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c
index 8e1e8762f62..d3356bc84ee 100644
--- a/src/backend/replication/logical/tablesync.c
+++ b/src/backend/replication/logical/tablesync.c
@@ -316,7 +316,8 @@ process_syncing_tables_for_sync(XLogRecPtr current_lsn)
UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
MyLogicalRepWorker->relid,
MyLogicalRepWorker->relstate,
- MyLogicalRepWorker->relstate_lsn);
+ MyLogicalRepWorker->relstate_lsn,
+ false);
/*
* End streaming so that LogRepWorkerWalRcvConn can be used to drop
@@ -425,6 +426,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
ListCell *lc;
bool started_tx = false;
bool should_exit = false;
+ Relation rel = NULL;
Assert(!IsTransactionState());
@@ -492,7 +494,17 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
* worker to remove the origin tracking as if there is any
* error while dropping we won't restart it to drop the
* origin. So passing missing_ok = true.
+ *
+ * Lock the subscription and origin in the same order as we
+ * do during DDL commands to avoid deadlocks. See
+ * AlterSubscription_refresh.
*/
+ LockSharedObject(SubscriptionRelationId, MyLogicalRepWorker->subid,
+ 0, AccessShareLock);
+
+ if (!rel)
+ rel = table_open(SubscriptionRelRelationId, RowExclusiveLock);
+
ReplicationOriginNameForLogicalRep(MyLogicalRepWorker->subid,
rstate->relid,
originname,
@@ -504,7 +516,7 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
*/
UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
rstate->relid, rstate->state,
- rstate->lsn);
+ rstate->lsn, true);
}
}
else
@@ -555,7 +567,14 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
* This is required to avoid any undetected deadlocks
* due to any existing lock as deadlock detector won't
* be able to detect the waits on the latch.
+ *
+ * Also close any tables prior to the commit.
*/
+ if (rel)
+ {
+ table_close(rel, NoLock);
+ rel = NULL;
+ }
CommitTransactionCommand();
pgstat_report_stat(false);
}
@@ -603,20 +622,31 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
TimestampDifferenceExceeds(hentry->last_start_time, now,
wal_retrieve_retry_interval))
{
- logicalrep_worker_launch(WORKERTYPE_TABLESYNC,
- MyLogicalRepWorker->dbid,
- MySubscription->oid,
- MySubscription->name,
- MyLogicalRepWorker->userid,
- rstate->relid,
- DSM_HANDLE_INVALID);
+ /*
+ * Set the last_start_time even if we fail to start
+ * the worker, so that we won't retry until
+ * wal_retrieve_retry_interval has elapsed.
+ */
hentry->last_start_time = now;
+ (void) logicalrep_worker_launch(WORKERTYPE_TABLESYNC,
+ MyLogicalRepWorker->dbid,
+ MySubscription->oid,
+ MySubscription->name,
+ MyLogicalRepWorker->userid,
+ rstate->relid,
+ DSM_HANDLE_INVALID,
+ false);
}
}
}
}
}
+ /* Close table if opened */
+ if (rel)
+ table_close(rel, NoLock);
+
if (started_tx)
{
/*
@@ -1408,7 +1438,8 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
MyLogicalRepWorker->relid,
MyLogicalRepWorker->relstate,
- MyLogicalRepWorker->relstate_lsn);
+ MyLogicalRepWorker->relstate_lsn,
+ false);
CommitTransactionCommand();
pgstat_report_stat(true);
@@ -1541,14 +1572,15 @@ LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
UpdateSubscriptionRelState(MyLogicalRepWorker->subid,
MyLogicalRepWorker->relid,
SUBREL_STATE_FINISHEDCOPY,
- MyLogicalRepWorker->relstate_lsn);
+ MyLogicalRepWorker->relstate_lsn,
+ false);
CommitTransactionCommand();
copy_table_done:
elog(DEBUG1,
- "LogicalRepSyncTableStart: '%s' origin_startpos lsn %X/%X",
+ "LogicalRepSyncTableStart: '%s' origin_startpos lsn %X/%08X",
originname, LSN_FORMAT_ARGS(*origin_startpos));
/*
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index a23262957ac..89e241c8392 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -109,13 +109,6 @@
* If ever a user needs to be aware of the tri-state value, they can fetch it
* from the pg_subscription catalog (see column subtwophasestate).
*
- * We don't allow to toggle two_phase option of a subscription because it can
- * lead to an inconsistent replica. Consider, initially, it was on and we have
- * received some prepare then we turn it off, now at commit time the server
- * will send the entire transaction data along with the commit. With some more
- * analysis, we can allow changing this option from off to on but not sure if
- * that alone would be useful.
- *
* Finally, to avoid problems mentioned in previous paragraphs from any
* subsequent (not READY) tablesyncs (need to toggle two_phase option from 'on'
* to 'off' and then again back to 'on') there is a restriction for
@@ -139,6 +132,96 @@
* failover = true when creating the subscription. Enabling failover allows us
* to smoothly transition to the promoted standby, ensuring that we can
* subscribe to the new primary without losing any data.
+ *
+ * RETAIN DEAD TUPLES
+ * ----------------------
+ * Each apply worker that has enabled the retain_dead_tuples option maintains a
+ * non-removable transaction ID (oldest_nonremovable_xid) in shared memory to
+ * prevent dead rows from being removed prematurely when the apply worker still
+ * needs them to detect update_deleted conflicts. Additionally, this helps to
+ * retain the required commit_ts module information, which further helps to
+ * detect update_origin_differs and delete_origin_differs conflicts reliably, as
+ * otherwise, vacuum freeze could remove the required information.
+ *
+ * The logical replication launcher manages an internal replication slot named
+ * "pg_conflict_detection". It asynchronously aggregates the non-removable
+ * transaction ID from all apply workers to determine the appropriate xmin for
+ * the slot, thereby retaining necessary tuples.
+ *
+ * The non-removable transaction ID in the apply worker is advanced to the
+ * oldest running transaction ID once all concurrent transactions on the
+ * publisher have been applied and flushed locally. The process involves:
+ *
+ * - RDT_GET_CANDIDATE_XID:
+ * Call GetOldestActiveTransactionId() to take oldestRunningXid as the
+ * candidate xid.
+ *
+ * - RDT_REQUEST_PUBLISHER_STATUS:
+ * Send a message to the walsender requesting the publisher status, which
+ * includes the latest WAL write position and information about transactions
+ * that are in the commit phase.
+ *
+ * - RDT_WAIT_FOR_PUBLISHER_STATUS:
+ * Wait for the status from the walsender. After receiving the first status,
+ * do not proceed if there are concurrent remote transactions that are still
+ * in the commit phase. These transactions might have been assigned an
+ * earlier commit timestamp but have not yet written the commit WAL record.
+ * Continue to request the publisher status (RDT_REQUEST_PUBLISHER_STATUS)
+ * until all these transactions have completed.
+ *
+ * - RDT_WAIT_FOR_LOCAL_FLUSH:
+ * Advance the non-removable transaction ID if the current flush location has
+ * reached or surpassed the last received WAL position.
+ *
+ * The overall state progression is: GET_CANDIDATE_XID ->
+ * REQUEST_PUBLISHER_STATUS -> WAIT_FOR_PUBLISHER_STATUS -> (loop to
+ * REQUEST_PUBLISHER_STATUS till concurrent remote transactions end) ->
+ * WAIT_FOR_LOCAL_FLUSH -> loop back to GET_CANDIDATE_XID.
+ *
+ * Retaining the dead tuples for this period is sufficient for ensuring
+ * eventual consistency using the last-update-wins strategy, as dead tuples are
+ * useful for detecting conflicts only during the application of concurrent
+ * transactions from remote nodes. After applying and flushing all remote
+ * transactions that occurred concurrently with the tuple DELETE, any
+ * subsequent UPDATE from a remote node should have a later timestamp. In such
+ * cases, it is acceptable to detect an update_missing scenario and convert the
+ * UPDATE to an INSERT when applying it. But, for concurrent remote
+ * transactions with earlier timestamps than the DELETE, detecting
+ * update_deleted is necessary, as the UPDATEs in remote transactions should be
+ * ignored if their timestamp is earlier than that of the dead tuples.
+ *
+ * Note that advancing the non-removable transaction ID is not supported if the
+ * publisher is also a physical standby. This is because the logical walsender
+ * on the standby can only get the WAL replay position, but there may be more
+ * WAL still being replicated from the primary, and that WAL could contain
+ * records with earlier commit timestamps.
+ *
+ * Similarly, when the publisher has subscribed to another publisher,
+ * information necessary for conflict detection cannot be retained for
+ * changes from origins other than the publisher. This is because the
+ * publisher lacks information on the concurrent transactions of the other
+ * publishers to which it subscribes. As the information on concurrent
+ * transactions is unavailable beyond the subscriber's immediate publishers,
+ * the non-removable
+ * transaction ID might be advanced prematurely before changes from other
+ * origins have been fully applied.
+ *
+ * XXX Retaining information for changes from other origins might be possible
+ * by requesting the subscription on that origin to enable retain_dead_tuples
+ * and fetching the conflict detection slot.xmin along with the publisher's
+ * status. In the RDT_WAIT_FOR_PUBLISHER_STATUS phase, the apply worker could
+ * wait for the remote slot's xmin to reach the oldest active transaction ID,
+ * ensuring that all transactions from other origins have been applied on the
+ * publisher, thereby getting the latest WAL position that includes all
+ * concurrent changes. However, this approach may impact performance, so it
+ * might not be worth the effort.
+ *
+ * XXX It seems feasible to get the latest commit's WAL location from the
+ * publisher and wait till that is applied. However, we can't do that
+ * because commit timestamps can regress as a commit with a later LSN is not
+ * guaranteed to have a later timestamp than those with earlier LSNs. Having
+ * said that, even if that were possible, it wouldn't improve performance much,
+ * as the apply side always lags and moves slowly compared with the transactions
+ * on the publisher.
*-------------------------------------------------------------------------
*/
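Condensed into code, the progression described above is a self-looping state
machine over the RetainDeadTuplesPhase values introduced later in this file.
This is a sketch only, not the actual process_rdt_phase_transition() body:

	static void
	rdt_phase_sketch(RetainDeadTuplesData *rdt_data)
	{
		switch (rdt_data->phase)
		{
			case RDT_GET_CANDIDATE_XID:
				/* take oldestRunningXid as candidate_xid, then request status */
				break;
			case RDT_REQUEST_PUBLISHER_STATUS:
				/* ask the walsender for its write LSN and in-commit XIDs */
				break;
			case RDT_WAIT_FOR_PUBLISHER_STATUS:
				/*
				 * If remote transactions are still in their commit phase, go
				 * back to RDT_REQUEST_PUBLISHER_STATUS; otherwise proceed.
				 */
				break;
			case RDT_WAIT_FOR_LOCAL_FLUSH:
				/*
				 * Once the local flush position reaches remote_lsn, publish
				 * candidate_xid as oldest_nonremovable_xid and restart at
				 * RDT_GET_CANDIDATE_XID.
				 */
				break;
		}
	}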
@@ -147,6 +230,7 @@
#include <sys/stat.h>
#include <unistd.h>
+#include "access/commit_ts.h"
#include "access/table.h"
#include "access/tableam.h"
#include "access/twophase.h"
@@ -155,6 +239,7 @@
#include "catalog/pg_inherits.h"
#include "catalog/pg_subscription.h"
#include "catalog/pg_subscription_rel.h"
+#include "commands/subscriptioncmds.h"
#include "commands/tablecmds.h"
#include "commands/trigger.h"
#include "executor/executor.h"
@@ -173,12 +258,14 @@
#include "replication/logicalrelation.h"
#include "replication/logicalworker.h"
#include "replication/origin.h"
+#include "replication/slot.h"
#include "replication/walreceiver.h"
#include "replication/worker_internal.h"
#include "rewrite/rewriteHandler.h"
#include "storage/buffile.h"
#include "storage/ipc.h"
#include "storage/lmgr.h"
+#include "storage/procarray.h"
#include "tcop/tcopprot.h"
#include "utils/acl.h"
#include "utils/dynahash.h"
@@ -275,6 +362,78 @@ typedef enum
TRANS_PARALLEL_APPLY,
} TransApplyAction;
+/*
+ * The phases involved in advancing the non-removable transaction ID.
+ *
+ * See comments atop worker.c for details of the transition between these
+ * phases.
+ */
+typedef enum
+{
+ RDT_GET_CANDIDATE_XID,
+ RDT_REQUEST_PUBLISHER_STATUS,
+ RDT_WAIT_FOR_PUBLISHER_STATUS,
+ RDT_WAIT_FOR_LOCAL_FLUSH
+} RetainDeadTuplesPhase;
+
+/*
+ * Critical information for managing phase transitions within the
+ * RetainDeadTuplesPhase.
+ */
+typedef struct RetainDeadTuplesData
+{
+ RetainDeadTuplesPhase phase; /* current phase */
+ XLogRecPtr remote_lsn; /* WAL write position on the publisher */
+
+ /*
+ * Oldest transaction ID that was in the commit phase on the publisher.
+ * Use FullTransactionId to prevent issues with transaction ID wraparound,
+ * where a new remote_oldestxid could falsely appear to originate from the
+ * past and block advancement.
+ */
+ FullTransactionId remote_oldestxid;
+
+ /*
+ * Next transaction ID to be assigned on the publisher. Use
+ * FullTransactionId for consistency and to allow straightforward
+ * comparisons with remote_oldestxid.
+ */
+ FullTransactionId remote_nextxid;
+
+ TimestampTz reply_time; /* when the publisher responds with status */
+
+ /*
+ * Publisher transaction ID that must be awaited to complete before
+ * entering the final phase (RDT_WAIT_FOR_LOCAL_FLUSH). Use
+ * FullTransactionId for the same reason as remote_nextxid.
+ */
+ FullTransactionId remote_wait_for;
+
+ TransactionId candidate_xid; /* candidate for the non-removable
+ * transaction ID */
+ TimestampTz flushpos_update_time; /* when the remote flush position was
+ * updated in final phase
+ * (RDT_WAIT_FOR_LOCAL_FLUSH) */
+
+ /*
+ * The following fields are used to determine the timing for the next
+ * round of transaction ID advancement.
+ */
+ TimestampTz last_recv_time; /* when the last message was received */
+ TimestampTz candidate_xid_time; /* when the candidate_xid is decided */
+ int xid_advance_interval; /* how much time (ms) to wait before
+ * attempting to advance the
+ * non-removable transaction ID */
+} RetainDeadTuplesData;
+
+/*
+ * The minimum (100ms) and maximum (3 minutes) intervals for advancing
+ * non-removable transaction IDs. The maximum interval is somewhat arbitrary
+ * but long enough to avoid causing undue network traffic.
+ */
+#define MIN_XID_ADVANCE_INTERVAL 100
+#define MAX_XID_ADVANCE_INTERVAL 180000
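One plausible shape for the adaptation these bounds imply, sketched here as an
assumption (the real policy is in adjust_xid_advance_interval(), declared
later in this file): back off exponentially while no new candidate XID
appears, and snap back to the minimum as soon as one does.

	static void
	xid_interval_sketch(RetainDeadTuplesData *rdt_data, bool new_xid_found)
	{
		if (new_xid_found)
			rdt_data->xid_advance_interval = MIN_XID_ADVANCE_INTERVAL;
		else
			rdt_data->xid_advance_interval =
				Min(rdt_data->xid_advance_interval * 2,
					MAX_XID_ADVANCE_INTERVAL);
	}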
+
/* errcontext tracker */
static ApplyErrorCallbackArg apply_error_callback_arg =
{
@@ -339,6 +498,13 @@ static XLogRecPtr skip_xact_finish_lsn = InvalidXLogRecPtr;
/* BufFile handle of the current streaming file */
static BufFile *stream_fd = NULL;
+/*
+ * The remote WAL position that has been applied and flushed locally. We record
+ * and use this information both when sending feedback to the server and when
+ * advancing oldest_nonremovable_xid.
+ */
+static XLogRecPtr last_flushpos = InvalidXLogRecPtr;
+
typedef struct SubXactInfo
{
TransactionId xid; /* XID of the subxact */
@@ -379,6 +545,19 @@ static void stream_close_file(void);
static void send_feedback(XLogRecPtr recvpos, bool force, bool requestReply);
+static void maybe_advance_nonremovable_xid(RetainDeadTuplesData *rdt_data,
+ bool status_received);
+static bool can_advance_nonremovable_xid(RetainDeadTuplesData *rdt_data);
+static void process_rdt_phase_transition(RetainDeadTuplesData *rdt_data,
+ bool status_received);
+static void get_candidate_xid(RetainDeadTuplesData *rdt_data);
+static void request_publisher_status(RetainDeadTuplesData *rdt_data);
+static void wait_for_publisher_status(RetainDeadTuplesData *rdt_data,
+ bool status_received);
+static void wait_for_local_flush(RetainDeadTuplesData *rdt_data);
+static void adjust_xid_advance_interval(RetainDeadTuplesData *rdt_data,
+ bool new_xid_found);
+
static void apply_handle_commit_internal(LogicalRepCommitData *commit_data);
static void apply_handle_insert_internal(ApplyExecutionData *edata,
ResultRelInfo *relinfo,
@@ -397,6 +576,12 @@ static bool FindReplTupleInLocalRel(ApplyExecutionData *edata, Relation localrel
Oid localidxoid,
TupleTableSlot *remoteslot,
TupleTableSlot **localslot);
+static bool FindDeletedTupleInLocalRel(Relation localrel,
+ Oid localidxoid,
+ TupleTableSlot *remoteslot,
+ TransactionId *delete_xid,
+ RepOriginId *delete_origin,
+ TimestampTz *delete_time);
static void apply_handle_tuple_routing(ApplyExecutionData *edata,
TupleTableSlot *remoteslot,
LogicalRepTupleData *newtup,
@@ -1023,7 +1208,7 @@ apply_handle_commit(StringInfo s)
if (commit_data.commit_lsn != remote_final_lsn)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg_internal("incorrect commit LSN %X/%X in commit message (expected %X/%X)",
+ errmsg_internal("incorrect commit LSN %X/%08X in commit message (expected %X/%08X)",
LSN_FORMAT_ARGS(commit_data.commit_lsn),
LSN_FORMAT_ARGS(remote_final_lsn))));
@@ -1115,7 +1300,7 @@ apply_handle_prepare(StringInfo s)
if (prepare_data.prepare_lsn != remote_final_lsn)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg_internal("incorrect prepare LSN %X/%X in prepare message (expected %X/%X)",
+ errmsg_internal("incorrect prepare LSN %X/%08X in prepare message (expected %X/%08X)",
LSN_FORMAT_ARGS(prepare_data.prepare_lsn),
LSN_FORMAT_ARGS(remote_final_lsn))));
@@ -2733,17 +2918,31 @@ apply_handle_update_internal(ApplyExecutionData *edata,
}
else
{
+ ConflictType type;
TupleTableSlot *newslot = localslot;
+ /*
+ * Detecting whether the tuple was recently deleted or never existed
+ * is crucial to avoid misleading the user during conflict handling.
+ */
+ if (FindDeletedTupleInLocalRel(localrel, localindexoid, remoteslot,
+ &conflicttuple.xmin,
+ &conflicttuple.origin,
+ &conflicttuple.ts) &&
+ conflicttuple.origin != replorigin_session_origin)
+ type = CT_UPDATE_DELETED;
+ else
+ type = CT_UPDATE_MISSING;
+
/* Store the new tuple for conflict reporting */
slot_store_data(newslot, relmapentry, newtup);
/*
- * The tuple to be updated could not be found. Do nothing except for
- * emitting a log message.
+ * The tuple to be updated could not be found or was deleted. Do
+ * nothing except for emitting a log message.
*/
- ReportApplyConflict(estate, relinfo, LOG, CT_UPDATE_MISSING,
- remoteslot, newslot, list_make1(&conflicttuple));
+ ReportApplyConflict(estate, relinfo, LOG, type, remoteslot, newslot,
+ list_make1(&conflicttuple));
}
/* Cleanup. */
@@ -2964,6 +3163,112 @@ FindReplTupleInLocalRel(ApplyExecutionData *edata, Relation localrel,
}
/*
+ * Determine whether the index can reliably locate the deleted tuple in the
+ * local relation.
+ *
+ * An index may exclude deleted tuples if it was re-indexed or re-created during
+ * change application. Therefore, an index is considered usable only if the
+ * conflict detection slot.xmin (conflict_detection_xmin) is greater than the
+ * index tuple's xmin. This ensures that any tuples deleted prior to the index
+ * creation or re-indexing are not relevant for conflict detection in the
+ * current apply worker.
+ *
+ * Note that indexes may also be excluded if they were modified by other DDL
+ * operations, such as ALTER INDEX. However, this is acceptable, as the
+ * likelihood of such DDL changes coinciding with the need to scan dead
+ * tuples for update_deleted detection is low.
+ */
+static bool
+IsIndexUsableForFindingDeletedTuple(Oid localindexoid,
+ TransactionId conflict_detection_xmin)
+{
+ HeapTuple index_tuple;
+ TransactionId index_xmin;
+
+ index_tuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(localindexoid));
+
+ if (!HeapTupleIsValid(index_tuple)) /* should not happen */
+ elog(ERROR, "cache lookup failed for index %u", localindexoid);
+
+ /*
+ * No need to check for a frozen transaction ID, as
+ * TransactionIdPrecedes() manages it internally, treating it as falling
+ * behind the conflict_detection_xmin.
+ */
+ index_xmin = HeapTupleHeaderGetXmin(index_tuple->t_data);
+
+ ReleaseSysCache(index_tuple);
+
+ return TransactionIdPrecedes(index_xmin, conflict_detection_xmin);
+}
+
+/*
+ * Attempts to locate a deleted tuple in the local relation that matches the
+ * values of the tuple received from the publication side (in 'remoteslot').
+ * The search is performed using either the replica identity index, primary
+ * key, other available index, or a sequential scan if necessary.
+ *
+ * Returns true if the deleted tuple is found. If found, the transaction ID,
+ * origin, and commit timestamp of the deletion are stored in '*delete_xid',
+ * '*delete_origin', and '*delete_time' respectively.
+ */
+static bool
+FindDeletedTupleInLocalRel(Relation localrel, Oid localidxoid,
+ TupleTableSlot *remoteslot,
+ TransactionId *delete_xid, RepOriginId *delete_origin,
+ TimestampTz *delete_time)
+{
+ TransactionId oldestxmin;
+ ReplicationSlot *slot;
+
+ /*
+ * Return false if either dead tuples are not retained or commit timestamp
+ * data is not available.
+ */
+ if (!MySubscription->retaindeadtuples || !track_commit_timestamp)
+ return false;
+
+ /*
+ * For conflict detection, we use the conflict slot's xmin value instead
+ * of invoking GetOldestNonRemovableTransactionId(). The slot.xmin acts as
+ * a threshold to identify tuples that were recently deleted. These tuples
+ * are not visible to concurrent transactions, but we log an
+ * update_deleted conflict if such a tuple matches the remote update being
+ * applied.
+ *
+ * Although GetOldestNonRemovableTransactionId() can return a value older
+ * than the slot's xmin, for our current purpose it is acceptable to treat
+ * tuples deleted by transactions prior to slot.xmin as update_missing
+ * conflicts.
+ *
+ * Ideally, we would use oldest_nonremovable_xid, which is directly
+ * maintained by the leader apply worker. However, this value is not
+ * available to table synchronization or parallel apply workers, making
+ * slot.xmin a practical alternative in those contexts.
+ */
+ slot = SearchNamedReplicationSlot(CONFLICT_DETECTION_SLOT, true);
+
+ Assert(slot);
+
+ SpinLockAcquire(&slot->mutex);
+ oldestxmin = slot->data.xmin;
+ SpinLockRelease(&slot->mutex);
+
+ Assert(TransactionIdIsValid(oldestxmin));
+
+ if (OidIsValid(localidxoid) &&
+ IsIndexUsableForFindingDeletedTuple(localidxoid, oldestxmin))
+ return RelationFindDeletedTupleInfoByIndex(localrel, localidxoid,
+ remoteslot, oldestxmin,
+ delete_xid, delete_origin,
+ delete_time);
+ else
+ return RelationFindDeletedTupleInfoSeq(localrel, remoteslot,
+ oldestxmin, delete_xid,
+ delete_origin, delete_time);
+}
+
+/*
* This handles insert, update, delete on a partitioned table.
*/
static void
@@ -3081,18 +3386,35 @@ apply_handle_tuple_routing(ApplyExecutionData *edata,
remoteslot_part, &localslot);
if (!found)
{
+ ConflictType type;
TupleTableSlot *newslot = localslot;
+ /*
+ * Detecting whether the tuple was recently deleted or
+ * never existed is crucial to avoid misleading the user
+ * during conflict handling.
+ */
+ if (FindDeletedTupleInLocalRel(partrel,
+ part_entry->localindexoid,
+ remoteslot_part,
+ &conflicttuple.xmin,
+ &conflicttuple.origin,
+ &conflicttuple.ts) &&
+ conflicttuple.origin != replorigin_session_origin)
+ type = CT_UPDATE_DELETED;
+ else
+ type = CT_UPDATE_MISSING;
+
/* Store the new tuple for conflict reporting */
slot_store_data(newslot, part_entry, newtup);
/*
- * The tuple to be updated could not be found. Do nothing
- * except for emitting a log message.
+ * The tuple to be updated could not be found or was
+ * deleted. Do nothing except for emitting a log message.
*/
ReportApplyConflict(estate, partrelinfo, LOG,
- CT_UPDATE_MISSING, remoteslot_part,
- newslot, list_make1(&conflicttuple));
+ type, remoteslot_part, newslot,
+ list_make1(&conflicttuple));
return;
}
@@ -3584,6 +3906,7 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
bool ping_sent = false;
TimeLineID tli;
ErrorContextCallback errcallback;
+ RetainDeadTuplesData rdt_data = {0};
/*
* Init the ApplyMessageContext which we clean up after each replication
@@ -3662,6 +3985,8 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
last_recv_timestamp = GetCurrentTimestamp();
ping_sent = false;
+ rdt_data.last_recv_time = last_recv_timestamp;
+
/* Ensure we are reading the data into our memory context. */
MemoryContextSwitchTo(ApplyMessageContext);
@@ -3688,6 +4013,8 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
UpdateWorkerStats(last_received, send_time, false);
apply_dispatch(&s);
+
+ maybe_advance_nonremovable_xid(&rdt_data, false);
}
else if (c == 'k')
{
@@ -3703,8 +4030,31 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
last_received = end_lsn;
send_feedback(last_received, reply_requested, false);
+
+ maybe_advance_nonremovable_xid(&rdt_data, false);
+
UpdateWorkerStats(last_received, timestamp, true);
}
+ else if (c == 's') /* Primary status update */
+ {
+ rdt_data.remote_lsn = pq_getmsgint64(&s);
+ rdt_data.remote_oldestxid = FullTransactionIdFromU64((uint64) pq_getmsgint64(&s));
+ rdt_data.remote_nextxid = FullTransactionIdFromU64((uint64) pq_getmsgint64(&s));
+ rdt_data.reply_time = pq_getmsgint64(&s);
+
+ /*
+ * This should never happen, see
+ * ProcessStandbyPSRequestMessage. But if it happens
+ * due to a bug, we don't want to proceed as it can
+ * incorrectly advance oldest_nonremovable_xid.
+ */
+ if (XLogRecPtrIsInvalid(rdt_data.remote_lsn))
+ elog(ERROR, "cannot get the latest WAL position from the publisher");
+
+ maybe_advance_nonremovable_xid(&rdt_data, true);
+
+ UpdateWorkerStats(last_received, rdt_data.reply_time, false);
+ }
/* other message types are purposefully ignored */
MemoryContextReset(ApplyMessageContext);
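As a rough illustration of the primary status ('s') message body parsed above: four big-endian 64-bit fields, read in the same order as the pq_getmsgint64() calls. The struct and helpers below are hypothetical stand-ins, not PostgreSQL APIs:

#include <stdint.h>

typedef struct PrimaryStatus	/* hypothetical holder */
{
	uint64_t	remote_lsn;
	uint64_t	remote_oldestxid;	/* full transaction ID */
	uint64_t	remote_nextxid;		/* full transaction ID */
	int64_t		reply_time;			/* publisher send timestamp */
} PrimaryStatus;

static uint64_t
read_be64(const unsigned char *p)
{
	uint64_t	v = 0;

	for (int i = 0; i < 8; i++)
		v = (v << 8) | p[i];
	return v;
}

static void
decode_primary_status(const unsigned char *body, PrimaryStatus *st)
{
	st->remote_lsn = read_be64(body);
	st->remote_oldestxid = read_be64(body + 8);
	st->remote_nextxid = read_be64(body + 16);
	st->reply_time = (int64_t) read_be64(body + 24);
}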
@@ -3717,6 +4067,11 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
/* confirm all writes so far */
send_feedback(last_received, false, false);
+ /* Reset the timestamp if no message was received */
+ rdt_data.last_recv_time = 0;
+
+ maybe_advance_nonremovable_xid(&rdt_data, false);
+
if (!in_remote_transaction && !in_streamed_transaction)
{
/*
@@ -3751,6 +4106,14 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
else
wait_time = NAPTIME_PER_CYCLE;
+ /*
+ * Ensure we wake up when it's possible to advance the non-removable
+ * transaction ID.
+ */
+ if (rdt_data.phase == RDT_GET_CANDIDATE_XID &&
+ rdt_data.xid_advance_interval)
+ wait_time = Min(wait_time, rdt_data.xid_advance_interval);
+
rc = WaitLatchOrSocket(MyLatch,
WL_SOCKET_READABLE | WL_LATCH_SET |
WL_TIMEOUT | WL_EXIT_ON_PM_DEATH,
@@ -3814,6 +4177,8 @@ LogicalRepApplyLoop(XLogRecPtr last_received)
send_feedback(last_received, requestReply, requestReply);
+ maybe_advance_nonremovable_xid(&rdt_data, false);
+
/*
* Force reporting to ensure long idle periods don't lead to
* arbitrarily delayed stats. Stats can only be reported outside
@@ -3849,7 +4214,6 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
static XLogRecPtr last_recvpos = InvalidXLogRecPtr;
static XLogRecPtr last_writepos = InvalidXLogRecPtr;
- static XLogRecPtr last_flushpos = InvalidXLogRecPtr;
XLogRecPtr writepos;
XLogRecPtr flushpos;
@@ -3910,7 +4274,7 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
pq_sendint64(reply_message, now); /* sendTime */
pq_sendbyte(reply_message, requestReply); /* replyRequested */
- elog(DEBUG2, "sending feedback (force %d) to recv %X/%X, write %X/%X, flush %X/%X",
+ elog(DEBUG2, "sending feedback (force %d) to recv %X/%08X, write %X/%08X, flush %X/%08X",
force,
LSN_FORMAT_ARGS(recvpos),
LSN_FORMAT_ARGS(writepos),
@@ -3928,6 +4292,368 @@ send_feedback(XLogRecPtr recvpos, bool force, bool requestReply)
}
/*
+ * Attempt to advance the non-removable transaction ID.
+ *
+ * See comments atop worker.c for details.
+ */
+static void
+maybe_advance_nonremovable_xid(RetainDeadTuplesData *rdt_data,
+ bool status_received)
+{
+ if (!can_advance_nonremovable_xid(rdt_data))
+ return;
+
+ process_rdt_phase_transition(rdt_data, status_received);
+}
+
+/*
+ * Preliminary check to determine if advancing the non-removable transaction ID
+ * is allowed.
+ */
+static bool
+can_advance_nonremovable_xid(RetainDeadTuplesData *rdt_data)
+{
+ /*
+ * It is sufficient for the leader apply worker alone to manage the
+ * non-removable transaction ID for a subscription; this detects
+ * update_deleted reliably even for table sync or parallel apply workers.
+ */
+ if (!am_leader_apply_worker())
+ return false;
+
+ /* No need to advance if retaining dead tuples is not required */
+ if (!MySubscription->retaindeadtuples)
+ return false;
+
+ return true;
+}
+
+/*
+ * Process phase transitions during the non-removable transaction ID
+ * advancement. See comments atop worker.c for details of the transition.
+ */
+static void
+process_rdt_phase_transition(RetainDeadTuplesData *rdt_data,
+ bool status_received)
+{
+ switch (rdt_data->phase)
+ {
+ case RDT_GET_CANDIDATE_XID:
+ get_candidate_xid(rdt_data);
+ break;
+ case RDT_REQUEST_PUBLISHER_STATUS:
+ request_publisher_status(rdt_data);
+ break;
+ case RDT_WAIT_FOR_PUBLISHER_STATUS:
+ wait_for_publisher_status(rdt_data, status_received);
+ break;
+ case RDT_WAIT_FOR_LOCAL_FLUSH:
+ wait_for_local_flush(rdt_data);
+ break;
+ }
+}
+
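A minimal standalone model of the cycle this dispatcher drives, assuming the happy path where each phase immediately hands off to the next; only the enum constants mirror the patch, the rest is illustrative:

#include <stdio.h>

typedef enum
{
	RDT_GET_CANDIDATE_XID,
	RDT_REQUEST_PUBLISHER_STATUS,
	RDT_WAIT_FOR_PUBLISHER_STATUS,
	RDT_WAIT_FOR_LOCAL_FLUSH
} RetainDeadTuplesPhase;

int
main(void)
{
	RetainDeadTuplesPhase phase = RDT_GET_CANDIDATE_XID;

	for (int step = 0; step < 4; step++)
	{
		switch (phase)
		{
			case RDT_GET_CANDIDATE_XID:
				puts("pick oldest running XID as candidate");
				phase = RDT_REQUEST_PUBLISHER_STATUS;
				break;
			case RDT_REQUEST_PUBLISHER_STATUS:
				puts("send 'p' status request to the publisher");
				phase = RDT_WAIT_FOR_PUBLISHER_STATUS;
				break;
			case RDT_WAIT_FOR_PUBLISHER_STATUS:
				puts("status received; remote xacts completed");
				phase = RDT_WAIT_FOR_LOCAL_FLUSH;
				break;
			case RDT_WAIT_FOR_LOCAL_FLUSH:
				puts("local flush caught up; advance the XID");
				phase = RDT_GET_CANDIDATE_XID;
				break;
		}
	}
	return 0;
}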
+/*
+ * Workhorse for the RDT_GET_CANDIDATE_XID phase.
+ */
+static void
+get_candidate_xid(RetainDeadTuplesData *rdt_data)
+{
+ TransactionId oldest_running_xid;
+ TimestampTz now;
+
+ /*
+ * Use last_recv_time when applying changes in the loop to avoid
+ * unnecessary system time retrieval. If last_recv_time is not available,
+ * obtain the current timestamp.
+ */
+ now = rdt_data->last_recv_time ? rdt_data->last_recv_time : GetCurrentTimestamp();
+
+ /*
+ * Compute the candidate_xid and request the publisher status at most once
+ * per xid_advance_interval. Refer to adjust_xid_advance_interval() for
+ * details on how this value is dynamically adjusted. This is to avoid
+ * using CPU and network resources without making much progress.
+ */
+ if (!TimestampDifferenceExceeds(rdt_data->candidate_xid_time, now,
+ rdt_data->xid_advance_interval))
+ return;
+
+ /*
+ * Immediately update the timer, even if the function returns later
+ * without setting candidate_xid due to inactivity on the subscriber. This
+ * avoids frequent calls to GetOldestActiveTransactionId.
+ */
+ rdt_data->candidate_xid_time = now;
+
+ /*
+ * Consider transactions in the current database, as only dead tuples from
+ * this database are required for conflict detection.
+ */
+ oldest_running_xid = GetOldestActiveTransactionId(false, false);
+
+ /*
+ * The oldest active transaction ID (oldest_running_xid) can't be older
+ * than any previously computed value.
+ */
+ Assert(TransactionIdPrecedesOrEquals(MyLogicalRepWorker->oldest_nonremovable_xid,
+ oldest_running_xid));
+
+ /* Return if the oldest_nonremovable_xid cannot be advanced */
+ if (TransactionIdEquals(MyLogicalRepWorker->oldest_nonremovable_xid,
+ oldest_running_xid))
+ {
+ adjust_xid_advance_interval(rdt_data, false);
+ return;
+ }
+
+ adjust_xid_advance_interval(rdt_data, true);
+
+ rdt_data->candidate_xid = oldest_running_xid;
+ rdt_data->phase = RDT_REQUEST_PUBLISHER_STATUS;
+
+ /* process the next phase */
+ process_rdt_phase_transition(rdt_data, false);
+}
+
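The time gate in get_candidate_xid() is essentially TimestampDifferenceExceeds() applied once per xid_advance_interval; a hedged standalone equivalent, assuming microsecond timestamps (as TimestampTz uses) and a millisecond interval:

#include <stdbool.h>
#include <stdint.h>

/* Roughly TimestampDifferenceExceeds(last, now, interval_ms): true once
 * at least interval_ms has passed between the microsecond timestamps. */
static bool
interval_elapsed(int64_t last_us, int64_t now_us, int interval_ms)
{
	return (now_us - last_us) >= (int64_t) interval_ms * 1000;
}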
+/*
+ * Workhorse for the RDT_REQUEST_PUBLISHER_STATUS phase.
+ */
+static void
+request_publisher_status(RetainDeadTuplesData *rdt_data)
+{
+ static StringInfo request_message = NULL;
+
+ if (!request_message)
+ {
+ MemoryContext oldctx = MemoryContextSwitchTo(ApplyContext);
+
+ request_message = makeStringInfo();
+ MemoryContextSwitchTo(oldctx);
+ }
+ else
+ resetStringInfo(request_message);
+
+ /*
+ * Send the current time to update the remote walsender's latest reply
+ * message received time.
+ */
+ pq_sendbyte(request_message, 'p');
+ pq_sendint64(request_message, GetCurrentTimestamp());
+
+ elog(DEBUG2, "sending publisher status request message");
+
+ /* Send a request for the publisher status */
+ walrcv_send(LogRepWorkerWalRcvConn,
+ request_message->data, request_message->len);
+
+ rdt_data->phase = RDT_WAIT_FOR_PUBLISHER_STATUS;
+
+ /*
+ * Skip calling maybe_advance_nonremovable_xid() since further transition
+ * is possible only once we receive the publisher status message.
+ */
+}
+
+/*
+ * Workhorse for the RDT_WAIT_FOR_PUBLISHER_STATUS phase.
+ */
+static void
+wait_for_publisher_status(RetainDeadTuplesData *rdt_data,
+ bool status_received)
+{
+ /*
+ * Return if we have requested but not yet received the publisher status.
+ */
+ if (!status_received)
+ return;
+
+ if (!FullTransactionIdIsValid(rdt_data->remote_wait_for))
+ rdt_data->remote_wait_for = rdt_data->remote_nextxid;
+
+ /*
+ * Check if all remote concurrent transactions that were active at the
+ * first status request have now completed. If completed, proceed to the
+ * next phase; otherwise, continue checking the publisher status until
+ * these transactions finish.
+ *
+ * It's possible that transactions in the commit phase during the last
+ * cycle have now finished committing, but remote_oldestxid remains older
+ * than remote_wait_for. This can happen if some old transaction entered
+ * the commit phase when we requested status in this cycle. We do not
+ * handle this case explicitly as it's rare and the benefit doesn't
+ * justify the required complexity. Tracking would require either caching
+ * all xids at the publisher or sending them to subscribers. The condition
+ * will resolve naturally once the remaining transactions are finished.
+ *
+ * Directly advancing the non-removable transaction ID is possible if
+ * there are no activities on the publisher since the last advancement
+ * cycle. However, it requires maintaining two fields, last_remote_nextxid
+ * and last_remote_lsn, within the structure for comparison with the
+ * current cycle's values. Considering the minimal cost of continuing in
+ * RDT_WAIT_FOR_LOCAL_FLUSH without awaiting changes, we opted not to
+ * advance the transaction ID here.
+ */
+ if (FullTransactionIdPrecedesOrEquals(rdt_data->remote_wait_for,
+ rdt_data->remote_oldestxid))
+ rdt_data->phase = RDT_WAIT_FOR_LOCAL_FLUSH;
+ else
+ rdt_data->phase = RDT_REQUEST_PUBLISHER_STATUS;
+
+ /* process the next phase */
+ process_rdt_phase_transition(rdt_data, false);
+}
+
+/*
+ * Workhorse for the RDT_WAIT_FOR_LOCAL_FLUSH phase.
+ */
+static void
+wait_for_local_flush(RetainDeadTuplesData *rdt_data)
+{
+ Assert(!XLogRecPtrIsInvalid(rdt_data->remote_lsn) &&
+ TransactionIdIsValid(rdt_data->candidate_xid));
+
+ /*
+ * We expect the publisher and subscriber clocks to be kept in sync by a
+ * time synchronization service such as NTP. Otherwise, we will advance this worker's
+ * oldest_nonremovable_xid prematurely, leading to the removal of rows
+ * required to detect update_deleted reliably. This check primarily
+ * addresses scenarios where the publisher's clock falls behind; if the
+ * publisher's clock is ahead, subsequent transactions will naturally bear
+ * later commit timestamps, conforming to the design outlined atop
+ * worker.c.
+ *
+ * XXX Consider waiting for the publisher's clock to catch up with the
+ * subscriber's before proceeding to the next phase.
+ */
+ if (TimestampDifferenceExceeds(rdt_data->reply_time,
+ rdt_data->candidate_xid_time, 0))
+ ereport(ERROR,
+ errmsg_internal("oldest_nonremovable_xid transaction ID could be advanced prematurely"),
+ errdetail_internal("The clock on the publisher is behind that of the subscriber."));
+
+ /*
+ * Do not attempt to advance the non-removable transaction ID when table
+ * sync is in progress. During this time, changes from a single
+ * transaction may be applied by multiple table sync workers corresponding
+ * to the target tables. So, it's necessary for all table sync workers to
+ * apply and flush the corresponding changes before advancing the
+ * transaction ID, otherwise, dead tuples that are still needed for
+ * conflict detection in table sync workers could be removed prematurely.
+ * However, confirming the apply and flush progress across all table sync
+ * workers is complex and not worth the effort, so we simply return if not
+ * all tables are in the READY state.
+ *
+ * It is safe to add new tables with initial states to the subscription
+ * after this check because any changes applied to these tables should
+ * have a WAL position greater than rdt_data->remote_lsn.
+ */
+ if (!AllTablesyncsReady())
+ return;
+
+ /*
+ * Update and check the remote flush position if we are applying changes
+ * in a loop. This is done at most once per WalWriterDelay to avoid
+ * performing costly operations in get_flush_position() too frequently
+ * during change application.
+ */
+ if (last_flushpos < rdt_data->remote_lsn && rdt_data->last_recv_time &&
+ TimestampDifferenceExceeds(rdt_data->flushpos_update_time,
+ rdt_data->last_recv_time, WalWriterDelay))
+ {
+ XLogRecPtr writepos;
+ XLogRecPtr flushpos;
+ bool have_pending_txes;
+
+ /* Fetch the latest remote flush position */
+ get_flush_position(&writepos, &flushpos, &have_pending_txes);
+
+ if (flushpos > last_flushpos)
+ last_flushpos = flushpos;
+
+ rdt_data->flushpos_update_time = rdt_data->last_recv_time;
+ }
+
+ /* Return to wait for the changes to be applied */
+ if (last_flushpos < rdt_data->remote_lsn)
+ return;
+
+ /*
+ * Reaching here means the remote WAL position has been received, and all
+ * transactions up to that position on the publisher have been applied and
+ * flushed locally. So, we can advance the non-removable transaction ID.
+ */
+ SpinLockAcquire(&MyLogicalRepWorker->relmutex);
+ MyLogicalRepWorker->oldest_nonremovable_xid = rdt_data->candidate_xid;
+ SpinLockRelease(&MyLogicalRepWorker->relmutex);
+
+ elog(DEBUG2, "confirmed flush up to remote lsn %X/%08X: new oldest_nonremovable_xid %u",
+ LSN_FORMAT_ARGS(rdt_data->remote_lsn),
+ rdt_data->candidate_xid);
+
+ /* Notify launcher to update the xmin of the conflict slot */
+ ApplyLauncherWakeup();
+
+ /*
+ * Reset all data fields except those used to determine the timing for the
+ * next round of transaction ID advancement. We can even use
+ * flushpos_update_time in the next round to decide whether to get the
+ * latest flush position.
+ */
+ rdt_data->phase = RDT_GET_CANDIDATE_XID;
+ rdt_data->remote_lsn = InvalidXLogRecPtr;
+ rdt_data->remote_oldestxid = InvalidFullTransactionId;
+ rdt_data->remote_nextxid = InvalidFullTransactionId;
+ rdt_data->reply_time = 0;
+ rdt_data->remote_wait_for = InvalidFullTransactionId;
+ rdt_data->candidate_xid = InvalidTransactionId;
+
+ /* process the next phase */
+ process_rdt_phase_transition(rdt_data, false);
+}
+
+/*
+ * Adjust the interval for advancing non-removable transaction IDs.
+ *
+ * We double the interval to try advancing the non-removable transaction IDs
+ * if there is no activity on the node. The maximum value of the interval is
+ * capped at wal_receiver_status_interval if it is not zero, otherwise at
+ * 3 minutes, which should be sufficient to avoid using CPU or network
+ * resources without much benefit.
+ *
+ * The interval is reset to a minimum value of 100ms once there is some
+ * activity on the node.
+ *
+ * XXX The use of wal_receiver_status_interval is a bit arbitrary so we can
+ * consider another interval or a separate GUC if the need arises.
+ */
+static void
+adjust_xid_advance_interval(RetainDeadTuplesData *rdt_data, bool new_xid_found)
+{
+ if (!new_xid_found && rdt_data->xid_advance_interval)
+ {
+ int max_interval = wal_receiver_status_interval
+ ? wal_receiver_status_interval * 1000
+ : MAX_XID_ADVANCE_INTERVAL;
+
+ /*
+ * No new transaction ID has been assigned since the last check, so
+ * double the interval, but not beyond the maximum allowable value.
+ */
+ rdt_data->xid_advance_interval = Min(rdt_data->xid_advance_interval * 2,
+ max_interval);
+ }
+ else
+ {
+ /*
+ * A new transaction ID was found or the interval is not yet
+ * initialized, so set the interval to the minimum value.
+ */
+ rdt_data->xid_advance_interval = MIN_XID_ADVANCE_INTERVAL;
+ }
+}
+
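A compact standalone model of this doubling-with-cap policy; the constants are assumptions standing in for MIN_XID_ADVANCE_INTERVAL (100ms, per the comment above) and the 3-minute fallback cap:

#include <stdio.h>

#define MIN_INTERVAL_MS		100				/* assumed minimum */
#define MAX_INTERVAL_MS		(3 * 60 * 1000)	/* assumed 3-minute cap */

static int
next_interval(int cur_ms, int new_xid_found, int status_interval_ms)
{
	if (!new_xid_found && cur_ms > 0)
	{
		int			max_ms = status_interval_ms > 0 ?
			status_interval_ms : MAX_INTERVAL_MS;

		/* No new XID since the last check: double, but stay under the cap. */
		cur_ms *= 2;
		return cur_ms < max_ms ? cur_ms : max_ms;
	}
	return MIN_INTERVAL_MS;
}

int
main(void)
{
	int			interval = 0;

	/* Idle node: 100, 200, 400, ... capped at MAX_INTERVAL_MS. */
	for (int i = 0; i < 12; i++)
	{
		interval = next_interval(interval, i == 0, 0);
		printf("%d ms\n", interval);
	}
	return 0;
}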
+/*
* Exit routine for apply workers due to subscription parameter changes.
*/
static void
@@ -4715,6 +5441,30 @@ InitializeLogRepWorker(void)
apply_worker_exit();
}
+ /*
+ * Restart the worker if retain_dead_tuples was enabled during startup.
+ *
+ * At this point, the replication slot used for conflict detection might
+ * not exist yet, or could be dropped soon if the launcher perceives
+ * retain_dead_tuples as disabled. To avoid unnecessary tracking of
+ * oldest_nonremovable_xid when the slot is absent or at risk of being
+ * dropped, a restart is initiated.
+ *
+ * The oldest_nonremovable_xid should be initialized only when
+ * retain_dead_tuples is enabled before launching the worker. See
+ * logicalrep_worker_launch.
+ */
+ if (am_leader_apply_worker() &&
+ MySubscription->retaindeadtuples &&
+ !TransactionIdIsValid(MyLogicalRepWorker->oldest_nonremovable_xid))
+ {
+ ereport(LOG,
+ errmsg("logical replication worker for subscription \"%s\" will restart because the option %s was enabled during startup",
+ MySubscription->name, "retain_dead_tuples"));
+
+ apply_worker_exit();
+ }
+
/* Setup synchronous commit according to the user's wishes */
SetConfigOption("synchronous_commit", MySubscription->synccommit,
PGC_BACKEND, PGC_S_OVERRIDE);
@@ -4871,6 +5621,14 @@ DisableSubscriptionAndExit(void)
errmsg("subscription \"%s\" has been disabled because of an error",
MySubscription->name));
+ /*
+ * Skip the track_commit_timestamp check when disabling the worker due to
+ * an error, as verifying commit timestamps is unnecessary in this
+ * context.
+ */
+ if (MySubscription->retaindeadtuples)
+ CheckSubDeadTupleRetention(false, true, WARNING);
+
proc_exit(0);
}
@@ -4916,7 +5674,7 @@ maybe_start_skipping_changes(XLogRecPtr finish_lsn)
skip_xact_finish_lsn = finish_lsn;
ereport(LOG,
- errmsg("logical replication starts skipping transaction at LSN %X/%X",
+ errmsg("logical replication starts skipping transaction at LSN %X/%08X",
LSN_FORMAT_ARGS(skip_xact_finish_lsn)));
}
@@ -4930,8 +5688,8 @@ stop_skipping_changes(void)
return;
ereport(LOG,
- (errmsg("logical replication completed skipping transaction at LSN %X/%X",
- LSN_FORMAT_ARGS(skip_xact_finish_lsn))));
+ errmsg("logical replication completed skipping transaction at LSN %X/%08X",
+ LSN_FORMAT_ARGS(skip_xact_finish_lsn)));
/* Stop skipping changes */
skip_xact_finish_lsn = InvalidXLogRecPtr;
@@ -5019,7 +5777,7 @@ clear_subscription_skip_lsn(XLogRecPtr finish_lsn)
if (myskiplsn != finish_lsn)
ereport(WARNING,
errmsg("skip-LSN of subscription \"%s\" cleared", MySubscription->name),
- errdetail("Remote transaction's finish WAL location (LSN) %X/%X did not match skip-LSN %X/%X.",
+ errdetail("Remote transaction's finish WAL location (LSN) %X/%08X did not match skip-LSN %X/%08X.",
LSN_FORMAT_ARGS(finish_lsn),
LSN_FORMAT_ARGS(myskiplsn)));
}
@@ -5056,7 +5814,7 @@ apply_error_callback(void *arg)
logicalrep_message_type(errarg->command),
errarg->remote_xid);
else
- errcontext("processing remote data for replication origin \"%s\" during message type \"%s\" in transaction %u, finished at %X/%X",
+ errcontext("processing remote data for replication origin \"%s\" during message type \"%s\" in transaction %u, finished at %X/%08X",
errarg->origin_name,
logicalrep_message_type(errarg->command),
errarg->remote_xid,
@@ -5074,7 +5832,7 @@ apply_error_callback(void *arg)
errarg->rel->remoterel.relname,
errarg->remote_xid);
else
- errcontext("processing remote data for replication origin \"%s\" during message type \"%s\" for replication target relation \"%s.%s\" in transaction %u, finished at %X/%X",
+ errcontext("processing remote data for replication origin \"%s\" during message type \"%s\" for replication target relation \"%s.%s\" in transaction %u, finished at %X/%08X",
errarg->origin_name,
logicalrep_message_type(errarg->command),
errarg->rel->remoterel.nspname,
@@ -5093,7 +5851,7 @@ apply_error_callback(void *arg)
errarg->rel->remoterel.attnames[errarg->remote_attnum],
errarg->remote_xid);
else
- errcontext("processing remote data for replication origin \"%s\" during message type \"%s\" for replication target relation \"%s.%s\" column \"%s\" in transaction %u, finished at %X/%X",
+ errcontext("processing remote data for replication origin \"%s\" during message type \"%s\" for replication target relation \"%s.%s\" column \"%s\" in transaction %u, finished at %X/%08X",
errarg->origin_name,
logicalrep_message_type(errarg->command),
errarg->rel->remoterel.nspname,
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
index 693a766e6d7..80540c017bd 100644
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -297,10 +297,12 @@ parse_output_parameters(List *options, PGOutputData *data)
bool two_phase_option_given = false;
bool origin_option_given = false;
+ /* Initialize optional parameters to defaults */
data->binary = false;
data->streaming = LOGICALREP_STREAM_OFF;
data->messages = false;
data->two_phase = false;
+ data->publish_no_origin = false;
foreach(lc, options)
{
@@ -1372,8 +1374,8 @@ pgoutput_row_filter(Relation relation, TupleTableSlot *old_slot,
* VARTAG_INDIRECT. See ReorderBufferToastReplace.
*/
if (att->attlen == -1 &&
- VARATT_IS_EXTERNAL_ONDISK(new_slot->tts_values[i]) &&
- !VARATT_IS_EXTERNAL_ONDISK(old_slot->tts_values[i]))
+ VARATT_IS_EXTERNAL_ONDISK(DatumGetPointer(new_slot->tts_values[i])) &&
+ !VARATT_IS_EXTERNAL_ONDISK(DatumGetPointer(old_slot->tts_values[i])))
{
if (!tmp_new_slot)
{
@@ -1789,7 +1791,7 @@ LoadPublications(List *pubnames)
else
ereport(WARNING,
errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("skipped loading publication: %s", pubname),
+ errmsg("skipped loading publication \"%s\"", pubname),
errdetail("The publication does not exist at this point in the WAL."),
errhint("Create the publication if it does not exist."));
}
diff --git a/src/backend/replication/repl_gram.y b/src/backend/replication/repl_gram.y
index 7440aae5a1a..8a649199ec6 100644
--- a/src/backend/replication/repl_gram.y
+++ b/src/backend/replication/repl_gram.y
@@ -279,7 +279,7 @@ alter_replication_slot:
;
/*
- * START_REPLICATION [SLOT slot] [PHYSICAL] %X/%X [TIMELINE %u]
+ * START_REPLICATION [SLOT slot] [PHYSICAL] %X/%08X [TIMELINE %u]
*/
start_replication:
K_START_REPLICATION opt_slot opt_physical RECPTR opt_timeline
@@ -295,7 +295,7 @@ start_replication:
}
;
-/* START_REPLICATION SLOT slot LOGICAL %X/%X options */
+/* START_REPLICATION SLOT slot LOGICAL %X/%08X options */
start_logical_replication:
K_START_REPLICATION K_SLOT IDENT K_LOGICAL RECPTR plugin_options
{
diff --git a/src/backend/replication/repl_scanner.l b/src/backend/replication/repl_scanner.l
index 014ea8d25c6..b6930e28659 100644
--- a/src/backend/replication/repl_scanner.l
+++ b/src/backend/replication/repl_scanner.l
@@ -155,7 +155,7 @@ UPLOAD_MANIFEST { return K_UPLOAD_MANIFEST; }
{hexdigit}+\/{hexdigit}+ {
uint32 hi,
lo;
- if (sscanf(yytext, "%X/%X", &hi, &lo) != 2)
+ if (sscanf(yytext, "%X/%08X", &hi, &lo) != 2)
replication_yyerror(NULL, yyscanner, "invalid streaming start location");
yylval->recptr = ((uint64) hi) << 32 | lo;
return RECPTR;
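A standalone round-trip check of the same parse-and-reassemble logic, using nothing beyond libc (the scanf width limits the low half to 8 hex digits on common implementations):

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	unsigned int hi,
				lo;
	uint64_t	recptr;

	/* Same reconstruction as the scanner rule above. */
	if (sscanf("1/0000002A", "%X/%08X", &hi, &lo) != 2)
		return 1;
	recptr = ((uint64_t) hi) << 32 | lo;
	return recptr == 0x10000002AULL ? 0 : 1;
}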
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
index 600b87fa9cb..8605776ad86 100644
--- a/src/backend/replication/slot.c
+++ b/src/backend/replication/slot.c
@@ -47,6 +47,7 @@
#include "miscadmin.h"
#include "pgstat.h"
#include "postmaster/interrupt.h"
+#include "replication/logicallauncher.h"
#include "replication/slotsync.h"
#include "replication/slot.h"
#include "replication/walsender_private.h"
@@ -154,7 +155,7 @@ int max_replication_slots = 10; /* the maximum number of replication
* Invalidate replication slots that have remained idle longer than this
* duration; '0' disables it.
*/
-int idle_replication_slot_timeout_mins = 0;
+int idle_replication_slot_timeout_secs = 0;
/*
* This GUC lists streaming replication standby server slot names that
@@ -172,6 +173,7 @@ static SyncStandbySlotsConfigData *synchronized_standby_slots_config;
static XLogRecPtr ss_oldest_flush_lsn = InvalidXLogRecPtr;
static void ReplicationSlotShmemExit(int code, Datum arg);
+static bool IsSlotForConflictCheck(const char *name);
static void ReplicationSlotDropPtr(ReplicationSlot *slot);
/* internal persistency functions */
@@ -258,13 +260,17 @@ ReplicationSlotShmemExit(int code, Datum arg)
/*
* Check whether the passed slot name is valid and report errors at elevel.
*
+ * An error will be reported for a reserved replication slot name if
+ * allow_reserved_name is set to false.
+ *
* Slot names may consist of [a-z0-9_]{1,NAMEDATALEN-1} which should allow
* the name to be used as a directory name on every supported OS.
*
* Returns whether the directory name is valid or not if elevel < ERROR.
*/
bool
-ReplicationSlotValidateName(const char *name, int elevel)
+ReplicationSlotValidateName(const char *name, bool allow_reserved_name,
+ int elevel)
{
const char *cp;
@@ -300,10 +306,32 @@ ReplicationSlotValidateName(const char *name, int elevel)
return false;
}
}
+
+ if (!allow_reserved_name && IsSlotForConflictCheck(name))
+ {
+ ereport(elevel,
+ errcode(ERRCODE_RESERVED_NAME),
+ errmsg("replication slot name \"%s\" is reserved",
+ name),
+ errdetail("The name \"%s\" is reserved for the conflict detection slot.",
+ CONFLICT_DETECTION_SLOT));
+
+ return false;
+ }
+
return true;
}
/*
+ * Return true if the replication slot name is "pg_conflict_detection".
+ */
+static bool
+IsSlotForConflictCheck(const char *name)
+{
+ return (strcmp(name, CONFLICT_DETECTION_SLOT) == 0);
+}
+
+/*
* Create a new replication slot and mark it as used by this backend.
*
* name: Name of the slot
@@ -330,7 +358,12 @@ ReplicationSlotCreate(const char *name, bool db_specific,
Assert(MyReplicationSlot == NULL);
- ReplicationSlotValidateName(name, ERROR);
+ /*
+ * The logical launcher or pg_upgrade may create or migrate an internal
+ * slot, so using a reserved name is allowed in these cases.
+ */
+ ReplicationSlotValidateName(name, IsBinaryUpgrade || IsLogicalLauncher(),
+ ERROR);
if (failover)
{
@@ -424,6 +457,7 @@ ReplicationSlotCreate(const char *name, bool db_specific,
slot->candidate_restart_valid = InvalidXLogRecPtr;
slot->candidate_restart_lsn = InvalidXLogRecPtr;
slot->last_saved_confirmed_flush = InvalidXLogRecPtr;
+ slot->last_saved_restart_lsn = InvalidXLogRecPtr;
slot->inactive_since = 0;
/*
@@ -581,6 +615,17 @@ retry:
}
/*
+ * Do not allow users to acquire the reserved slot. This scenario may
+ * occur if the launcher that owns the slot has terminated unexpectedly
+ * due to an error, and a backend process attempts to reuse the slot.
+ */
+ if (!IsLogicalLauncher() && IsSlotForConflictCheck(name))
+ ereport(ERROR,
+ errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("cannot acquire replication slot \"%s\"", name),
+ errdetail("The slot is reserved for conflict detection and can only be acquired by the logical replication launcher."));
+
+ /*
* This is the slot we want; check if it's active under some other
* process. In single user mode, we don't need this check.
*/
@@ -1165,20 +1210,41 @@ ReplicationSlotsComputeRequiredLSN(void)
{
ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
XLogRecPtr restart_lsn;
+ XLogRecPtr last_saved_restart_lsn;
bool invalidated;
+ ReplicationSlotPersistency persistency;
if (!s->in_use)
continue;
SpinLockAcquire(&s->mutex);
+ persistency = s->data.persistency;
restart_lsn = s->data.restart_lsn;
invalidated = s->data.invalidated != RS_INVAL_NONE;
+ last_saved_restart_lsn = s->last_saved_restart_lsn;
SpinLockRelease(&s->mutex);
/* invalidated slots need not apply */
if (invalidated)
continue;
+ /*
+ * For a persistent slot, use last_saved_restart_lsn to compute the
+ * oldest LSN for removal of WAL segments. The segments between
+ * last_saved_restart_lsn and restart_lsn might be needed by a
+ * persistent slot in the case of a database crash. Non-persistent
+ * slots can't survive a database crash, so we don't care about
+ * last_saved_restart_lsn for them.
+ */
+ if (persistency == RS_PERSISTENT)
+ {
+ if (last_saved_restart_lsn != InvalidXLogRecPtr &&
+ restart_lsn > last_saved_restart_lsn)
+ {
+ restart_lsn = last_saved_restart_lsn;
+ }
+ }
+
if (restart_lsn != InvalidXLogRecPtr &&
(min_required == InvalidXLogRecPtr ||
restart_lsn < min_required))
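The clamping rule added here (and repeated in the logical-restart computation below) reduces to a small pure function; a sketch with PostgreSQL types replaced by stdint equivalents:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t XLogRecPtr;
#define InvalidXLogRecPtr	((XLogRecPtr) 0)

/*
 * A persistent slot's effective restart LSN is its last *saved* restart
 * LSN when that one is older: WAL between the saved and current values
 * may still be needed if the server crashes before the next checkpoint.
 */
static XLogRecPtr
effective_restart_lsn(bool persistent, XLogRecPtr restart_lsn,
					  XLogRecPtr last_saved_restart_lsn)
{
	if (persistent &&
		last_saved_restart_lsn != InvalidXLogRecPtr &&
		restart_lsn > last_saved_restart_lsn)
		return last_saved_restart_lsn;
	return restart_lsn;
}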
@@ -1216,7 +1282,9 @@ ReplicationSlotsComputeLogicalRestartLSN(void)
{
ReplicationSlot *s;
XLogRecPtr restart_lsn;
+ XLogRecPtr last_saved_restart_lsn;
bool invalidated;
+ ReplicationSlotPersistency persistency;
s = &ReplicationSlotCtl->replication_slots[i];
@@ -1230,14 +1298,33 @@ ReplicationSlotsComputeLogicalRestartLSN(void)
/* read once, it's ok if it increases while we're checking */
SpinLockAcquire(&s->mutex);
+ persistency = s->data.persistency;
restart_lsn = s->data.restart_lsn;
invalidated = s->data.invalidated != RS_INVAL_NONE;
+ last_saved_restart_lsn = s->last_saved_restart_lsn;
SpinLockRelease(&s->mutex);
/* invalidated slots need not apply */
if (invalidated)
continue;
+ /*
+ * For a persistent slot, use last_saved_restart_lsn to compute the
+ * oldest LSN for removal of WAL segments. The segments between
+ * last_saved_restart_lsn and restart_lsn might be needed by a
+ * persistent slot in the case of a database crash. Non-persistent
+ * slots can't survive a database crash, so we don't care about
+ * last_saved_restart_lsn for them.
+ */
+ if (persistency == RS_PERSISTENT)
+ {
+ if (last_saved_restart_lsn != InvalidXLogRecPtr &&
+ restart_lsn > last_saved_restart_lsn)
+ {
+ restart_lsn = last_saved_restart_lsn;
+ }
+ }
+
if (restart_lsn == InvalidXLogRecPtr)
continue;
@@ -1455,6 +1542,7 @@ ReplicationSlotReserveWal(void)
Assert(slot != NULL);
Assert(slot->data.restart_lsn == InvalidXLogRecPtr);
+ Assert(slot->last_saved_restart_lsn == InvalidXLogRecPtr);
/*
* The replication slot mechanism is used to prevent removal of required
@@ -1547,8 +1635,8 @@ ReportSlotInvalidation(ReplicationSlotInvalidationCause cause,
uint64 ex = oldestLSN - restart_lsn;
appendStringInfo(&err_detail,
- ngettext("The slot's restart_lsn %X/%X exceeds the limit by %" PRIu64 " byte.",
- "The slot's restart_lsn %X/%X exceeds the limit by %" PRIu64 " bytes.",
+ ngettext("The slot's restart_lsn %X/%08X exceeds the limit by %" PRIu64 " byte.",
+ "The slot's restart_lsn %X/%08X exceeds the limit by %" PRIu64 " bytes.",
ex),
LSN_FORMAT_ARGS(restart_lsn),
ex);
@@ -1568,13 +1656,10 @@ ReportSlotInvalidation(ReplicationSlotInvalidationCause cause,
case RS_INVAL_IDLE_TIMEOUT:
{
- int minutes = slot_idle_seconds / SECS_PER_MINUTE;
- int secs = slot_idle_seconds % SECS_PER_MINUTE;
-
/* translator: %s is a GUC variable name */
- appendStringInfo(&err_detail, _("The slot's idle time of %dmin %02ds exceeds the configured \"%s\" duration of %dmin."),
- minutes, secs, "idle_replication_slot_timeout",
- idle_replication_slot_timeout_mins);
+ appendStringInfo(&err_detail, _("The slot's idle time of %lds exceeds the configured \"%s\" duration of %ds."),
+ slot_idle_seconds, "idle_replication_slot_timeout",
+ idle_replication_slot_timeout_secs);
/* translator: %s is a GUC variable name */
appendStringInfo(&err_hint, _("You might need to increase \"%s\"."),
"idle_replication_slot_timeout");
@@ -1612,7 +1697,7 @@ ReportSlotInvalidation(ReplicationSlotInvalidationCause cause,
static inline bool
CanInvalidateIdleSlot(ReplicationSlot *s)
{
- return (idle_replication_slot_timeout_mins != 0 &&
+ return (idle_replication_slot_timeout_secs != 0 &&
!XLogRecPtrIsInvalid(s->data.restart_lsn) &&
s->inactive_since > 0 &&
!(RecoveryInProgress() && s->data.synced));
@@ -1673,9 +1758,9 @@ DetermineSlotInvalidationCause(uint32 possible_causes, ReplicationSlot *s,
if (CanInvalidateIdleSlot(s))
{
/*
- * We simulate the invalidation due to idle_timeout as the minimum
- * time idle time is one minute which makes tests take a long
- * time.
+ * Simulate the invalidation due to idle_timeout to test the
+ * timeout behavior promptly, without waiting for it to trigger
+ * naturally.
*/
#ifdef USE_INJECTION_POINTS
if (IS_INJECTION_POINT_ATTACHED("slot-timeout-inval"))
@@ -1690,7 +1775,7 @@ DetermineSlotInvalidationCause(uint32 possible_causes, ReplicationSlot *s,
* idle_replication_slot_timeout GUC.
*/
if (TimestampDifferenceExceedsSeconds(s->inactive_since, now,
- idle_replication_slot_timeout_mins * SECS_PER_MINUTE))
+ idle_replication_slot_timeout_secs))
{
*inactive_since = s->inactive_since;
return RS_INVAL_IDLE_TIMEOUT;
@@ -1835,7 +1920,10 @@ InvalidatePossiblyObsoleteSlot(uint32 possible_causes,
* just rely on .invalidated.
*/
if (invalidation_cause == RS_INVAL_WAL_REMOVED)
+ {
s->data.restart_lsn = InvalidXLogRecPtr;
+ s->last_saved_restart_lsn = InvalidXLogRecPtr;
+ }
/* Let caller know */
*invalidated = true;
@@ -1844,15 +1932,6 @@ InvalidatePossiblyObsoleteSlot(uint32 possible_causes,
SpinLockRelease(&s->mutex);
/*
- * The logical replication slots shouldn't be invalidated as GUC
- * max_slot_wal_keep_size is set to -1 and
- * idle_replication_slot_timeout is set to 0 during the binary
- * upgrade. See check_old_cluster_for_valid_slots() where we ensure
- * that no invalidated before the upgrade.
- */
- Assert(!(*invalidated && SlotIsLogical(s) && IsBinaryUpgrade));
-
- /*
* Calculate the idle time duration of the slot if slot is marked
* invalidated with RS_INVAL_IDLE_TIMEOUT.
*/
@@ -1998,6 +2077,10 @@ restart:
if (!s->in_use)
continue;
+ /* Prevent invalidation of logical slots during binary upgrade */
+ if (SlotIsLogical(s) && IsBinaryUpgrade)
+ continue;
+
if (InvalidatePossiblyObsoleteSlot(possible_causes, s, oldestLSN, dboid,
snapshotConflictHorizon,
&invalidated))
@@ -2032,6 +2115,7 @@ void
CheckPointReplicationSlots(bool is_shutdown)
{
int i;
+ bool last_saved_restart_lsn_updated = false;
elog(DEBUG1, "performing replication slot checkpoint");
@@ -2076,9 +2160,23 @@ CheckPointReplicationSlots(bool is_shutdown)
SpinLockRelease(&s->mutex);
}
+ /*
+ * Track if we're going to update slot's last_saved_restart_lsn. We
+ * need this to know if we need to recompute the required LSN.
+ */
+ if (s->last_saved_restart_lsn != s->data.restart_lsn)
+ last_saved_restart_lsn_updated = true;
+
SaveSlotToPath(s, path, LOG);
}
LWLockRelease(ReplicationSlotAllocationLock);
+
+ /*
+ * Recompute the required LSN if SaveSlotToPath() updated
+ * last_saved_restart_lsn for any slot.
+ */
+ if (last_saved_restart_lsn_updated)
+ ReplicationSlotsComputeRequiredLSN();
}
/*
@@ -2354,6 +2452,7 @@ SaveSlotToPath(ReplicationSlot *slot, const char *dir, int elevel)
if (!slot->just_dirtied)
slot->dirty = false;
slot->last_saved_confirmed_flush = cp.slotdata.confirmed_flush;
+ slot->last_saved_restart_lsn = cp.slotdata.restart_lsn;
SpinLockRelease(&slot->mutex);
LWLockRelease(&slot->io_in_progress_lock);
@@ -2569,6 +2668,7 @@ RestoreSlotFromDisk(const char *name)
slot->effective_xmin = cp.slotdata.xmin;
slot->effective_catalog_xmin = cp.slotdata.catalog_xmin;
slot->last_saved_confirmed_flush = cp.slotdata.confirmed_flush;
+ slot->last_saved_restart_lsn = cp.slotdata.restart_lsn;
slot->candidate_catalog_xmin = InvalidTransactionId;
slot->candidate_xmin_lsn = InvalidXLogRecPtr;
@@ -2993,22 +3093,3 @@ WaitForStandbyConfirmation(XLogRecPtr wait_for_lsn)
ConditionVariableCancelSleep();
}
-
-/*
- * GUC check_hook for idle_replication_slot_timeout
- *
- * The value of idle_replication_slot_timeout must be set to 0 during
- * a binary upgrade. See start_postmaster() in pg_upgrade for more details.
- */
-bool
-check_idle_replication_slot_timeout(int *newval, void **extra, GucSource source)
-{
- if (IsBinaryUpgrade && *newval != 0)
- {
- GUC_check_errdetail("\"%s\" must be set to 0 during binary upgrade mode.",
- "idle_replication_slot_timeout");
- return false;
- }
-
- return true;
-}
diff --git a/src/backend/replication/slotfuncs.c b/src/backend/replication/slotfuncs.c
index 36cc2ed4e44..69f4c6157c5 100644
--- a/src/backend/replication/slotfuncs.c
+++ b/src/backend/replication/slotfuncs.c
@@ -566,7 +566,7 @@ pg_replication_slot_advance(PG_FUNCTION_ARGS)
if (moveto < minlsn)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot advance replication slot to %X/%X, minimum is %X/%X",
+ errmsg("cannot advance replication slot to %X/%08X, minimum is %X/%08X",
LSN_FORMAT_ARGS(moveto), LSN_FORMAT_ARGS(minlsn))));
/* Do the actual slot update, depending on the slot type */
diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c
index cc35984ad00..32cf3a48b89 100644
--- a/src/backend/replication/syncrep.c
+++ b/src/backend/replication/syncrep.c
@@ -258,7 +258,7 @@ SyncRepWaitForLSN(XLogRecPtr lsn, bool commit)
{
char buffer[32];
- sprintf(buffer, "waiting for %X/%X", LSN_FORMAT_ARGS(lsn));
+ sprintf(buffer, "waiting for %X/%08X", LSN_FORMAT_ARGS(lsn));
set_ps_display_suffix(buffer);
}
@@ -566,7 +566,7 @@ SyncRepReleaseWaiters(void)
LWLockRelease(SyncRepLock);
- elog(DEBUG3, "released %d procs up to write %X/%X, %d procs up to flush %X/%X, %d procs up to apply %X/%X",
+ elog(DEBUG3, "released %d procs up to write %X/%08X, %d procs up to flush %X/%08X, %d procs up to apply %X/%08X",
numwrite, LSN_FORMAT_ARGS(writePtr),
numflush, LSN_FORMAT_ARGS(flushPtr),
numapply, LSN_FORMAT_ARGS(applyPtr));
diff --git a/src/backend/replication/syncrep_scanner.l b/src/backend/replication/syncrep_scanner.l
index 7dec1f869c7..02004d621e7 100644
--- a/src/backend/replication/syncrep_scanner.l
+++ b/src/backend/replication/syncrep_scanner.l
@@ -157,17 +157,16 @@ syncrep_yyerror(SyncRepConfigData **syncrep_parse_result_p, char **syncrep_parse
{
struct yyguts_t *yyg = (struct yyguts_t *) yyscanner; /* needed for yytext
* macro */
- char *syncrep_parse_error_msg = *syncrep_parse_error_msg_p;
/* report only the first error in a parse operation */
- if (syncrep_parse_error_msg)
+ if (*syncrep_parse_error_msg_p)
return;
if (yytext[0])
- syncrep_parse_error_msg = psprintf("%s at or near \"%s\"",
- message, yytext);
+ *syncrep_parse_error_msg_p = psprintf("%s at or near \"%s\"",
+ message, yytext);
else
- syncrep_parse_error_msg = psprintf("%s at end of input",
- message);
+ *syncrep_parse_error_msg_p = psprintf("%s at end of input",
+ message);
}
void
diff --git a/src/backend/replication/walreceiver.c b/src/backend/replication/walreceiver.c
index 8c4d0fd9aed..b6281101711 100644
--- a/src/backend/replication/walreceiver.c
+++ b/src/backend/replication/walreceiver.c
@@ -386,12 +386,12 @@ WalReceiverMain(const void *startup_data, size_t startup_data_len)
{
if (first_stream)
ereport(LOG,
- (errmsg("started streaming WAL from primary at %X/%X on timeline %u",
- LSN_FORMAT_ARGS(startpoint), startpointTLI)));
+ errmsg("started streaming WAL from primary at %X/%08X on timeline %u",
+ LSN_FORMAT_ARGS(startpoint), startpointTLI));
else
ereport(LOG,
- (errmsg("restarted WAL streaming at %X/%X on timeline %u",
- LSN_FORMAT_ARGS(startpoint), startpointTLI)));
+ errmsg("restarted WAL streaming at %X/%08X on timeline %u",
+ LSN_FORMAT_ARGS(startpoint), startpointTLI));
first_stream = false;
/* Initialize LogstreamResult and buffers for processing messages */
@@ -470,7 +470,7 @@ WalReceiverMain(const void *startup_data, size_t startup_data_len)
{
ereport(LOG,
(errmsg("replication terminated by primary server"),
- errdetail("End of WAL reached on timeline %u at %X/%X.",
+ errdetail("End of WAL reached on timeline %u at %X/%08X.",
startpointTLI,
LSN_FORMAT_ARGS(LogstreamResult.Write))));
endofwal = true;
@@ -711,7 +711,7 @@ WalRcvWaitForStartPosition(XLogRecPtr *startpoint, TimeLineID *startpointTLI)
{
char activitymsg[50];
- snprintf(activitymsg, sizeof(activitymsg), "restarting at %X/%X",
+ snprintf(activitymsg, sizeof(activitymsg), "restarting at %X/%08X",
LSN_FORMAT_ARGS(*startpoint));
set_ps_display(activitymsg);
}
@@ -1014,7 +1014,7 @@ XLogWalRcvFlush(bool dying, TimeLineID tli)
{
char activitymsg[50];
- snprintf(activitymsg, sizeof(activitymsg), "streaming %X/%X",
+ snprintf(activitymsg, sizeof(activitymsg), "streaming %X/%08X",
LSN_FORMAT_ARGS(LogstreamResult.Write));
set_ps_display(activitymsg);
}
@@ -1138,7 +1138,7 @@ XLogWalRcvSendReply(bool force, bool requestReply)
pq_sendbyte(&reply_message, requestReply ? 1 : 0);
/* Send it */
- elog(DEBUG2, "sending write %X/%X flush %X/%X apply %X/%X%s",
+ elog(DEBUG2, "sending write %X/%08X flush %X/%08X apply %X/%08X%s",
LSN_FORMAT_ARGS(writePtr),
LSN_FORMAT_ARGS(flushPtr),
LSN_FORMAT_ARGS(applyPtr),
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index 9fa8beb6103..ee911394a23 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -65,6 +65,7 @@
#include "funcapi.h"
#include "libpq/libpq.h"
#include "libpq/pqformat.h"
+#include "libpq/protocol.h"
#include "miscadmin.h"
#include "nodes/replnodes.h"
#include "pgstat.h"
@@ -84,6 +85,7 @@
#include "storage/ipc.h"
#include "storage/pmsignal.h"
#include "storage/proc.h"
+#include "storage/procarray.h"
#include "tcop/dest.h"
#include "tcop/tcopprot.h"
#include "utils/acl.h"
@@ -258,6 +260,7 @@ static void StartLogicalReplication(StartReplicationCmd *cmd);
static void ProcessStandbyMessage(void);
static void ProcessStandbyReplyMessage(void);
static void ProcessStandbyHSFeedbackMessage(void);
+static void ProcessStandbyPSRequestMessage(void);
static void ProcessRepliesIfAny(void);
static void ProcessPendingWrites(void);
static void WalSndKeepalive(bool requestReply, XLogRecPtr writePtr);
@@ -408,7 +411,7 @@ IdentifySystem(void)
else
logptr = GetFlushRecPtr(&currTLI);
- snprintf(xloc, sizeof(xloc), "%X/%X", LSN_FORMAT_ARGS(logptr));
+ snprintf(xloc, sizeof(xloc), "%X/%08X", LSN_FORMAT_ARGS(logptr));
if (MyDatabaseId != InvalidOid)
{
@@ -515,7 +518,7 @@ ReadReplicationSlot(ReadReplicationSlotCmd *cmd)
{
char xloc[64];
- snprintf(xloc, sizeof(xloc), "%X/%X",
+ snprintf(xloc, sizeof(xloc), "%X/%08X",
LSN_FORMAT_ARGS(slot_contents.data.restart_lsn));
values[i] = CStringGetTextDatum(xloc);
nulls[i] = false;
@@ -733,13 +736,13 @@ HandleUploadManifestPacket(StringInfo buf, off_t *offset,
switch (mtype)
{
- case 'd': /* CopyData */
+ case PqMsg_CopyData:
maxmsglen = PQ_LARGE_MESSAGE_LIMIT;
break;
- case 'c': /* CopyDone */
- case 'f': /* CopyFail */
- case 'H': /* Flush */
- case 'S': /* Sync */
+ case PqMsg_CopyDone:
+ case PqMsg_CopyFail:
+ case PqMsg_Flush:
+ case PqMsg_Sync:
maxmsglen = PQ_SMALL_MESSAGE_LIMIT;
break;
default:
@@ -761,19 +764,19 @@ HandleUploadManifestPacket(StringInfo buf, off_t *offset,
/* Process the message */
switch (mtype)
{
- case 'd': /* CopyData */
+ case PqMsg_CopyData:
AppendIncrementalManifestData(ib, buf->data, buf->len);
return true;
- case 'c': /* CopyDone */
+ case PqMsg_CopyDone:
return false;
- case 'H': /* Sync */
- case 'S': /* Flush */
+ case PqMsg_Sync:
+ case PqMsg_Flush:
/* Ignore these while in CopyOut mode as we do elsewhere. */
return true;
- case 'f':
+ case PqMsg_CopyFail:
ereport(ERROR,
(errcode(ERRCODE_QUERY_CANCELED),
errmsg("COPY from stdin failed: %s",
@@ -892,12 +895,12 @@ StartReplication(StartReplicationCmd *cmd)
switchpoint < cmd->startpoint)
{
ereport(ERROR,
- (errmsg("requested starting point %X/%X on timeline %u is not in this server's history",
- LSN_FORMAT_ARGS(cmd->startpoint),
- cmd->timeline),
- errdetail("This server's history forked from timeline %u at %X/%X.",
- cmd->timeline,
- LSN_FORMAT_ARGS(switchpoint))));
+ errmsg("requested starting point %X/%08X on timeline %u is not in this server's history",
+ LSN_FORMAT_ARGS(cmd->startpoint),
+ cmd->timeline),
+ errdetail("This server's history forked from timeline %u at %X/%08X.",
+ cmd->timeline,
+ LSN_FORMAT_ARGS(switchpoint)));
}
sendTimeLineValidUpto = switchpoint;
}
@@ -939,9 +942,9 @@ StartReplication(StartReplicationCmd *cmd)
if (FlushPtr < cmd->startpoint)
{
ereport(ERROR,
- (errmsg("requested starting point %X/%X is ahead of the WAL flush position of this server %X/%X",
- LSN_FORMAT_ARGS(cmd->startpoint),
- LSN_FORMAT_ARGS(FlushPtr))));
+ errmsg("requested starting point %X/%08X is ahead of the WAL flush position of this server %X/%08X",
+ LSN_FORMAT_ARGS(cmd->startpoint),
+ LSN_FORMAT_ARGS(FlushPtr)));
}
/* Start streaming from the requested point */
@@ -983,7 +986,7 @@ StartReplication(StartReplicationCmd *cmd)
Datum values[2];
bool nulls[2] = {0};
- snprintf(startpos_str, sizeof(startpos_str), "%X/%X",
+ snprintf(startpos_str, sizeof(startpos_str), "%X/%08X",
LSN_FORMAT_ARGS(sendTimeLineValidUpto));
dest = CreateDestReceiver(DestRemoteSimple);
@@ -1324,7 +1327,7 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd)
ReplicationSlotPersist();
}
- snprintf(xloc, sizeof(xloc), "%X/%X",
+ snprintf(xloc, sizeof(xloc), "%X/%08X",
LSN_FORMAT_ARGS(MyReplicationSlot->data.confirmed_flush));
dest = CreateDestReceiver(DestRemoteSimple);
@@ -1567,7 +1570,7 @@ WalSndWriteData(LogicalDecodingContext *ctx, XLogRecPtr lsn, TransactionId xid,
tmpbuf.data, sizeof(int64));
/* output previously gathered data in a CopyData packet */
- pq_putmessage_noblock('d', ctx->out->data, ctx->out->len);
+ pq_putmessage_noblock(PqMsg_CopyData, ctx->out->data, ctx->out->len);
CHECK_FOR_INTERRUPTS();
@@ -2303,7 +2306,7 @@ ProcessRepliesIfAny(void)
case PqMsg_CopyDone:
if (!streamingDoneSending)
{
- pq_putmessage_noblock('c', NULL, 0);
+ pq_putmessage_noblock(PqMsg_CopyDone, NULL, 0);
streamingDoneSending = true;
}
@@ -2355,6 +2358,10 @@ ProcessStandbyMessage(void)
ProcessStandbyHSFeedbackMessage();
break;
+ case 'p':
+ ProcessStandbyPSRequestMessage();
+ break;
+
default:
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
@@ -2429,7 +2436,7 @@ ProcessStandbyReplyMessage(void)
/* Copy because timestamptz_to_str returns a static buffer */
replyTimeStr = pstrdup(timestamptz_to_str(replyTime));
- elog(DEBUG2, "write %X/%X flush %X/%X apply %X/%X%s reply_time %s",
+ elog(DEBUG2, "write %X/%08X flush %X/%08X apply %X/%08X%s reply_time %s",
LSN_FORMAT_ARGS(writePtr),
LSN_FORMAT_ARGS(flushPtr),
LSN_FORMAT_ARGS(applyPtr),
@@ -2702,6 +2709,60 @@ ProcessStandbyHSFeedbackMessage(void)
}
/*
+ * Process the request for a primary status update message.
+ */
+static void
+ProcessStandbyPSRequestMessage(void)
+{
+ XLogRecPtr lsn = InvalidXLogRecPtr;
+ TransactionId oldestXidInCommit;
+ FullTransactionId nextFullXid;
+ FullTransactionId fullOldestXidInCommit;
+ WalSnd *walsnd = MyWalSnd;
+ TimestampTz replyTime;
+
+ /*
+ * This shouldn't happen because we don't support getting primary status
+ * message from standby.
+ */
+ if (RecoveryInProgress())
+ elog(ERROR, "the primary status is unavailable during recovery");
+
+ replyTime = pq_getmsgint64(&reply_message);
+
+ /*
+ * Update shared state for this WalSender process based on reply data from
+ * standby.
+ */
+ SpinLockAcquire(&walsnd->mutex);
+ walsnd->replyTime = replyTime;
+ SpinLockRelease(&walsnd->mutex);
+
+ /*
+ * Consider transactions in the current database, as only these are
+ * replicated.
+ */
+ oldestXidInCommit = GetOldestActiveTransactionId(true, false);
+ nextFullXid = ReadNextFullTransactionId();
+ fullOldestXidInCommit = FullTransactionIdFromAllowableAt(nextFullXid,
+ oldestXidInCommit);
+ lsn = GetXLogWriteRecPtr();
+
+ elog(DEBUG2, "sending primary status");
+
+ /* construct the message... */
+ resetStringInfo(&output_message);
+ pq_sendbyte(&output_message, 's');
+ pq_sendint64(&output_message, lsn);
+ pq_sendint64(&output_message, (int64) U64FromFullTransactionId(fullOldestXidInCommit));
+ pq_sendint64(&output_message, (int64) U64FromFullTransactionId(nextFullXid));
+ pq_sendint64(&output_message, GetCurrentTimestamp());
+
+ /* ... and send it wrapped in CopyData */
+ pq_putmessage_noblock(PqMsg_CopyData, output_message.data, output_message.len);
+}
+
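FullTransactionIdFromAllowableAt() widens the 32-bit oldest-in-commit XID against nextFullXid; a standalone approximation of that widening, ignoring PostgreSQL's special XIDs and shown purely for illustration:

#include <stdint.h>

/*
 * Widen a 32-bit xid known to be no newer than next_full_xid: keep the
 * current epoch unless the 32-bit counter has already wrapped past xid,
 * in which case the xid belongs to the previous epoch.
 */
static uint64_t
widen_xid_at(uint64_t next_full_xid, uint32_t xid)
{
	uint64_t	epoch = next_full_xid >> 32;

	if (xid > (uint32_t) next_full_xid)
		epoch--;
	return (epoch << 32) | xid;
}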
+/*
* Compute how long send/receive loops should sleep.
*
* If wal_sender_timeout is enabled we want to wake up in time to send
@@ -3246,12 +3307,12 @@ XLogSendPhysical(void)
wal_segment_close(xlogreader);
/* Send CopyDone */
- pq_putmessage_noblock('c', NULL, 0);
+ pq_putmessage_noblock(PqMsg_CopyDone, NULL, 0);
streamingDoneSending = true;
WalSndCaughtUp = true;
- elog(DEBUG1, "walsender reached end of timeline at %X/%X (sent up to %X/%X)",
+ elog(DEBUG1, "walsender reached end of timeline at %X/%08X (sent up to %X/%08X)",
LSN_FORMAT_ARGS(sendTimeLineValidUpto),
LSN_FORMAT_ARGS(sentPtr));
return;
@@ -3374,7 +3435,7 @@ retry:
memcpy(&output_message.data[1 + sizeof(int64) + sizeof(int64)],
tmpbuf.data, sizeof(int64));
- pq_putmessage_noblock('d', output_message.data, output_message.len);
+ pq_putmessage_noblock(PqMsg_CopyData, output_message.data, output_message.len);
sentPtr = endptr;
@@ -3392,7 +3453,7 @@ retry:
{
char activitymsg[50];
- snprintf(activitymsg, sizeof(activitymsg), "streaming %X/%X",
+ snprintf(activitymsg, sizeof(activitymsg), "streaming %X/%08X",
LSN_FORMAT_ARGS(sentPtr));
set_ps_display(activitymsg);
}
@@ -3449,8 +3510,16 @@ XLogSendLogical(void)
if (flushPtr == InvalidXLogRecPtr ||
logical_decoding_ctx->reader->EndRecPtr >= flushPtr)
{
+ /*
+ * For cascading logical WAL senders, we use the replay LSN instead of
+ * the flush LSN, since logical decoding on a standby only processes
+ * WAL that has been replayed. This distinction becomes particularly
+ * important during shutdown, as new WAL is no longer replayed and the
+ * last replayed LSN marks the furthest point up to which decoding can
+ * proceed.
+ */
if (am_cascading_walsender)
- flushPtr = GetStandbyFlushRecPtr(NULL);
+ flushPtr = GetXLogReplayRecPtr(NULL);
else
flushPtr = GetFlushRecPtr(NULL);
}
@@ -4072,7 +4141,7 @@ WalSndKeepalive(bool requestReply, XLogRecPtr writePtr)
pq_sendbyte(&output_message, requestReply ? 1 : 0);
/* ... and send it wrapped in CopyData */
- pq_putmessage_noblock('d', output_message.data, output_message.len);
+ pq_putmessage_noblock(PqMsg_CopyData, output_message.data, output_message.len);
/* Set local flag */
if (requestReply)
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 2ef0e7fbf3a..adc9e7600e1 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -923,8 +923,9 @@ rewriteTargetListIU(List *targetList,
apply_default = true;
/*
- * Can only insert DEFAULT into generated columns, regardless of
- * any OVERRIDING clauses.
+ * Can only insert DEFAULT into generated columns. (The
+ * OVERRIDING clause does not apply to generated columns, so we
+ * don't consider it here.)
*/
if (att_tup->attgenerated && !apply_default)
{
diff --git a/src/backend/statistics/mcv.c b/src/backend/statistics/mcv.c
index d98cda698d9..f59fb821543 100644
--- a/src/backend/statistics/mcv.c
+++ b/src/backend/statistics/mcv.c
@@ -767,7 +767,7 @@ statext_mcv_serialize(MCVList *mcvlist, VacAttrStats **stats)
values[dim][i] = PointerGetDatum(PG_DETOAST_DATUM(values[dim][i]));
/* serialized length (uint32 length + data) */
- len = VARSIZE_ANY_EXHDR(values[dim][i]);
+ len = VARSIZE_ANY_EXHDR(DatumGetPointer(values[dim][i]));
info[dim].nbytes += sizeof(uint32); /* length */
info[dim].nbytes += len; /* value (no header) */
diff --git a/src/backend/storage/aio/README.md b/src/backend/storage/aio/README.md
index f10b5c7e31e..72ae3b3737d 100644
--- a/src/backend/storage/aio/README.md
+++ b/src/backend/storage/aio/README.md
@@ -94,7 +94,7 @@ pgaio_io_register_callbacks(ioh, PGAIO_HCB_SHARED_BUFFER_READV, 0);
*
* In this example we're reading only a single buffer, hence the 1.
*/
-pgaio_io_set_handle_data_32(ioh, (uint32 *) buffer, 1);
+pgaio_io_set_handle_data_32(ioh, (uint32 *) &buffer, 1);
/*
* Pass the AIO handle to lower-level function. When operating on the level of
@@ -119,8 +119,9 @@ pgaio_io_set_handle_data_32(ioh, (uint32 *) buffer, 1);
* e.g. due to reaching a limit on the number of unsubmitted IOs, and even
* complete before smgrstartreadv() returns.
*/
+void *page = BufferGetBlock(buffer);
smgrstartreadv(ioh, operation->smgr, forknum, blkno,
- BufferGetBlock(buffer), 1);
+ &page, 1);
/*
* To benefit from AIO, it is beneficial to perform other work, including
diff --git a/src/backend/storage/aio/aio.c b/src/backend/storage/aio/aio.c
index c64d815ebd1..3643f27ad6e 100644
--- a/src/backend/storage/aio/aio.c
+++ b/src/backend/storage/aio/aio.c
@@ -556,6 +556,13 @@ bool
pgaio_io_was_recycled(PgAioHandle *ioh, uint64 ref_generation, PgAioHandleState *state)
{
*state = ioh->state;
+
+ /*
+ * Ensure that we don't see an earlier state of the handle than ioh->state
+ * due to compiler or CPU reordering. This protects both ->generation as
+ * directly used here, and other fields in the handle accessed in the
+ * caller if the handle was not reused.
+ */
pg_read_barrier();
return ioh->generation != ref_generation;
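
A minimal sketch of the caller-side pattern this barrier enables (the helper name is hypothetical; the types and enum values are those of the AIO subsystem):

static bool
io_completed_for_us(PgAioHandle *ioh, uint64 ref_generation)
{
	PgAioHandleState state;

	/* state is read before the generation re-check, see barrier above */
	if (pgaio_io_was_recycled(ioh, ref_generation, &state))
		return false;			/* reused for another IO; stop looking */
	return state >= PGAIO_HS_COMPLETED_SHARED;
}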
@@ -752,7 +759,7 @@ pgaio_io_wait_for_free(void)
{
int reclaimed = 0;
- pgaio_debug(DEBUG2, "waiting for free IO with %d pending, %d in-flight, %d idle IOs",
+ pgaio_debug(DEBUG2, "waiting for free IO with %d pending, %u in-flight, %u idle IOs",
pgaio_my_backend->num_staged_ios,
dclist_count(&pgaio_my_backend->in_flight_ios),
dclist_count(&pgaio_my_backend->idle_ios));
@@ -773,7 +780,12 @@ pgaio_io_wait_for_free(void)
* Note that no interrupts are processed between the state check
* and the call to reclaim - that's important as otherwise an
* interrupt could have already reclaimed the handle.
+ *
+ * Need to ensure that there's no reordering; in the more common
+ * paths, where we wait for IO, that's done by
+ * pgaio_io_was_recycled().
*/
+ pg_read_barrier();
pgaio_io_reclaim(ioh);
reclaimed++;
}
@@ -797,7 +809,7 @@ pgaio_io_wait_for_free(void)
if (dclist_count(&pgaio_my_backend->in_flight_ios) == 0)
ereport(ERROR,
errmsg_internal("no free IOs despite no in-flight IOs"),
- errdetail_internal("%d pending, %d in-flight, %d idle IOs",
+ errdetail_internal("%d pending, %u in-flight, %u idle IOs",
pgaio_my_backend->num_staged_ios,
dclist_count(&pgaio_my_backend->in_flight_ios),
dclist_count(&pgaio_my_backend->idle_ios)));
@@ -828,7 +840,7 @@ pgaio_io_wait_for_free(void)
case PGAIO_HS_COMPLETED_IO:
case PGAIO_HS_SUBMITTED:
pgaio_debug_io(DEBUG2, ioh,
- "waiting for free io with %d in flight",
+ "waiting for free io with %u in flight",
dclist_count(&pgaio_my_backend->in_flight_ios));
/*
@@ -852,7 +864,12 @@ pgaio_io_wait_for_free(void)
* check and the call to reclaim - that's important as
* otherwise an interrupt could have already reclaimed the
* handle.
+ *
+ * Need to ensure that there's no reordering; in the more
+ * common paths, where we wait for IO, that's done by
+ * pgaio_io_was_recycled().
*/
+ pg_read_barrier();
pgaio_io_reclaim(ioh);
break;
}
@@ -1252,7 +1269,7 @@ pgaio_closing_fd(int fd)
break;
pgaio_debug_io(DEBUG2, ioh,
- "waiting for IO before FD %d gets closed, %d in-flight IOs",
+ "waiting for IO before FD %d gets closed, %u in-flight IOs",
fd, dclist_count(&pgaio_my_backend->in_flight_ios));
/* see comment in pgaio_io_wait_for_free() about raciness */
@@ -1288,7 +1305,7 @@ pgaio_shutdown(int code, Datum arg)
uint64 generation = ioh->generation;
pgaio_debug_io(DEBUG2, ioh,
- "waiting for IO to complete during shutdown, %d in-flight IOs",
+ "waiting for IO to complete during shutdown, %u in-flight IOs",
dclist_count(&pgaio_my_backend->in_flight_ios));
/* see comment in pgaio_io_wait_for_free() about raciness */
diff --git a/src/backend/storage/aio/aio_callback.c b/src/backend/storage/aio/aio_callback.c
index 0ad9795bb7e..03c9bba0802 100644
--- a/src/backend/storage/aio/aio_callback.c
+++ b/src/backend/storage/aio/aio_callback.c
@@ -256,6 +256,9 @@ pgaio_io_call_complete_shared(PgAioHandle *ioh)
pgaio_result_status_string(result.status),
result.id, result.error_data, result.result);
result = ce->cb->complete_shared(ioh, result, cb_data);
+
+ /* the callback should never transition to unknown */
+ Assert(result.status != PGAIO_RS_UNKNOWN);
}
ioh->distilled_result = result;
@@ -290,6 +293,7 @@ pgaio_io_call_complete_local(PgAioHandle *ioh)
/* start with distilled result from shared callback */
result = ioh->distilled_result;
+ Assert(result.status != PGAIO_RS_UNKNOWN);
for (int i = ioh->num_callbacks; i > 0; i--)
{
@@ -306,6 +310,9 @@ pgaio_io_call_complete_local(PgAioHandle *ioh)
pgaio_result_status_string(result.status),
result.id, result.error_data, result.result);
result = ce->cb->complete_local(ioh, result, cb_data);
+
+ /* the callback should never transition to unknown */
+ Assert(result.status != PGAIO_RS_UNKNOWN);
}
/*
diff --git a/src/backend/storage/aio/method_io_uring.c b/src/backend/storage/aio/method_io_uring.c
index c719ba2727a..0a8c054162f 100644
--- a/src/backend/storage/aio/method_io_uring.c
+++ b/src/backend/storage/aio/method_io_uring.c
@@ -29,6 +29,9 @@
#ifdef IOMETHOD_IO_URING_ENABLED
+#include <sys/mman.h>
+#include <unistd.h>
+
#include <liburing.h>
#include "miscadmin.h"
@@ -94,12 +97,32 @@ PgAioUringContext
struct io_uring io_uring_ring;
} PgAioUringContext;
+/*
+ * Information about the capabilities that io_uring has.
+ *
+ * Depending on the liburing and kernel versions, different features are
+ * supported. At least for the kernel, a version check does not suffice,
+ * as various vendors backport features to older kernels :(.
+ */
+typedef struct PgAioUringCaps
+{
+ bool checked;
+ /* -1 if io_uring_queue_init_mem() is unsupported */
+ int mem_init_size;
+} PgAioUringCaps;
+
+
/* PgAioUringContexts for all backends */
static PgAioUringContext *pgaio_uring_contexts;
/* the current backend's context */
static PgAioUringContext *pgaio_my_uring_context;
+static PgAioUringCaps pgaio_uring_caps =
+{
+ .checked = false,
+ .mem_init_size = -1,
+};
static uint32
pgaio_uring_procs(void)
@@ -111,30 +134,184 @@ pgaio_uring_procs(void)
return MaxBackends + NUM_AUXILIARY_PROCS - MAX_IO_WORKERS;
}
-static Size
+/*
+ * Initializes pgaio_uring_caps, unless that's already done.
+ */
+static void
+pgaio_uring_check_capabilities(void)
+{
+ if (pgaio_uring_caps.checked)
+ return;
+
+ /*
+ * By default io_uring creates a shared memory mapping for each io_uring
+ * instance, leading to a large number of memory mappings. Unfortunately a
+ * large number of memory mappings slows things down; backend exit is
+ * particularly affected. To address that, newer kernels (6.5) support
+ * using user-provided memory for the rings; by putting that memory
+ * into shared memory we don't need any additional mappings.
+ *
+ * To know whether this is supported, we unfortunately need to probe the
+ * kernel by trying to create a ring with userspace-provided memory. This
+ * also has a secondary benefit: We can determine precisely how much
+ * memory we need for each io_uring instance.
+ */
+#if defined(HAVE_LIBURING_QUEUE_INIT_MEM) && defined(IORING_SETUP_NO_MMAP)
+ {
+ struct io_uring test_ring;
+ size_t ring_size;
+ void *ring_ptr;
+ struct io_uring_params p = {0};
+ int ret;
+
+ /*
+ * Liburing does not yet provide an API to query how much memory a
+ * ring will need, so we over-estimate it here. As the memory is freed
+ * again just below, that's only a small, temporary waste of memory.
+ *
+ * 1MB is more than enough for rings within io_max_concurrency's
+ * range.
+ */
+ ring_size = 1024 * 1024;
+
+ /*
+ * Hard to believe a system exists where 1MB would not be a multiple
+ * of the page size. But it's cheap to ensure...
+ */
+ ring_size -= ring_size % sysconf(_SC_PAGESIZE);
+
+ ring_ptr = mmap(NULL, ring_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ if (ring_ptr == MAP_FAILED)
+ elog(ERROR,
+ "mmap(%zu) to determine io_uring_queue_init_mem() support failed: %m",
+ ring_size);
+
+ ret = io_uring_queue_init_mem(io_max_concurrency, &test_ring, &p, ring_ptr, ring_size);
+ if (ret > 0)
+ {
+ pgaio_uring_caps.mem_init_size = ret;
+
+ elog(DEBUG1,
+ "can use combined memory mapping for io_uring, each ring needs %d bytes",
+ ret);
+
+ /* clean up the created ring, it was just for a test */
+ io_uring_queue_exit(&test_ring);
+ }
+ else
+ {
+ /*
+ * There are different reasons for ring creation to fail, but it's
+ * ok to treat that just as io_uring_queue_init_mem() not being
+ * supported. We'll report a more detailed error in
+ * pgaio_uring_shmem_init().
+ */
+ errno = -ret;
+ elog(DEBUG1,
+ "cannot use combined memory mapping for io_uring, ring creation failed: %m");
+ }
+
+ if (munmap(ring_ptr, ring_size) != 0)
+ elog(ERROR, "munmap() failed: %m");
+ }
+#else
+ {
+ elog(DEBUG1,
+ "can't use combined memory mapping for io_uring, kernel or liburing too old");
+ }
+#endif
+
+ pgaio_uring_caps.checked = true;
+}
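
Stripped of logging and error handling, the probe boils down to the following sketch (assuming HAVE_LIBURING_QUEUE_INIT_MEM; returns the per-ring byte count, or -1 if unsupported):

#include <liburing.h>
#include <sys/mman.h>
#include <unistd.h>

static int
probe_ring_mem_size(unsigned entries)
{
	struct io_uring ring;
	struct io_uring_params p = {0};
	size_t		sz = 1024 * 1024;	/* generous over-estimate */
	void	   *mem;
	int			ret;

	sz -= sz % sysconf(_SC_PAGESIZE);
	mem = mmap(NULL, sz, PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (mem == MAP_FAILED)
		return -1;
	ret = io_uring_queue_init_mem(entries, &ring, &p, mem, sz);
	if (ret > 0)
		io_uring_queue_exit(&ring);	/* probe only; tear the ring down */
	munmap(mem, sz);
	return ret > 0 ? ret : -1;
}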
+
+/*
+ * Size of the memory needed for all PgAioUringContext instances.
+ */
+static size_t
pgaio_uring_context_shmem_size(void)
{
return mul_size(pgaio_uring_procs(), sizeof(PgAioUringContext));
}
+/*
+ * Size of the combined memory used by io_uring instances. Returns 0 if
+ * that is not supported by the kernel/liburing.
+ */
+static size_t
+pgaio_uring_ring_shmem_size(void)
+{
+ size_t sz = 0;
+
+ if (pgaio_uring_caps.mem_init_size > 0)
+ {
+ /*
+ * Memory for the rings needs to be allocated at a page boundary, so
+ * reserve space for that. Luckily it does not need to be aligned to hugepage
+ * boundaries, even if huge pages are used.
+ */
+ sz = add_size(sz, sysconf(_SC_PAGESIZE));
+ sz = add_size(sz, mul_size(pgaio_uring_procs(),
+ pgaio_uring_caps.mem_init_size));
+ }
+
+ return sz;
+}
+
static size_t
pgaio_uring_shmem_size(void)
{
- return pgaio_uring_context_shmem_size();
+ size_t sz;
+
+ /*
+ * Kernel and liburing support for various features influences how much
+ * shmem we need, so perform the necessary checks.
+ */
+ pgaio_uring_check_capabilities();
+
+ sz = pgaio_uring_context_shmem_size();
+ sz = add_size(sz, pgaio_uring_ring_shmem_size());
+
+ return sz;
}
static void
pgaio_uring_shmem_init(bool first_time)
{
- int TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS - MAX_IO_WORKERS;
+ int TotalProcs = pgaio_uring_procs();
bool found;
+ char *shmem;
+ size_t ring_mem_remain = 0;
+ char *ring_mem_next = 0;
- pgaio_uring_contexts = (PgAioUringContext *)
- ShmemInitStruct("AioUring", pgaio_uring_shmem_size(), &found);
-
+ /*
+ * We allocate memory for all PgAioUringContext instances and, if
+ * supported, the memory required for each of the io_uring instances, in
+ * one ShmemInitStruct().
+ */
+ shmem = ShmemInitStruct("AioUringContext", pgaio_uring_shmem_size(), &found);
if (found)
return;
+ pgaio_uring_contexts = (PgAioUringContext *) shmem;
+ shmem += pgaio_uring_context_shmem_size();
+
+ /* if supported, handle memory alignment / sizing for io_uring memory */
+ if (pgaio_uring_caps.mem_init_size > 0)
+ {
+ ring_mem_remain = pgaio_uring_ring_shmem_size();
+ ring_mem_next = (char *) shmem;
+
+ /* align to page boundary, see also pgaio_uring_ring_shmem_size() */
+ ring_mem_next = (char *) TYPEALIGN(sysconf(_SC_PAGESIZE), ring_mem_next);
+
+ /* account for alignment */
+ ring_mem_remain -= ring_mem_next - shmem;
+ shmem += ring_mem_next - shmem;
+
+ shmem += ring_mem_remain;
+ }
+
for (int contextno = 0; contextno < TotalProcs; contextno++)
{
PgAioUringContext *context = &pgaio_uring_contexts[contextno];
@@ -158,7 +335,28 @@ pgaio_uring_shmem_init(bool first_time)
* be worth using that - also need to evaluate if that causes
* noticeable additional contention?
*/
- ret = io_uring_queue_init(io_max_concurrency, &context->io_uring_ring, 0);
+
+ /*
+ * If supported (cf. pgaio_uring_check_capabilities()), create the
+ * ring with its data in shared memory. Otherwise fall back to letting
+ * io_uring create a memory mapping for each ring.
+ */
+#if defined(HAVE_LIBURING_QUEUE_INIT_MEM) && defined(IORING_SETUP_NO_MMAP)
+ if (pgaio_uring_caps.mem_init_size > 0)
+ {
+ struct io_uring_params p = {0};
+
+ ret = io_uring_queue_init_mem(io_max_concurrency, &context->io_uring_ring, &p, ring_mem_next, ring_mem_remain);
+
+ ring_mem_remain -= ret;
+ ring_mem_next += ret;
+ }
+ else
+#endif
+ {
+ ret = io_uring_queue_init(io_max_concurrency, &context->io_uring_ring, 0);
+ }
+
if (ret < 0)
{
char *hint = NULL;
@@ -400,9 +598,9 @@ pgaio_uring_wait_one(PgAioHandle *ioh, uint64 ref_generation)
while (true)
{
pgaio_debug_io(DEBUG3, ioh,
- "wait_one io_gen: %llu, ref_gen: %llu, cycle %d",
- (long long unsigned) ioh->generation,
- (long long unsigned) ref_generation,
+ "wait_one io_gen: %" PRIu64 ", ref_gen: %" PRIu64 ", cycle %d",
+ ioh->generation,
+ ref_generation,
waited);
if (pgaio_io_was_recycled(ioh, ref_generation, &state) ||
diff --git a/src/backend/storage/aio/method_worker.c b/src/backend/storage/aio/method_worker.c
index 743cccc2acd..bf8f77e6ff6 100644
--- a/src/backend/storage/aio/method_worker.c
+++ b/src/backend/storage/aio/method_worker.c
@@ -52,26 +52,26 @@
#define IO_WORKER_WAKEUP_FANOUT 2
-typedef struct AioWorkerSubmissionQueue
+typedef struct PgAioWorkerSubmissionQueue
{
uint32 size;
uint32 mask;
uint32 head;
uint32 tail;
- uint32 ios[FLEXIBLE_ARRAY_MEMBER];
-} AioWorkerSubmissionQueue;
+ uint32 sqes[FLEXIBLE_ARRAY_MEMBER];
+} PgAioWorkerSubmissionQueue;
-typedef struct AioWorkerSlot
+typedef struct PgAioWorkerSlot
{
Latch *latch;
bool in_use;
-} AioWorkerSlot;
+} PgAioWorkerSlot;
-typedef struct AioWorkerControl
+typedef struct PgAioWorkerControl
{
uint64 idle_worker_mask;
- AioWorkerSlot workers[FLEXIBLE_ARRAY_MEMBER];
-} AioWorkerControl;
+ PgAioWorkerSlot workers[FLEXIBLE_ARRAY_MEMBER];
+} PgAioWorkerControl;
static size_t pgaio_worker_shmem_size(void);
@@ -96,8 +96,8 @@ int io_workers = 3;
static int io_worker_queue_size = 64;
static int MyIoWorkerId;
-static AioWorkerSubmissionQueue *io_worker_submission_queue;
-static AioWorkerControl *io_worker_control;
+static PgAioWorkerSubmissionQueue *io_worker_submission_queue;
+static PgAioWorkerControl *io_worker_control;
static size_t
@@ -106,15 +106,15 @@ pgaio_worker_queue_shmem_size(int *queue_size)
/* Round size up to next power of two so we can make a mask. */
*queue_size = pg_nextpower2_32(io_worker_queue_size);
- return offsetof(AioWorkerSubmissionQueue, ios) +
+ return offsetof(PgAioWorkerSubmissionQueue, sqes) +
sizeof(uint32) * *queue_size;
}
static size_t
pgaio_worker_control_shmem_size(void)
{
- return offsetof(AioWorkerControl, workers) +
- sizeof(AioWorkerSlot) * MAX_IO_WORKERS;
+ return offsetof(PgAioWorkerControl, workers) +
+ sizeof(PgAioWorkerSlot) * MAX_IO_WORKERS;
}
static size_t
@@ -162,7 +162,7 @@ pgaio_worker_shmem_init(bool first_time)
}
static int
-pgaio_choose_idle_worker(void)
+pgaio_worker_choose_idle(void)
{
int worker;
@@ -172,6 +172,7 @@ pgaio_choose_idle_worker(void)
/* Find the lowest bit position, and clear it. */
worker = pg_rightmost_one_pos64(io_worker_control->idle_worker_mask);
io_worker_control->idle_worker_mask &= ~(UINT64_C(1) << worker);
+ Assert(io_worker_control->workers[worker].in_use);
return worker;
}
@@ -179,7 +180,7 @@ pgaio_choose_idle_worker(void)
static bool
pgaio_worker_submission_queue_insert(PgAioHandle *ioh)
{
- AioWorkerSubmissionQueue *queue;
+ PgAioWorkerSubmissionQueue *queue;
uint32 new_head;
queue = io_worker_submission_queue;
@@ -191,7 +192,7 @@ pgaio_worker_submission_queue_insert(PgAioHandle *ioh)
return false; /* full */
}
- queue->ios[queue->head] = pgaio_io_get_id(ioh);
+ queue->sqes[queue->head] = pgaio_io_get_id(ioh);
queue->head = new_head;
return true;
@@ -200,14 +201,14 @@ pgaio_worker_submission_queue_insert(PgAioHandle *ioh)
static uint32
pgaio_worker_submission_queue_consume(void)
{
- AioWorkerSubmissionQueue *queue;
+ PgAioWorkerSubmissionQueue *queue;
uint32 result;
queue = io_worker_submission_queue;
if (queue->tail == queue->head)
return UINT32_MAX; /* empty */
- result = queue->ios[queue->tail];
+ result = queue->sqes[queue->tail];
queue->tail = (queue->tail + 1) & (queue->size - 1);
return result;
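
For reference, a self-contained sketch of the power-of-two ring arithmetic used by the insert/consume pair above (size must be a power of two; one slot stays unused so that head == tail unambiguously means empty):

#include <stdbool.h>
#include <stdint.h>

typedef struct
{
	uint32_t	size;			/* power of two */
	uint32_t	head;
	uint32_t	tail;
	uint32_t	sqes[8];
} Ring;

static bool
ring_insert(Ring *q, uint32_t id)
{
	uint32_t	new_head = (q->head + 1) & (q->size - 1);

	if (new_head == q->tail)
		return false;			/* full */
	q->sqes[q->head] = id;
	q->head = new_head;
	return true;
}

static uint32_t
ring_consume(Ring *q)
{
	uint32_t	result;

	if (q->tail == q->head)
		return UINT32_MAX;		/* empty */
	result = q->sqes[q->tail];
	q->tail = (q->tail + 1) & (q->size - 1);
	return result;
}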
@@ -240,37 +241,37 @@ pgaio_worker_needs_synchronous_execution(PgAioHandle *ioh)
}
static void
-pgaio_worker_submit_internal(int nios, PgAioHandle *ios[])
+pgaio_worker_submit_internal(int num_staged_ios, PgAioHandle **staged_ios)
{
PgAioHandle *synchronous_ios[PGAIO_SUBMIT_BATCH_SIZE];
int nsync = 0;
Latch *wakeup = NULL;
int worker;
- Assert(nios <= PGAIO_SUBMIT_BATCH_SIZE);
+ Assert(num_staged_ios <= PGAIO_SUBMIT_BATCH_SIZE);
LWLockAcquire(AioWorkerSubmissionQueueLock, LW_EXCLUSIVE);
- for (int i = 0; i < nios; ++i)
+ for (int i = 0; i < num_staged_ios; ++i)
{
- Assert(!pgaio_worker_needs_synchronous_execution(ios[i]));
- if (!pgaio_worker_submission_queue_insert(ios[i]))
+ Assert(!pgaio_worker_needs_synchronous_execution(staged_ios[i]));
+ if (!pgaio_worker_submission_queue_insert(staged_ios[i]))
{
/*
* We'll do it synchronously, but only after we've sent as many as
* we can to workers, to maximize concurrency.
*/
- synchronous_ios[nsync++] = ios[i];
+ synchronous_ios[nsync++] = staged_ios[i];
continue;
}
if (wakeup == NULL)
{
/* Choose an idle worker to wake up if we haven't already. */
- worker = pgaio_choose_idle_worker();
+ worker = pgaio_worker_choose_idle();
if (worker >= 0)
wakeup = io_worker_control->workers[worker].latch;
- pgaio_debug_io(DEBUG4, ios[i],
+ pgaio_debug_io(DEBUG4, staged_ios[i],
"choosing worker %d",
worker);
}
@@ -316,6 +317,7 @@ pgaio_worker_die(int code, Datum arg)
Assert(io_worker_control->workers[MyIoWorkerId].in_use);
Assert(io_worker_control->workers[MyIoWorkerId].latch == MyLatch);
+ io_worker_control->idle_worker_mask &= ~(UINT64_C(1) << MyIoWorkerId);
io_worker_control->workers[MyIoWorkerId].in_use = false;
io_worker_control->workers[MyIoWorkerId].latch = NULL;
LWLockRelease(AioWorkerSubmissionQueueLock);
@@ -461,7 +463,12 @@ IoWorkerMain(const void *startup_data, size_t startup_data_len)
int nwakeups = 0;
int worker;
- /* Try to get a job to do. */
+ /*
+ * Try to get a job to do.
+ *
+ * The lwlock acquisition also provides the necessary memory barrier
+ * to ensure that we don't see outdated data in the handle.
+ */
LWLockAcquire(AioWorkerSubmissionQueueLock, LW_EXCLUSIVE);
if ((io_index = pgaio_worker_submission_queue_consume()) == UINT32_MAX)
{
@@ -483,7 +490,7 @@ IoWorkerMain(const void *startup_data, size_t startup_data_len)
IO_WORKER_WAKEUP_FANOUT);
for (int i = 0; i < nwakeups; ++i)
{
- if ((worker = pgaio_choose_idle_worker()) < 0)
+ if ((worker = pgaio_worker_choose_idle()) < 0)
break;
latches[nlatches++] = io_worker_control->workers[worker].latch;
}
@@ -568,6 +575,12 @@ IoWorkerMain(const void *startup_data, size_t startup_data_len)
}
CHECK_FOR_INTERRUPTS();
+
+ if (ConfigReloadPending)
+ {
+ ConfigReloadPending = false;
+ ProcessConfigFile(PGC_SIGHUP);
+ }
}
error_context_stack = errcallback.previous;
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index f93131a645e..67431208e7f 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -2743,12 +2743,10 @@ ExtendBufferedRelShared(BufferManagerRelation bmr,
* because mdread doesn't complain about reads beyond EOF (when
* zero_damaged_pages is ON) and so a previous attempt to read a block
* beyond EOF could have left a "valid" zero-filled buffer.
- * Unfortunately, we have also seen this case occurring because of
- * buggy Linux kernels that sometimes return an lseek(SEEK_END) result
- * that doesn't account for a recent write. In that situation, the
- * pre-existing buffer would contain valid data that we don't want to
- * overwrite. Since the legitimate cases should always have left a
- * zero-filled buffer, complain if not PageIsNew.
+ *
+ * This has also been observed when the relation was overwritten by an
+ * external process. Since the legitimate cases should always have
+ * left a zero-filled buffer, complain if not PageIsNew.
*/
if (existing_id >= 0)
{
@@ -2778,8 +2776,7 @@ ExtendBufferedRelShared(BufferManagerRelation bmr,
ereport(ERROR,
(errmsg("unexpected data beyond EOF in block %u of relation %s",
existing_hdr->tag.blockNum,
- relpath(bmr.smgr->smgr_rlocator, fork).str),
- errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
+ relpath(bmr.smgr->smgr_rlocator, fork).str)));
/*
* We *must* do smgr[zero]extend before succeeding, else the page
@@ -3339,10 +3336,10 @@ UnpinBufferNoOwner(BufferDesc *buf)
* BufferSync -- Write out all dirty buffers in the pool.
*
* This is called at checkpoint time to write out all dirty shared buffers.
- * The checkpoint request flags should be passed in. If CHECKPOINT_IMMEDIATE
- * is set, we disable delays between writes; if CHECKPOINT_IS_SHUTDOWN,
- * CHECKPOINT_END_OF_RECOVERY or CHECKPOINT_FLUSH_ALL is set, we write even
- * unlogged buffers, which are otherwise skipped. The remaining flags
+ * The checkpoint request flags should be passed in. If CHECKPOINT_FAST is
+ * set, we disable delays between writes; if CHECKPOINT_IS_SHUTDOWN,
+ * CHECKPOINT_END_OF_RECOVERY or CHECKPOINT_FLUSH_UNLOGGED is set, we write
+ * even unlogged buffers, which are otherwise skipped. The remaining flags
* currently have no effect here.
*/
static void
@@ -3367,7 +3364,7 @@ BufferSync(int flags)
* recovery, we write all dirty buffers.
*/
if (!((flags & (CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_END_OF_RECOVERY |
- CHECKPOINT_FLUSH_ALL))))
+ CHECKPOINT_FLUSH_UNLOGGED))))
mask |= BM_PERMANENT;
/*
@@ -4550,11 +4547,9 @@ DropRelationBuffers(SMgrRelation smgr_reln, ForkNumber *forkNum,
if (RelFileLocatorBackendIsTemp(rlocator))
{
if (rlocator.backend == MyProcNumber)
- {
- for (j = 0; j < nforks; j++)
- DropRelationLocalBuffers(rlocator.locator, forkNum[j],
- firstDelBlock[j]);
- }
+ DropRelationLocalBuffers(rlocator.locator, forkNum, nforks,
+ firstDelBlock);
+
return;
}
@@ -7320,7 +7315,7 @@ buffer_readv_report(PgAioResult result, const PgAioTargetData *td,
affected_count > 1 ?
errdetail("Block %u held first zeroed page.",
first + first_off) : 0,
- errhint("See server log for details about the other %u invalid block(s).",
+ errhint("See server log for details about the other %d invalid block(s).",
affected_count + checkfail_count - 1));
return;
}
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 63101d56a07..3c0d20f4659 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -629,7 +629,7 @@ InvalidateLocalBuffer(BufferDesc *bufHdr, bool check_unreferenced)
*/
if (check_unreferenced &&
(LocalRefCount[bufid] != 0 || BUF_STATE_GET_REFCOUNT(buf_state) != 0))
- elog(ERROR, "block %u of %s is still referenced (local %u)",
+ elog(ERROR, "block %u of %s is still referenced (local %d)",
bufHdr->tag.blockNum,
relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
MyProcNumber,
@@ -660,10 +660,11 @@ InvalidateLocalBuffer(BufferDesc *bufHdr, bool check_unreferenced)
* See DropRelationBuffers in bufmgr.c for more notes.
*/
void
-DropRelationLocalBuffers(RelFileLocator rlocator, ForkNumber forkNum,
- BlockNumber firstDelBlock)
+DropRelationLocalBuffers(RelFileLocator rlocator, ForkNumber *forkNum,
+ int nforks, BlockNumber *firstDelBlock)
{
int i;
+ int j;
for (i = 0; i < NLocBuffer; i++)
{
@@ -672,12 +673,18 @@ DropRelationLocalBuffers(RelFileLocator rlocator, ForkNumber forkNum,
buf_state = pg_atomic_read_u32(&bufHdr->state);
- if ((buf_state & BM_TAG_VALID) &&
- BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator) &&
- BufTagGetForkNum(&bufHdr->tag) == forkNum &&
- bufHdr->tag.blockNum >= firstDelBlock)
+ if (!(buf_state & BM_TAG_VALID) ||
+ !BufTagMatchesRelFileLocator(&bufHdr->tag, &rlocator))
+ continue;
+
+ for (j = 0; j < nforks; j++)
{
- InvalidateLocalBuffer(bufHdr, true);
+ if (BufTagGetForkNum(&bufHdr->tag) == forkNum[j] &&
+ bufHdr->tag.blockNum >= firstDelBlock[j])
+ {
+ InvalidateLocalBuffer(bufHdr, true);
+ break;
+ }
}
}
}
@@ -925,10 +932,11 @@ GetLocalBufferStorage(void)
num_bufs = Min(num_bufs, MaxAllocSize / BLCKSZ);
/* Buffers should be I/O aligned. */
- cur_block = (char *)
- TYPEALIGN(PG_IO_ALIGN_SIZE,
- MemoryContextAlloc(LocalBufferContext,
- num_bufs * BLCKSZ + PG_IO_ALIGN_SIZE));
+ cur_block = MemoryContextAllocAligned(LocalBufferContext,
+ num_bufs * BLCKSZ,
+ PG_IO_ALIGN_SIZE,
+ 0);
+
next_buf_in_block = 0;
num_bufs_in_block = num_bufs;
}
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 0e8299dd556..a4ec7959f31 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -400,25 +400,22 @@ pg_fsync(int fd)
* portable, even if it runs ok on the current system.
*
* We assert here that a descriptor for a file was opened with write
- * permissions (either O_RDWR or O_WRONLY) and for a directory without
- * write permissions (O_RDONLY).
+ * permissions (i.e., not O_RDONLY) and for a directory without write
+ * permissions (O_RDONLY). Notice that the assertion check is made even
+ * if fsync() is disabled.
*
- * Ignore any fstat errors and let the follow-up fsync() do its work.
- * Doing this sanity check here counts for the case where fsync() is
- * disabled.
+ * If fstat() fails, ignore it and let the follow-up fsync() complain.
*/
if (fstat(fd, &st) == 0)
{
int desc_flags = fcntl(fd, F_GETFL);
- /*
- * O_RDONLY is historically 0, so just make sure that for directories
- * no write flags are used.
- */
+ desc_flags &= O_ACCMODE;
+
if (S_ISDIR(st.st_mode))
- Assert((desc_flags & (O_RDWR | O_WRONLY)) == 0);
+ Assert(desc_flags == O_RDONLY);
else
- Assert((desc_flags & (O_RDWR | O_WRONLY)) != 0);
+ Assert(desc_flags != O_RDONLY);
}
errno = 0;
#endif
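
The tightened assertions rely on O_ACCMODE reducing the fcntl() flags to exactly one of O_RDONLY, O_WRONLY or O_RDWR; a minimal sketch of that classification:

#include <fcntl.h>
#include <stdbool.h>

static bool
fd_is_writable(int fd)
{
	int			flags = fcntl(fd, F_GETFL);

	return flags >= 0 && (flags & O_ACCMODE) != O_RDONLY;
}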
diff --git a/src/backend/storage/file/fileset.c b/src/backend/storage/file/fileset.c
index 64141c7cb91..4d5ee353fd7 100644
--- a/src/backend/storage/file/fileset.c
+++ b/src/backend/storage/file/fileset.c
@@ -185,7 +185,7 @@ FileSetPath(char *path, FileSet *fileset, Oid tablespace)
static Oid
ChooseTablespace(const FileSet *fileset, const char *name)
{
- uint32 hash = hash_any((const unsigned char *) name, strlen(name));
+ uint32 hash = hash_bytes((const unsigned char *) name, strlen(name));
return fileset->tablespaces[hash % fileset->ntablespaces];
}
diff --git a/src/backend/storage/ipc/dsm_registry.c b/src/backend/storage/ipc/dsm_registry.c
index 1d4fd31ffed..1682cc6d34c 100644
--- a/src/backend/storage/ipc/dsm_registry.c
+++ b/src/backend/storage/ipc/dsm_registry.c
@@ -15,6 +15,20 @@
* current backend. This function guarantees that only one backend
* initializes the segment and that all other backends just attach it.
*
+ * A DSA can be created in or retrieved from the registry by calling
+ * GetNamedDSA(). As with GetNamedDSMSegment(), if a DSA with the provided
+ * name does not yet exist, it is created. Otherwise, GetNamedDSA()
+ * ensures the DSA is attached to the current backend. This function
+ * guarantees that only one backend initializes the DSA and that all other
+ * backends just attach it.
+ *
+ * A dshash table can be created in or retrieved from the registry by
+ * calling GetNamedDSHash(). As with GetNamedDSMSegment(), if a hash
+ * table with the provided name does not yet exist, it is created.
+ * Otherwise, GetNamedDSHash() ensures the hash table is attached to the
+ * current backend. This function guarantees that only one backend
+ * initializes the table and that all other backends just attach it.
+ *
* Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
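
For context, a minimal extension-side use of the segment API described above (the struct and all names are hypothetical):

typedef struct MySharedState
{
	int			counter;
} MySharedState;

static void
my_init(void *ptr)
{
	((MySharedState *) ptr)->counter = 0;
}

static MySharedState *
attach_my_state(void)
{
	bool		found;

	/* first caller creates and runs my_init(); later callers attach */
	return (MySharedState *) GetNamedDSMSegment("my_extension",
												sizeof(MySharedState),
												my_init, &found);
}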
@@ -26,12 +40,20 @@
#include "postgres.h"
+#include "funcapi.h"
#include "lib/dshash.h"
#include "storage/dsm_registry.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"
+#include "utils/builtins.h"
#include "utils/memutils.h"
+#define DSMR_NAME_LEN 128
+
+#define DSMR_DSA_TRANCHE_SUFFIX " DSA"
+#define DSMR_DSA_TRANCHE_SUFFIX_LEN (sizeof(DSMR_DSA_TRANCHE_SUFFIX) - 1)
+#define DSMR_DSA_TRANCHE_NAME_LEN (DSMR_NAME_LEN + DSMR_DSA_TRANCHE_SUFFIX_LEN)
+
typedef struct DSMRegistryCtxStruct
{
dsa_handle dsah;
@@ -40,15 +62,55 @@ typedef struct DSMRegistryCtxStruct
static DSMRegistryCtxStruct *DSMRegistryCtx;
-typedef struct DSMRegistryEntry
+typedef struct NamedDSMState
{
- char name[64];
dsm_handle handle;
size_t size;
+} NamedDSMState;
+
+typedef struct NamedDSAState
+{
+ dsa_handle handle;
+ int tranche;
+ char tranche_name[DSMR_DSA_TRANCHE_NAME_LEN];
+} NamedDSAState;
+
+typedef struct NamedDSHState
+{
+ NamedDSAState dsa;
+ dshash_table_handle handle;
+ int tranche;
+ char tranche_name[DSMR_NAME_LEN];
+} NamedDSHState;
+
+typedef enum DSMREntryType
+{
+ DSMR_ENTRY_TYPE_DSM,
+ DSMR_ENTRY_TYPE_DSA,
+ DSMR_ENTRY_TYPE_DSH,
+} DSMREntryType;
+
+static const char *const DSMREntryTypeNames[] =
+{
+ [DSMR_ENTRY_TYPE_DSM] = "segment",
+ [DSMR_ENTRY_TYPE_DSA] = "area",
+ [DSMR_ENTRY_TYPE_DSH] = "hash",
+};
+
+typedef struct DSMRegistryEntry
+{
+ char name[DSMR_NAME_LEN];
+ DSMREntryType type;
+ union
+ {
+ NamedDSMState dsm;
+ NamedDSAState dsa;
+ NamedDSHState dsh;
+ } data;
} DSMRegistryEntry;
static const dshash_parameters dsh_params = {
- offsetof(DSMRegistryEntry, handle),
+ offsetof(DSMRegistryEntry, type),
sizeof(DSMRegistryEntry),
dshash_strcmp,
dshash_strhash,
@@ -141,7 +203,7 @@ GetNamedDSMSegment(const char *name, size_t size,
ereport(ERROR,
(errmsg("DSM segment name cannot be empty")));
- if (strlen(name) >= offsetof(DSMRegistryEntry, handle))
+ if (strlen(name) >= offsetof(DSMRegistryEntry, type))
ereport(ERROR,
(errmsg("DSM segment name too long")));
@@ -158,32 +220,39 @@ GetNamedDSMSegment(const char *name, size_t size,
entry = dshash_find_or_insert(dsm_registry_table, name, found);
if (!(*found))
{
+ NamedDSMState *state = &entry->data.dsm;
+ dsm_segment *seg;
+
+ entry->type = DSMR_ENTRY_TYPE_DSM;
+
/* Initialize the segment. */
- dsm_segment *seg = dsm_create(size, 0);
+ seg = dsm_create(size, 0);
dsm_pin_segment(seg);
dsm_pin_mapping(seg);
- entry->handle = dsm_segment_handle(seg);
- entry->size = size;
+ state->handle = dsm_segment_handle(seg);
+ state->size = size;
ret = dsm_segment_address(seg);
if (init_callback)
(*init_callback) (ret);
}
- else if (entry->size != size)
- {
+ else if (entry->type != DSMR_ENTRY_TYPE_DSM)
ereport(ERROR,
- (errmsg("requested DSM segment size does not match size of "
- "existing segment")));
- }
+ (errmsg("requested DSM segment does not match type of existing entry")));
+ else if (entry->data.dsm.size != size)
+ ereport(ERROR,
+ (errmsg("requested DSM segment size does not match size of existing segment")));
else
{
- dsm_segment *seg = dsm_find_mapping(entry->handle);
+ NamedDSMState *state = &entry->data.dsm;
+ dsm_segment *seg;
/* If the existing segment is not already attached, attach it now. */
+ seg = dsm_find_mapping(state->handle);
if (seg == NULL)
{
- seg = dsm_attach(entry->handle);
+ seg = dsm_attach(state->handle);
if (seg == NULL)
elog(ERROR, "could not map dynamic shared memory segment");
@@ -198,3 +267,220 @@ GetNamedDSMSegment(const char *name, size_t size,
return ret;
}
+
+/*
+ * Initialize or attach a named DSA.
+ *
+ * This routine returns a pointer to the DSA. A new LWLock tranche ID will be
+ * generated if needed. Note that the lock tranche will be registered with the
+ * provided name. Also note that this should be called at most once for a
+ * given DSA in each backend.
+ */
+dsa_area *
+GetNamedDSA(const char *name, bool *found)
+{
+ DSMRegistryEntry *entry;
+ MemoryContext oldcontext;
+ dsa_area *ret;
+
+ Assert(found);
+
+ if (!name || *name == '\0')
+ ereport(ERROR,
+ (errmsg("DSA name cannot be empty")));
+
+ if (strlen(name) >= offsetof(DSMRegistryEntry, type))
+ ereport(ERROR,
+ (errmsg("DSA name too long")));
+
+ /* Be sure any local memory allocated by DSM/DSA routines is persistent. */
+ oldcontext = MemoryContextSwitchTo(TopMemoryContext);
+
+ /* Connect to the registry. */
+ init_dsm_registry();
+
+ entry = dshash_find_or_insert(dsm_registry_table, name, found);
+ if (!(*found))
+ {
+ NamedDSAState *state = &entry->data.dsa;
+
+ entry->type = DSMR_ENTRY_TYPE_DSA;
+
+ /* Initialize the LWLock tranche for the DSA. */
+ state->tranche = LWLockNewTrancheId();
+ strcpy(state->tranche_name, name);
+ LWLockRegisterTranche(state->tranche, state->tranche_name);
+
+ /* Initialize the DSA. */
+ ret = dsa_create(state->tranche);
+ dsa_pin(ret);
+ dsa_pin_mapping(ret);
+
+ /* Store handle for other backends to use. */
+ state->handle = dsa_get_handle(ret);
+ }
+ else if (entry->type != DSMR_ENTRY_TYPE_DSA)
+ ereport(ERROR,
+ (errmsg("requested DSA does not match type of existing entry")));
+ else
+ {
+ NamedDSAState *state = &entry->data.dsa;
+
+ if (dsa_is_attached(state->handle))
+ ereport(ERROR,
+ (errmsg("requested DSA already attached to current process")));
+
+ /* Initialize existing LWLock tranche for the DSA. */
+ LWLockRegisterTranche(state->tranche, state->tranche_name);
+
+ /* Attach to existing DSA. */
+ ret = dsa_attach(state->handle);
+ dsa_pin_mapping(ret);
+ }
+
+ dshash_release_lock(dsm_registry_table, entry);
+ MemoryContextSwitchTo(oldcontext);
+
+ return ret;
+}
+
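A minimal usage sketch (names hypothetical): only the first caller sees *found == false and may populate the area; every backend gets the same DSA back.

static dsa_area *
attach_my_area(void)
{
	bool		found;
	dsa_area   *area = GetNamedDSA("my_area", &found);

	if (!found)
	{
		/* first backend in: set up initial contents */
		dsa_pointer p = dsa_allocate0(area, 1024);

		(void) p;				/* a real caller would publish this */
	}
	return area;
}
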
+/*
+ * Initialize or attach a named dshash table.
+ *
+ * This routine returns the address of the table. The tranche_id member of
+ * params is ignored; new tranche IDs will be generated if needed. Note that
+ * the DSA lock tranche will be registered with the provided name with " DSA"
+ * appended. The dshash lock tranche will be registered with the provided
+ * name. Also note that this should be called at most once for a given table
+ * in each backend.
+ */
+dshash_table *
+GetNamedDSHash(const char *name, const dshash_parameters *params, bool *found)
+{
+ DSMRegistryEntry *entry;
+ MemoryContext oldcontext;
+ dshash_table *ret;
+
+ Assert(params);
+ Assert(found);
+
+ if (!name || *name == '\0')
+ ereport(ERROR,
+ (errmsg("DSHash name cannot be empty")));
+
+ if (strlen(name) >= offsetof(DSMRegistryEntry, type))
+ ereport(ERROR,
+ (errmsg("DSHash name too long")));
+
+ /* Be sure any local memory allocated by DSM/DSA routines is persistent. */
+ oldcontext = MemoryContextSwitchTo(TopMemoryContext);
+
+ /* Connect to the registry. */
+ init_dsm_registry();
+
+ entry = dshash_find_or_insert(dsm_registry_table, name, found);
+ if (!(*found))
+ {
+ NamedDSAState *dsa_state = &entry->data.dsh.dsa;
+ NamedDSHState *dsh_state = &entry->data.dsh;
+ dshash_parameters params_copy;
+ dsa_area *dsa;
+
+ entry->type = DSMR_ENTRY_TYPE_DSH;
+
+ /* Initialize the LWLock tranche for the DSA. */
+ dsa_state->tranche = LWLockNewTrancheId();
+ sprintf(dsa_state->tranche_name, "%s%s", name, DSMR_DSA_TRANCHE_SUFFIX);
+ LWLockRegisterTranche(dsa_state->tranche, dsa_state->tranche_name);
+
+ /* Initialize the LWLock tranche for the dshash table. */
+ dsh_state->tranche = LWLockNewTrancheId();
+ strcpy(dsh_state->tranche_name, name);
+ LWLockRegisterTranche(dsh_state->tranche, dsh_state->tranche_name);
+
+ /* Initialize the DSA for the hash table. */
+ dsa = dsa_create(dsa_state->tranche);
+ dsa_pin(dsa);
+ dsa_pin_mapping(dsa);
+
+ /* Initialize the dshash table. */
+ memcpy(&params_copy, params, sizeof(dshash_parameters));
+ params_copy.tranche_id = dsh_state->tranche;
+ ret = dshash_create(dsa, &params_copy, NULL);
+
+ /* Store handles for other backends to use. */
+ dsa_state->handle = dsa_get_handle(dsa);
+ dsh_state->handle = dshash_get_hash_table_handle(ret);
+ }
+ else if (entry->type != DSMR_ENTRY_TYPE_DSH)
+ ereport(ERROR,
+ (errmsg("requested DSHash does not match type of existing entry")));
+ else
+ {
+ NamedDSAState *dsa_state = &entry->data.dsh.dsa;
+ NamedDSHState *dsh_state = &entry->data.dsh;
+ dsa_area *dsa;
+
+ /* XXX: Should we verify params matches what table was created with? */
+
+ if (dsa_is_attached(dsa_state->handle))
+ ereport(ERROR,
+ (errmsg("requested DSHash already attached to current process")));
+
+ /* Initialize existing LWLock tranches for the DSA and dshash table. */
+ LWLockRegisterTranche(dsa_state->tranche, dsa_state->tranche_name);
+ LWLockRegisterTranche(dsh_state->tranche, dsh_state->tranche_name);
+
+ /* Attach to existing DSA for the hash table. */
+ dsa = dsa_attach(dsa_state->handle);
+ dsa_pin_mapping(dsa);
+
+ /* Attach to existing dshash table. */
+ ret = dshash_attach(dsa, params, dsh_state->handle, NULL);
+ }
+
+ dshash_release_lock(dsm_registry_table, entry);
+ MemoryContextSwitchTo(oldcontext);
+
+ return ret;
+}
+
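A minimal usage sketch (key/entry types hypothetical), assuming the stock memcmp/memhash/memcpy helpers from lib/dshash.h; per the comment above, the tranche_id member is ignored and assigned by the registry:

typedef struct MyEntry
{
	uint32		key;
	uint32		value;
} MyEntry;

static const dshash_parameters my_params = {
	sizeof(uint32),				/* key size */
	sizeof(MyEntry),			/* entry size */
	dshash_memcmp,
	dshash_memhash,
	dshash_memcpy,
	0							/* tranche_id: ignored here */
};

static dshash_table *
attach_my_table(void)
{
	bool		found;

	return GetNamedDSHash("my_table", &my_params, &found);
}
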
+Datum
+pg_get_dsm_registry_allocations(PG_FUNCTION_ARGS)
+{
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ DSMRegistryEntry *entry;
+ MemoryContext oldcontext;
+ dshash_seq_status status;
+
+ InitMaterializedSRF(fcinfo, MAT_SRF_USE_EXPECTED_DESC);
+
+ /* Be sure any local memory allocated by DSM/DSA routines is persistent. */
+ oldcontext = MemoryContextSwitchTo(TopMemoryContext);
+ init_dsm_registry();
+ MemoryContextSwitchTo(oldcontext);
+
+ dshash_seq_init(&status, dsm_registry_table, false);
+ while ((entry = dshash_seq_next(&status)) != NULL)
+ {
+ Datum vals[3];
+ bool nulls[3] = {0};
+
+ vals[0] = CStringGetTextDatum(entry->name);
+ vals[1] = CStringGetTextDatum(DSMREntryTypeNames[entry->type]);
+
+ /*
+ * Since we can't know the size of DSA/dshash entries without first
+ * attaching to them, return NULL for those.
+ */
+ if (entry->type == DSMR_ENTRY_TYPE_DSM)
+ vals[2] = Int64GetDatum(entry->data.dsm.size);
+ else
+ nulls[2] = true;
+
+ tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, vals, nulls);
+ }
+ dshash_seq_term(&status);
+
+ return (Datum) 0;
+}
diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c
index c6aefd2f688..beadeb5e46a 100644
--- a/src/backend/storage/ipc/latch.c
+++ b/src/backend/storage/ipc/latch.c
@@ -187,9 +187,11 @@ WaitLatch(Latch *latch, int wakeEvents, long timeout,
if (!(wakeEvents & WL_LATCH_SET))
latch = NULL;
ModifyWaitEvent(LatchWaitSet, LatchWaitSetLatchPos, WL_LATCH_SET, latch);
- ModifyWaitEvent(LatchWaitSet, LatchWaitSetPostmasterDeathPos,
- (wakeEvents & (WL_EXIT_ON_PM_DEATH | WL_POSTMASTER_DEATH)),
- NULL);
+
+ if (IsUnderPostmaster)
+ ModifyWaitEvent(LatchWaitSet, LatchWaitSetPostmasterDeathPos,
+ (wakeEvents & (WL_EXIT_ON_PM_DEATH | WL_POSTMASTER_DEATH)),
+ NULL);
if (WaitEventSetWait(LatchWaitSet,
(wakeEvents & WL_TIMEOUT) ? timeout : -1,
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index e5b945a9ee3..bf987aed8d3 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -1622,58 +1622,6 @@ TransactionIdIsInProgress(TransactionId xid)
return false;
}
-/*
- * TransactionIdIsActive -- is xid the top-level XID of an active backend?
- *
- * This differs from TransactionIdIsInProgress in that it ignores prepared
- * transactions, as well as transactions running on the primary if we're in
- * hot standby. Also, we ignore subtransactions since that's not needed
- * for current uses.
- */
-bool
-TransactionIdIsActive(TransactionId xid)
-{
- bool result = false;
- ProcArrayStruct *arrayP = procArray;
- TransactionId *other_xids = ProcGlobal->xids;
- int i;
-
- /*
- * Don't bother checking a transaction older than RecentXmin; it could not
- * possibly still be running.
- */
- if (TransactionIdPrecedes(xid, RecentXmin))
- return false;
-
- LWLockAcquire(ProcArrayLock, LW_SHARED);
-
- for (i = 0; i < arrayP->numProcs; i++)
- {
- int pgprocno = arrayP->pgprocnos[i];
- PGPROC *proc = &allProcs[pgprocno];
- TransactionId pxid;
-
- /* Fetch xid just once - see GetNewTransactionId */
- pxid = UINT32_ACCESS_ONCE(other_xids[i]);
-
- if (!TransactionIdIsValid(pxid))
- continue;
-
- if (proc->pid == 0)
- continue; /* ignore prepared transactions */
-
- if (TransactionIdEquals(pxid, xid))
- {
- result = true;
- break;
- }
- }
-
- LWLockRelease(ProcArrayLock);
-
- return result;
-}
-
/*
* Determine XID horizons.
@@ -2866,8 +2814,10 @@ GetRunningTransactionData(void)
*
* Similar to GetSnapshotData but returns just oldestActiveXid. We include
* all PGPROCs with an assigned TransactionId, even VACUUM processes.
- * We look at all databases, though there is no need to include WALSender
- * since this has no effect on hot standby conflicts.
+ *
+ * If allDbs is true, we look at all databases, though there is no need to
+ * include WALSender since this has no effect on hot standby conflicts. If
+ * allDbs is false, skip processes attached to other databases.
*
* This is never executed during recovery so there is no need to look at
* KnownAssignedXids.
@@ -2875,9 +2825,12 @@ GetRunningTransactionData(void)
* We don't worry about updating other counters, we want to keep this as
* simple as possible and leave GetSnapshotData() as the primary code for
* that bookkeeping.
+ *
+ * If inCommitOnly is true, only transactions currently in their commit
+ * critical section are considered when computing the oldestActiveXid.
*/
TransactionId
-GetOldestActiveTransactionId(void)
+GetOldestActiveTransactionId(bool inCommitOnly, bool allDbs)
{
ProcArrayStruct *arrayP = procArray;
TransactionId *other_xids = ProcGlobal->xids;
@@ -2904,6 +2857,8 @@ GetOldestActiveTransactionId(void)
for (index = 0; index < arrayP->numProcs; index++)
{
TransactionId xid;
+ int pgprocno = arrayP->pgprocnos[index];
+ PGPROC *proc = &allProcs[pgprocno];
/* Fetch xid just once - see GetNewTransactionId */
xid = UINT32_ACCESS_ONCE(other_xids[index]);
@@ -2911,6 +2866,13 @@ GetOldestActiveTransactionId(void)
if (!TransactionIdIsNormal(xid))
continue;
+ if (inCommitOnly &&
+ (proc->delayChkptFlags & DELAY_CHKPT_IN_COMMIT) == 0)
+ continue;
+
+ if (!allDbs && proc->databaseId != MyDatabaseId)
+ continue;
+
if (TransactionIdPrecedes(xid, oldestRunningXid))
oldestRunningXid = xid;
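
Condensed, the two new parameters add the following per-transaction filter (a sketch, not part of the patch):

static inline bool
xid_considered(PGPROC *proc, bool inCommitOnly, bool allDbs)
{
	if (inCommitOnly &&
		(proc->delayChkptFlags & DELAY_CHKPT_IN_COMMIT) == 0)
		return false;			/* not in its commit critical section */
	if (!allDbs && proc->databaseId != MyDatabaseId)
		return false;			/* attached to another database */
	return true;
}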
diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c
index a9bb540b55a..087821311cc 100644
--- a/src/backend/storage/ipc/procsignal.c
+++ b/src/backend/storage/ipc/procsignal.c
@@ -728,7 +728,11 @@ procsignal_sigusr1_handler(SIGNAL_ARGS)
void
SendCancelRequest(int backendPID, const uint8 *cancel_key, int cancel_key_len)
{
- Assert(backendPID != 0);
+ if (backendPID == 0)
+ {
+ ereport(LOG, (errmsg("invalid cancel request with PID 0")));
+ return;
+ }
/*
* See if we have a matching backend. Reading the pss_pid and
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index c9ae3b45b76..ca3656fc76f 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -679,12 +679,10 @@ pg_get_shmem_allocations_numa(PG_FUNCTION_ARGS)
*/
for (i = 0; i < shm_ent_page_count; i++)
{
- volatile uint64 touch pg_attribute_unused();
-
page_ptrs[i] = startptr + (i * os_page_size);
if (firstNumaTouch)
- pg_numa_touch_mem_if_required(touch, page_ptrs[i]);
+ pg_numa_touch_mem_if_required(page_ptrs[i]);
CHECK_FOR_INTERRUPTS();
}
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
index 7fa8d9247e0..4222bdab078 100644
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -1376,7 +1376,7 @@ LogCurrentRunningXacts(RunningTransactions CurrRunningXacts)
if (xlrec.subxid_overflow)
elog(DEBUG2,
- "snapshot of %d running transactions overflowed (lsn %X/%X oldest xid %u latest complete %u next xid %u)",
+ "snapshot of %d running transactions overflowed (lsn %X/%08X oldest xid %u latest complete %u next xid %u)",
CurrRunningXacts->xcnt,
LSN_FORMAT_ARGS(recptr),
CurrRunningXacts->oldestRunningXid,
@@ -1384,7 +1384,7 @@ LogCurrentRunningXacts(RunningTransactions CurrRunningXacts)
CurrRunningXacts->nextXid);
else
elog(DEBUG2,
- "snapshot of %d+%d running transaction ids (lsn %X/%X oldest xid %u latest complete %u next xid %u)",
+ "snapshot of %d+%d running transaction ids (lsn %X/%08X oldest xid %u latest complete %u next xid %u)",
CurrRunningXacts->xcnt, CurrRunningXacts->subxcnt,
LSN_FORMAT_ARGS(recptr),
CurrRunningXacts->oldestRunningXid,
diff --git a/src/backend/storage/lmgr/generate-lwlocknames.pl b/src/backend/storage/lmgr/generate-lwlocknames.pl
index 4441b7cba0c..cd3e43c448a 100644
--- a/src/backend/storage/lmgr/generate-lwlocknames.pl
+++ b/src/backend/storage/lmgr/generate-lwlocknames.pl
@@ -10,7 +10,6 @@ use Getopt::Long;
my $output_path = '.';
my $lastlockidx = -1;
-my $continue = "\n";
GetOptions('outdir:s' => \$output_path);
@@ -28,18 +27,24 @@ print $h "/* there is deliberately not an #ifndef LWLOCKNAMES_H here */\n\n";
#
-# First, record the predefined LWLocks listed in wait_event_names.txt. We'll
-# cross-check those with the ones in lwlocklist.h.
+# First, record the predefined LWLocks and built-in tranches listed in
+# wait_event_names.txt. We'll cross-check those with the ones in lwlocklist.h.
#
+my @wait_event_tranches;
my @wait_event_lwlocks;
my $record_lwlocks = 0;
+my $in_tranches = 0;
while (<$wait_event_names>)
{
chomp;
# Check for end marker.
- last if /^# END OF PREDEFINED LWLOCKS/;
+ if (/^# END OF PREDEFINED LWLOCKS/)
+ {
+ $in_tranches = 1;
+ next;
+ }
# Skip comments and empty lines.
next if /^#/;
@@ -55,13 +60,29 @@ while (<$wait_event_names>)
# Go to the next line if we are not yet recording LWLocks.
next if not $record_lwlocks;
+ # Stop recording if we reach another section.
+ last if /^Section:/;
+
# Record the LWLock.
(my $waiteventname, my $waitevendocsentence) = split(/\t/, $_);
- push(@wait_event_lwlocks, $waiteventname);
+
+ if ($in_tranches)
+ {
+ push(@wait_event_tranches, $waiteventname);
+ }
+ else
+ {
+ push(@wait_event_lwlocks, $waiteventname);
+ }
}
+#
+# While gathering the list of predefined LWLocks, cross-check the lists in
+# lwlocklist.h with the wait events we just recorded.
+#
my $in_comment = 0;
-my $i = 0;
+my $lwlock_count = 0;
+my $tranche_count = 0;
while (<$lwlocklist>)
{
chomp;
@@ -82,40 +103,72 @@ while (<$lwlocklist>)
next;
}
- die "unable to parse lwlocklist.h line \"$_\""
- unless /^PG_LWLOCK\((\d+),\s+(\w+)\)$/;
+ #
+ # Gather list of predefined LWLocks and cross-check with the wait events.
+ #
+ if (/^PG_LWLOCK\((\d+),\s+(\w+)\)$/)
+ {
+ my ($lockidx, $lockname) = ($1, $2);
- (my $lockidx, my $lockname) = ($1, $2);
+ die "lwlocklist.h not in order" if $lockidx < $lastlockidx;
+ die "lwlocklist.h has duplicates" if $lockidx == $lastlockidx;
- die "lwlocklist.h not in order" if $lockidx < $lastlockidx;
- die "lwlocklist.h has duplicates" if $lockidx == $lastlockidx;
+ die "$lockname defined in lwlocklist.h but missing from "
+ . "wait_event_names.txt"
+ if $lwlock_count >= scalar @wait_event_lwlocks;
+ die "lists of predefined LWLocks do not match (first mismatch at "
+ . "$wait_event_lwlocks[$lwlock_count] in wait_event_names.txt and "
+ . "$lockname in lwlocklist.h)"
+ if $wait_event_lwlocks[$lwlock_count] ne $lockname;
- die "$lockname defined in lwlocklist.h but missing from "
- . "wait_event_names.txt"
- if $i >= scalar @wait_event_lwlocks;
- die "lists of predefined LWLocks do not match (first mismatch at "
- . "$wait_event_lwlocks[$i] in wait_event_names.txt and $lockname in "
- . "lwlocklist.h)"
- if $wait_event_lwlocks[$i] ne $lockname;
- $i++;
+ $lwlock_count++;
- while ($lastlockidx < $lockidx - 1)
+ while ($lastlockidx < $lockidx - 1)
+ {
+ ++$lastlockidx;
+ }
+ $lastlockidx = $lockidx;
+
+ # Add a "Lock" suffix to each lock name, as the C code depends on that.
+ printf $h "#define %-32s (&MainLWLockArray[$lockidx].lock)\n",
+ $lockname . "Lock";
+
+ next;
+ }
+
+ #
+ # Cross-check the built-in LWLock tranches with the wait events.
+ #
+ if (/^PG_LWLOCKTRANCHE\((\w+),\s+(\w+)\)$/)
{
- ++$lastlockidx;
- $continue = ",\n";
+ my ($tranche_id, $tranche_name) = ($1, $2);
+
+ die "$tranche_name defined in lwlocklist.h but missing from "
+ . "wait_event_names.txt"
+ if $tranche_count >= scalar @wait_event_tranches;
+ die
+ "lists of built-in LWLock tranches do not match (first mismatch at "
+ . "$wait_event_tranches[$tranche_count] in wait_event_names.txt and "
+ . "$tranche_name in lwlocklist.h)"
+ if $wait_event_tranches[$tranche_count] ne $tranche_name;
+
+ $tranche_count++;
+
+ next;
}
- $lastlockidx = $lockidx;
- $continue = ",\n";
- # Add a "Lock" suffix to each lock name, as the C code depends on that
- printf $h "#define %-32s (&MainLWLockArray[$lockidx].lock)\n",
- $lockname . "Lock";
+ die "unable to parse lwlocklist.h line \"$_\"";
}
die
- "$wait_event_lwlocks[$i] defined in wait_event_names.txt but missing from "
- . "lwlocklist.h"
- if $i < scalar @wait_event_lwlocks;
+ "$wait_event_lwlocks[$lwlock_count] defined in wait_event_names.txt but "
+ . " missing from lwlocklist.h"
+ if $lwlock_count < scalar @wait_event_lwlocks;
+
+die
+ "$wait_event_tranches[$tranche_count] defined in wait_event_names.txt but "
+ . "missing from lwlocklist.h"
+ if $tranche_count < scalar @wait_event_tranches;
print $h "\n";
printf $h "#define NUM_INDIVIDUAL_LWLOCKS %s\n", $lastlockidx + 1;
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 86b06b9223f..62f3471448e 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -51,7 +51,7 @@
/* GUC variables */
int max_locks_per_xact; /* used to set the lock table size */
-bool log_lock_failure = false;
+bool log_lock_failures = false;
#define NLOCKENTS() \
mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
@@ -3539,9 +3539,9 @@ AtPrepare_Locks(void)
* but that probably costs more cycles.
*/
void
-PostPrepare_Locks(TransactionId xid)
+PostPrepare_Locks(FullTransactionId fxid)
{
- PGPROC *newproc = TwoPhaseGetDummyProc(xid, false);
+ PGPROC *newproc = TwoPhaseGetDummyProc(fxid, false);
HASH_SEQ_STATUS status;
LOCALLOCK *locallock;
LOCK *lock;
@@ -4324,11 +4324,11 @@ DumpAllLocks(void)
* and PANIC anyway.
*/
void
-lock_twophase_recover(TransactionId xid, uint16 info,
+lock_twophase_recover(FullTransactionId fxid, uint16 info,
void *recdata, uint32 len)
{
TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
- PGPROC *proc = TwoPhaseGetDummyProc(xid, false);
+ PGPROC *proc = TwoPhaseGetDummyProc(fxid, false);
LOCKTAG *locktag;
LOCKMODE lockmode;
LOCKMETHODID lockmethodid;
@@ -4505,7 +4505,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
* starting up into hot standby mode.
*/
void
-lock_twophase_standby_recover(TransactionId xid, uint16 info,
+lock_twophase_standby_recover(FullTransactionId fxid, uint16 info,
void *recdata, uint32 len)
{
TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
@@ -4524,7 +4524,7 @@ lock_twophase_standby_recover(TransactionId xid, uint16 info,
if (lockmode == AccessExclusiveLock &&
locktag->locktag_type == LOCKTAG_RELATION)
{
- StandbyAcquireAccessExclusiveLock(xid,
+ StandbyAcquireAccessExclusiveLock(XidFromFullTransactionId(fxid),
locktag->locktag_field1 /* dboid */ ,
locktag->locktag_field2 /* reloid */ );
}
@@ -4537,11 +4537,11 @@ lock_twophase_standby_recover(TransactionId xid, uint16 info,
* Find and release the lock indicated by the 2PC record.
*/
void
-lock_twophase_postcommit(TransactionId xid, uint16 info,
+lock_twophase_postcommit(FullTransactionId fxid, uint16 info,
void *recdata, uint32 len)
{
TwoPhaseLockRecord *rec = (TwoPhaseLockRecord *) recdata;
- PGPROC *proc = TwoPhaseGetDummyProc(xid, true);
+ PGPROC *proc = TwoPhaseGetDummyProc(fxid, true);
LOCKTAG *locktag;
LOCKMETHODID lockmethodid;
LockMethod lockMethodTable;
@@ -4563,10 +4563,10 @@ lock_twophase_postcommit(TransactionId xid, uint16 info,
* This is actually just the same as the COMMIT case.
*/
void
-lock_twophase_postabort(TransactionId xid, uint16 info,
+lock_twophase_postabort(FullTransactionId fxid, uint16 info,
void *recdata, uint32 len)
{
- lock_twophase_postcommit(xid, info, recdata, len);
+ lock_twophase_postcommit(fxid, info, recdata, len);
}
/*
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 46f44bc4511..ec9c345ffdf 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -122,9 +122,8 @@ StaticAssertDecl((LW_VAL_EXCLUSIVE & LW_FLAG_MASK) == 0,
* own tranche. We absorb the names of these tranches from there into
* BuiltinTrancheNames here.
*
- * 2. There are some predefined tranches for built-in groups of locks.
- * These are listed in enum BuiltinTrancheIds in lwlock.h, and their names
- * appear in BuiltinTrancheNames[] below.
+ * 2. There are some predefined tranches for built-in groups of locks defined
+ * in lwlocklist.h. We absorb the names of these tranches, too.
*
* 3. Extensions can create new tranches, via either RequestNamedLWLockTranche
* or LWLockRegisterTranche. The names of these that are known in the current
@@ -135,49 +134,10 @@ StaticAssertDecl((LW_VAL_EXCLUSIVE & LW_FLAG_MASK) == 0,
*/
static const char *const BuiltinTrancheNames[] = {
#define PG_LWLOCK(id, lockname) [id] = CppAsString(lockname),
+#define PG_LWLOCKTRANCHE(id, lockname) [LWTRANCHE_##id] = CppAsString(lockname),
#include "storage/lwlocklist.h"
#undef PG_LWLOCK
- [LWTRANCHE_XACT_BUFFER] = "XactBuffer",
- [LWTRANCHE_COMMITTS_BUFFER] = "CommitTsBuffer",
- [LWTRANCHE_SUBTRANS_BUFFER] = "SubtransBuffer",
- [LWTRANCHE_MULTIXACTOFFSET_BUFFER] = "MultiXactOffsetBuffer",
- [LWTRANCHE_MULTIXACTMEMBER_BUFFER] = "MultiXactMemberBuffer",
- [LWTRANCHE_NOTIFY_BUFFER] = "NotifyBuffer",
- [LWTRANCHE_SERIAL_BUFFER] = "SerialBuffer",
- [LWTRANCHE_WAL_INSERT] = "WALInsert",
- [LWTRANCHE_BUFFER_CONTENT] = "BufferContent",
- [LWTRANCHE_REPLICATION_ORIGIN_STATE] = "ReplicationOriginState",
- [LWTRANCHE_REPLICATION_SLOT_IO] = "ReplicationSlotIO",
- [LWTRANCHE_LOCK_FASTPATH] = "LockFastPath",
- [LWTRANCHE_BUFFER_MAPPING] = "BufferMapping",
- [LWTRANCHE_LOCK_MANAGER] = "LockManager",
- [LWTRANCHE_PREDICATE_LOCK_MANAGER] = "PredicateLockManager",
- [LWTRANCHE_PARALLEL_HASH_JOIN] = "ParallelHashJoin",
- [LWTRANCHE_PARALLEL_BTREE_SCAN] = "ParallelBtreeScan",
- [LWTRANCHE_PARALLEL_QUERY_DSA] = "ParallelQueryDSA",
- [LWTRANCHE_PER_SESSION_DSA] = "PerSessionDSA",
- [LWTRANCHE_PER_SESSION_RECORD_TYPE] = "PerSessionRecordType",
- [LWTRANCHE_PER_SESSION_RECORD_TYPMOD] = "PerSessionRecordTypmod",
- [LWTRANCHE_SHARED_TUPLESTORE] = "SharedTupleStore",
- [LWTRANCHE_SHARED_TIDBITMAP] = "SharedTidBitmap",
- [LWTRANCHE_PARALLEL_APPEND] = "ParallelAppend",
- [LWTRANCHE_PER_XACT_PREDICATE_LIST] = "PerXactPredicateList",
- [LWTRANCHE_PGSTATS_DSA] = "PgStatsDSA",
- [LWTRANCHE_PGSTATS_HASH] = "PgStatsHash",
- [LWTRANCHE_PGSTATS_DATA] = "PgStatsData",
- [LWTRANCHE_LAUNCHER_DSA] = "LogicalRepLauncherDSA",
- [LWTRANCHE_LAUNCHER_HASH] = "LogicalRepLauncherHash",
- [LWTRANCHE_DSM_REGISTRY_DSA] = "DSMRegistryDSA",
- [LWTRANCHE_DSM_REGISTRY_HASH] = "DSMRegistryHash",
- [LWTRANCHE_COMMITTS_SLRU] = "CommitTsSLRU",
- [LWTRANCHE_MULTIXACTOFFSET_SLRU] = "MultixactOffsetSLRU",
- [LWTRANCHE_MULTIXACTMEMBER_SLRU] = "MultixactMemberSLRU",
- [LWTRANCHE_NOTIFY_SLRU] = "NotifySLRU",
- [LWTRANCHE_SERIAL_SLRU] = "SerialSLRU",
- [LWTRANCHE_SUBTRANS_SLRU] = "SubtransSLRU",
- [LWTRANCHE_XACT_SLRU] = "XactSLRU",
- [LWTRANCHE_PARALLEL_VACUUM_DSA] = "ParallelVacuumDSA",
- [LWTRANCHE_AIO_URING_COMPLETION] = "AioUringCompletion",
+#undef PG_LWLOCKTRANCHE
};
StaticAssertDecl(lengthof(BuiltinTrancheNames) ==
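
The lwlocklist.h include above is an instance of the x-macro pattern: one list expanded twice with different definitions of the macro. A minimal standalone illustration with hypothetical names:

#define ITEM_LIST \
	X(TRANCHE_FOO) \
	X(TRANCHE_BAR)

enum item_id
{
#define X(id) id,
	ITEM_LIST
#undef X
};

static const char *const item_names[] = {
#define X(id) [id] = #id,
	ITEM_LIST
#undef X
};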
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index d82114ffca1..c07fb588355 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -191,7 +191,7 @@
* AtPrepare_PredicateLocks(void);
* PostPrepare_PredicateLocks(TransactionId xid);
* PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit);
- * predicatelock_twophase_recover(TransactionId xid, uint16 info,
+ * predicatelock_twophase_recover(FullTransactionId fxid, uint16 info,
* void *recdata, uint32 len);
*/
@@ -4856,7 +4856,7 @@ AtPrepare_PredicateLocks(void)
* anyway. We only need to clean up our local state.
*/
void
-PostPrepare_PredicateLocks(TransactionId xid)
+PostPrepare_PredicateLocks(FullTransactionId fxid)
{
if (MySerializableXact == InvalidSerializableXact)
return;
@@ -4879,12 +4879,12 @@ PostPrepare_PredicateLocks(TransactionId xid)
* commits or aborts.
*/
void
-PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit)
+PredicateLockTwoPhaseFinish(FullTransactionId fxid, bool isCommit)
{
SERIALIZABLEXID *sxid;
SERIALIZABLEXIDTAG sxidtag;
- sxidtag.xid = xid;
+ sxidtag.xid = XidFromFullTransactionId(fxid);
LWLockAcquire(SerializableXactHashLock, LW_SHARED);
sxid = (SERIALIZABLEXID *)
@@ -4906,10 +4906,11 @@ PredicateLockTwoPhaseFinish(TransactionId xid, bool isCommit)
* Re-acquire a predicate lock belonging to a transaction that was prepared.
*/
void
-predicatelock_twophase_recover(TransactionId xid, uint16 info,
+predicatelock_twophase_recover(FullTransactionId fxid, uint16 info,
void *recdata, uint32 len)
{
TwoPhasePredicateRecord *record;
+ TransactionId xid = XidFromFullTransactionId(fxid);
Assert(len == sizeof(TwoPhasePredicateRecord));
diff --git a/src/backend/tcop/backend_startup.c b/src/backend/tcop/backend_startup.c
index a7d1fec981f..14d5fc0b196 100644
--- a/src/backend/tcop/backend_startup.c
+++ b/src/backend/tcop/backend_startup.c
@@ -492,7 +492,7 @@ static int
ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done)
{
int32 len;
- char *buf;
+ char *buf = NULL;
ProtocolVersion proto;
MemoryContext oldcontext;
@@ -516,7 +516,7 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done)
* scanners, which may be less benign, but it's not really our job to
* notice those.)
*/
- return STATUS_ERROR;
+ goto fail;
}
if (pq_getbytes(((char *) &len) + 1, 3) == EOF)
@@ -526,7 +526,7 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done)
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("incomplete startup packet")));
- return STATUS_ERROR;
+ goto fail;
}
len = pg_ntoh32(len);
@@ -538,7 +538,7 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done)
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("invalid length of startup packet")));
- return STATUS_ERROR;
+ goto fail;
}
/*
@@ -554,7 +554,7 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done)
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("incomplete startup packet")));
- return STATUS_ERROR;
+ goto fail;
}
pq_endmsgread();
@@ -568,7 +568,7 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done)
{
ProcessCancelRequestPacket(port, buf, len);
/* Not really an error, but we don't want to proceed further */
- return STATUS_ERROR;
+ goto fail;
}
if (proto == NEGOTIATE_SSL_CODE && !ssl_done)
@@ -607,14 +607,16 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done)
ereport(COMMERROR,
(errcode_for_socket_access(),
errmsg("failed to send SSL negotiation response: %m")));
- return STATUS_ERROR; /* close the connection */
+ goto fail; /* close the connection */
}
#ifdef USE_SSL
if (SSLok == 'S' && secure_open_server(port) == -1)
- return STATUS_ERROR;
+ goto fail;
#endif
+ pfree(buf);
+
/*
* At this point we should have no data already buffered. If we do,
* it was received before we performed the SSL handshake, so it wasn't
@@ -661,14 +663,16 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done)
ereport(COMMERROR,
(errcode_for_socket_access(),
errmsg("failed to send GSSAPI negotiation response: %m")));
- return STATUS_ERROR; /* close the connection */
+ goto fail; /* close the connection */
}
#ifdef ENABLE_GSS
if (GSSok == 'G' && secure_open_gssapi(port) == -1)
- return STATUS_ERROR;
+ goto fail;
#endif
+ pfree(buf);
+
/*
* At this point we should have no data already buffered. If we do,
* it was received before we performed the GSS handshake, so it wasn't
@@ -863,7 +867,16 @@ ProcessStartupPacket(Port *port, bool ssl_done, bool gss_done)
*/
MemoryContextSwitchTo(oldcontext);
+ pfree(buf);
+
return STATUS_OK;
+
+fail:
+ /* be tidy, just to avoid Valgrind complaints */
+ if (buf)
+ pfree(buf);
+
+ return STATUS_ERROR;
}
/*
@@ -881,7 +894,7 @@ ProcessCancelRequestPacket(Port *port, void *pkt, int pktlen)
{
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid length of query cancel packet")));
+ errmsg("invalid length of cancel request packet")));
return;
}
len = pktlen - offsetof(CancelRequestPacket, cancelAuthCode);
@@ -889,7 +902,7 @@ ProcessCancelRequestPacket(Port *port, void *pkt, int pktlen)
{
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid length of query cancel key")));
+ errmsg("invalid length of cancel key in cancel request packet")));
return;
}
@@ -1077,7 +1090,7 @@ check_log_connections(char **newval, void **extra, GucSource source)
if (!SplitIdentifierString(rawstring, ',', &elemlist))
{
- GUC_check_errdetail("Invalid list syntax in parameter \"log_connections\".");
+ GUC_check_errdetail("Invalid list syntax in parameter \"%s\".", "log_connections");
pfree(rawstring);
list_free(elemlist);
return false;
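
The backend_startup.c changes convert every early return in ProcessStartupPacket() into a goto to a common exit that releases the startup-packet buffer. A condensed sketch of the pattern, with hypothetical helper and constant names; note the NULL guard before pfree(), since pfree(NULL) is not legal the way free(NULL) is:

    static int
    read_startup_sketch(void)               /* illustrative only */
    {
        char       *buf = NULL;

        buf = palloc(STARTUP_PACKET_MAX);   /* hypothetical size constant */
        if (!fill_from_socket(buf))         /* hypothetical helper */
            goto fail;

        pfree(buf);
        return STATUS_OK;

    fail:
        /* buf may still be NULL if we failed before allocating it */
        if (buf)
            pfree(buf);
        return STATUS_ERROR;
    }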
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 4efd8120d2c..0cecd464902 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -988,6 +988,7 @@ pg_plan_queries(List *querytrees, const char *query_string, int cursorOptions,
stmt->stmt_location = query->stmt_location;
stmt->stmt_len = query->stmt_len;
stmt->queryId = query->queryId;
+ stmt->planOrigin = PLAN_STMT_INTERNAL;
}
else
{
@@ -2034,7 +2035,7 @@ exec_bind_message(StringInfo input_message)
{
PlannedStmt *plan = lfirst_node(PlannedStmt, lc);
- if (plan->planId != UINT64CONST(0))
+ if (plan->planId != INT64CONST(0))
{
pgstat_report_plan_id(plan->planId, false);
break;
@@ -2185,7 +2186,7 @@ exec_execute_message(const char *portal_name, long max_rows)
{
PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);
- if (stmt->planId != UINT64CONST(0))
+ if (stmt->planId != INT64CONST(0))
{
pgstat_report_plan_id(stmt->planId, false);
break;
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index d1593f38b35..08791b8f75e 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -1350,24 +1350,15 @@ PortalRunMulti(Portal portal,
PopActiveSnapshot();
/*
- * If a query completion data was supplied, use it. Otherwise use the
- * portal's query completion data.
- *
- * Exception: Clients expect INSERT/UPDATE/DELETE tags to have counts, so
- * fake them with zeros. This can happen with DO INSTEAD rules if there
- * is no replacement query of the same type as the original. We print "0
- * 0" here because technically there is no query of the matching tag type,
- * and printing a non-zero count for a different query type seems wrong,
- * e.g. an INSERT that does an UPDATE instead should not print "0 1" if
- * one row was updated. See QueryRewrite(), step 3, for details.
+ * If a command tag was requested and we did not fill in a run-time-
+ * determined tag above, copy the parse-time tag from the Portal. (There
+ * might not be any tag there either, in edge cases such as empty prepared
+ * statements. That's OK.)
*/
- if (qc && qc->commandTag == CMDTAG_UNKNOWN)
- {
- if (portal->qc.commandTag != CMDTAG_UNKNOWN)
- CopyQueryCompletion(qc, &portal->qc);
- /* If the caller supplied a qc, we should have set it by now. */
- Assert(qc->commandTag != CMDTAG_UNKNOWN);
- }
+ if (qc &&
+ qc->commandTag == CMDTAG_UNKNOWN &&
+ portal->qc.commandTag != CMDTAG_UNKNOWN)
+ CopyQueryCompletion(qc, &portal->qc);
}
/*
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index 25fe3d58016..4f4191b0ea6 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -943,17 +943,7 @@ standard_ProcessUtility(PlannedStmt *pstmt,
break;
case T_CheckPointStmt:
- if (!has_privs_of_role(GetUserId(), ROLE_PG_CHECKPOINT))
- ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- /* translator: %s is name of a SQL command, eg CHECKPOINT */
- errmsg("permission denied to execute %s command",
- "CHECKPOINT"),
- errdetail("Only roles with privileges of the \"%s\" role may execute this command.",
- "pg_checkpoint")));
-
- RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_WAIT |
- (RecoveryInProgress() ? 0 : CHECKPOINT_FORCE));
+ ExecCheckpoint(pstate, (CheckPointStmt *) parsetree);
break;
/*
@@ -1244,6 +1234,7 @@ ProcessUtilitySlow(ParseState *pstate,
wrapper->utilityStmt = stmt;
wrapper->stmt_location = pstmt->stmt_location;
wrapper->stmt_len = pstmt->stmt_len;
+ wrapper->planOrigin = PLAN_STMT_INTERNAL;
ProcessUtility(wrapper,
queryString,
@@ -1343,7 +1334,7 @@ ProcessUtilitySlow(ParseState *pstate,
*/
switch (stmt->subtype)
{
- case 'T': /* ALTER DOMAIN DEFAULT */
+ case AD_AlterDefault:
/*
* Recursively alter column default for table and,
@@ -1353,30 +1344,30 @@ ProcessUtilitySlow(ParseState *pstate,
AlterDomainDefault(stmt->typeName,
stmt->def);
break;
- case 'N': /* ALTER DOMAIN DROP NOT NULL */
+ case AD_DropNotNull:
address =
AlterDomainNotNull(stmt->typeName,
false);
break;
- case 'O': /* ALTER DOMAIN SET NOT NULL */
+ case AD_SetNotNull:
address =
AlterDomainNotNull(stmt->typeName,
true);
break;
- case 'C': /* ADD CONSTRAINT */
+ case AD_AddConstraint:
address =
AlterDomainAddConstraint(stmt->typeName,
stmt->def,
&secondaryObject);
break;
- case 'X': /* DROP CONSTRAINT */
+ case AD_DropConstraint:
address =
AlterDomainDropConstraint(stmt->typeName,
stmt->name,
stmt->behavior,
stmt->missing_ok);
break;
- case 'V': /* VALIDATE CONSTRAINT */
+ case AD_ValidateConstraint:
address =
AlterDomainValidateConstraint(stmt->typeName,
stmt->name);
@@ -1974,6 +1965,7 @@ ProcessUtilityForAlterTable(Node *stmt, AlterTableUtilityContext *context)
wrapper->utilityStmt = stmt;
wrapper->stmt_location = context->pstmt->stmt_location;
wrapper->stmt_len = context->pstmt->stmt_len;
+ wrapper->planOrigin = PLAN_STMT_INTERNAL;
ProcessUtility(wrapper,
context->queryString,
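
Three hunks in this file (and one in postgres.c above) tag internally fabricated PlannedStmt wrappers with PLAN_STMT_INTERNAL. A sketch of the convention, using only the fields visible in the diff:

    PlannedStmt *wrapper = makeNode(PlannedStmt);

    wrapper->commandType = CMD_UTILITY;
    wrapper->utilityStmt = stmt;
    wrapper->stmt_location = pstmt->stmt_location;
    wrapper->stmt_len = pstmt->stmt_len;
    wrapper->planOrigin = PLAN_STMT_INTERNAL;   /* mark as not planner output */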
diff --git a/src/backend/tsearch/dict_ispell.c b/src/backend/tsearch/dict_ispell.c
index 63bd193a78a..debfbf956cc 100644
--- a/src/backend/tsearch/dict_ispell.c
+++ b/src/backend/tsearch/dict_ispell.c
@@ -47,24 +47,30 @@ dispell_init(PG_FUNCTION_ARGS)
if (strcmp(defel->defname, "dictfile") == 0)
{
+ char *filename;
+
if (dictloaded)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("multiple DictFile parameters")));
- NIImportDictionary(&(d->obj),
- get_tsearch_config_filename(defGetString(defel),
- "dict"));
+ filename = get_tsearch_config_filename(defGetString(defel),
+ "dict");
+ NIImportDictionary(&(d->obj), filename);
+ pfree(filename);
dictloaded = true;
}
else if (strcmp(defel->defname, "afffile") == 0)
{
+ char *filename;
+
if (affloaded)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("multiple AffFile parameters")));
- NIImportAffixes(&(d->obj),
- get_tsearch_config_filename(defGetString(defel),
- "affix"));
+ filename = get_tsearch_config_filename(defGetString(defel),
+ "affix");
+ NIImportAffixes(&(d->obj), filename);
+ pfree(filename);
affloaded = true;
}
else if (strcmp(defel->defname, "stopwords") == 0)
diff --git a/src/backend/tsearch/dict_synonym.c b/src/backend/tsearch/dict_synonym.c
index 0da5a9d6868..c2773eb01ad 100644
--- a/src/backend/tsearch/dict_synonym.c
+++ b/src/backend/tsearch/dict_synonym.c
@@ -199,6 +199,7 @@ skipline:
}
tsearch_readline_end(&trst);
+ pfree(filename);
d->len = cur;
qsort(d->syn, d->len, sizeof(Syn), compareSyn);
diff --git a/src/backend/tsearch/dict_thesaurus.c b/src/backend/tsearch/dict_thesaurus.c
index 1bebe36a691..1e6bbde1ca7 100644
--- a/src/backend/tsearch/dict_thesaurus.c
+++ b/src/backend/tsearch/dict_thesaurus.c
@@ -167,17 +167,17 @@ addWrd(DictThesaurus *d, char *b, char *e, uint32 idsubst, uint16 nwrd, uint16 p
static void
thesaurusRead(const char *filename, DictThesaurus *d)
{
+ char *real_filename = get_tsearch_config_filename(filename, "ths");
tsearch_readline_state trst;
uint32 idsubst = 0;
bool useasis = false;
char *line;
- filename = get_tsearch_config_filename(filename, "ths");
- if (!tsearch_readline_begin(&trst, filename))
+ if (!tsearch_readline_begin(&trst, real_filename))
ereport(ERROR,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("could not open thesaurus file \"%s\": %m",
- filename)));
+ real_filename)));
while ((line = tsearch_readline(&trst)) != NULL)
{
@@ -297,6 +297,7 @@ thesaurusRead(const char *filename, DictThesaurus *d)
d->nsubst = idsubst;
tsearch_readline_end(&trst);
+ pfree(real_filename);
}
static TheLexeme *
diff --git a/src/backend/tsearch/ts_locale.c b/src/backend/tsearch/ts_locale.c
index b77d8c23d36..4801fe90089 100644
--- a/src/backend/tsearch/ts_locale.c
+++ b/src/backend/tsearch/ts_locale.c
@@ -36,7 +36,7 @@ t_isalpha(const char *ptr)
{
int clen = pg_mblen(ptr);
wchar_t character[WC_BUF_LEN];
- pg_locale_t mylocale = 0; /* TODO */
+ locale_t mylocale = 0; /* TODO */
if (clen == 1 || database_ctype_is_c)
return isalpha(TOUCHAR(ptr));
@@ -51,7 +51,7 @@ t_isalnum(const char *ptr)
{
int clen = pg_mblen(ptr);
wchar_t character[WC_BUF_LEN];
- pg_locale_t mylocale = 0; /* TODO */
+ locale_t mylocale = 0; /* TODO */
if (clen == 1 || database_ctype_is_c)
return isalnum(TOUCHAR(ptr));
diff --git a/src/backend/tsearch/ts_selfuncs.c b/src/backend/tsearch/ts_selfuncs.c
index 0c1d2bc1109..453a5e5c2ea 100644
--- a/src/backend/tsearch/ts_selfuncs.c
+++ b/src/backend/tsearch/ts_selfuncs.c
@@ -233,7 +233,7 @@ mcelem_tsquery_selec(TSQuery query, Datum *mcelem, int nmcelem,
* The text Datums came from an array, so it cannot be compressed or
* stored out-of-line -- it's safe to use VARSIZE_ANY*.
*/
- Assert(!VARATT_IS_COMPRESSED(mcelem[i]) && !VARATT_IS_EXTERNAL(mcelem[i]));
+ Assert(!VARATT_IS_COMPRESSED(DatumGetPointer(mcelem[i])) && !VARATT_IS_EXTERNAL(DatumGetPointer(mcelem[i])));
lookup[i].element = (text *) DatumGetPointer(mcelem[i]);
lookup[i].frequency = numbers[i];
}
diff --git a/src/backend/tsearch/wparser_def.c b/src/backend/tsearch/wparser_def.c
index 79bcd32a063..e2dd3da3aa3 100644
--- a/src/backend/tsearch/wparser_def.c
+++ b/src/backend/tsearch/wparser_def.c
@@ -299,7 +299,7 @@ TParserInit(char *str, int len)
*/
if (prs->charmaxlen > 1)
{
- pg_locale_t mylocale = 0; /* TODO */
+ locale_t mylocale = 0; /* TODO */
prs->usewide = true;
if (database_ctype_is_c)
diff --git a/src/backend/utils/activity/backend_status.c b/src/backend/utils/activity/backend_status.c
index 9c2ed2cb9e0..a290cc4c975 100644
--- a/src/backend/utils/activity/backend_status.c
+++ b/src/backend/utils/activity/backend_status.c
@@ -321,7 +321,7 @@ pgstat_bestart_initial(void)
lbeentry.st_progress_command = PROGRESS_COMMAND_INVALID;
lbeentry.st_progress_command_target = InvalidOid;
lbeentry.st_query_id = INT64CONST(0);
- lbeentry.st_plan_id = UINT64CONST(0);
+ lbeentry.st_plan_id = INT64CONST(0);
/*
* we don't zero st_progress_param here to save cycles; nobody should
@@ -600,7 +600,7 @@ pgstat_report_activity(BackendState state, const char *cmd_str)
/* st_xact_start_timestamp and wait_event_info are also disabled */
beentry->st_xact_start_timestamp = 0;
beentry->st_query_id = INT64CONST(0);
- beentry->st_plan_id = UINT64CONST(0);
+ beentry->st_plan_id = INT64CONST(0);
proc->wait_event_info = 0;
PGSTAT_END_WRITE_ACTIVITY(beentry);
}
@@ -663,7 +663,7 @@ pgstat_report_activity(BackendState state, const char *cmd_str)
if (state == STATE_RUNNING)
{
beentry->st_query_id = INT64CONST(0);
- beentry->st_plan_id = UINT64CONST(0);
+ beentry->st_plan_id = INT64CONST(0);
}
if (cmd_str != NULL)
@@ -722,7 +722,7 @@ pgstat_report_query_id(int64 query_id, bool force)
* --------
*/
void
-pgstat_report_plan_id(uint64 plan_id, bool force)
+pgstat_report_plan_id(int64 plan_id, bool force)
{
volatile PgBackendStatus *beentry = MyBEEntry;
@@ -1154,7 +1154,7 @@ pgstat_get_my_query_id(void)
*
* Return current backend's plan identifier.
*/
-uint64
+int64
pgstat_get_my_plan_id(void)
{
if (!MyBEEntry)
diff --git a/src/backend/utils/activity/pgstat.c b/src/backend/utils/activity/pgstat.c
index 8b57845e870..6bc91ce0dad 100644
--- a/src/backend/utils/activity/pgstat.c
+++ b/src/backend/utils/activity/pgstat.c
@@ -212,6 +212,11 @@ int pgstat_fetch_consistency = PGSTAT_FETCH_CONSISTENCY_CACHE;
PgStat_LocalState pgStatLocal;
+/*
+ * Track pending reports for fixed-numbered stats, used by
+ * pgstat_report_stat().
+ */
+bool pgstat_report_fixed = false;
/* ----------
* Local data
@@ -370,7 +375,6 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE]
.shared_data_off = offsetof(PgStatShared_Backend, stats),
.shared_data_len = sizeof(((PgStatShared_Backend *) 0)->stats),
- .have_static_pending_cb = pgstat_backend_have_pending_cb,
.flush_static_cb = pgstat_backend_flush_cb,
.reset_timestamp_cb = pgstat_backend_reset_timestamp_cb,
},
@@ -437,7 +441,6 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE]
.shared_data_len = sizeof(((PgStatShared_IO *) 0)->stats),
.flush_static_cb = pgstat_io_flush_cb,
- .have_static_pending_cb = pgstat_io_have_pending_cb,
.init_shmem_cb = pgstat_io_init_shmem_cb,
.reset_all_cb = pgstat_io_reset_all_cb,
.snapshot_cb = pgstat_io_snapshot_cb,
@@ -455,7 +458,6 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE]
.shared_data_len = sizeof(((PgStatShared_SLRU *) 0)->stats),
.flush_static_cb = pgstat_slru_flush_cb,
- .have_static_pending_cb = pgstat_slru_have_pending_cb,
.init_shmem_cb = pgstat_slru_init_shmem_cb,
.reset_all_cb = pgstat_slru_reset_all_cb,
.snapshot_cb = pgstat_slru_snapshot_cb,
@@ -474,7 +476,6 @@ static const PgStat_KindInfo pgstat_kind_builtin_infos[PGSTAT_KIND_BUILTIN_SIZE]
.init_backend_cb = pgstat_wal_init_backend_cb,
.flush_static_cb = pgstat_wal_flush_cb,
- .have_static_pending_cb = pgstat_wal_have_pending_cb,
.init_shmem_cb = pgstat_wal_init_shmem_cb,
.reset_all_cb = pgstat_wal_reset_all_cb,
.snapshot_cb = pgstat_wal_snapshot_cb,
@@ -708,29 +709,10 @@ pgstat_report_stat(bool force)
}
/* Don't expend a clock check if nothing to do */
- if (dlist_is_empty(&pgStatPending))
+ if (dlist_is_empty(&pgStatPending) &&
+ !pgstat_report_fixed)
{
- bool do_flush = false;
-
- /* Check for pending stats */
- for (PgStat_Kind kind = PGSTAT_KIND_MIN; kind <= PGSTAT_KIND_MAX; kind++)
- {
- const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);
-
- if (!kind_info)
- continue;
- if (!kind_info->have_static_pending_cb)
- continue;
-
- if (kind_info->have_static_pending_cb())
- {
- do_flush = true;
- break;
- }
- }
-
- if (!do_flush)
- return 0;
+ return 0;
}
/*
@@ -784,16 +766,19 @@ pgstat_report_stat(bool force)
partial_flush |= pgstat_flush_pending_entries(nowait);
/* flush of other stats kinds */
- for (PgStat_Kind kind = PGSTAT_KIND_MIN; kind <= PGSTAT_KIND_MAX; kind++)
+ if (pgstat_report_fixed)
{
- const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);
+ for (PgStat_Kind kind = PGSTAT_KIND_MIN; kind <= PGSTAT_KIND_MAX; kind++)
+ {
+ const PgStat_KindInfo *kind_info = pgstat_get_kind_info(kind);
- if (!kind_info)
- continue;
- if (!kind_info->flush_static_cb)
- continue;
+ if (!kind_info)
+ continue;
+ if (!kind_info->flush_static_cb)
+ continue;
- partial_flush |= kind_info->flush_static_cb(nowait);
+ partial_flush |= kind_info->flush_static_cb(nowait);
+ }
}
last_flush = now;
@@ -815,6 +800,7 @@ pgstat_report_stat(bool force)
}
pending_since = 0;
+ pgstat_report_fixed = false;
return 0;
}
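
The pgstat.c hunks replace the per-kind have_static_pending_cb probes with a single process-local flag. A sketch of the resulting contract, inferred from the diff: producers of fixed-numbered stats raise the flag, and pgstat_report_stat() both gates the flush_static_cb loop on it and clears it once a report completes.

    /* producer side: any fixed-numbered stats accumulation path */
    static void
    count_fixed_stat_sketch(void)       /* illustrative only */
    {
        /* ... add to a process-local pending struct ... */
        pgstat_report_fixed = true;     /* O(1) replacement for per-kind probes */
    }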
diff --git a/src/backend/utils/activity/pgstat_backend.c b/src/backend/utils/activity/pgstat_backend.c
index 51256277e8d..8714a85e2d9 100644
--- a/src/backend/utils/activity/pgstat_backend.c
+++ b/src/backend/utils/activity/pgstat_backend.c
@@ -66,6 +66,7 @@ pgstat_count_backend_io_op_time(IOObject io_object, IOContext io_context,
io_time);
backend_has_iostats = true;
+ pgstat_report_fixed = true;
}
void
@@ -81,6 +82,7 @@ pgstat_count_backend_io_op(IOObject io_object, IOContext io_context,
PendingBackendStats.pending_io.bytes[io_object][io_context][io_op] += bytes;
backend_has_iostats = true;
+ pgstat_report_fixed = true;
}
/*
@@ -302,18 +304,6 @@ pgstat_flush_backend(bool nowait, bits32 flags)
}
/*
- * Check if there are any backend stats waiting for flush.
- */
-bool
-pgstat_backend_have_pending_cb(void)
-{
- if (!pgstat_tracks_backend_bktype(MyBackendType))
- return false;
-
- return (backend_has_iostats || pgstat_backend_wal_have_pending());
-}
-
-/*
* Callback to flush out locally pending backend statistics.
*
* If some stats could not be flushed due to lock contention, return true.
diff --git a/src/backend/utils/activity/pgstat_io.c b/src/backend/utils/activity/pgstat_io.c
index d8d26379a57..13ae57ed649 100644
--- a/src/backend/utils/activity/pgstat_io.c
+++ b/src/backend/utils/activity/pgstat_io.c
@@ -80,6 +80,7 @@ pgstat_count_io_op(IOObject io_object, IOContext io_context, IOOp io_op,
pgstat_count_backend_io_op(io_object, io_context, io_op, cnt, bytes);
have_iostats = true;
+ pgstat_report_fixed = true;
}
/*
@@ -168,15 +169,6 @@ pgstat_fetch_stat_io(void)
}
/*
- * Check if there are any IO stats waiting for flush.

- */
-bool
-pgstat_io_have_pending_cb(void)
-{
- return have_iostats;
-}
-
-/*
* Simpler wrapper of pgstat_io_flush_cb()
*/
void
diff --git a/src/backend/utils/activity/pgstat_relation.c b/src/backend/utils/activity/pgstat_relation.c
index 28587e2916b..69df741cbf6 100644
--- a/src/backend/utils/activity/pgstat_relation.c
+++ b/src/backend/utils/activity/pgstat_relation.c
@@ -744,7 +744,7 @@ PostPrepare_PgStat_Relations(PgStat_SubXactStatus *xact_state)
* Load the saved counts into our local pgstats state.
*/
void
-pgstat_twophase_postcommit(TransactionId xid, uint16 info,
+pgstat_twophase_postcommit(FullTransactionId fxid, uint16 info,
void *recdata, uint32 len)
{
TwoPhasePgStatRecord *rec = (TwoPhasePgStatRecord *) recdata;
@@ -780,7 +780,7 @@ pgstat_twophase_postcommit(TransactionId xid, uint16 info,
* as aborted.
*/
void
-pgstat_twophase_postabort(TransactionId xid, uint16 info,
+pgstat_twophase_postabort(FullTransactionId fxid, uint16 info,
void *recdata, uint32 len)
{
TwoPhasePgStatRecord *rec = (TwoPhasePgStatRecord *) recdata;
diff --git a/src/backend/utils/activity/pgstat_shmem.c b/src/backend/utils/activity/pgstat_shmem.c
index 2e33293b000..53e7d534270 100644
--- a/src/backend/utils/activity/pgstat_shmem.c
+++ b/src/backend/utils/activity/pgstat_shmem.c
@@ -183,7 +183,7 @@ StatsShmemInit(void)
p += MAXALIGN(pgstat_dsa_init_size());
dsa = dsa_create_in_place(ctl->raw_dsa_area,
pgstat_dsa_init_size(),
- LWTRANCHE_PGSTATS_DSA, 0);
+ LWTRANCHE_PGSTATS_DSA, NULL);
dsa_pin(dsa);
/*
@@ -255,7 +255,8 @@ pgstat_attach_shmem(void)
dsa_pin_mapping(pgStatLocal.dsa);
pgStatLocal.shared_hash = dshash_attach(pgStatLocal.dsa, &dsh_params,
- pgStatLocal.shmem->hash_handle, 0);
+ pgStatLocal.shmem->hash_handle,
+ NULL);
MemoryContextSwitchTo(oldcontext);
}
diff --git a/src/backend/utils/activity/pgstat_slru.c b/src/backend/utils/activity/pgstat_slru.c
index b9e940dde45..7bd8744accb 100644
--- a/src/backend/utils/activity/pgstat_slru.c
+++ b/src/backend/utils/activity/pgstat_slru.c
@@ -144,15 +144,6 @@ pgstat_get_slru_index(const char *name)
}
/*
- * Check if there are any SLRU stats entries waiting for flush.
- */
-bool
-pgstat_slru_have_pending_cb(void)
-{
- return have_slrustats;
-}
-
-/*
* Flush out locally pending SLRU stats entries
*
* If nowait is true, this function returns false on lock failure. Otherwise
@@ -247,6 +238,7 @@ get_slru_entry(int slru_idx)
Assert((slru_idx >= 0) && (slru_idx < SLRU_NUM_ELEMENTS));
have_slrustats = true;
+ pgstat_report_fixed = true;
return &pending_SLRUStats[slru_idx];
}
diff --git a/src/backend/utils/activity/pgstat_wal.c b/src/backend/utils/activity/pgstat_wal.c
index 16a1ecb4d90..0d04480d2f6 100644
--- a/src/backend/utils/activity/pgstat_wal.c
+++ b/src/backend/utils/activity/pgstat_wal.c
@@ -72,6 +72,15 @@ pgstat_fetch_stat_wal(void)
}
/*
+ * To determine whether WAL usage happened.
+ */
+static inline bool
+pgstat_wal_have_pending(void)
+{
+ return pgWalUsage.wal_records != prevWalUsage.wal_records;
+}
+
+/*
* Calculate how much WAL usage counters have increased by subtracting the
* previous counters from the current ones.
*
@@ -92,7 +101,7 @@ pgstat_wal_flush_cb(bool nowait)
* This function can be called even if nothing at all has happened. Avoid
* taking lock for nothing in that case.
*/
- if (!pgstat_wal_have_pending_cb())
+ if (!pgstat_wal_have_pending())
return false;
/*
@@ -136,15 +145,6 @@ pgstat_wal_init_backend_cb(void)
prevWalUsage = pgWalUsage;
}
-/*
- * To determine whether WAL usage happened.
- */
-bool
-pgstat_wal_have_pending_cb(void)
-{
- return pgWalUsage.wal_records != prevWalUsage.wal_records;
-}
-
void
pgstat_wal_init_shmem_cb(void *stats)
{
diff --git a/src/backend/utils/activity/wait_event_names.txt b/src/backend/utils/activity/wait_event_names.txt
index 4da68312b5f..0be307d2ca0 100644
--- a/src/backend/utils/activity/wait_event_names.txt
+++ b/src/backend/utils/activity/wait_event_names.txt
@@ -356,9 +356,13 @@ AioWorkerSubmissionQueue "Waiting to access AIO worker submission queue."
#
# END OF PREDEFINED LWLOCKS (DO NOT CHANGE THIS LINE)
#
-# Predefined LWLocks (i.e., those declared in lwlocknames.h) must be listed
-# in the section above and must be listed in the same order as in
-# lwlocknames.h. Other LWLocks must be listed in the section below.
+# Predefined LWLocks (i.e., those declared at the top of lwlocknames.h) must be
+# listed in the section above and must be listed in the same order as in
+# lwlocknames.h.
+#
+# Likewise, the built-in LWLock tranches (i.e., those declared at the bottom of
+# lwlocknames.h) must be listed in the section below and must be listed in the
+# same order as in lwlocknames.h.
#
XactBuffer "Waiting for I/O on a transaction status SLRU buffer."
diff --git a/src/backend/utils/adt/Makefile b/src/backend/utils/adt/Makefile
index 4a233b63c32..ffeacf2b819 100644
--- a/src/backend/utils/adt/Makefile
+++ b/src/backend/utils/adt/Makefile
@@ -23,6 +23,7 @@ OBJS = \
arrayutils.o \
ascii.o \
bool.o \
+ bytea.o \
cash.o \
char.o \
cryptohashfuncs.o \
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index ca3c5ee3df3..1213f9106d5 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -135,6 +135,22 @@ static void RoleMembershipCacheCallback(Datum arg, int cacheid, uint32 hashvalue
/*
+ * Test whether an identifier char can be left unquoted in ACLs.
+ *
+ * Formerly, we used isalnum() even on non-ASCII characters, resulting in
+ * unportable behavior. To ensure dump compatibility with old versions,
+ * we now treat high-bit-set characters as always requiring quoting during
+ * putid(), but getid() will always accept them without quotes.
+ */
+static inline bool
+is_safe_acl_char(unsigned char c, bool is_getid)
+{
+ if (IS_HIGHBIT_SET(c))
+ return is_getid;
+ return isalnum(c) || c == '_';
+}
+
+/*
* getid
* Consumes the first alphanumeric string (identifier) found in string
* 's', ignoring any leading white space. If it finds a double quote
@@ -159,21 +175,22 @@ getid(const char *s, char *n, Node *escontext)
while (isspace((unsigned char) *s))
s++;
- /* This code had better match what putid() does, below */
for (;
*s != '\0' &&
- (isalnum((unsigned char) *s) ||
- *s == '_' ||
- *s == '"' ||
- in_quotes);
+ (in_quotes || *s == '"' || is_safe_acl_char(*s, true));
s++)
{
if (*s == '"')
{
+ if (!in_quotes)
+ {
+ in_quotes = true;
+ continue;
+ }
/* safe to look at next char (could be '\0' though) */
if (*(s + 1) != '"')
{
- in_quotes = !in_quotes;
+ in_quotes = false;
continue;
}
/* it's an escaped double quote; skip the escaping char */
@@ -207,10 +224,10 @@ putid(char *p, const char *s)
const char *src;
bool safe = true;
+ /* Detect whether we need to use double quotes */
for (src = s; *src; src++)
{
- /* This test had better match what getid() does, above */
- if (!isalnum((unsigned char) *src) && *src != '_')
+ if (!is_safe_acl_char(*src, false))
{
safe = false;
break;
diff --git a/src/backend/utils/adt/bytea.c b/src/backend/utils/adt/bytea.c
new file mode 100644
index 00000000000..6e7b914c563
--- /dev/null
+++ b/src/backend/utils/adt/bytea.c
@@ -0,0 +1,1114 @@
+/*-------------------------------------------------------------------------
+ *
+ * bytea.c
+ * Functions for the bytea type.
+ *
+ * Portions Copyright (c) 2025, PostgreSQL Global Development Group
+ *
+ *
+ * IDENTIFICATION
+ * src/backend/utils/adt/bytea.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/detoast.h"
+#include "catalog/pg_collation_d.h"
+#include "catalog/pg_type_d.h"
+#include "common/int.h"
+#include "fmgr.h"
+#include "libpq/pqformat.h"
+#include "port/pg_bitutils.h"
+#include "utils/builtins.h"
+#include "utils/bytea.h"
+#include "utils/fmgrprotos.h"
+#include "utils/memutils.h"
+#include "utils/sortsupport.h"
+#include "utils/varlena.h"
+#include "varatt.h"
+
+/* GUC variable */
+int bytea_output = BYTEA_OUTPUT_HEX;
+
+static bytea *bytea_catenate(bytea *t1, bytea *t2);
+static bytea *bytea_substring(Datum str, int S, int L,
+ bool length_not_specified);
+static bytea *bytea_overlay(bytea *t1, bytea *t2, int sp, int sl);
+
+/*
+ * bytea_catenate
+ * Guts of byteacat(), broken out so it can be used by other functions
+ *
+ * Arguments can be in short-header form, but not compressed or out-of-line
+ */
+static bytea *
+bytea_catenate(bytea *t1, bytea *t2)
+{
+ bytea *result;
+ int len1,
+ len2,
+ len;
+ char *ptr;
+
+ len1 = VARSIZE_ANY_EXHDR(t1);
+ len2 = VARSIZE_ANY_EXHDR(t2);
+
+ /* paranoia ... probably should throw error instead? */
+ if (len1 < 0)
+ len1 = 0;
+ if (len2 < 0)
+ len2 = 0;
+
+ len = len1 + len2 + VARHDRSZ;
+ result = (bytea *) palloc(len);
+
+ /* Set size of result string... */
+ SET_VARSIZE(result, len);
+
+ /* Fill data field of result string... */
+ ptr = VARDATA(result);
+ if (len1 > 0)
+ memcpy(ptr, VARDATA_ANY(t1), len1);
+ if (len2 > 0)
+ memcpy(ptr + len1, VARDATA_ANY(t2), len2);
+
+ return result;
+}
+
+#define PG_STR_GET_BYTEA(str_) \
+ DatumGetByteaPP(DirectFunctionCall1(byteain, CStringGetDatum(str_)))
+
+static bytea *
+bytea_substring(Datum str,
+ int S,
+ int L,
+ bool length_not_specified)
+{
+ int32 S1; /* adjusted start position */
+ int32 L1; /* adjusted substring length */
+ int32 E; /* end position */
+
+ /*
+ * The logic here should generally match text_substring().
+ */
+ S1 = Max(S, 1);
+
+ if (length_not_specified)
+ {
+ /*
+ * Not passed a length - DatumGetByteaPSlice() grabs everything to the
+ * end of the string if we pass it a negative value for length.
+ */
+ L1 = -1;
+ }
+ else if (L < 0)
+ {
+ /* SQL99 says to throw an error for E < S, i.e., negative length */
+ ereport(ERROR,
+ (errcode(ERRCODE_SUBSTRING_ERROR),
+ errmsg("negative substring length not allowed")));
+ L1 = -1; /* silence stupider compilers */
+ }
+ else if (pg_add_s32_overflow(S, L, &E))
+ {
+ /*
+ * L could be large enough for S + L to overflow, in which case the
+ * substring must run to end of string.
+ */
+ L1 = -1;
+ }
+ else
+ {
+ /*
+ * A zero or negative value for the end position can happen if the
+ * start was negative or one. SQL99 says to return a zero-length
+ * string.
+ */
+ if (E < 1)
+ return PG_STR_GET_BYTEA("");
+
+ L1 = E - S1;
+ }
+
+ /*
+ * If the start position is past the end of the string, SQL99 says to
+ * return a zero-length string -- DatumGetByteaPSlice() will do that for
+ * us. We need only convert S1 to zero-based starting position.
+ */
+ return DatumGetByteaPSlice(str, S1 - 1, L1);
+}
+
+static bytea *
+bytea_overlay(bytea *t1, bytea *t2, int sp, int sl)
+{
+ bytea *result;
+ bytea *s1;
+ bytea *s2;
+ int sp_pl_sl;
+
+ /*
+ * Check for possible integer-overflow cases. For negative sp, throw a
+ * "substring length" error because that's what should be expected
+ * according to the spec's definition of OVERLAY().
+ */
+ if (sp <= 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_SUBSTRING_ERROR),
+ errmsg("negative substring length not allowed")));
+ if (pg_add_s32_overflow(sp, sl, &sp_pl_sl))
+ ereport(ERROR,
+ (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
+ errmsg("integer out of range")));
+
+ s1 = bytea_substring(PointerGetDatum(t1), 1, sp - 1, false);
+ s2 = bytea_substring(PointerGetDatum(t1), sp_pl_sl, -1, true);
+ result = bytea_catenate(s1, t2);
+ result = bytea_catenate(result, s2);
+
+ return result;
+}
+
+/*****************************************************************************
+ * USER I/O ROUTINES *
+ *****************************************************************************/
+
+#define VAL(CH) ((CH) - '0')
+#define DIG(VAL) ((VAL) + '0')
+
+/*
+ * byteain - converts from printable representation of byte array
+ *
+ * Non-printable characters must be passed as '\nnn' (octal) and are
+ * converted to internal form. '\' must be passed as '\\'.
+ */
+Datum
+byteain(PG_FUNCTION_ARGS)
+{
+ char *inputText = PG_GETARG_CSTRING(0);
+ Node *escontext = fcinfo->context;
+ size_t len = strlen(inputText);
+ size_t bc;
+ char *tp;
+ char *rp;
+ bytea *result;
+
+ /* Recognize hex input */
+ if (inputText[0] == '\\' && inputText[1] == 'x')
+ {
+ bc = (len - 2) / 2 + VARHDRSZ; /* maximum possible length */
+ result = palloc(bc);
+ bc = hex_decode_safe(inputText + 2, len - 2, VARDATA(result),
+ escontext);
+ SET_VARSIZE(result, bc + VARHDRSZ); /* actual length */
+
+ PG_RETURN_BYTEA_P(result);
+ }
+
+ /* Else, it's the traditional escaped style */
+ result = (bytea *) palloc(len + VARHDRSZ); /* maximum possible length */
+
+ tp = inputText;
+ rp = VARDATA(result);
+ while (*tp != '\0')
+ {
+ if (tp[0] != '\\')
+ *rp++ = *tp++;
+ else if ((tp[1] >= '0' && tp[1] <= '3') &&
+ (tp[2] >= '0' && tp[2] <= '7') &&
+ (tp[3] >= '0' && tp[3] <= '7'))
+ {
+ int v;
+
+ v = VAL(tp[1]);
+ v <<= 3;
+ v += VAL(tp[2]);
+ v <<= 3;
+ *rp++ = v + VAL(tp[3]);
+
+ tp += 4;
+ }
+ else if (tp[1] == '\\')
+ {
+ *rp++ = '\\';
+ tp += 2;
+ }
+ else
+ {
+ /*
+ * one backslash, not followed by another or ### valid octal
+ */
+ ereturn(escontext, (Datum) 0,
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("invalid input syntax for type %s", "bytea")));
+ }
+ }
+
+ bc = rp - VARDATA(result); /* actual length */
+ SET_VARSIZE(result, bc + VARHDRSZ);
+
+ PG_RETURN_BYTEA_P(result);
+}
+
+/*
+ * byteaout - converts to printable representation of byte array
+ *
+ * In the traditional escaped format, non-printable characters are
+ * printed as '\nnn' (octal) and '\' as '\\'.
+ */
+Datum
+byteaout(PG_FUNCTION_ARGS)
+{
+ bytea *vlena = PG_GETARG_BYTEA_PP(0);
+ char *result;
+ char *rp;
+
+ if (bytea_output == BYTEA_OUTPUT_HEX)
+ {
+ /* Print hex format */
+ rp = result = palloc(VARSIZE_ANY_EXHDR(vlena) * 2 + 2 + 1);
+ *rp++ = '\\';
+ *rp++ = 'x';
+ rp += hex_encode(VARDATA_ANY(vlena), VARSIZE_ANY_EXHDR(vlena), rp);
+ }
+ else if (bytea_output == BYTEA_OUTPUT_ESCAPE)
+ {
+ /* Print traditional escaped format */
+ char *vp;
+ uint64 len;
+ int i;
+
+ len = 1; /* empty string has 1 char */
+ vp = VARDATA_ANY(vlena);
+ for (i = VARSIZE_ANY_EXHDR(vlena); i != 0; i--, vp++)
+ {
+ if (*vp == '\\')
+ len += 2;
+ else if ((unsigned char) *vp < 0x20 || (unsigned char) *vp > 0x7e)
+ len += 4;
+ else
+ len++;
+ }
+
+ /*
+ * In principle len can't overflow uint32 if the input fit in 1GB, but
+ * for safety let's check rather than relying on palloc's internal
+ * check.
+ */
+ if (len > MaxAllocSize)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg_internal("result of bytea output conversion is too large")));
+ rp = result = (char *) palloc(len);
+
+ vp = VARDATA_ANY(vlena);
+ for (i = VARSIZE_ANY_EXHDR(vlena); i != 0; i--, vp++)
+ {
+ if (*vp == '\\')
+ {
+ *rp++ = '\\';
+ *rp++ = '\\';
+ }
+ else if ((unsigned char) *vp < 0x20 || (unsigned char) *vp > 0x7e)
+ {
+ int val; /* holds unprintable chars */
+
+ val = *vp;
+ rp[0] = '\\';
+ rp[3] = DIG(val & 07);
+ val >>= 3;
+ rp[2] = DIG(val & 07);
+ val >>= 3;
+ rp[1] = DIG(val & 03);
+ rp += 4;
+ }
+ else
+ *rp++ = *vp;
+ }
+ }
+ else
+ {
+ elog(ERROR, "unrecognized \"bytea_output\" setting: %d",
+ bytea_output);
+ rp = result = NULL; /* keep compiler quiet */
+ }
+ *rp = '\0';
+ PG_RETURN_CSTRING(result);
+}
+
+/*
+ * bytearecv - converts external binary format to bytea
+ */
+Datum
+bytearecv(PG_FUNCTION_ARGS)
+{
+ StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+ bytea *result;
+ int nbytes;
+
+ nbytes = buf->len - buf->cursor;
+ result = (bytea *) palloc(nbytes + VARHDRSZ);
+ SET_VARSIZE(result, nbytes + VARHDRSZ);
+ pq_copymsgbytes(buf, VARDATA(result), nbytes);
+ PG_RETURN_BYTEA_P(result);
+}
+
+/*
+ * byteasend - converts bytea to binary format
+ *
+ * This is a special case: just copy the input...
+ */
+Datum
+byteasend(PG_FUNCTION_ARGS)
+{
+ bytea *vlena = PG_GETARG_BYTEA_P_COPY(0);
+
+ PG_RETURN_BYTEA_P(vlena);
+}
+
+Datum
+bytea_string_agg_transfn(PG_FUNCTION_ARGS)
+{
+ StringInfo state;
+
+ state = PG_ARGISNULL(0) ? NULL : (StringInfo) PG_GETARG_POINTER(0);
+
+ /* Append the value unless null, preceding it with the delimiter. */
+ if (!PG_ARGISNULL(1))
+ {
+ bytea *value = PG_GETARG_BYTEA_PP(1);
+ bool isfirst = false;
+
+ /*
+ * You might think we can just throw away the first delimiter, however
+ * we must keep it as we may be a parallel worker doing partial
+ * aggregation building a state to send to the main process. We need
+ * to keep the delimiter of every aggregation so that the combine
+ * function can properly join up the strings of two separately
+ * partially aggregated results. The first delimiter is only stripped
+ * off in the final function. To know how much to strip off the front
+ * of the string, we store the length of the first delimiter in the
+ * StringInfo's cursor field, which we don't otherwise need here.
+ */
+ if (state == NULL)
+ {
+ MemoryContext aggcontext;
+ MemoryContext oldcontext;
+
+ if (!AggCheckCallContext(fcinfo, &aggcontext))
+ {
+ /* cannot be called directly because of internal-type argument */
+ elog(ERROR, "bytea_string_agg_transfn called in non-aggregate context");
+ }
+
+ /*
+ * Create state in aggregate context. It'll stay there across
+ * subsequent calls.
+ */
+ oldcontext = MemoryContextSwitchTo(aggcontext);
+ state = makeStringInfo();
+ MemoryContextSwitchTo(oldcontext);
+
+ isfirst = true;
+ }
+
+ if (!PG_ARGISNULL(2))
+ {
+ bytea *delim = PG_GETARG_BYTEA_PP(2);
+
+ appendBinaryStringInfo(state, VARDATA_ANY(delim),
+ VARSIZE_ANY_EXHDR(delim));
+ if (isfirst)
+ state->cursor = VARSIZE_ANY_EXHDR(delim);
+ }
+
+ appendBinaryStringInfo(state, VARDATA_ANY(value),
+ VARSIZE_ANY_EXHDR(value));
+ }
+
+ /*
+ * The transition type for string_agg() is declared to be "internal",
+ * which is a pass-by-value type the same size as a pointer.
+ */
+ if (state)
+ PG_RETURN_POINTER(state);
+ PG_RETURN_NULL();
+}
+
+Datum
+bytea_string_agg_finalfn(PG_FUNCTION_ARGS)
+{
+ StringInfo state;
+
+ /* cannot be called directly because of internal-type argument */
+ Assert(AggCheckCallContext(fcinfo, NULL));
+
+ state = PG_ARGISNULL(0) ? NULL : (StringInfo) PG_GETARG_POINTER(0);
+
+ if (state != NULL)
+ {
+ /* As per comment in transfn, strip data before the cursor position */
+ bytea *result;
+ int strippedlen = state->len - state->cursor;
+
+ result = (bytea *) palloc(strippedlen + VARHDRSZ);
+ SET_VARSIZE(result, strippedlen + VARHDRSZ);
+ memcpy(VARDATA(result), &state->data[state->cursor], strippedlen);
+ PG_RETURN_BYTEA_P(result);
+ }
+ else
+ PG_RETURN_NULL();
+}
+
+/*-------------------------------------------------------------
+ * byteaoctetlen
+ *
+ * get the number of bytes contained in an instance of type 'bytea'
+ *-------------------------------------------------------------
+ */
+Datum
+byteaoctetlen(PG_FUNCTION_ARGS)
+{
+ Datum str = PG_GETARG_DATUM(0);
+
+ /* We need not detoast the input at all */
+ PG_RETURN_INT32(toast_raw_datum_size(str) - VARHDRSZ);
+}
+
+/*
+ * byteacat -
+ * takes two bytea* and returns a bytea* that is the concatenation of
+ * the two.
+ *
+ * Cloned from textcat and modified as required.
+ */
+Datum
+byteacat(PG_FUNCTION_ARGS)
+{
+ bytea *t1 = PG_GETARG_BYTEA_PP(0);
+ bytea *t2 = PG_GETARG_BYTEA_PP(1);
+
+ PG_RETURN_BYTEA_P(bytea_catenate(t1, t2));
+}
+
+/*
+ * byteaoverlay
+ * Replace specified substring of first string with second
+ *
+ * The SQL standard defines OVERLAY() in terms of substring and concatenation.
+ * This code is a direct implementation of what the standard says.
+ */
+Datum
+byteaoverlay(PG_FUNCTION_ARGS)
+{
+ bytea *t1 = PG_GETARG_BYTEA_PP(0);
+ bytea *t2 = PG_GETARG_BYTEA_PP(1);
+ int sp = PG_GETARG_INT32(2); /* substring start position */
+ int sl = PG_GETARG_INT32(3); /* substring length */
+
+ PG_RETURN_BYTEA_P(bytea_overlay(t1, t2, sp, sl));
+}
+
+Datum
+byteaoverlay_no_len(PG_FUNCTION_ARGS)
+{
+ bytea *t1 = PG_GETARG_BYTEA_PP(0);
+ bytea *t2 = PG_GETARG_BYTEA_PP(1);
+ int sp = PG_GETARG_INT32(2); /* substring start position */
+ int sl;
+
+ sl = VARSIZE_ANY_EXHDR(t2); /* defaults to length(t2) */
+ PG_RETURN_BYTEA_P(bytea_overlay(t1, t2, sp, sl));
+}
+
+/*
+ * bytea_substr()
+ * Return a substring starting at the specified position.
+ * Cloned from text_substr and modified as required.
+ *
+ * Input:
+ * - string
+ * - starting position (is one-based)
+ * - string length (optional)
+ *
+ * If the starting position is zero or less, then return from the start of the string
+ * adjusting the length to be consistent with the "negative start" per SQL.
+ * If the length is less than zero, an ERROR is thrown. If no third argument
+ * (length) is provided, the length to the end of the string is assumed.
+ */
+Datum
+bytea_substr(PG_FUNCTION_ARGS)
+{
+ PG_RETURN_BYTEA_P(bytea_substring(PG_GETARG_DATUM(0),
+ PG_GETARG_INT32(1),
+ PG_GETARG_INT32(2),
+ false));
+}
+
+/*
+ * bytea_substr_no_len -
+ * Wrapper to avoid opr_sanity failure due to
+ * one function accepting a different number of args.
+ */
+Datum
+bytea_substr_no_len(PG_FUNCTION_ARGS)
+{
+ PG_RETURN_BYTEA_P(bytea_substring(PG_GETARG_DATUM(0),
+ PG_GETARG_INT32(1),
+ -1,
+ true));
+}
+
+/*
+ * bit_count
+ */
+Datum
+bytea_bit_count(PG_FUNCTION_ARGS)
+{
+ bytea *t1 = PG_GETARG_BYTEA_PP(0);
+
+ PG_RETURN_INT64(pg_popcount(VARDATA_ANY(t1), VARSIZE_ANY_EXHDR(t1)));
+}
+
+/*
+ * byteapos -
+ * Return the position of the specified substring.
+ * Implements the SQL POSITION() function.
+ * Cloned from textpos and modified as required.
+ */
+Datum
+byteapos(PG_FUNCTION_ARGS)
+{
+ bytea *t1 = PG_GETARG_BYTEA_PP(0);
+ bytea *t2 = PG_GETARG_BYTEA_PP(1);
+ int pos;
+ int px,
+ p;
+ int len1,
+ len2;
+ char *p1,
+ *p2;
+
+ len1 = VARSIZE_ANY_EXHDR(t1);
+ len2 = VARSIZE_ANY_EXHDR(t2);
+
+ if (len2 <= 0)
+ PG_RETURN_INT32(1); /* result for empty pattern */
+
+ p1 = VARDATA_ANY(t1);
+ p2 = VARDATA_ANY(t2);
+
+ pos = 0;
+ px = (len1 - len2);
+ for (p = 0; p <= px; p++)
+ {
+ if ((*p2 == *p1) && (memcmp(p1, p2, len2) == 0))
+ {
+ pos = p + 1;
+ break;
+ };
+ p1++;
+ };
+
+ PG_RETURN_INT32(pos);
+}
+
+/*-------------------------------------------------------------
+ * byteaGetByte
+ *
+ * this routine treats "bytea" as an array of bytes.
+ * It returns the Nth byte (a number between 0 and 255).
+ *-------------------------------------------------------------
+ */
+Datum
+byteaGetByte(PG_FUNCTION_ARGS)
+{
+ bytea *v = PG_GETARG_BYTEA_PP(0);
+ int32 n = PG_GETARG_INT32(1);
+ int len;
+ int byte;
+
+ len = VARSIZE_ANY_EXHDR(v);
+
+ if (n < 0 || n >= len)
+ ereport(ERROR,
+ (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
+ errmsg("index %d out of valid range, 0..%d",
+ n, len - 1)));
+
+ byte = ((unsigned char *) VARDATA_ANY(v))[n];
+
+ PG_RETURN_INT32(byte);
+}
+
+/*-------------------------------------------------------------
+ * byteaGetBit
+ *
+ * This routine treats a "bytea" type like an array of bits.
+ * It returns the value of the Nth bit (0 or 1).
+ *
+ *-------------------------------------------------------------
+ */
+Datum
+byteaGetBit(PG_FUNCTION_ARGS)
+{
+ bytea *v = PG_GETARG_BYTEA_PP(0);
+ int64 n = PG_GETARG_INT64(1);
+ int byteNo,
+ bitNo;
+ int len;
+ int byte;
+
+ len = VARSIZE_ANY_EXHDR(v);
+
+ if (n < 0 || n >= (int64) len * 8)
+ ereport(ERROR,
+ (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
+ errmsg("index %" PRId64 " out of valid range, 0..%" PRId64,
+ n, (int64) len * 8 - 1)));
+
+ /* n/8 is now known < len, so safe to cast to int */
+ byteNo = (int) (n / 8);
+ bitNo = (int) (n % 8);
+
+ byte = ((unsigned char *) VARDATA_ANY(v))[byteNo];
+
+ if (byte & (1 << bitNo))
+ PG_RETURN_INT32(1);
+ else
+ PG_RETURN_INT32(0);
+}
+
+/*-------------------------------------------------------------
+ * byteaSetByte
+ *
+ * Given an instance of type 'bytea' creates a new one with
+ * the Nth byte set to the given value.
+ *
+ *-------------------------------------------------------------
+ */
+Datum
+byteaSetByte(PG_FUNCTION_ARGS)
+{
+ bytea *res = PG_GETARG_BYTEA_P_COPY(0);
+ int32 n = PG_GETARG_INT32(1);
+ int32 newByte = PG_GETARG_INT32(2);
+ int len;
+
+ len = VARSIZE(res) - VARHDRSZ;
+
+ if (n < 0 || n >= len)
+ ereport(ERROR,
+ (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
+ errmsg("index %d out of valid range, 0..%d",
+ n, len - 1)));
+
+ /*
+ * Now set the byte.
+ */
+ ((unsigned char *) VARDATA(res))[n] = newByte;
+
+ PG_RETURN_BYTEA_P(res);
+}
+
+/*-------------------------------------------------------------
+ * byteaSetBit
+ *
+ * Given an instance of type 'bytea' creates a new one with
+ * the Nth bit set to the given value.
+ *
+ *-------------------------------------------------------------
+ */
+Datum
+byteaSetBit(PG_FUNCTION_ARGS)
+{
+ bytea *res = PG_GETARG_BYTEA_P_COPY(0);
+ int64 n = PG_GETARG_INT64(1);
+ int32 newBit = PG_GETARG_INT32(2);
+ int len;
+ int oldByte,
+ newByte;
+ int byteNo,
+ bitNo;
+
+ len = VARSIZE(res) - VARHDRSZ;
+
+ if (n < 0 || n >= (int64) len * 8)
+ ereport(ERROR,
+ (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
+ errmsg("index %" PRId64 " out of valid range, 0..%" PRId64,
+ n, (int64) len * 8 - 1)));
+
+ /* n/8 is now known < len, so safe to cast to int */
+ byteNo = (int) (n / 8);
+ bitNo = (int) (n % 8);
+
+ /*
+ * sanity check!
+ */
+ if (newBit != 0 && newBit != 1)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("new bit must be 0 or 1")));
+
+ /*
+ * Update the byte.
+ */
+ oldByte = ((unsigned char *) VARDATA(res))[byteNo];
+
+ if (newBit == 0)
+ newByte = oldByte & (~(1 << bitNo));
+ else
+ newByte = oldByte | (1 << bitNo);
+
+ ((unsigned char *) VARDATA(res))[byteNo] = newByte;
+
+ PG_RETURN_BYTEA_P(res);
+}
+
+/*
+ * Return reversed bytea
+ */
+Datum
+bytea_reverse(PG_FUNCTION_ARGS)
+{
+ bytea *v = PG_GETARG_BYTEA_PP(0);
+ const char *p = VARDATA_ANY(v);
+ int len = VARSIZE_ANY_EXHDR(v);
+ const char *endp = p + len;
+ bytea *result = palloc(len + VARHDRSZ);
+ char *dst = (char *) VARDATA(result) + len;
+
+ SET_VARSIZE(result, len + VARHDRSZ);
+
+ while (p < endp)
+ *(--dst) = *p++;
+
+ PG_RETURN_BYTEA_P(result);
+}
+
+
+/*****************************************************************************
+ * Comparison Functions used for bytea
+ *
+ * Note: btree indexes need these routines not to leak memory; therefore,
+ * be careful to free working copies of toasted datums. Most places don't
+ * need to be so careful.
+ *****************************************************************************/
+
+Datum
+byteaeq(PG_FUNCTION_ARGS)
+{
+ Datum arg1 = PG_GETARG_DATUM(0);
+ Datum arg2 = PG_GETARG_DATUM(1);
+ bool result;
+ Size len1,
+ len2;
+
+ /*
+ * We can use a fast path for unequal lengths, which might save us from
+ * having to detoast one or both values.
+ */
+ len1 = toast_raw_datum_size(arg1);
+ len2 = toast_raw_datum_size(arg2);
+ if (len1 != len2)
+ result = false;
+ else
+ {
+ bytea *barg1 = DatumGetByteaPP(arg1);
+ bytea *barg2 = DatumGetByteaPP(arg2);
+
+ result = (memcmp(VARDATA_ANY(barg1), VARDATA_ANY(barg2),
+ len1 - VARHDRSZ) == 0);
+
+ PG_FREE_IF_COPY(barg1, 0);
+ PG_FREE_IF_COPY(barg2, 1);
+ }
+
+ PG_RETURN_BOOL(result);
+}
+
+Datum
+byteane(PG_FUNCTION_ARGS)
+{
+ Datum arg1 = PG_GETARG_DATUM(0);
+ Datum arg2 = PG_GETARG_DATUM(1);
+ bool result;
+ Size len1,
+ len2;
+
+ /*
+ * We can use a fast path for unequal lengths, which might save us from
+ * having to detoast one or both values.
+ */
+ len1 = toast_raw_datum_size(arg1);
+ len2 = toast_raw_datum_size(arg2);
+ if (len1 != len2)
+ result = true;
+ else
+ {
+ bytea *barg1 = DatumGetByteaPP(arg1);
+ bytea *barg2 = DatumGetByteaPP(arg2);
+
+ result = (memcmp(VARDATA_ANY(barg1), VARDATA_ANY(barg2),
+ len1 - VARHDRSZ) != 0);
+
+ PG_FREE_IF_COPY(barg1, 0);
+ PG_FREE_IF_COPY(barg2, 1);
+ }
+
+ PG_RETURN_BOOL(result);
+}
+
+Datum
+bytealt(PG_FUNCTION_ARGS)
+{
+ bytea *arg1 = PG_GETARG_BYTEA_PP(0);
+ bytea *arg2 = PG_GETARG_BYTEA_PP(1);
+ int len1,
+ len2;
+ int cmp;
+
+ len1 = VARSIZE_ANY_EXHDR(arg1);
+ len2 = VARSIZE_ANY_EXHDR(arg2);
+
+ cmp = memcmp(VARDATA_ANY(arg1), VARDATA_ANY(arg2), Min(len1, len2));
+
+ PG_FREE_IF_COPY(arg1, 0);
+ PG_FREE_IF_COPY(arg2, 1);
+
+ PG_RETURN_BOOL((cmp < 0) || ((cmp == 0) && (len1 < len2)));
+}
+
+Datum
+byteale(PG_FUNCTION_ARGS)
+{
+ bytea *arg1 = PG_GETARG_BYTEA_PP(0);
+ bytea *arg2 = PG_GETARG_BYTEA_PP(1);
+ int len1,
+ len2;
+ int cmp;
+
+ len1 = VARSIZE_ANY_EXHDR(arg1);
+ len2 = VARSIZE_ANY_EXHDR(arg2);
+
+ cmp = memcmp(VARDATA_ANY(arg1), VARDATA_ANY(arg2), Min(len1, len2));
+
+ PG_FREE_IF_COPY(arg1, 0);
+ PG_FREE_IF_COPY(arg2, 1);
+
+ PG_RETURN_BOOL((cmp < 0) || ((cmp == 0) && (len1 <= len2)));
+}
+
+Datum
+byteagt(PG_FUNCTION_ARGS)
+{
+ bytea *arg1 = PG_GETARG_BYTEA_PP(0);
+ bytea *arg2 = PG_GETARG_BYTEA_PP(1);
+ int len1,
+ len2;
+ int cmp;
+
+ len1 = VARSIZE_ANY_EXHDR(arg1);
+ len2 = VARSIZE_ANY_EXHDR(arg2);
+
+ cmp = memcmp(VARDATA_ANY(arg1), VARDATA_ANY(arg2), Min(len1, len2));
+
+ PG_FREE_IF_COPY(arg1, 0);
+ PG_FREE_IF_COPY(arg2, 1);
+
+ PG_RETURN_BOOL((cmp > 0) || ((cmp == 0) && (len1 > len2)));
+}
+
+Datum
+byteage(PG_FUNCTION_ARGS)
+{
+ bytea *arg1 = PG_GETARG_BYTEA_PP(0);
+ bytea *arg2 = PG_GETARG_BYTEA_PP(1);
+ int len1,
+ len2;
+ int cmp;
+
+ len1 = VARSIZE_ANY_EXHDR(arg1);
+ len2 = VARSIZE_ANY_EXHDR(arg2);
+
+ cmp = memcmp(VARDATA_ANY(arg1), VARDATA_ANY(arg2), Min(len1, len2));
+
+ PG_FREE_IF_COPY(arg1, 0);
+ PG_FREE_IF_COPY(arg2, 1);
+
+ PG_RETURN_BOOL((cmp > 0) || ((cmp == 0) && (len1 >= len2)));
+}
+
+Datum
+byteacmp(PG_FUNCTION_ARGS)
+{
+ bytea *arg1 = PG_GETARG_BYTEA_PP(0);
+ bytea *arg2 = PG_GETARG_BYTEA_PP(1);
+ int len1,
+ len2;
+ int cmp;
+
+ len1 = VARSIZE_ANY_EXHDR(arg1);
+ len2 = VARSIZE_ANY_EXHDR(arg2);
+
+ cmp = memcmp(VARDATA_ANY(arg1), VARDATA_ANY(arg2), Min(len1, len2));
+ if ((cmp == 0) && (len1 != len2))
+ cmp = (len1 < len2) ? -1 : 1;
+
+ PG_FREE_IF_COPY(arg1, 0);
+ PG_FREE_IF_COPY(arg2, 1);
+
+ PG_RETURN_INT32(cmp);
+}
+
+Datum
+bytea_larger(PG_FUNCTION_ARGS)
+{
+ bytea *arg1 = PG_GETARG_BYTEA_PP(0);
+ bytea *arg2 = PG_GETARG_BYTEA_PP(1);
+ bytea *result;
+ int len1,
+ len2;
+ int cmp;
+
+ len1 = VARSIZE_ANY_EXHDR(arg1);
+ len2 = VARSIZE_ANY_EXHDR(arg2);
+
+ cmp = memcmp(VARDATA_ANY(arg1), VARDATA_ANY(arg2), Min(len1, len2));
+ result = ((cmp > 0) || ((cmp == 0) && (len1 > len2)) ? arg1 : arg2);
+
+ PG_RETURN_BYTEA_P(result);
+}
+
+Datum
+bytea_smaller(PG_FUNCTION_ARGS)
+{
+ bytea *arg1 = PG_GETARG_BYTEA_PP(0);
+ bytea *arg2 = PG_GETARG_BYTEA_PP(1);
+ bytea *result;
+ int len1,
+ len2;
+ int cmp;
+
+ len1 = VARSIZE_ANY_EXHDR(arg1);
+ len2 = VARSIZE_ANY_EXHDR(arg2);
+
+ cmp = memcmp(VARDATA_ANY(arg1), VARDATA_ANY(arg2), Min(len1, len2));
+ result = ((cmp < 0) || ((cmp == 0) && (len1 < len2)) ? arg1 : arg2);
+
+ PG_RETURN_BYTEA_P(result);
+}
+
+Datum
+bytea_sortsupport(PG_FUNCTION_ARGS)
+{
+ SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+ MemoryContext oldcontext;
+
+ oldcontext = MemoryContextSwitchTo(ssup->ssup_cxt);
+
+ /* Use generic string SortSupport, forcing "C" collation */
+ varstr_sortsupport(ssup, BYTEAOID, C_COLLATION_OID);
+
+ MemoryContextSwitchTo(oldcontext);
+
+ PG_RETURN_VOID();
+}
+
+/* Cast bytea -> int2 */
+Datum
+bytea_int2(PG_FUNCTION_ARGS)
+{
+ bytea *v = PG_GETARG_BYTEA_PP(0);
+ int len = VARSIZE_ANY_EXHDR(v);
+ uint16 result;
+
+ /* Check that the byte array is not too long */
+ if (len > sizeof(result))
+ ereport(ERROR,
+ errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
+ errmsg("smallint out of range"));
+
+ /* Convert it to an integer; most significant bytes come first */
+ result = 0;
+ for (int i = 0; i < len; i++)
+ {
+ result <<= BITS_PER_BYTE;
+ result |= ((unsigned char *) VARDATA_ANY(v))[i];
+ }
+
+ PG_RETURN_INT16(result);
+}
+
+/* Cast bytea -> int4 */
+Datum
+bytea_int4(PG_FUNCTION_ARGS)
+{
+ bytea *v = PG_GETARG_BYTEA_PP(0);
+ int len = VARSIZE_ANY_EXHDR(v);
+ uint32 result;
+
+ /* Check that the byte array is not too long */
+ if (len > sizeof(result))
+ ereport(ERROR,
+ errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
+ errmsg("integer out of range"));
+
+ /* Convert it to an integer; most significant bytes come first */
+ result = 0;
+ for (int i = 0; i < len; i++)
+ {
+ result <<= BITS_PER_BYTE;
+ result |= ((unsigned char *) VARDATA_ANY(v))[i];
+ }
+
+ PG_RETURN_INT32(result);
+}
+
+/* Cast bytea -> int8 */
+Datum
+bytea_int8(PG_FUNCTION_ARGS)
+{
+ bytea *v = PG_GETARG_BYTEA_PP(0);
+ int len = VARSIZE_ANY_EXHDR(v);
+ uint64 result;
+
+ /* Check that the byte array is not too long */
+ if (len > sizeof(result))
+ ereport(ERROR,
+ errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
+ errmsg("bigint out of range"));
+
+ /* Convert it to an integer; most significant bytes come first */
+ result = 0;
+ for (int i = 0; i < len; i++)
+ {
+ result <<= BITS_PER_BYTE;
+ result |= ((unsigned char *) VARDATA_ANY(v))[i];
+ }
+
+ PG_RETURN_INT64(result);
+}
+
+/* Cast int2 -> bytea; can just use int2send() */
+Datum
+int2_bytea(PG_FUNCTION_ARGS)
+{
+ return int2send(fcinfo);
+}
+
+/* Cast int4 -> bytea; can just use int4send() */
+Datum
+int4_bytea(PG_FUNCTION_ARGS)
+{
+ return int4send(fcinfo);
+}
+
+/* Cast int8 -> bytea; can just use int8send() */
+Datum
+int8_bytea(PG_FUNCTION_ARGS)
+{
+ return int8send(fcinfo);
+}
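
The bytea -> integer casts at the end of the new file share one convention worth spelling out: bytes are read big-endian, shorter inputs act as if zero-padded on the left, and accumulation happens in the unsigned type, so a full-width value with the high bit set comes back negative. A self-contained sketch of the int4 case:

    static int32
    bytes_to_int4_sketch(const unsigned char *data, int len)
    {
        uint32      acc = 0;            /* unsigned, exactly as bytea_int4 does */

        Assert(len <= 4);               /* longer inputs raise an out-of-range error */
        for (int i = 0; i < len; i++)
            acc = (acc << 8) | data[i]; /* most significant byte first */
        return (int32) acc;             /* '\xffffffff' therefore yields -1 */
    }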
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index 4227ab1a72b..344f58b92f7 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -1363,10 +1363,35 @@ timestamp_date(PG_FUNCTION_ARGS)
{
Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
DateADT result;
+
+ result = timestamp2date_opt_overflow(timestamp, NULL);
+ PG_RETURN_DATEADT(result);
+}
+
+/*
+ * Convert timestamp to date.
+ *
+ * On successful conversion, *overflow is set to zero if it's not NULL.
+ *
+ * If the timestamp is finite but out of the valid range for date, then:
+ * if overflow is NULL, we throw an out-of-range error.
+ * if overflow is not NULL, we store +1 or -1 there to indicate the sign
+ * of the overflow, and return the appropriate date infinity.
+ *
+ * Note: given the ranges of the types, overflow is only possible at
+ * the minimum end of the range, but we don't assume that in this code.
+ */
+DateADT
+timestamp2date_opt_overflow(Timestamp timestamp, int *overflow)
+{
+ DateADT result;
struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
+ if (overflow)
+ *overflow = 0;
+
if (TIMESTAMP_IS_NOBEGIN(timestamp))
DATE_NOBEGIN(result);
else if (TIMESTAMP_IS_NOEND(timestamp))
@@ -1374,14 +1399,30 @@ timestamp_date(PG_FUNCTION_ARGS)
else
{
if (timestamp2tm(timestamp, NULL, tm, &fsec, NULL, NULL) != 0)
+ {
+ if (overflow)
+ {
+ if (timestamp < 0)
+ {
+ *overflow = -1;
+ DATE_NOBEGIN(result);
+ }
+ else
+ {
+ *overflow = 1; /* not actually reachable */
+ DATE_NOEND(result);
+ }
+ return result;
+ }
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
errmsg("timestamp out of range")));
+ }
result = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - POSTGRES_EPOCH_JDATE;
}
- PG_RETURN_DATEADT(result);
+ return result;
}
@@ -1408,11 +1449,36 @@ timestamptz_date(PG_FUNCTION_ARGS)
{
TimestampTz timestamp = PG_GETARG_TIMESTAMP(0);
DateADT result;
+
+ result = timestamptz2date_opt_overflow(timestamp, NULL);
+ PG_RETURN_DATEADT(result);
+}
+
+/*
+ * Convert timestamptz to date.
+ *
+ * On successful conversion, *overflow is set to zero if it's not NULL.
+ *
+ * If the timestamptz is finite but out of the valid range for date, then:
+ * if overflow is NULL, we throw an out-of-range error.
+ * if overflow is not NULL, we store +1 or -1 there to indicate the sign
+ * of the overflow, and return the appropriate date infinity.
+ *
+ * Note: given the ranges of the types, overflow is only possible at
+ * the minimum end of the range, but we don't assume that in this code.
+ */
+DateADT
+timestamptz2date_opt_overflow(TimestampTz timestamp, int *overflow)
+{
+ DateADT result;
struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
int tz;
+ if (overflow)
+ *overflow = 0;
+
if (TIMESTAMP_IS_NOBEGIN(timestamp))
DATE_NOBEGIN(result);
else if (TIMESTAMP_IS_NOEND(timestamp))
@@ -1420,14 +1486,30 @@ timestamptz_date(PG_FUNCTION_ARGS)
else
{
if (timestamp2tm(timestamp, &tz, tm, &fsec, NULL, NULL) != 0)
+ {
+ if (overflow)
+ {
+ if (timestamp < 0)
+ {
+ *overflow = -1;
+ DATE_NOBEGIN(result);
+ }
+ else
+ {
+ *overflow = 1; /* not actually reachable */
+ DATE_NOEND(result);
+ }
+ return result;
+ }
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
errmsg("timestamp out of range")));
+ }
result = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - POSTGRES_EPOCH_JDATE;
}
- PG_RETURN_DATEADT(result);
+ return result;
}
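
Caller-side sketch of the new soft-overflow contract (variable names are illustrative): pass a non-NULL overflow pointer to trade the out-of-range error for a signed indicator plus the matching date infinity.

    int         overflow;
    DateADT     d = timestamp2date_opt_overflow(ts, &overflow);

    if (overflow < 0)
    {
        /* ts predates the date range; d is already DATE_NOBEGIN */
    }
    else if (overflow > 0)
    {
        /* per the comments above, not reachable today, but kept symmetric */
    }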
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index 6d20ae07ae7..7b97d2be6ca 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -4065,10 +4065,11 @@ float84ge(PG_FUNCTION_ARGS)
* in the histogram. width_bucket() returns an integer indicating the
* bucket number that 'operand' belongs to in an equiwidth histogram
* with the specified characteristics. An operand smaller than the
- * lower bound is assigned to bucket 0. An operand greater than the
- * upper bound is assigned to an additional bucket (with number
- * count+1). We don't allow "NaN" for any of the float8 inputs, and we
- * don't allow either of the histogram bounds to be +/- infinity.
+ * lower bound is assigned to bucket 0. An operand greater than or equal
+ * to the upper bound is assigned to an additional bucket (with number
+ * count+1). We don't allow the histogram bounds to be NaN or +/- infinity,
+ * but we do allow those values for the operand (taking NaN to be larger
+ * than any other value, as we do in comparisons).
*/
Datum
width_bucket_float8(PG_FUNCTION_ARGS)
@@ -4084,12 +4085,11 @@ width_bucket_float8(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
errmsg("count must be greater than zero")));
- if (isnan(operand) || isnan(bound1) || isnan(bound2))
+ if (isnan(bound1) || isnan(bound2))
ereport(ERROR,
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
- errmsg("operand, lower bound, and upper bound cannot be NaN")));
+ errmsg("lower and upper bounds cannot be NaN")));
- /* Note that we allow "operand" to be infinite */
if (isinf(bound1) || isinf(bound2))
ereport(ERROR,
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
@@ -4097,15 +4097,15 @@ width_bucket_float8(PG_FUNCTION_ARGS)
if (bound1 < bound2)
{
- if (operand < bound1)
- result = 0;
- else if (operand >= bound2)
+ if (isnan(operand) || operand >= bound2)
{
if (pg_add_s32_overflow(count, 1, &result))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("integer out of range")));
}
+ else if (operand < bound1)
+ result = 0;
else
{
if (!isinf(bound2 - bound1))
@@ -4135,7 +4135,7 @@ width_bucket_float8(PG_FUNCTION_ARGS)
}
else if (bound1 > bound2)
{
- if (operand > bound1)
+ if (isnan(operand) || operand > bound1)
result = 0;
else if (operand <= bound2)
{
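
The reordered tests make a NaN operand land in the overflow bucket (count+1) for ascending bounds and in bucket 0 for descending bounds, consistent with NaN sorting above every other value. A self-contained sketch of the ascending-bounds rule (an illustration only; it omits the integer-overflow and infinite-width guards the real function keeps):

    #include <math.h>

    static int
    width_bucket_sketch(double operand, double bound1, double bound2, int count)
    {
        if (isnan(operand) || operand >= bound2)
            return count + 1;       /* NaN sorts above everything */
        if (operand < bound1)
            return 0;
        return (int) floor(count * (operand - bound1) / (bound2 - bound1)) + 1;
    }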
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 5bd1e01f7e4..1d05481181d 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -3590,14 +3590,15 @@ DCH_from_char(FormatNode *node, const char *in, TmFromChar *out,
if (matched < 2)
ereturn(escontext,,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("invalid input string for \"Y,YYY\"")));
+ errmsg("invalid value \"%s\" for \"%s\"",
+ s, "Y,YYY")));
/* years += (millennia * 1000); */
if (pg_mul_s32_overflow(millennia, 1000, &millennia) ||
pg_add_s32_overflow(years, millennia, &years))
ereturn(escontext,,
(errcode(ERRCODE_DATETIME_FIELD_OVERFLOW),
- errmsg("value for \"Y,YYY\" in source string is out of range")));
+ errmsg("value for \"%s\" in source string is out of range", "Y,YYY")));
if (!from_char_set_int(&out->year, years, n, escontext))
return;
diff --git a/src/backend/utils/adt/inet_net_pton.c b/src/backend/utils/adt/inet_net_pton.c
index ef2236d9f04..3b0db2a3799 100644
--- a/src/backend/utils/adt/inet_net_pton.c
+++ b/src/backend/utils/adt/inet_net_pton.c
@@ -115,8 +115,7 @@ inet_cidr_pton_ipv4(const char *src, u_char *dst, size_t size)
src++; /* skip x or X. */
while ((ch = *src++) != '\0' && isxdigit((unsigned char) ch))
{
- if (isupper((unsigned char) ch))
- ch = tolower((unsigned char) ch);
+ ch = pg_ascii_tolower((unsigned char) ch);
n = strchr(xdigits, ch) - xdigits;
assert(n >= 0 && n <= 15);
if (dirty == 0)
diff --git a/src/backend/utils/adt/jsonb_gin.c b/src/backend/utils/adt/jsonb_gin.c
index c1950792b5a..9b56248cf0b 100644
--- a/src/backend/utils/adt/jsonb_gin.c
+++ b/src/backend/utils/adt/jsonb_gin.c
@@ -896,8 +896,8 @@ gin_extract_jsonb_query(PG_FUNCTION_ARGS)
continue;
/* We rely on the array elements not being toasted */
entries[j++] = make_text_key(JGINFLAG_KEY,
- VARDATA_ANY(key_datums[i]),
- VARSIZE_ANY_EXHDR(key_datums[i]));
+ VARDATA_ANY(DatumGetPointer(key_datums[i])),
+ VARSIZE_ANY_EXHDR(DatumGetPointer(key_datums[i])));
}
*nentries = j;
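
This hunk and the matching ones below in jsonb_op.c, jsonfuncs.c, and jsonpath_exec.c apply one mechanical pattern: VARDATA_ANY() and VARSIZE_ANY_EXHDR() operate on a varlena pointer, so the Datum coming out of deconstruct_array() is converted explicitly rather than relying on Datum being implicitly pointer-like. The shape of the pattern, sketched with the names from the hunk above:

    Datum       elem = key_datums[i];   /* element from deconstruct_array() */
    text       *txt = (text *) DatumGetPointer(elem);

    char       *data = VARDATA_ANY(txt);
    int         len = VARSIZE_ANY_EXHDR(txt);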
diff --git a/src/backend/utils/adt/jsonb_op.c b/src/backend/utils/adt/jsonb_op.c
index fa5603f26e1..51d38e321fb 100644
--- a/src/backend/utils/adt/jsonb_op.c
+++ b/src/backend/utils/adt/jsonb_op.c
@@ -63,8 +63,8 @@ jsonb_exists_any(PG_FUNCTION_ARGS)
strVal.type = jbvString;
/* We rely on the array elements not being toasted */
- strVal.val.string.val = VARDATA_ANY(key_datums[i]);
- strVal.val.string.len = VARSIZE_ANY_EXHDR(key_datums[i]);
+ strVal.val.string.val = VARDATA_ANY(DatumGetPointer(key_datums[i]));
+ strVal.val.string.len = VARSIZE_ANY_EXHDR(DatumGetPointer(key_datums[i]));
if (findJsonbValueFromContainer(&jb->root,
JB_FOBJECT | JB_FARRAY,
@@ -96,8 +96,8 @@ jsonb_exists_all(PG_FUNCTION_ARGS)
strVal.type = jbvString;
/* We rely on the array elements not being toasted */
- strVal.val.string.val = VARDATA_ANY(key_datums[i]);
- strVal.val.string.len = VARSIZE_ANY_EXHDR(key_datums[i]);
+ strVal.val.string.val = VARDATA_ANY(DatumGetPointer(key_datums[i]));
+ strVal.val.string.len = VARSIZE_ANY_EXHDR(DatumGetPointer(key_datums[i]));
if (findJsonbValueFromContainer(&jb->root,
JB_FOBJECT | JB_FARRAY,
diff --git a/src/backend/utils/adt/jsonb_util.c b/src/backend/utils/adt/jsonb_util.c
index c8b6c15e059..82b807d067a 100644
--- a/src/backend/utils/adt/jsonb_util.c
+++ b/src/backend/utils/adt/jsonb_util.c
@@ -277,22 +277,16 @@ compareJsonbContainers(JsonbContainer *a, JsonbContainer *b)
else
{
/*
- * It's safe to assume that the types differed, and that the va
- * and vb values passed were set.
- *
- * If the two values were of the same container type, then there'd
- * have been a chance to observe the variation in the number of
- * elements/pairs (when processing WJB_BEGIN_OBJECT, say). They're
- * either two heterogeneously-typed containers, or a container and
- * some scalar type.
- *
- * We don't have to consider the WJB_END_ARRAY and WJB_END_OBJECT
- * cases here, because we would have seen the corresponding
- * WJB_BEGIN_ARRAY and WJB_BEGIN_OBJECT tokens first, and
- * concluded that they don't match.
+ * It's not possible for one iterator to report end of array or
+ * object while the other one reports something else, because we
+ * would have detected a length mismatch when we processed the
+ * container-start tokens above. Likewise we can't see WJB_DONE
+ * from one but not the other. So we have two different-type
+ * containers, or a container and some scalar type, or two
+ * different scalar types. Sort on the basis of the type code.
*/
- Assert(ra != WJB_END_ARRAY && ra != WJB_END_OBJECT);
- Assert(rb != WJB_END_ARRAY && rb != WJB_END_OBJECT);
+ Assert(ra != WJB_DONE && ra != WJB_END_ARRAY && ra != WJB_END_OBJECT);
+ Assert(rb != WJB_DONE && rb != WJB_END_ARRAY && rb != WJB_END_OBJECT);
Assert(va.type != vb.type);
Assert(va.type != jbvBinary);
@@ -852,15 +846,20 @@ JsonbIteratorInit(JsonbContainer *container)
* It is our job to expand the jbvBinary representation without bothering them
* with it. However, clients should not take it upon themselves to touch array
* or Object element/pair buffers, since their element/pair pointers are
- * garbage. Also, *val will not be set when returning WJB_END_ARRAY or
- * WJB_END_OBJECT, on the assumption that it's only useful to access values
- * when recursing in.
+ * garbage.
+ *
+ * *val is not meaningful when the result is WJB_DONE, WJB_END_ARRAY or
+ * WJB_END_OBJECT. However, we set val->type = jbvNull in those cases,
+ * so that callers may assume that val->type is always well-defined.
*/
JsonbIteratorToken
JsonbIteratorNext(JsonbIterator **it, JsonbValue *val, bool skipNested)
{
if (*it == NULL)
+ {
+ val->type = jbvNull;
return WJB_DONE;
+ }
/*
* When stepping into a nested container, we jump back here to start
@@ -898,6 +897,7 @@ recurse:
* nesting).
*/
*it = freeAndGetParent(*it);
+ val->type = jbvNull;
return WJB_END_ARRAY;
}
@@ -951,6 +951,7 @@ recurse:
* of nesting).
*/
*it = freeAndGetParent(*it);
+ val->type = jbvNull;
return WJB_END_OBJECT;
}
else
@@ -995,8 +996,10 @@ recurse:
return WJB_VALUE;
}
- elog(ERROR, "invalid iterator state");
- return -1;
+ elog(ERROR, "invalid jsonb iterator state");
+ /* satisfy compilers that don't know that elog(ERROR) doesn't return */
+ val->type = jbvNull;
+ return WJB_DONE;
}
/*
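
A minimal consumer loop under the contract documented above (a hedged sketch; the process() callback and the jb variable are hypothetical):

    JsonbIterator *it = JsonbIteratorInit(&jb->root);
    JsonbValue  v;
    JsonbIteratorToken tok;

    while ((tok = JsonbIteratorNext(&it, &v, false)) != WJB_DONE)
    {
        /*
         * v.type is now always well-defined: on WJB_END_ARRAY and
         * WJB_END_OBJECT it is jbvNull rather than stale data left over
         * from an earlier token.
         */
        if (tok == WJB_KEY || tok == WJB_VALUE || tok == WJB_ELEM)
            process(&v);        /* hypothetical per-scalar callback */
    }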
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index bcb1720b6cd..370456408bf 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -4766,8 +4766,8 @@ jsonb_delete_array(PG_FUNCTION_ARGS)
continue;
/* We rely on the array elements not being toasted */
- keyptr = VARDATA_ANY(keys_elems[i]);
- keylen = VARSIZE_ANY_EXHDR(keys_elems[i]);
+ keyptr = VARDATA_ANY(DatumGetPointer(keys_elems[i]));
+ keylen = VARSIZE_ANY_EXHDR(DatumGetPointer(keys_elems[i]));
if (keylen == v.val.string.len &&
memcmp(keyptr, v.val.string.val, keylen) == 0)
{
diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c
index dbab24737ef..407041b14a1 100644
--- a/src/backend/utils/adt/jsonpath_exec.c
+++ b/src/backend/utils/adt/jsonpath_exec.c
@@ -3074,8 +3074,8 @@ JsonItemFromDatum(Datum val, Oid typid, int32 typmod, JsonbValue *res)
case TEXTOID:
case VARCHAROID:
res->type = jbvString;
- res->val.string.val = VARDATA_ANY(val);
- res->val.string.len = VARSIZE_ANY_EXHDR(val);
+ res->val.string.val = VARDATA_ANY(DatumGetPointer(val));
+ res->val.string.len = VARSIZE_ANY_EXHDR(DatumGetPointer(val));
break;
case DATEOID:
case TIMEOID:
diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c
index 7f4cf614585..4216ac17f43 100644
--- a/src/backend/utils/adt/like.c
+++ b/src/backend/utils/adt/like.c
@@ -98,7 +98,7 @@ SB_lower_char(unsigned char c, pg_locale_t locale)
else if (locale->is_default)
return pg_tolower(c);
else
- return tolower_l(c, locale->info.lt);
+ return char_tolower(c, locale);
}
@@ -209,7 +209,17 @@ Generic_Text_IC_like(text *str, text *pat, Oid collation)
* way.
*/
- if (pg_database_encoding_max_length() > 1 || (locale->provider == COLLPROVIDER_ICU))
+ if (locale->ctype_is_c ||
+ (char_tolower_enabled(locale) &&
+ pg_database_encoding_max_length() == 1))
+ {
+ p = VARDATA_ANY(pat);
+ plen = VARSIZE_ANY_EXHDR(pat);
+ s = VARDATA_ANY(str);
+ slen = VARSIZE_ANY_EXHDR(str);
+ return SB_IMatchText(s, slen, p, plen, locale);
+ }
+ else
{
pat = DatumGetTextPP(DirectFunctionCall1Coll(lower, collation,
PointerGetDatum(pat)));
@@ -224,14 +234,6 @@ Generic_Text_IC_like(text *str, text *pat, Oid collation)
else
return MB_MatchText(s, slen, p, plen, 0);
}
- else
- {
- p = VARDATA_ANY(pat);
- plen = VARSIZE_ANY_EXHDR(pat);
- s = VARDATA_ANY(str);
- slen = VARSIZE_ANY_EXHDR(str);
- return SB_IMatchText(s, slen, p, plen, locale);
- }
}
/*
diff --git a/src/backend/utils/adt/like_support.c b/src/backend/utils/adt/like_support.c
index 8fdc677371f..999f23f86d5 100644
--- a/src/backend/utils/adt/like_support.c
+++ b/src/backend/utils/adt/like_support.c
@@ -1495,13 +1495,8 @@ pattern_char_isalpha(char c, bool is_multibyte,
{
if (locale->ctype_is_c)
return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
- else if (is_multibyte && IS_HIGHBIT_SET(c))
- return true;
- else if (locale->provider != COLLPROVIDER_LIBC)
- return IS_HIGHBIT_SET(c) ||
- (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
else
- return isalpha_l((unsigned char) c, locale->info.lt);
+ return char_is_cased(c, locale);
}
diff --git a/src/backend/utils/adt/mcxtfuncs.c b/src/backend/utils/adt/mcxtfuncs.c
index 396c2f223b4..fe6dce9cba3 100644
--- a/src/backend/utils/adt/mcxtfuncs.c
+++ b/src/backend/utils/adt/mcxtfuncs.c
@@ -38,7 +38,7 @@ typedef struct MemoryContextId
{
MemoryContext context;
int context_id;
-} MemoryContextId;
+} MemoryContextId;
/*
* int_list_to_array
diff --git a/src/backend/utils/adt/meson.build b/src/backend/utils/adt/meson.build
index 244f48f4fd7..ed9bbd7b926 100644
--- a/src/backend/utils/adt/meson.build
+++ b/src/backend/utils/adt/meson.build
@@ -12,6 +12,7 @@ backend_sources += files(
'arrayutils.c',
'ascii.c',
'bool.c',
+ 'bytea.c',
'cash.c',
'char.c',
'cryptohashfuncs.c',
diff --git a/src/backend/utils/adt/multirangetypes.c b/src/backend/utils/adt/multirangetypes.c
index cd84ced5b48..46f2ec0c29f 100644
--- a/src/backend/utils/adt/multirangetypes.c
+++ b/src/backend/utils/adt/multirangetypes.c
@@ -394,12 +394,13 @@ multirange_send(PG_FUNCTION_ARGS)
for (int i = 0; i < range_count; i++)
{
Datum range;
+ bytea *outputbytes;
range = RangeTypePGetDatum(ranges[i]);
- range = PointerGetDatum(SendFunctionCall(&cache->typioproc, range));
+ outputbytes = SendFunctionCall(&cache->typioproc, range);
- pq_sendint32(buf, VARSIZE(range) - VARHDRSZ);
- pq_sendbytes(buf, VARDATA(range), VARSIZE(range) - VARHDRSZ);
+ pq_sendint32(buf, VARSIZE(outputbytes) - VARHDRSZ);
+ pq_sendbytes(buf, VARDATA(outputbytes), VARSIZE(outputbytes) - VARHDRSZ);
}
PG_RETURN_BYTEA_P(pq_endtypsend(buf));
@@ -2833,7 +2834,7 @@ hash_multirange(PG_FUNCTION_ARGS)
upper_hash = 0;
/* Merge hashes of flags and bounds */
- range_hash = hash_uint32((uint32) flags);
+ range_hash = hash_bytes_uint32((uint32) flags);
range_hash ^= lower_hash;
range_hash = pg_rotate_left32(range_hash, 1);
range_hash ^= upper_hash;
diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c
index f03fcc1147b..9fd211b2d45 100644
--- a/src/backend/utils/adt/network.c
+++ b/src/backend/utils/adt/network.c
@@ -12,8 +12,6 @@
#include <netinet/in.h>
#include <arpa/inet.h>
-#include "access/stratnum.h"
-#include "catalog/pg_opfamily.h"
#include "catalog/pg_type.h"
#include "common/hashfn.h"
#include "common/ip.h"
diff --git a/src/backend/utils/adt/network_spgist.c b/src/backend/utils/adt/network_spgist.c
index a84747d9275..602276a35c3 100644
--- a/src/backend/utils/adt/network_spgist.c
+++ b/src/backend/utils/adt/network_spgist.c
@@ -37,7 +37,6 @@
#include "catalog/pg_type.h"
#include "utils/fmgrprotos.h"
#include "utils/inet.h"
-#include "varatt.h"
static int inet_spg_node_number(const inet *val, int commonbits);
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index 40dcbc7b671..c9233565d57 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -1958,9 +1958,11 @@ generate_series_numeric_support(PG_FUNCTION_ARGS)
* in the histogram. width_bucket() returns an integer indicating the
* bucket number that 'operand' belongs to in an equiwidth histogram
* with the specified characteristics. An operand smaller than the
- * lower bound is assigned to bucket 0. An operand greater than the
- * upper bound is assigned to an additional bucket (with number
- * count+1). We don't allow "NaN" for any of the numeric arguments.
+ * lower bound is assigned to bucket 0. An operand greater than or equal
+ * to the upper bound is assigned to an additional bucket (with number
+ * count+1). We don't allow the histogram bounds to be NaN or +/- infinity,
+ * but we do allow those values for the operand (taking NaN to be larger
+ * than any other value, as we do in comparisons).
*/
Datum
width_bucket_numeric(PG_FUNCTION_ARGS)
@@ -1978,17 +1980,13 @@ width_bucket_numeric(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
errmsg("count must be greater than zero")));
- if (NUMERIC_IS_SPECIAL(operand) ||
- NUMERIC_IS_SPECIAL(bound1) ||
- NUMERIC_IS_SPECIAL(bound2))
+ if (NUMERIC_IS_SPECIAL(bound1) || NUMERIC_IS_SPECIAL(bound2))
{
- if (NUMERIC_IS_NAN(operand) ||
- NUMERIC_IS_NAN(bound1) ||
- NUMERIC_IS_NAN(bound2))
+ if (NUMERIC_IS_NAN(bound1) || NUMERIC_IS_NAN(bound2))
ereport(ERROR,
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
- errmsg("operand, lower bound, and upper bound cannot be NaN")));
- /* We allow "operand" to be infinite; cmp_numerics will cope */
+ errmsg("lower and upper bounds cannot be NaN")));
+
if (NUMERIC_IS_INF(bound1) || NUMERIC_IS_INF(bound2))
ereport(ERROR,
(errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index f5e31c433a0..97c2ac1faf9 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -41,7 +41,6 @@
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "utils/builtins.h"
-#include "utils/formatting.h"
#include "utils/guc_hooks.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
@@ -80,31 +79,6 @@ extern pg_locale_t create_pg_locale_icu(Oid collid, MemoryContext context);
extern pg_locale_t create_pg_locale_libc(Oid collid, MemoryContext context);
extern char *get_collation_actual_version_libc(const char *collcollate);
-extern size_t strlower_builtin(char *dst, size_t dstsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-extern size_t strtitle_builtin(char *dst, size_t dstsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-extern size_t strupper_builtin(char *dst, size_t dstsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-extern size_t strfold_builtin(char *dst, size_t dstsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-
-extern size_t strlower_icu(char *dst, size_t dstsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-extern size_t strtitle_icu(char *dst, size_t dstsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-extern size_t strupper_icu(char *dst, size_t dstsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-extern size_t strfold_icu(char *dst, size_t dstsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-
-extern size_t strlower_libc(char *dst, size_t dstsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-extern size_t strtitle_libc(char *dst, size_t dstsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-extern size_t strupper_libc(char *dst, size_t dstsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-
/* GUC settings */
char *locale_messages;
char *locale_monetary;
@@ -1093,6 +1067,9 @@ create_pg_locale(Oid collid, MemoryContext context)
Assert((result->collate_is_c && result->collate == NULL) ||
(!result->collate_is_c && result->collate != NULL));
+ Assert((result->ctype_is_c && result->ctype == NULL) ||
+ (!result->ctype_is_c && result->ctype != NULL));
+
datum = SysCacheGetAttr(COLLOID, tp, Anum_pg_collation_collversion,
&isnull);
if (!isnull)
@@ -1257,77 +1234,31 @@ size_t
pg_strlower(char *dst, size_t dstsize, const char *src, ssize_t srclen,
pg_locale_t locale)
{
- if (locale->provider == COLLPROVIDER_BUILTIN)
- return strlower_builtin(dst, dstsize, src, srclen, locale);
-#ifdef USE_ICU
- else if (locale->provider == COLLPROVIDER_ICU)
- return strlower_icu(dst, dstsize, src, srclen, locale);
-#endif
- else if (locale->provider == COLLPROVIDER_LIBC)
- return strlower_libc(dst, dstsize, src, srclen, locale);
- else
- /* shouldn't happen */
- PGLOCALE_SUPPORT_ERROR(locale->provider);
-
- return 0; /* keep compiler quiet */
+ return locale->ctype->strlower(dst, dstsize, src, srclen, locale);
}
size_t
pg_strtitle(char *dst, size_t dstsize, const char *src, ssize_t srclen,
pg_locale_t locale)
{
- if (locale->provider == COLLPROVIDER_BUILTIN)
- return strtitle_builtin(dst, dstsize, src, srclen, locale);
-#ifdef USE_ICU
- else if (locale->provider == COLLPROVIDER_ICU)
- return strtitle_icu(dst, dstsize, src, srclen, locale);
-#endif
- else if (locale->provider == COLLPROVIDER_LIBC)
- return strtitle_libc(dst, dstsize, src, srclen, locale);
- else
- /* shouldn't happen */
- PGLOCALE_SUPPORT_ERROR(locale->provider);
-
- return 0; /* keep compiler quiet */
+ return locale->ctype->strtitle(dst, dstsize, src, srclen, locale);
}
size_t
pg_strupper(char *dst, size_t dstsize, const char *src, ssize_t srclen,
pg_locale_t locale)
{
- if (locale->provider == COLLPROVIDER_BUILTIN)
- return strupper_builtin(dst, dstsize, src, srclen, locale);
-#ifdef USE_ICU
- else if (locale->provider == COLLPROVIDER_ICU)
- return strupper_icu(dst, dstsize, src, srclen, locale);
-#endif
- else if (locale->provider == COLLPROVIDER_LIBC)
- return strupper_libc(dst, dstsize, src, srclen, locale);
- else
- /* shouldn't happen */
- PGLOCALE_SUPPORT_ERROR(locale->provider);
-
- return 0; /* keep compiler quiet */
+ return locale->ctype->strupper(dst, dstsize, src, srclen, locale);
}
size_t
pg_strfold(char *dst, size_t dstsize, const char *src, ssize_t srclen,
pg_locale_t locale)
{
- if (locale->provider == COLLPROVIDER_BUILTIN)
- return strfold_builtin(dst, dstsize, src, srclen, locale);
-#ifdef USE_ICU
- else if (locale->provider == COLLPROVIDER_ICU)
- return strfold_icu(dst, dstsize, src, srclen, locale);
-#endif
- /* for libc, just use strlower */
- else if (locale->provider == COLLPROVIDER_LIBC)
- return strlower_libc(dst, dstsize, src, srclen, locale);
+ if (locale->ctype->strfold)
+ return locale->ctype->strfold(dst, dstsize, src, srclen, locale);
else
- /* shouldn't happen */
- PGLOCALE_SUPPORT_ERROR(locale->provider);
-
- return 0; /* keep compiler quiet */
+ return locale->ctype->strlower(dst, dstsize, src, srclen, locale);
}
/*
@@ -1465,6 +1396,41 @@ pg_strnxfrm_prefix(char *dest, size_t destsize, const char *src,
}
/*
+ * char_is_cased()
+ *
+ * Fuzzy test of whether the given char is case-varying or not. The argument
+ * is a single byte, so in a multibyte encoding, just assume any non-ASCII
+ * char is case-varying.
+ */
+bool
+char_is_cased(char ch, pg_locale_t locale)
+{
+ return locale->ctype->char_is_cased(ch, locale);
+}
+
+/*
+ * char_tolower_enabled()
+ *
+ * Does the provider support char_tolower()?
+ */
+bool
+char_tolower_enabled(pg_locale_t locale)
+{
+ return (locale->ctype->char_tolower != NULL);
+}
+
+/*
+ * char_tolower()
+ *
+ * Convert char (single-byte encoding) to lowercase.
+ */
+char
+char_tolower(unsigned char ch, pg_locale_t locale)
+{
+ return locale->ctype->char_tolower(ch, locale);
+}
+
+/*
* Return required encoding ID for the given locale, or -1 if any encoding is
* valid for the locale.
*/
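
Generic_Text_IC_like() in like.c (above) is the intended consumer of the two per-character entry points: char_tolower() is only usable when the provider supplies it and the encoding is single-byte, so callers gate on both. A sketch of that gate, assuming only the functions introduced here (c and locale are taken to be in scope):

    if (char_tolower_enabled(locale) &&
        pg_database_encoding_max_length() == 1)
        c = char_tolower(c, locale);    /* single-byte fast path */
    /* otherwise callers fold whole strings, e.g. via pg_strlower() */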
diff --git a/src/backend/utils/adt/pg_locale_builtin.c b/src/backend/utils/adt/pg_locale_builtin.c
index f51768830cd..0c9fbdb40f2 100644
--- a/src/backend/utils/adt/pg_locale_builtin.c
+++ b/src/backend/utils/adt/pg_locale_builtin.c
@@ -18,22 +18,12 @@
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "utils/builtins.h"
-#include "utils/memutils.h"
#include "utils/pg_locale.h"
#include "utils/syscache.h"
extern pg_locale_t create_pg_locale_builtin(Oid collid,
MemoryContext context);
extern char *get_collation_actual_version_builtin(const char *collcollate);
-extern size_t strlower_builtin(char *dest, size_t destsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-extern size_t strtitle_builtin(char *dest, size_t destsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-extern size_t strupper_builtin(char *dest, size_t destsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-extern size_t strfold_builtin(char *dest, size_t destsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-
struct WordBoundaryState
{
@@ -77,7 +67,7 @@ initcap_wbnext(void *state)
return wbstate->len;
}
-size_t
+static size_t
strlower_builtin(char *dest, size_t destsize, const char *src, ssize_t srclen,
pg_locale_t locale)
{
@@ -85,7 +75,7 @@ strlower_builtin(char *dest, size_t destsize, const char *src, ssize_t srclen,
locale->info.builtin.casemap_full);
}
-size_t
+static size_t
strtitle_builtin(char *dest, size_t destsize, const char *src, ssize_t srclen,
pg_locale_t locale)
{
@@ -103,7 +93,7 @@ strtitle_builtin(char *dest, size_t destsize, const char *src, ssize_t srclen,
initcap_wbnext, &wbstate);
}
-size_t
+static size_t
strupper_builtin(char *dest, size_t destsize, const char *src, ssize_t srclen,
pg_locale_t locale)
{
@@ -111,7 +101,7 @@ strupper_builtin(char *dest, size_t destsize, const char *src, ssize_t srclen,
locale->info.builtin.casemap_full);
}
-size_t
+static size_t
strfold_builtin(char *dest, size_t destsize, const char *src, ssize_t srclen,
pg_locale_t locale)
{
@@ -119,6 +109,98 @@ strfold_builtin(char *dest, size_t destsize, const char *src, ssize_t srclen,
locale->info.builtin.casemap_full);
}
+static bool
+wc_isdigit_builtin(pg_wchar wc, pg_locale_t locale)
+{
+ return pg_u_isdigit(wc, !locale->info.builtin.casemap_full);
+}
+
+static bool
+wc_isalpha_builtin(pg_wchar wc, pg_locale_t locale)
+{
+ return pg_u_isalpha(wc);
+}
+
+static bool
+wc_isalnum_builtin(pg_wchar wc, pg_locale_t locale)
+{
+ return pg_u_isalnum(wc, !locale->info.builtin.casemap_full);
+}
+
+static bool
+wc_isupper_builtin(pg_wchar wc, pg_locale_t locale)
+{
+ return pg_u_isupper(wc);
+}
+
+static bool
+wc_islower_builtin(pg_wchar wc, pg_locale_t locale)
+{
+ return pg_u_islower(wc);
+}
+
+static bool
+wc_isgraph_builtin(pg_wchar wc, pg_locale_t locale)
+{
+ return pg_u_isgraph(wc);
+}
+
+static bool
+wc_isprint_builtin(pg_wchar wc, pg_locale_t locale)
+{
+ return pg_u_isprint(wc);
+}
+
+static bool
+wc_ispunct_builtin(pg_wchar wc, pg_locale_t locale)
+{
+ return pg_u_ispunct(wc, !locale->info.builtin.casemap_full);
+}
+
+static bool
+wc_isspace_builtin(pg_wchar wc, pg_locale_t locale)
+{
+ return pg_u_isspace(wc);
+}
+
+static bool
+char_is_cased_builtin(char ch, pg_locale_t locale)
+{
+ return IS_HIGHBIT_SET(ch) ||
+ (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z');
+}
+
+static pg_wchar
+wc_toupper_builtin(pg_wchar wc, pg_locale_t locale)
+{
+ return unicode_uppercase_simple(wc);
+}
+
+static pg_wchar
+wc_tolower_builtin(pg_wchar wc, pg_locale_t locale)
+{
+ return unicode_lowercase_simple(wc);
+}
+
+static const struct ctype_methods ctype_methods_builtin = {
+ .strlower = strlower_builtin,
+ .strtitle = strtitle_builtin,
+ .strupper = strupper_builtin,
+ .strfold = strfold_builtin,
+ .wc_isdigit = wc_isdigit_builtin,
+ .wc_isalpha = wc_isalpha_builtin,
+ .wc_isalnum = wc_isalnum_builtin,
+ .wc_isupper = wc_isupper_builtin,
+ .wc_islower = wc_islower_builtin,
+ .wc_isgraph = wc_isgraph_builtin,
+ .wc_isprint = wc_isprint_builtin,
+ .wc_ispunct = wc_ispunct_builtin,
+ .wc_isspace = wc_isspace_builtin,
+ .char_is_cased = char_is_cased_builtin,
+ .wc_tolower = wc_tolower_builtin,
+ .wc_toupper = wc_toupper_builtin,
+};
+
pg_locale_t
create_pg_locale_builtin(Oid collid, MemoryContext context)
{
@@ -158,10 +240,11 @@ create_pg_locale_builtin(Oid collid, MemoryContext context)
result->info.builtin.locale = MemoryContextStrdup(context, locstr);
result->info.builtin.casemap_full = (strcmp(locstr, "PG_UNICODE_FAST") == 0);
- result->provider = COLLPROVIDER_BUILTIN;
result->deterministic = true;
result->collate_is_c = true;
result->ctype_is_c = (strcmp(locstr, "C") == 0);
+ if (!result->ctype_is_c)
+ result->ctype = &ctype_methods_builtin;
return result;
}
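
The net effect in this file: create_pg_locale_builtin() no longer stamps a provider tag; it publishes behavior through the ctype_methods table, and callers dispatch through it. A hedged sketch of the dispatch shape (the wrapper name is illustrative, not from the patch):

    static inline bool
    pg_wc_isalpha_sketch(pg_wchar wc, pg_locale_t locale)
    {
        /*
         * A C-locale ctype keeps locale->ctype == NULL (ctype_is_c is
         * set) and is expected to be handled by hard-wired code before
         * any method dispatch is reached.
         */
        return locale->ctype->wc_isalpha(wc, locale);
    }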
diff --git a/src/backend/utils/adt/pg_locale_icu.c b/src/backend/utils/adt/pg_locale_icu.c
index a32c32a0744..96741e08269 100644
--- a/src/backend/utils/adt/pg_locale_icu.c
+++ b/src/backend/utils/adt/pg_locale_icu.c
@@ -48,19 +48,22 @@
#define TEXTBUFLEN 1024
extern pg_locale_t create_pg_locale_icu(Oid collid, MemoryContext context);
-extern size_t strlower_icu(char *dest, size_t destsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-extern size_t strtitle_icu(char *dest, size_t destsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-extern size_t strupper_icu(char *dest, size_t destsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-extern size_t strfold_icu(char *dest, size_t destsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
#ifdef USE_ICU
extern UCollator *pg_ucol_open(const char *loc_str);
+static size_t strlower_icu(char *dest, size_t destsize, const char *src,
+ ssize_t srclen, pg_locale_t locale);
+static size_t strtitle_icu(char *dest, size_t destsize, const char *src,
+ ssize_t srclen, pg_locale_t locale);
+static size_t strupper_icu(char *dest, size_t destsize, const char *src,
+ ssize_t srclen, pg_locale_t locale);
+static size_t strfold_icu(char *dest, size_t destsize, const char *src,
+ ssize_t srclen, pg_locale_t locale);
+static int strncoll_icu(const char *arg1, ssize_t len1,
+ const char *arg2, ssize_t len2,
+ pg_locale_t locale);
static size_t strnxfrm_icu(char *dest, size_t destsize,
const char *src, ssize_t srclen,
pg_locale_t locale);
@@ -118,6 +121,25 @@ static int32_t u_strFoldCase_default(UChar *dest, int32_t destCapacity,
const char *locale,
UErrorCode *pErrorCode);
+static bool
+char_is_cased_icu(char ch, pg_locale_t locale)
+{
+ return IS_HIGHBIT_SET(ch) ||
+ (ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z');
+}
+
+static pg_wchar
+toupper_icu(pg_wchar wc, pg_locale_t locale)
+{
+ return u_toupper(wc);
+}
+
+static pg_wchar
+tolower_icu(pg_wchar wc, pg_locale_t locale)
+{
+ return u_tolower(wc);
+}
+
static const struct collate_methods collate_methods_icu = {
.strncoll = strncoll_icu,
.strnxfrm = strnxfrm_icu,
@@ -136,6 +158,78 @@ static const struct collate_methods collate_methods_icu_utf8 = {
.strxfrm_is_safe = true,
};
+static bool
+wc_isdigit_icu(pg_wchar wc, pg_locale_t locale)
+{
+ return u_isdigit(wc);
+}
+
+static bool
+wc_isalpha_icu(pg_wchar wc, pg_locale_t locale)
+{
+ return u_isalpha(wc);
+}
+
+static bool
+wc_isalnum_icu(pg_wchar wc, pg_locale_t locale)
+{
+ return u_isalnum(wc);
+}
+
+static bool
+wc_isupper_icu(pg_wchar wc, pg_locale_t locale)
+{
+ return u_isupper(wc);
+}
+
+static bool
+wc_islower_icu(pg_wchar wc, pg_locale_t locale)
+{
+ return u_islower(wc);
+}
+
+static bool
+wc_isgraph_icu(pg_wchar wc, pg_locale_t locale)
+{
+ return u_isgraph(wc);
+}
+
+static bool
+wc_isprint_icu(pg_wchar wc, pg_locale_t locale)
+{
+ return u_isprint(wc);
+}
+
+static bool
+wc_ispunct_icu(pg_wchar wc, pg_locale_t locale)
+{
+ return u_ispunct(wc);
+}
+
+static bool
+wc_isspace_icu(pg_wchar wc, pg_locale_t locale)
+{
+ return u_isspace(wc);
+}
+
+static const struct ctype_methods ctype_methods_icu = {
+ .strlower = strlower_icu,
+ .strtitle = strtitle_icu,
+ .strupper = strupper_icu,
+ .strfold = strfold_icu,
+ .wc_isdigit = wc_isdigit_icu,
+ .wc_isalpha = wc_isalpha_icu,
+ .wc_isalnum = wc_isalnum_icu,
+ .wc_isupper = wc_isupper_icu,
+ .wc_islower = wc_islower_icu,
+ .wc_isgraph = wc_isgraph_icu,
+ .wc_isprint = wc_isprint_icu,
+ .wc_ispunct = wc_ispunct_icu,
+ .wc_isspace = wc_isspace_icu,
+ .char_is_cased = char_is_cased_icu,
+ .wc_toupper = toupper_icu,
+ .wc_tolower = tolower_icu,
+};
#endif
pg_locale_t
@@ -198,7 +292,6 @@ create_pg_locale_icu(Oid collid, MemoryContext context)
result = MemoryContextAllocZero(context, sizeof(struct pg_locale_struct));
result->info.icu.locale = MemoryContextStrdup(context, iculocstr);
result->info.icu.ucol = collator;
- result->provider = COLLPROVIDER_ICU;
result->deterministic = deterministic;
result->collate_is_c = false;
result->ctype_is_c = false;
@@ -206,6 +299,7 @@ create_pg_locale_icu(Oid collid, MemoryContext context)
result->collate = &collate_methods_icu_utf8;
else
result->collate = &collate_methods_icu;
+ result->ctype = &ctype_methods_icu;
return result;
#else
@@ -379,7 +473,7 @@ make_icu_collator(const char *iculocstr, const char *icurules)
}
}
-size_t
+static size_t
strlower_icu(char *dest, size_t destsize, const char *src, ssize_t srclen,
pg_locale_t locale)
{
@@ -399,7 +493,7 @@ strlower_icu(char *dest, size_t destsize, const char *src, ssize_t srclen,
return result_len;
}
-size_t
+static size_t
strtitle_icu(char *dest, size_t destsize, const char *src, ssize_t srclen,
pg_locale_t locale)
{
@@ -419,7 +513,7 @@ strtitle_icu(char *dest, size_t destsize, const char *src, ssize_t srclen,
return result_len;
}
-size_t
+static size_t
strupper_icu(char *dest, size_t destsize, const char *src, ssize_t srclen,
pg_locale_t locale)
{
@@ -439,7 +533,7 @@ strupper_icu(char *dest, size_t destsize, const char *src, ssize_t srclen,
return result_len;
}
-size_t
+static size_t
strfold_icu(char *dest, size_t destsize, const char *src, ssize_t srclen,
pg_locale_t locale)
{
@@ -474,8 +568,6 @@ strncoll_icu_utf8(const char *arg1, ssize_t len1, const char *arg2, ssize_t len2
int result;
UErrorCode status;
- Assert(locale->provider == COLLPROVIDER_ICU);
-
Assert(GetDatabaseEncoding() == PG_UTF8);
status = U_ZERO_ERROR;
@@ -503,8 +595,6 @@ strnxfrm_icu(char *dest, size_t destsize, const char *src, ssize_t srclen,
size_t uchar_bsize;
Size result_bsize;
- Assert(locale->provider == COLLPROVIDER_ICU);
-
init_icu_converter();
ulen = uchar_length(icu_converter, src, srclen);
@@ -549,8 +639,6 @@ strnxfrm_prefix_icu_utf8(char *dest, size_t destsize,
uint32_t state[2];
UErrorCode status;
- Assert(locale->provider == COLLPROVIDER_ICU);
-
Assert(GetDatabaseEncoding() == PG_UTF8);
uiter_setUTF8(&iter, src, srclen);
@@ -749,8 +837,6 @@ strncoll_icu(const char *arg1, ssize_t len1,
*uchar2;
int result;
- Assert(locale->provider == COLLPROVIDER_ICU);
-
/* if encoding is UTF8, use more efficient strncoll_icu_utf8 */
#ifdef HAVE_UCOL_STRCOLLUTF8
Assert(GetDatabaseEncoding() != PG_UTF8);
@@ -799,8 +885,6 @@ strnxfrm_prefix_icu(char *dest, size_t destsize,
size_t uchar_bsize;
Size result_bsize;
- Assert(locale->provider == COLLPROVIDER_ICU);
-
/* if encoding is UTF8, use more efficient strnxfrm_prefix_icu_utf8 */
Assert(GetDatabaseEncoding() != PG_UTF8);
diff --git a/src/backend/utils/adt/pg_locale_libc.c b/src/backend/utils/adt/pg_locale_libc.c
index 199857e22db..8d88b53c375 100644
--- a/src/backend/utils/adt/pg_locale_libc.c
+++ b/src/backend/utils/adt/pg_locale_libc.c
@@ -34,6 +34,46 @@
#endif
/*
+ * For the libc provider, to provide as much functionality as possible on a
+ * variety of platforms without going so far as to implement everything from
+ * scratch, we use several implementation strategies depending on the
+ * situation:
+ *
+ * 1. In C/POSIX collations, we use hard-wired code. We can't depend on
+ * the <ctype.h> functions since those will obey LC_CTYPE. Note that these
+ * collations don't give a fig about multibyte characters.
+ *
+ * 2. When working in UTF8 encoding, we use the <wctype.h> functions.
+ * This assumes that every platform uses Unicode codepoints directly
+ * as the wchar_t representation of Unicode. (XXX: ICU makes this assumption
+ * even for non-UTF8 encodings, which may be a problem.) On some platforms
+ * wchar_t is only 16 bits wide, so we have to punt for codepoints > 0xFFFF.
+ *
+ * 3. In all other encodings, we use the <ctype.h> functions for pg_wchar
+ * values up to 255, and punt for values above that. This is 100% correct
+ * only in single-byte encodings such as LATINn. However, non-Unicode
+ * multibyte encodings are mostly Far Eastern character sets for which the
+ * properties being tested here aren't very relevant for higher code values
+ * anyway. The difficulty with using the <wctype.h> functions with
+ * non-Unicode multibyte encodings is that we can have no certainty that
+ * the platform's wchar_t representation matches what we do in pg_wchar
+ * conversions.
+ *
+ * As a special case, in the "default" collation, (2) and (3) force ASCII
+ * letters to follow ASCII upcase/downcase rules, while in a non-default
+ * collation we just let the library functions do what they will. The case
+ * where this matters is treatment of I/i in Turkish, and the behavior is
+ * meant to match the upper()/lower() SQL functions.
+ *
+ * We store the active collation setting in static variables. In principle
+ * it could be passed down to here via the regex library's "struct vars" data
+ * structure; but that would require somewhat invasive changes in the regex
+ * library, and right now there's no real benefit to be gained from that.
+ *
+ * NB: the coding here assumes pg_wchar is an unsigned type.
+ */
+
+/*
* Size of stack buffer to use for string transformations, used to avoid heap
* allocations in typical cases. This should be large enough that most strings
* will fit, but small enough that we feel comfortable putting it on the
@@ -43,13 +83,6 @@
extern pg_locale_t create_pg_locale_libc(Oid collid, MemoryContext context);
-extern size_t strlower_libc(char *dst, size_t dstsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-extern size_t strtitle_libc(char *dst, size_t dstsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-extern size_t strupper_libc(char *dst, size_t dstsize, const char *src,
- ssize_t srclen, pg_locale_t locale);
-
static int strncoll_libc(const char *arg1, ssize_t len1,
const char *arg2, ssize_t len2,
pg_locale_t locale);
@@ -85,6 +118,251 @@ static size_t strupper_libc_mb(char *dest, size_t destsize,
const char *src, ssize_t srclen,
pg_locale_t locale);
+static bool
+wc_isdigit_libc_sb(pg_wchar wc, pg_locale_t locale)
+{
+ return isdigit_l((unsigned char) wc, locale->info.lt);
+}
+
+static bool
+wc_isalpha_libc_sb(pg_wchar wc, pg_locale_t locale)
+{
+ return isalpha_l((unsigned char) wc, locale->info.lt);
+}
+
+static bool
+wc_isalnum_libc_sb(pg_wchar wc, pg_locale_t locale)
+{
+ return isalnum_l((unsigned char) wc, locale->info.lt);
+}
+
+static bool
+wc_isupper_libc_sb(pg_wchar wc, pg_locale_t locale)
+{
+ return isupper_l((unsigned char) wc, locale->info.lt);
+}
+
+static bool
+wc_islower_libc_sb(pg_wchar wc, pg_locale_t locale)
+{
+ return islower_l((unsigned char) wc, locale->info.lt);
+}
+
+static bool
+wc_isgraph_libc_sb(pg_wchar wc, pg_locale_t locale)
+{
+ return isgraph_l((unsigned char) wc, locale->info.lt);
+}
+
+static bool
+wc_isprint_libc_sb(pg_wchar wc, pg_locale_t locale)
+{
+ return isprint_l((unsigned char) wc, locale->info.lt);
+}
+
+static bool
+wc_ispunct_libc_sb(pg_wchar wc, pg_locale_t locale)
+{
+ return ispunct_l((unsigned char) wc, locale->info.lt);
+}
+
+static bool
+wc_isspace_libc_sb(pg_wchar wc, pg_locale_t locale)
+{
+ return isspace_l((unsigned char) wc, locale->info.lt);
+}
+
+static bool
+wc_isdigit_libc_mb(pg_wchar wc, pg_locale_t locale)
+{
+ return iswdigit_l((wint_t) wc, locale->info.lt);
+}
+
+static bool
+wc_isalpha_libc_mb(pg_wchar wc, pg_locale_t locale)
+{
+ return iswalpha_l((wint_t) wc, locale->info.lt);
+}
+
+static bool
+wc_isalnum_libc_mb(pg_wchar wc, pg_locale_t locale)
+{
+ return iswalnum_l((wint_t) wc, locale->info.lt);
+}
+
+static bool
+wc_isupper_libc_mb(pg_wchar wc, pg_locale_t locale)
+{
+ return iswupper_l((wint_t) wc, locale->info.lt);
+}
+
+static bool
+wc_islower_libc_mb(pg_wchar wc, pg_locale_t locale)
+{
+ return iswlower_l((wint_t) wc, locale->info.lt);
+}
+
+static bool
+wc_isgraph_libc_mb(pg_wchar wc, pg_locale_t locale)
+{
+ return iswgraph_l((wint_t) wc, locale->info.lt);
+}
+
+static bool
+wc_isprint_libc_mb(pg_wchar wc, pg_locale_t locale)
+{
+ return iswprint_l((wint_t) wc, locale->info.lt);
+}
+
+static bool
+wc_ispunct_libc_mb(pg_wchar wc, pg_locale_t locale)
+{
+ return iswpunct_l((wint_t) wc, locale->info.lt);
+}
+
+static bool
+wc_isspace_libc_mb(pg_wchar wc, pg_locale_t locale)
+{
+ return iswspace_l((wint_t) wc, locale->info.lt);
+}
+
+static char
+char_tolower_libc(unsigned char ch, pg_locale_t locale)
+{
+ Assert(pg_database_encoding_max_length() == 1);
+ return tolower_l(ch, locale->info.lt);
+}
+
+static bool
+char_is_cased_libc(char ch, pg_locale_t locale)
+{
+ bool is_multibyte = pg_database_encoding_max_length() > 1;
+
+ if (is_multibyte && IS_HIGHBIT_SET(ch))
+ return true;
+ else
+ return isalpha_l((unsigned char) ch, locale->info.lt);
+}
+
+static pg_wchar
+toupper_libc_sb(pg_wchar wc, pg_locale_t locale)
+{
+ Assert(GetDatabaseEncoding() != PG_UTF8);
+
+ /* force C behavior for ASCII characters, per comments above */
+ if (locale->is_default && wc <= (pg_wchar) 127)
+ return pg_ascii_toupper((unsigned char) wc);
+ if (wc <= (pg_wchar) UCHAR_MAX)
+ return toupper_l((unsigned char) wc, locale->info.lt);
+ else
+ return wc;
+}
+
+static pg_wchar
+toupper_libc_mb(pg_wchar wc, pg_locale_t locale)
+{
+ Assert(GetDatabaseEncoding() == PG_UTF8);
+
+ /* force C behavior for ASCII characters, per comments above */
+ if (locale->is_default && wc <= (pg_wchar) 127)
+ return pg_ascii_toupper((unsigned char) wc);
+ if (sizeof(wchar_t) >= 4 || wc <= (pg_wchar) 0xFFFF)
+ return towupper_l((wint_t) wc, locale->info.lt);
+ else
+ return wc;
+}
+
+static pg_wchar
+tolower_libc_sb(pg_wchar wc, pg_locale_t locale)
+{
+ Assert(GetDatabaseEncoding() != PG_UTF8);
+
+ /* force C behavior for ASCII characters, per comments above */
+ if (locale->is_default && wc <= (pg_wchar) 127)
+ return pg_ascii_tolower((unsigned char) wc);
+ if (wc <= (pg_wchar) UCHAR_MAX)
+ return tolower_l((unsigned char) wc, locale->info.lt);
+ else
+ return wc;
+}
+
+static pg_wchar
+tolower_libc_mb(pg_wchar wc, pg_locale_t locale)
+{
+ Assert(GetDatabaseEncoding() == PG_UTF8);
+
+ /* force C behavior for ASCII characters, per comments above */
+ if (locale->is_default && wc <= (pg_wchar) 127)
+ return pg_ascii_tolower((unsigned char) wc);
+ if (sizeof(wchar_t) >= 4 || wc <= (pg_wchar) 0xFFFF)
+ return towlower_l((wint_t) wc, locale->info.lt);
+ else
+ return wc;
+}
+
+static const struct ctype_methods ctype_methods_libc_sb = {
+ .strlower = strlower_libc_sb,
+ .strtitle = strtitle_libc_sb,
+ .strupper = strupper_libc_sb,
+ .wc_isdigit = wc_isdigit_libc_sb,
+ .wc_isalpha = wc_isalpha_libc_sb,
+ .wc_isalnum = wc_isalnum_libc_sb,
+ .wc_isupper = wc_isupper_libc_sb,
+ .wc_islower = wc_islower_libc_sb,
+ .wc_isgraph = wc_isgraph_libc_sb,
+ .wc_isprint = wc_isprint_libc_sb,
+ .wc_ispunct = wc_ispunct_libc_sb,
+ .wc_isspace = wc_isspace_libc_sb,
+ .char_is_cased = char_is_cased_libc,
+ .char_tolower = char_tolower_libc,
+ .wc_toupper = toupper_libc_sb,
+ .wc_tolower = tolower_libc_sb,
+ .max_chr = UCHAR_MAX,
+};
+
+/*
+ * Non-UTF8 multibyte encodings use multibyte semantics for case mapping, but
+ * single-byte semantics for pattern matching.
+ */
+static const struct ctype_methods ctype_methods_libc_other_mb = {
+ .strlower = strlower_libc_mb,
+ .strtitle = strtitle_libc_mb,
+ .strupper = strupper_libc_mb,
+ .wc_isdigit = wc_isdigit_libc_sb,
+ .wc_isalpha = wc_isalpha_libc_sb,
+ .wc_isalnum = wc_isalnum_libc_sb,
+ .wc_isupper = wc_isupper_libc_sb,
+ .wc_islower = wc_islower_libc_sb,
+ .wc_isgraph = wc_isgraph_libc_sb,
+ .wc_isprint = wc_isprint_libc_sb,
+ .wc_ispunct = wc_ispunct_libc_sb,
+ .wc_isspace = wc_isspace_libc_sb,
+ .char_is_cased = char_is_cased_libc,
+ .char_tolower = char_tolower_libc,
+ .wc_toupper = toupper_libc_sb,
+ .wc_tolower = tolower_libc_sb,
+ .max_chr = UCHAR_MAX,
+};
+
+static const struct ctype_methods ctype_methods_libc_utf8 = {
+ .strlower = strlower_libc_mb,
+ .strtitle = strtitle_libc_mb,
+ .strupper = strupper_libc_mb,
+ .wc_isdigit = wc_isdigit_libc_mb,
+ .wc_isalpha = wc_isalpha_libc_mb,
+ .wc_isalnum = wc_isalnum_libc_mb,
+ .wc_isupper = wc_isupper_libc_mb,
+ .wc_islower = wc_islower_libc_mb,
+ .wc_isgraph = wc_isgraph_libc_mb,
+ .wc_isprint = wc_isprint_libc_mb,
+ .wc_ispunct = wc_ispunct_libc_mb,
+ .wc_isspace = wc_isspace_libc_mb,
+ .char_is_cased = char_is_cased_libc,
+ .char_tolower = char_tolower_libc,
+ .wc_toupper = toupper_libc_mb,
+ .wc_tolower = tolower_libc_mb,
+};
+
static const struct collate_methods collate_methods_libc = {
.strncoll = strncoll_libc,
.strnxfrm = strnxfrm_libc,
@@ -119,36 +397,6 @@ static const struct collate_methods collate_methods_libc_win32_utf8 = {
};
#endif
-size_t
-strlower_libc(char *dst, size_t dstsize, const char *src,
- ssize_t srclen, pg_locale_t locale)
-{
- if (pg_database_encoding_max_length() > 1)
- return strlower_libc_mb(dst, dstsize, src, srclen, locale);
- else
- return strlower_libc_sb(dst, dstsize, src, srclen, locale);
-}
-
-size_t
-strtitle_libc(char *dst, size_t dstsize, const char *src,
- ssize_t srclen, pg_locale_t locale)
-{
- if (pg_database_encoding_max_length() > 1)
- return strtitle_libc_mb(dst, dstsize, src, srclen, locale);
- else
- return strtitle_libc_sb(dst, dstsize, src, srclen, locale);
-}
-
-size_t
-strupper_libc(char *dst, size_t dstsize, const char *src,
- ssize_t srclen, pg_locale_t locale)
-{
- if (pg_database_encoding_max_length() > 1)
- return strupper_libc_mb(dst, dstsize, src, srclen, locale);
- else
- return strupper_libc_sb(dst, dstsize, src, srclen, locale);
-}
-
static size_t
strlower_libc_sb(char *dest, size_t destsize, const char *src, ssize_t srclen,
pg_locale_t locale)
@@ -209,7 +457,7 @@ strlower_libc_mb(char *dest, size_t destsize, const char *src, ssize_t srclen,
/* Output workspace cannot have more codes than input bytes */
workspace = (wchar_t *) palloc((srclen + 1) * sizeof(wchar_t));
- char2wchar(workspace, srclen + 1, src, srclen, locale);
+ char2wchar(workspace, srclen + 1, src, srclen, loc);
for (curr_char = 0; workspace[curr_char] != 0; curr_char++)
workspace[curr_char] = towlower_l(workspace[curr_char], loc);
@@ -220,7 +468,7 @@ strlower_libc_mb(char *dest, size_t destsize, const char *src, ssize_t srclen,
max_size = curr_char * pg_database_encoding_max_length();
result = palloc(max_size + 1);
- result_size = wchar2char(result, workspace, max_size + 1, locale);
+ result_size = wchar2char(result, workspace, max_size + 1, loc);
if (result_size + 1 > destsize)
return result_size;
@@ -304,7 +552,7 @@ strtitle_libc_mb(char *dest, size_t destsize, const char *src, ssize_t srclen,
/* Output workspace cannot have more codes than input bytes */
workspace = (wchar_t *) palloc((srclen + 1) * sizeof(wchar_t));
- char2wchar(workspace, srclen + 1, src, srclen, locale);
+ char2wchar(workspace, srclen + 1, src, srclen, loc);
for (curr_char = 0; workspace[curr_char] != 0; curr_char++)
{
@@ -321,7 +569,7 @@ strtitle_libc_mb(char *dest, size_t destsize, const char *src, ssize_t srclen,
max_size = curr_char * pg_database_encoding_max_length();
result = palloc(max_size + 1);
- result_size = wchar2char(result, workspace, max_size + 1, locale);
+ result_size = wchar2char(result, workspace, max_size + 1, loc);
if (result_size + 1 > destsize)
return result_size;
@@ -392,7 +640,7 @@ strupper_libc_mb(char *dest, size_t destsize, const char *src, ssize_t srclen,
/* Output workspace cannot have more codes than input bytes */
workspace = (wchar_t *) palloc((srclen + 1) * sizeof(wchar_t));
- char2wchar(workspace, srclen + 1, src, srclen, locale);
+ char2wchar(workspace, srclen + 1, src, srclen, loc);
for (curr_char = 0; workspace[curr_char] != 0; curr_char++)
workspace[curr_char] = towupper_l(workspace[curr_char], loc);
@@ -403,7 +651,7 @@ strupper_libc_mb(char *dest, size_t destsize, const char *src, ssize_t srclen,
max_size = curr_char * pg_database_encoding_max_length();
result = palloc(max_size + 1);
- result_size = wchar2char(result, workspace, max_size + 1, locale);
+ result_size = wchar2char(result, workspace, max_size + 1, loc);
if (result_size + 1 > destsize)
return result_size;
@@ -465,7 +713,6 @@ create_pg_locale_libc(Oid collid, MemoryContext context)
loc = make_libc_collator(collate, ctype);
result = MemoryContextAllocZero(context, sizeof(struct pg_locale_struct));
- result->provider = COLLPROVIDER_LIBC;
result->deterministic = true;
result->collate_is_c = (strcmp(collate, "C") == 0) ||
(strcmp(collate, "POSIX") == 0);
@@ -481,6 +728,15 @@ create_pg_locale_libc(Oid collid, MemoryContext context)
#endif
result->collate = &collate_methods_libc;
}
+ if (!result->ctype_is_c)
+ {
+ if (GetDatabaseEncoding() == PG_UTF8)
+ result->ctype = &ctype_methods_libc_utf8;
+ else if (pg_database_encoding_max_length() > 1)
+ result->ctype = &ctype_methods_libc_other_mb;
+ else
+ result->ctype = &ctype_methods_libc_sb;
+ }
return result;
}
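
One consequence of the ASCII special case described in the header comment: under the database default collation, ASCII letters follow ASCII casing rules even when the libc locale disagrees. A worked example (hedged; assumes a Turkish single-byte libc locale such as tr_TR.ISO8859-9 is installed):

    pg_wchar    lo = tolower_libc_sb('I', locale);

    /*
     * If locale->is_default: lo == 'i' (pg_ascii_tolower() path).
     * Otherwise: lo is whatever tolower_l() returns under tr_TR,
     * i.e. dotless i, matching the upper()/lower() SQL functions.
     */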
@@ -576,8 +832,6 @@ strncoll_libc(const char *arg1, ssize_t len1, const char *arg2, ssize_t len2,
const char *arg2n;
int result;
- Assert(locale->provider == COLLPROVIDER_LIBC);
-
if (bufsize1 + bufsize2 > TEXTBUFLEN)
buf = palloc(bufsize1 + bufsize2);
@@ -632,8 +886,6 @@ strnxfrm_libc(char *dest, size_t destsize, const char *src, ssize_t srclen,
size_t bufsize = srclen + 1;
size_t result;
- Assert(locale->provider == COLLPROVIDER_LIBC);
-
if (srclen == -1)
return strxfrm_l(dest, src, destsize, locale->info.lt);
@@ -742,7 +994,6 @@ strncoll_libc_win32_utf8(const char *arg1, ssize_t len1, const char *arg2,
int r;
int result;
- Assert(locale->provider == COLLPROVIDER_LIBC);
Assert(GetDatabaseEncoding() == PG_UTF8);
if (len1 == -1)
@@ -879,7 +1130,7 @@ wcstombs_l(char *dest, const wchar_t *src, size_t n, locale_t loc)
* zero-terminated. The output will be zero-terminated iff there is room.
*/
size_t
-wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale)
+wchar2char(char *to, const wchar_t *from, size_t tolen, locale_t loc)
{
size_t result;
@@ -909,7 +1160,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale)
}
else
#endif /* WIN32 */
- if (locale == (pg_locale_t) 0)
+ if (loc == (locale_t) 0)
{
/* Use wcstombs directly for the default locale */
result = wcstombs(to, from, tolen);
@@ -917,7 +1168,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale)
else
{
/* Use wcstombs_l for nondefault locales */
- result = wcstombs_l(to, from, tolen, locale->info.lt);
+ result = wcstombs_l(to, from, tolen, loc);
}
return result;
@@ -934,7 +1185,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale)
*/
size_t
char2wchar(wchar_t *to, size_t tolen, const char *from, size_t fromlen,
- pg_locale_t locale)
+ locale_t loc)
{
size_t result;
@@ -969,7 +1220,7 @@ char2wchar(wchar_t *to, size_t tolen, const char *from, size_t fromlen,
/* mbstowcs requires ending '\0' */
char *str = pnstrdup(from, fromlen);
- if (locale == (pg_locale_t) 0)
+ if (loc == (locale_t) 0)
{
/* Use mbstowcs directly for the default locale */
result = mbstowcs(to, str, tolen);
@@ -977,7 +1228,7 @@ char2wchar(wchar_t *to, size_t tolen, const char *from, size_t fromlen,
else
{
/* Use mbstowcs_l for nondefault locales */
- result = mbstowcs_l(to, str, tolen, locale->info.lt);
+ result = mbstowcs_l(to, str, tolen, loc);
}
pfree(str);
diff --git a/src/backend/utils/adt/pg_lsn.c b/src/backend/utils/adt/pg_lsn.c
index 16311590a14..12de2446f5b 100644
--- a/src/backend/utils/adt/pg_lsn.c
+++ b/src/backend/utils/adt/pg_lsn.c
@@ -83,7 +83,7 @@ pg_lsn_out(PG_FUNCTION_ARGS)
char buf[MAXPG_LSNLEN + 1];
char *result;
- snprintf(buf, sizeof buf, "%X/%X", LSN_FORMAT_ARGS(lsn));
+ snprintf(buf, sizeof buf, "%X/%08X", LSN_FORMAT_ARGS(lsn));
result = pstrdup(buf);
PG_RETURN_CSTRING(result);
}
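
The new format zero-pads the low 32 bits of the LSN to eight hex digits, so output is fixed-width. Illustration (values chosen for the example; LSN_FORMAT_ARGS() splits the 64-bit value into high and low halves):

    char        buf[MAXPG_LSNLEN + 1];
    XLogRecPtr  lsn = (XLogRecPtr) 0x4E8;

    snprintf(buf, sizeof buf, "%X/%08X", LSN_FORMAT_ARGS(lsn));
    /* buf is now "0/000004E8"; the old "%X/%X" produced "0/4E8" */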
diff --git a/src/backend/utils/adt/pg_upgrade_support.c b/src/backend/utils/adt/pg_upgrade_support.c
index d44f8c262ba..a4f8b4faa90 100644
--- a/src/backend/utils/adt/pg_upgrade_support.c
+++ b/src/backend/utils/adt/pg_upgrade_support.c
@@ -21,6 +21,7 @@
#include "commands/extension.h"
#include "miscadmin.h"
#include "replication/logical.h"
+#include "replication/logicallauncher.h"
#include "replication/origin.h"
#include "replication/worker_internal.h"
#include "storage/lmgr.h"
@@ -410,3 +411,21 @@ binary_upgrade_replorigin_advance(PG_FUNCTION_ARGS)
PG_RETURN_VOID();
}
+
+/*
+ * binary_upgrade_create_conflict_detection_slot
+ *
+ * Create a replication slot to retain information necessary for conflict
+ * detection such as dead tuples, commit timestamps, and origins.
+ */
+Datum
+binary_upgrade_create_conflict_detection_slot(PG_FUNCTION_ARGS)
+{
+ CHECK_IS_BINARY_UPGRADE;
+
+ CreateConflictDetectionSlot();
+
+ ReplicationSlotRelease();
+
+ PG_RETURN_VOID();
+}
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index e980109f245..c756c2bebaa 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -1510,7 +1510,7 @@ pg_stat_io_build_tuples(ReturnSetInfo *rsinfo,
bktype_stats->bytes[io_obj][io_context][io_op];
/* Convert to numeric */
- snprintf(buf, sizeof buf, UINT64_FORMAT, byte);
+ snprintf(buf, sizeof buf, INT64_FORMAT, byte);
values[byte_idx] = DirectFunctionCall3(numeric_in,
CStringGetDatum(buf),
ObjectIdGetDatum(0),
@@ -2171,7 +2171,7 @@ pg_stat_get_replication_slot(PG_FUNCTION_ARGS)
Datum
pg_stat_get_subscription_stats(PG_FUNCTION_ARGS)
{
-#define PG_STAT_GET_SUBSCRIPTION_STATS_COLS 11
+#define PG_STAT_GET_SUBSCRIPTION_STATS_COLS 12
Oid subid = PG_GETARG_OID(0);
TupleDesc tupdesc;
Datum values[PG_STAT_GET_SUBSCRIPTION_STATS_COLS] = {0};
@@ -2197,15 +2197,17 @@ pg_stat_get_subscription_stats(PG_FUNCTION_ARGS)
INT8OID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 6, "confl_update_exists",
INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 7, "confl_update_missing",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 7, "confl_update_deleted",
INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 8, "confl_delete_origin_differs",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 8, "confl_update_missing",
INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 9, "confl_delete_missing",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 9, "confl_delete_origin_differs",
INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 10, "confl_multiple_unique_conflicts",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 10, "confl_delete_missing",
INT8OID, -1, 0);
- TupleDescInitEntry(tupdesc, (AttrNumber) 11, "stats_reset",
+ TupleDescInitEntry(tupdesc, (AttrNumber) 11, "confl_multiple_unique_conflicts",
+ INT8OID, -1, 0);
+ TupleDescInitEntry(tupdesc, (AttrNumber) 12, "stats_reset",
TIMESTAMPTZOID, -1, 0);
BlessTupleDesc(tupdesc);
diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c
index 66cc0acf4a7..c83b239b3bb 100644
--- a/src/backend/utils/adt/rangetypes.c
+++ b/src/backend/utils/adt/rangetypes.c
@@ -285,8 +285,7 @@ range_send(PG_FUNCTION_ARGS)
if (RANGE_HAS_LBOUND(flags))
{
- Datum bound = PointerGetDatum(SendFunctionCall(&cache->typioproc,
- lower.val));
+ bytea *bound = SendFunctionCall(&cache->typioproc, lower.val);
uint32 bound_len = VARSIZE(bound) - VARHDRSZ;
char *bound_data = VARDATA(bound);
@@ -296,8 +295,7 @@ range_send(PG_FUNCTION_ARGS)
if (RANGE_HAS_UBOUND(flags))
{
- Datum bound = PointerGetDatum(SendFunctionCall(&cache->typioproc,
- upper.val));
+ bytea *bound = SendFunctionCall(&cache->typioproc, upper.val);
uint32 bound_len = VARSIZE(bound) - VARHDRSZ;
char *bound_data = VARDATA(bound);
@@ -1444,7 +1442,7 @@ hash_range(PG_FUNCTION_ARGS)
upper_hash = 0;
/* Merge hashes of flags and bounds */
- result = hash_uint32((uint32) flags);
+ result = hash_bytes_uint32((uint32) flags);
result ^= lower_hash;
result = pg_rotate_left32(result, 1);
result ^= upper_hash;
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index 5ee608a2b39..b8bbe95e82e 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -30,6 +30,7 @@
#include "catalog/pg_ts_config.h"
#include "catalog/pg_ts_dict.h"
#include "catalog/pg_type.h"
+#include "commands/dbcommands.h"
#include "lib/stringinfo.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
@@ -1764,6 +1765,123 @@ regnamespacesend(PG_FUNCTION_ARGS)
}
/*
+ * regdatabasein - converts database name to database OID
+ *
+ * We also accept a numeric OID, for symmetry with the output routine.
+ *
+ * '-' signifies unknown (OID 0). In all other cases, the input must
+ * match an existing pg_database entry.
+ */
+Datum
+regdatabasein(PG_FUNCTION_ARGS)
+{
+ char *db_name_or_oid = PG_GETARG_CSTRING(0);
+ Node *escontext = fcinfo->context;
+ Oid result;
+ List *names;
+
+ /* Handle "-" or numeric OID */
+ if (parseDashOrOid(db_name_or_oid, &result, escontext))
+ PG_RETURN_OID(result);
+
+ /* The rest of this wouldn't work in bootstrap mode */
+ if (IsBootstrapProcessingMode())
+ elog(ERROR, "regdatabase values must be OIDs in bootstrap mode");
+
+ /* Normal case: see if the name matches any pg_database entry. */
+ names = stringToQualifiedNameList(db_name_or_oid, escontext);
+ if (names == NIL)
+ PG_RETURN_NULL();
+
+ if (list_length(names) != 1)
+ ereturn(escontext, (Datum) 0,
+ (errcode(ERRCODE_INVALID_NAME),
+ errmsg("invalid name syntax")));
+
+ result = get_database_oid(strVal(linitial(names)), true);
+
+ if (!OidIsValid(result))
+ ereturn(escontext, (Datum) 0,
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("database \"%s\" does not exist",
+ strVal(linitial(names)))));
+
+ PG_RETURN_OID(result);
+}
+
+/*
+ * to_regdatabase - converts database name to database OID
+ *
+ * If the name is not found, we return NULL.
+ */
+Datum
+to_regdatabase(PG_FUNCTION_ARGS)
+{
+ char *db_name = text_to_cstring(PG_GETARG_TEXT_PP(0));
+ Datum result;
+ ErrorSaveContext escontext = {T_ErrorSaveContext};
+
+ if (!DirectInputFunctionCallSafe(regdatabasein, db_name,
+ InvalidOid, -1,
+ (Node *) &escontext,
+ &result))
+ PG_RETURN_NULL();
+ PG_RETURN_DATUM(result);
+}
+
+/*
+ * regdatabaseout - converts database OID to database name
+ */
+Datum
+regdatabaseout(PG_FUNCTION_ARGS)
+{
+ Oid dboid = PG_GETARG_OID(0);
+ char *result;
+
+ if (dboid == InvalidOid)
+ {
+ result = pstrdup("-");
+ PG_RETURN_CSTRING(result);
+ }
+
+ result = get_database_name(dboid);
+
+ if (result)
+ {
+ /* pstrdup is not really necessary, but it avoids a compiler warning */
+ result = pstrdup(quote_identifier(result));
+ }
+ else
+ {
+ /* If OID doesn't match any database, return it numerically */
+ result = (char *) palloc(NAMEDATALEN);
+ snprintf(result, NAMEDATALEN, "%u", dboid);
+ }
+
+ PG_RETURN_CSTRING(result);
+}
+
+/*
+ * regdatabaserecv - converts external binary format to regdatabase
+ */
+Datum
+regdatabaserecv(PG_FUNCTION_ARGS)
+{
+ /* Exactly the same as oidrecv, so share code */
+ return oidrecv(fcinfo);
+}
+
+/*
+ * regdatabasesend - converts regdatabase to binary format
+ */
+Datum
+regdatabasesend(PG_FUNCTION_ARGS)
+{
+ /* Exactly the same as oidsend, so share code */
+ return oidsend(fcinfo);
+}
+
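The new functions above follow the established reg* pattern: regdatabasein accepts a database name, a numeric OID, or '-' (InvalidOid), and to_regdatabase wraps it with soft error handling. As a minimal sketch, other backend code could reuse that machinery when it wants InvalidOid rather than an error on lookup failure; the helper name here is hypothetical and not part of this patch:

    /* Hypothetical helper: resolve a database name or OID string softly. */
    static Oid
    lookup_database_oid_safe(char *db_name_or_oid)
    {
        Datum       result;
        ErrorSaveContext escontext = {T_ErrorSaveContext};

        if (!DirectInputFunctionCallSafe(regdatabasein, db_name_or_oid,
                                         InvalidOid, -1,
                                         (Node *) &escontext,
                                         &result))
            return InvalidOid;  /* lookup failed; error details were saved */
        return DatumGetObjectId(result);
    }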
+/*
* text_regclass: convert text to regclass
*
* This could be replaced by CoerceViaIO, except that we need to treat
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 6239900fa28..059fc5ebf60 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -30,7 +30,6 @@
#include "access/xact.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_constraint.h"
-#include "catalog/pg_proc.h"
#include "commands/trigger.h"
#include "executor/executor.h"
#include "executor/spi.h"
@@ -46,7 +45,6 @@
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
-#include "utils/rangetypes.h"
#include "utils/rel.h"
#include "utils/rls.h"
#include "utils/ruleutils.h"
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index a96b1b9c0bc..17fbfa9b410 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -103,7 +103,6 @@
#include "access/table.h"
#include "access/tableam.h"
#include "access/visibilitymap.h"
-#include "catalog/pg_am.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_statistic.h"
@@ -3799,18 +3798,25 @@ estimate_multivariate_bucketsize(PlannerInfo *root, RelOptInfo *inner,
List *hashclauses,
Selectivity *innerbucketsize)
{
- List *clauses = list_copy(hashclauses);
- List *otherclauses = NIL;
- double ndistinct = 1.0;
+ List *clauses;
+ List *otherclauses;
+ double ndistinct;
if (list_length(hashclauses) <= 1)
-
+ {
/*
* Nothing to do for a single clause. Could we employ univariate
* extended stat here?
*/
return hashclauses;
+ }
+ /* "clauses" is the list of hashclauses we've not dealt with yet */
+ clauses = list_copy(hashclauses);
+ /* "otherclauses" holds clauses we are going to return to caller */
+ otherclauses = NIL;
+ /* current estimate of ndistinct */
+ ndistinct = 1.0;
while (clauses != NIL)
{
ListCell *lc;
@@ -3875,12 +3881,13 @@ estimate_multivariate_bucketsize(PlannerInfo *root, RelOptInfo *inner,
group_rel = root->simple_rel_array[relid];
}
else if (group_relid != relid)
-
+ {
/*
* Being in the group forming state we don't need other
* clauses.
*/
continue;
+ }
/*
* We're going to add the new clause to the varinfos list. We
@@ -4620,6 +4627,7 @@ convert_to_scalar(Datum value, Oid valuetypid, Oid collid, double *scaledvalue,
case REGDICTIONARYOID:
case REGROLEOID:
case REGNAMESPACEOID:
+ case REGDATABASEOID:
*scaledvalue = convert_numeric_to_scalar(value, valuetypid,
&failure);
*scaledlobound = convert_numeric_to_scalar(lobound, boundstypid,
@@ -4752,6 +4760,7 @@ convert_numeric_to_scalar(Datum value, Oid typid, bool *failure)
case REGDICTIONARYOID:
case REGROLEOID:
case REGNAMESPACEOID:
+ case REGDATABASEOID:
/* we can treat OIDs as integers... */
return (double) DatumGetObjectId(value);
}
diff --git a/src/backend/utils/adt/tid.c b/src/backend/utils/adt/tid.c
index 1b0df111717..39dab3e42df 100644
--- a/src/backend/utils/adt/tid.c
+++ b/src/backend/utils/adt/tid.c
@@ -84,7 +84,7 @@ tidin(PG_FUNCTION_ARGS)
/*
* Cope with possibility that unsigned long is wider than BlockNumber, in
* which case strtoul will not raise an error for some values that are out
- * of the range of BlockNumber. (See similar code in oidin().)
+ * of the range of BlockNumber. (See similar code in uint32in_subr().)
*/
#if SIZEOF_LONG > 4
if (cvt != (unsigned long) blockNumber &&
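The comment's cross-reference is to the shared guard used by the OID-family input functions: on platforms where unsigned long is 64 bits, strtoul() can succeed on values too wide for a 32-bit target, so the result is cast down and compared against the original. A self-contained sketch of that pattern (illustrative; not the actual uint32in_subr code):

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Parse a decimal string as uint32, catching values that strtoul
     * silently accepts on platforms with a 64-bit unsigned long. */
    static int
    parse_uint32(const char *str, uint32_t *out)
    {
        char       *end;
        unsigned long cvt;

        errno = 0;
        cvt = strtoul(str, &end, 10);
        if (errno != 0 || end == str || *end != '\0')
            return -1;          /* not a clean decimal number */
        *out = (uint32_t) cvt;
        if (cvt != (unsigned long) *out)
            return -1;          /* value overflowed uint32 */
        return 0;
    }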
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 347089b7626..25cff56c3d0 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -5312,10 +5312,10 @@ isoweekdate2date(int isoweek, int wday, int *year, int *mon, int *mday)
int
date2isoweek(int year, int mon, int mday)
{
- float8 result;
int day0,
day4,
- dayn;
+ dayn,
+ week;
/* current day */
dayn = date2j(year, mon, mday);
@@ -5338,13 +5338,13 @@ date2isoweek(int year, int mon, int mday)
day0 = j2day(day4 - 1);
}
- result = (dayn - (day4 - day0)) / 7 + 1;
+ week = (dayn - (day4 - day0)) / 7 + 1;
/*
* Sometimes the last few days in a year will fall into the first week of
* the next year, so check for this.
*/
- if (result >= 52)
+ if (week >= 52)
{
day4 = date2j(year + 1, 1, 4);
@@ -5352,10 +5352,10 @@ date2isoweek(int year, int mon, int mday)
day0 = j2day(day4 - 1);
if (dayn >= day4 - day0)
- result = (dayn - (day4 - day0)) / 7 + 1;
+ week = (dayn - (day4 - day0)) / 7 + 1;
}
- return (int) result;
+ return week;
}
@@ -5367,10 +5367,10 @@ date2isoweek(int year, int mon, int mday)
int
date2isoyear(int year, int mon, int mday)
{
- float8 result;
int day0,
day4,
- dayn;
+ dayn,
+ week;
/* current day */
dayn = date2j(year, mon, mday);
@@ -5395,13 +5395,13 @@ date2isoyear(int year, int mon, int mday)
year--;
}
- result = (dayn - (day4 - day0)) / 7 + 1;
+ week = (dayn - (day4 - day0)) / 7 + 1;
/*
* Sometimes the last few days in a year will fall into the first week of
* the next year, so check for this.
*/
- if (result >= 52)
+ if (week >= 52)
{
day4 = date2j(year + 1, 1, 4);
@@ -6477,7 +6477,7 @@ timestamp2timestamptz_opt_overflow(Timestamp timestamp, int *overflow)
if (TIMESTAMP_NOT_FINITE(timestamp))
return timestamp;
- /* We don't expect this to fail, but check it pro forma */
+ /* timestamp2tm should not fail on valid timestamps, but cope */
if (timestamp2tm(timestamp, NULL, tm, &fsec, NULL, NULL) == 0)
{
tz = DetermineTimeZoneOffset(tm, session_timezone);
@@ -6485,23 +6485,22 @@ timestamp2timestamptz_opt_overflow(Timestamp timestamp, int *overflow)
result = dt2local(timestamp, -tz);
if (IS_VALID_TIMESTAMP(result))
- {
return result;
+ }
+
+ if (overflow)
+ {
+ if (timestamp < 0)
+ {
+ *overflow = -1;
+ TIMESTAMP_NOBEGIN(result);
}
- else if (overflow)
+ else
{
- if (result < MIN_TIMESTAMP)
- {
- *overflow = -1;
- TIMESTAMP_NOBEGIN(result);
- }
- else
- {
- *overflow = 1;
- TIMESTAMP_NOEND(result);
- }
- return result;
+ *overflow = 1;
+ TIMESTAMP_NOEND(result);
}
+ return result;
}
ereport(ERROR,
@@ -6531,27 +6530,81 @@ timestamptz_timestamp(PG_FUNCTION_ARGS)
PG_RETURN_TIMESTAMP(timestamptz2timestamp(timestamp));
}
+/*
+ * Convert timestamptz to timestamp, throwing error for overflow.
+ */
static Timestamp
timestamptz2timestamp(TimestampTz timestamp)
{
+ return timestamptz2timestamp_opt_overflow(timestamp, NULL);
+}
+
+/*
+ * Convert timestamp with time zone to timestamp.
+ *
+ * On successful conversion, *overflow is set to zero if it's not NULL.
+ *
+ * If the timestamptz is finite but out of the valid range for timestamp, then:
+ * if overflow is NULL, we throw an out-of-range error.
+ * if overflow is not NULL, we store +1 or -1 there to indicate the sign
+ * of the overflow, and return the appropriate timestamp infinity.
+ */
+Timestamp
+timestamptz2timestamp_opt_overflow(TimestampTz timestamp, int *overflow)
+{
Timestamp result;
struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
int tz;
+ if (overflow)
+ *overflow = 0;
+
if (TIMESTAMP_NOT_FINITE(timestamp))
result = timestamp;
else
{
if (timestamp2tm(timestamp, &tz, tm, &fsec, NULL, NULL) != 0)
+ {
+ if (overflow)
+ {
+ if (timestamp < 0)
+ {
+ *overflow = -1;
+ TIMESTAMP_NOBEGIN(result);
+ }
+ else
+ {
+ *overflow = 1;
+ TIMESTAMP_NOEND(result);
+ }
+ return result;
+ }
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
errmsg("timestamp out of range")));
+ }
if (tm2timestamp(tm, fsec, NULL, &result) != 0)
+ {
+ if (overflow)
+ {
+ if (timestamp < 0)
+ {
+ *overflow = -1;
+ TIMESTAMP_NOBEGIN(result);
+ }
+ else
+ {
+ *overflow = 1;
+ TIMESTAMP_NOEND(result);
+ }
+ return result;
+ }
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
errmsg("timestamp out of range")));
+ }
}
return result;
}
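Taken together, the new _opt_overflow entry point lets callers saturate instead of erroring out. A sketch of such a caller, under the assumption that the caller wants the matching infinity back (the wrapper itself is illustrative):

    /* Illustrative wrapper: map out-of-range values to +/-infinity quietly. */
    static Timestamp
    tstz_to_timestamp_saturating(TimestampTz tstz)
    {
        int         overflow;
        Timestamp   ts = timestamptz2timestamp_opt_overflow(tstz, &overflow);

        /* On overflow, ts is already TIMESTAMP_NOBEGIN or TIMESTAMP_NOEND */
        if (overflow != 0)
            elog(DEBUG1, "timestamptz value saturated %s",
                 overflow < 0 ? "low" : "high");
        return ts;
    }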
diff --git a/src/backend/utils/adt/tsvector_op.c b/src/backend/utils/adt/tsvector_op.c
index 1fa1275ca63..0625da9532f 100644
--- a/src/backend/utils/adt/tsvector_op.c
+++ b/src/backend/utils/adt/tsvector_op.c
@@ -329,8 +329,8 @@ tsvector_setweight_by_filter(PG_FUNCTION_ARGS)
if (nulls[i])
continue;
- lex = VARDATA(dlexemes[i]);
- lex_len = VARSIZE(dlexemes[i]) - VARHDRSZ;
+ lex = VARDATA(DatumGetPointer(dlexemes[i]));
+ lex_len = VARSIZE(DatumGetPointer(dlexemes[i])) - VARHDRSZ;
lex_pos = tsvector_bsearch(tsout, lex, lex_len);
if (lex_pos >= 0 && (j = POSDATALEN(tsout, entry + lex_pos)) != 0)
@@ -443,10 +443,10 @@ compare_text_lexemes(const void *va, const void *vb)
{
Datum a = *((const Datum *) va);
Datum b = *((const Datum *) vb);
- char *alex = VARDATA_ANY(a);
- int alex_len = VARSIZE_ANY_EXHDR(a);
- char *blex = VARDATA_ANY(b);
- int blex_len = VARSIZE_ANY_EXHDR(b);
+ char *alex = VARDATA_ANY(DatumGetPointer(a));
+ int alex_len = VARSIZE_ANY_EXHDR(DatumGetPointer(a));
+ char *blex = VARDATA_ANY(DatumGetPointer(b));
+ int blex_len = VARSIZE_ANY_EXHDR(DatumGetPointer(b));
return tsCompareString(alex, alex_len, blex, blex_len, false);
}
@@ -605,8 +605,8 @@ tsvector_delete_arr(PG_FUNCTION_ARGS)
if (nulls[i])
continue;
- lex = VARDATA(dlexemes[i]);
- lex_len = VARSIZE(dlexemes[i]) - VARHDRSZ;
+ lex = VARDATA(DatumGetPointer(dlexemes[i]));
+ lex_len = VARSIZE(DatumGetPointer(dlexemes[i])) - VARHDRSZ;
lex_pos = tsvector_bsearch(tsin, lex, lex_len);
if (lex_pos >= 0)
@@ -770,7 +770,7 @@ array_to_tsvector(PG_FUNCTION_ARGS)
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg("lexeme array may not contain nulls")));
- if (VARSIZE(dlexemes[i]) - VARHDRSZ == 0)
+ if (VARSIZE(DatumGetPointer(dlexemes[i])) - VARHDRSZ == 0)
ereport(ERROR,
(errcode(ERRCODE_ZERO_LENGTH_CHARACTER_STRING),
errmsg("lexeme array may not contain empty strings")));
@@ -786,7 +786,7 @@ array_to_tsvector(PG_FUNCTION_ARGS)
/* Calculate space needed for surviving lexemes. */
for (i = 0; i < nitems; i++)
- datalen += VARSIZE(dlexemes[i]) - VARHDRSZ;
+ datalen += VARSIZE(DatumGetPointer(dlexemes[i])) - VARHDRSZ;
tslen = CALCDATASIZE(nitems, datalen);
/* Allocate and fill tsvector. */
@@ -798,8 +798,8 @@ array_to_tsvector(PG_FUNCTION_ARGS)
cur = STRPTR(tsout);
for (i = 0; i < nitems; i++)
{
- char *lex = VARDATA(dlexemes[i]);
- int lex_len = VARSIZE(dlexemes[i]) - VARHDRSZ;
+ char *lex = VARDATA(DatumGetPointer(dlexemes[i]));
+ int lex_len = VARSIZE(DatumGetPointer(dlexemes[i])) - VARHDRSZ;
memcpy(cur, lex, lex_len);
arrout[i].haspos = 0;
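These hunks are mechanical, but the rule behind them is worth spelling out: deconstruct_array() and friends hand back Datum values, and a varlena macro such as VARSIZE() must be applied to a pointer, so each element needs an explicit DatumGetPointer() first. A sketch of the pattern; process_lexeme is a hypothetical consumer:

    extern void process_lexeme(const char *data, int len);  /* hypothetical */

    static void
    walk_text_array(ArrayType *array)
    {
        Datum      *dlexemes;
        bool       *nulls;
        int         nitems;

        deconstruct_array_builtin(array, TEXTOID, &dlexemes, &nulls, &nitems);

        for (int i = 0; i < nitems; i++)
        {
            text       *t;

            if (nulls[i])
                continue;
            /* Datum -> pointer before applying any varlena macro */
            t = (text *) DatumGetPointer(dlexemes[i]);
            process_lexeme(VARDATA(t), VARSIZE(t) - VARHDRSZ);
        }
    }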
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 3e4d5568bde..ffae8c23abf 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -35,7 +35,6 @@
#include "port/pg_bswap.h"
#include "regex/regex.h"
#include "utils/builtins.h"
-#include "utils/bytea.h"
#include "utils/guc.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
@@ -43,10 +42,6 @@
#include "utils/sortsupport.h"
#include "utils/varlena.h"
-
-/* GUC variable */
-int bytea_output = BYTEA_OUTPUT_HEX;
-
typedef struct varlena VarString;
/*
@@ -148,12 +143,6 @@ static int text_position_get_match_pos(TextPositionState *state);
static void text_position_cleanup(TextPositionState *state);
static void check_collation_set(Oid collid);
static int text_cmp(text *arg1, text *arg2, Oid collid);
-static bytea *bytea_catenate(bytea *t1, bytea *t2);
-static bytea *bytea_substring(Datum str,
- int S,
- int L,
- bool length_not_specified);
-static bytea *bytea_overlay(bytea *t1, bytea *t2, int sp, int sl);
static void appendStringInfoText(StringInfo str, const text *t);
static bool split_text(FunctionCallInfo fcinfo, SplitTextOutputData *tstate);
static void split_text_accum_result(SplitTextOutputData *tstate,
@@ -279,307 +268,6 @@ text_to_cstring_buffer(const text *src, char *dst, size_t dst_len)
* USER I/O ROUTINES *
*****************************************************************************/
-
-#define VAL(CH) ((CH) - '0')
-#define DIG(VAL) ((VAL) + '0')
-
-/*
- * byteain - converts from printable representation of byte array
- *
- * Non-printable characters must be passed as '\nnn' (octal) and are
- * converted to internal form. '\' must be passed as '\\'.
- * ereport(ERROR, ...) if bad form.
- *
- * BUGS:
- * The input is scanned twice.
- * The error checking of input is minimal.
- */
-Datum
-byteain(PG_FUNCTION_ARGS)
-{
- char *inputText = PG_GETARG_CSTRING(0);
- Node *escontext = fcinfo->context;
- char *tp;
- char *rp;
- int bc;
- bytea *result;
-
- /* Recognize hex input */
- if (inputText[0] == '\\' && inputText[1] == 'x')
- {
- size_t len = strlen(inputText);
-
- bc = (len - 2) / 2 + VARHDRSZ; /* maximum possible length */
- result = palloc(bc);
- bc = hex_decode_safe(inputText + 2, len - 2, VARDATA(result),
- escontext);
- SET_VARSIZE(result, bc + VARHDRSZ); /* actual length */
-
- PG_RETURN_BYTEA_P(result);
- }
-
- /* Else, it's the traditional escaped style */
- for (bc = 0, tp = inputText; *tp != '\0'; bc++)
- {
- if (tp[0] != '\\')
- tp++;
- else if ((tp[0] == '\\') &&
- (tp[1] >= '0' && tp[1] <= '3') &&
- (tp[2] >= '0' && tp[2] <= '7') &&
- (tp[3] >= '0' && tp[3] <= '7'))
- tp += 4;
- else if ((tp[0] == '\\') &&
- (tp[1] == '\\'))
- tp += 2;
- else
- {
- /*
- * one backslash, not followed by another or ### valid octal
- */
- ereturn(escontext, (Datum) 0,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type %s", "bytea")));
- }
- }
-
- bc += VARHDRSZ;
-
- result = (bytea *) palloc(bc);
- SET_VARSIZE(result, bc);
-
- tp = inputText;
- rp = VARDATA(result);
- while (*tp != '\0')
- {
- if (tp[0] != '\\')
- *rp++ = *tp++;
- else if ((tp[0] == '\\') &&
- (tp[1] >= '0' && tp[1] <= '3') &&
- (tp[2] >= '0' && tp[2] <= '7') &&
- (tp[3] >= '0' && tp[3] <= '7'))
- {
- bc = VAL(tp[1]);
- bc <<= 3;
- bc += VAL(tp[2]);
- bc <<= 3;
- *rp++ = bc + VAL(tp[3]);
-
- tp += 4;
- }
- else if ((tp[0] == '\\') &&
- (tp[1] == '\\'))
- {
- *rp++ = '\\';
- tp += 2;
- }
- else
- {
- /*
- * We should never get here. The first pass should not allow it.
- */
- ereturn(escontext, (Datum) 0,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type %s", "bytea")));
- }
- }
-
- PG_RETURN_BYTEA_P(result);
-}
-
-/*
- * byteaout - converts to printable representation of byte array
- *
- * In the traditional escaped format, non-printable characters are
- * printed as '\nnn' (octal) and '\' as '\\'.
- */
-Datum
-byteaout(PG_FUNCTION_ARGS)
-{
- bytea *vlena = PG_GETARG_BYTEA_PP(0);
- char *result;
- char *rp;
-
- if (bytea_output == BYTEA_OUTPUT_HEX)
- {
- /* Print hex format */
- rp = result = palloc(VARSIZE_ANY_EXHDR(vlena) * 2 + 2 + 1);
- *rp++ = '\\';
- *rp++ = 'x';
- rp += hex_encode(VARDATA_ANY(vlena), VARSIZE_ANY_EXHDR(vlena), rp);
- }
- else if (bytea_output == BYTEA_OUTPUT_ESCAPE)
- {
- /* Print traditional escaped format */
- char *vp;
- uint64 len;
- int i;
-
- len = 1; /* empty string has 1 char */
- vp = VARDATA_ANY(vlena);
- for (i = VARSIZE_ANY_EXHDR(vlena); i != 0; i--, vp++)
- {
- if (*vp == '\\')
- len += 2;
- else if ((unsigned char) *vp < 0x20 || (unsigned char) *vp > 0x7e)
- len += 4;
- else
- len++;
- }
-
- /*
- * In principle len can't overflow uint32 if the input fit in 1GB, but
- * for safety let's check rather than relying on palloc's internal
- * check.
- */
- if (len > MaxAllocSize)
- ereport(ERROR,
- (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg_internal("result of bytea output conversion is too large")));
- rp = result = (char *) palloc(len);
-
- vp = VARDATA_ANY(vlena);
- for (i = VARSIZE_ANY_EXHDR(vlena); i != 0; i--, vp++)
- {
- if (*vp == '\\')
- {
- *rp++ = '\\';
- *rp++ = '\\';
- }
- else if ((unsigned char) *vp < 0x20 || (unsigned char) *vp > 0x7e)
- {
- int val; /* holds unprintable chars */
-
- val = *vp;
- rp[0] = '\\';
- rp[3] = DIG(val & 07);
- val >>= 3;
- rp[2] = DIG(val & 07);
- val >>= 3;
- rp[1] = DIG(val & 03);
- rp += 4;
- }
- else
- *rp++ = *vp;
- }
- }
- else
- {
- elog(ERROR, "unrecognized \"bytea_output\" setting: %d",
- bytea_output);
- rp = result = NULL; /* keep compiler quiet */
- }
- *rp = '\0';
- PG_RETURN_CSTRING(result);
-}
-
-/*
- * bytearecv - converts external binary format to bytea
- */
-Datum
-bytearecv(PG_FUNCTION_ARGS)
-{
- StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
- bytea *result;
- int nbytes;
-
- nbytes = buf->len - buf->cursor;
- result = (bytea *) palloc(nbytes + VARHDRSZ);
- SET_VARSIZE(result, nbytes + VARHDRSZ);
- pq_copymsgbytes(buf, VARDATA(result), nbytes);
- PG_RETURN_BYTEA_P(result);
-}
-
-/*
- * byteasend - converts bytea to binary format
- *
- * This is a special case: just copy the input...
- */
-Datum
-byteasend(PG_FUNCTION_ARGS)
-{
- bytea *vlena = PG_GETARG_BYTEA_P_COPY(0);
-
- PG_RETURN_BYTEA_P(vlena);
-}
-
-Datum
-bytea_string_agg_transfn(PG_FUNCTION_ARGS)
-{
- StringInfo state;
-
- state = PG_ARGISNULL(0) ? NULL : (StringInfo) PG_GETARG_POINTER(0);
-
- /* Append the value unless null, preceding it with the delimiter. */
- if (!PG_ARGISNULL(1))
- {
- bytea *value = PG_GETARG_BYTEA_PP(1);
- bool isfirst = false;
-
- /*
- * You might think we can just throw away the first delimiter, however
- * we must keep it as we may be a parallel worker doing partial
- * aggregation building a state to send to the main process. We need
- * to keep the delimiter of every aggregation so that the combine
- * function can properly join up the strings of two separately
- * partially aggregated results. The first delimiter is only stripped
- * off in the final function. To know how much to strip off the front
- * of the string, we store the length of the first delimiter in the
- * StringInfo's cursor field, which we don't otherwise need here.
- */
- if (state == NULL)
- {
- state = makeStringAggState(fcinfo);
- isfirst = true;
- }
-
- if (!PG_ARGISNULL(2))
- {
- bytea *delim = PG_GETARG_BYTEA_PP(2);
-
- appendBinaryStringInfo(state, VARDATA_ANY(delim),
- VARSIZE_ANY_EXHDR(delim));
- if (isfirst)
- state->cursor = VARSIZE_ANY_EXHDR(delim);
- }
-
- appendBinaryStringInfo(state, VARDATA_ANY(value),
- VARSIZE_ANY_EXHDR(value));
- }
-
- /*
- * The transition type for string_agg() is declared to be "internal",
- * which is a pass-by-value type the same size as a pointer.
- */
- if (state)
- PG_RETURN_POINTER(state);
- PG_RETURN_NULL();
-}
-
-Datum
-bytea_string_agg_finalfn(PG_FUNCTION_ARGS)
-{
- StringInfo state;
-
- /* cannot be called directly because of internal-type argument */
- Assert(AggCheckCallContext(fcinfo, NULL));
-
- state = PG_ARGISNULL(0) ? NULL : (StringInfo) PG_GETARG_POINTER(0);
-
- if (state != NULL)
- {
- /* As per comment in transfn, strip data before the cursor position */
- bytea *result;
- int strippedlen = state->len - state->cursor;
-
- result = (bytea *) palloc(strippedlen + VARHDRSZ);
- SET_VARSIZE(result, strippedlen + VARHDRSZ);
- memcpy(VARDATA(result), &state->data[state->cursor], strippedlen);
- PG_RETURN_BYTEA_P(result);
- }
- else
- PG_RETURN_NULL();
-}
-
/*
* textin - converts cstring to internal representation
*/
@@ -2959,467 +2647,6 @@ bttext_pattern_sortsupport(PG_FUNCTION_ARGS)
}
-/*-------------------------------------------------------------
- * byteaoctetlen
- *
- * get the number of bytes contained in an instance of type 'bytea'
- *-------------------------------------------------------------
- */
-Datum
-byteaoctetlen(PG_FUNCTION_ARGS)
-{
- Datum str = PG_GETARG_DATUM(0);
-
- /* We need not detoast the input at all */
- PG_RETURN_INT32(toast_raw_datum_size(str) - VARHDRSZ);
-}
-
-/*
- * byteacat -
- * takes two bytea* and returns a bytea* that is the concatenation of
- * the two.
- *
- * Cloned from textcat and modified as required.
- */
-Datum
-byteacat(PG_FUNCTION_ARGS)
-{
- bytea *t1 = PG_GETARG_BYTEA_PP(0);
- bytea *t2 = PG_GETARG_BYTEA_PP(1);
-
- PG_RETURN_BYTEA_P(bytea_catenate(t1, t2));
-}
-
-/*
- * bytea_catenate
- * Guts of byteacat(), broken out so it can be used by other functions
- *
- * Arguments can be in short-header form, but not compressed or out-of-line
- */
-static bytea *
-bytea_catenate(bytea *t1, bytea *t2)
-{
- bytea *result;
- int len1,
- len2,
- len;
- char *ptr;
-
- len1 = VARSIZE_ANY_EXHDR(t1);
- len2 = VARSIZE_ANY_EXHDR(t2);
-
- /* paranoia ... probably should throw error instead? */
- if (len1 < 0)
- len1 = 0;
- if (len2 < 0)
- len2 = 0;
-
- len = len1 + len2 + VARHDRSZ;
- result = (bytea *) palloc(len);
-
- /* Set size of result string... */
- SET_VARSIZE(result, len);
-
- /* Fill data field of result string... */
- ptr = VARDATA(result);
- if (len1 > 0)
- memcpy(ptr, VARDATA_ANY(t1), len1);
- if (len2 > 0)
- memcpy(ptr + len1, VARDATA_ANY(t2), len2);
-
- return result;
-}
-
-#define PG_STR_GET_BYTEA(str_) \
- DatumGetByteaPP(DirectFunctionCall1(byteain, CStringGetDatum(str_)))
-
-/*
- * bytea_substr()
- * Return a substring starting at the specified position.
- * Cloned from text_substr and modified as required.
- *
- * Input:
- * - string
- * - starting position (is one-based)
- * - string length (optional)
- *
- * If the starting position is zero or less, then return from the start of the string
- * adjusting the length to be consistent with the "negative start" per SQL.
- * If the length is less than zero, an ERROR is thrown. If no third argument
- * (length) is provided, the length to the end of the string is assumed.
- */
-Datum
-bytea_substr(PG_FUNCTION_ARGS)
-{
- PG_RETURN_BYTEA_P(bytea_substring(PG_GETARG_DATUM(0),
- PG_GETARG_INT32(1),
- PG_GETARG_INT32(2),
- false));
-}
-
-/*
- * bytea_substr_no_len -
- * Wrapper to avoid opr_sanity failure due to
- * one function accepting a different number of args.
- */
-Datum
-bytea_substr_no_len(PG_FUNCTION_ARGS)
-{
- PG_RETURN_BYTEA_P(bytea_substring(PG_GETARG_DATUM(0),
- PG_GETARG_INT32(1),
- -1,
- true));
-}
-
-static bytea *
-bytea_substring(Datum str,
- int S,
- int L,
- bool length_not_specified)
-{
- int32 S1; /* adjusted start position */
- int32 L1; /* adjusted substring length */
- int32 E; /* end position */
-
- /*
- * The logic here should generally match text_substring().
- */
- S1 = Max(S, 1);
-
- if (length_not_specified)
- {
- /*
- * Not passed a length - DatumGetByteaPSlice() grabs everything to the
- * end of the string if we pass it a negative value for length.
- */
- L1 = -1;
- }
- else if (L < 0)
- {
- /* SQL99 says to throw an error for E < S, i.e., negative length */
- ereport(ERROR,
- (errcode(ERRCODE_SUBSTRING_ERROR),
- errmsg("negative substring length not allowed")));
- L1 = -1; /* silence stupider compilers */
- }
- else if (pg_add_s32_overflow(S, L, &E))
- {
- /*
- * L could be large enough for S + L to overflow, in which case the
- * substring must run to end of string.
- */
- L1 = -1;
- }
- else
- {
- /*
- * A zero or negative value for the end position can happen if the
- * start was negative or one. SQL99 says to return a zero-length
- * string.
- */
- if (E < 1)
- return PG_STR_GET_BYTEA("");
-
- L1 = E - S1;
- }
-
- /*
- * If the start position is past the end of the string, SQL99 says to
- * return a zero-length string -- DatumGetByteaPSlice() will do that for
- * us. We need only convert S1 to zero-based starting position.
- */
- return DatumGetByteaPSlice(str, S1 - 1, L1);
-}
-
-/*
- * byteaoverlay
- * Replace specified substring of first string with second
- *
- * The SQL standard defines OVERLAY() in terms of substring and concatenation.
- * This code is a direct implementation of what the standard says.
- */
-Datum
-byteaoverlay(PG_FUNCTION_ARGS)
-{
- bytea *t1 = PG_GETARG_BYTEA_PP(0);
- bytea *t2 = PG_GETARG_BYTEA_PP(1);
- int sp = PG_GETARG_INT32(2); /* substring start position */
- int sl = PG_GETARG_INT32(3); /* substring length */
-
- PG_RETURN_BYTEA_P(bytea_overlay(t1, t2, sp, sl));
-}
-
-Datum
-byteaoverlay_no_len(PG_FUNCTION_ARGS)
-{
- bytea *t1 = PG_GETARG_BYTEA_PP(0);
- bytea *t2 = PG_GETARG_BYTEA_PP(1);
- int sp = PG_GETARG_INT32(2); /* substring start position */
- int sl;
-
- sl = VARSIZE_ANY_EXHDR(t2); /* defaults to length(t2) */
- PG_RETURN_BYTEA_P(bytea_overlay(t1, t2, sp, sl));
-}
-
-static bytea *
-bytea_overlay(bytea *t1, bytea *t2, int sp, int sl)
-{
- bytea *result;
- bytea *s1;
- bytea *s2;
- int sp_pl_sl;
-
- /*
- * Check for possible integer-overflow cases. For negative sp, throw a
- * "substring length" error because that's what should be expected
- * according to the spec's definition of OVERLAY().
- */
- if (sp <= 0)
- ereport(ERROR,
- (errcode(ERRCODE_SUBSTRING_ERROR),
- errmsg("negative substring length not allowed")));
- if (pg_add_s32_overflow(sp, sl, &sp_pl_sl))
- ereport(ERROR,
- (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("integer out of range")));
-
- s1 = bytea_substring(PointerGetDatum(t1), 1, sp - 1, false);
- s2 = bytea_substring(PointerGetDatum(t1), sp_pl_sl, -1, true);
- result = bytea_catenate(s1, t2);
- result = bytea_catenate(result, s2);
-
- return result;
-}
-
-/*
- * bit_count
- */
-Datum
-bytea_bit_count(PG_FUNCTION_ARGS)
-{
- bytea *t1 = PG_GETARG_BYTEA_PP(0);
-
- PG_RETURN_INT64(pg_popcount(VARDATA_ANY(t1), VARSIZE_ANY_EXHDR(t1)));
-}
-
-/*
- * byteapos -
- * Return the position of the specified substring.
- * Implements the SQL POSITION() function.
- * Cloned from textpos and modified as required.
- */
-Datum
-byteapos(PG_FUNCTION_ARGS)
-{
- bytea *t1 = PG_GETARG_BYTEA_PP(0);
- bytea *t2 = PG_GETARG_BYTEA_PP(1);
- int pos;
- int px,
- p;
- int len1,
- len2;
- char *p1,
- *p2;
-
- len1 = VARSIZE_ANY_EXHDR(t1);
- len2 = VARSIZE_ANY_EXHDR(t2);
-
- if (len2 <= 0)
- PG_RETURN_INT32(1); /* result for empty pattern */
-
- p1 = VARDATA_ANY(t1);
- p2 = VARDATA_ANY(t2);
-
- pos = 0;
- px = (len1 - len2);
- for (p = 0; p <= px; p++)
- {
- if ((*p2 == *p1) && (memcmp(p1, p2, len2) == 0))
- {
- pos = p + 1;
- break;
- };
- p1++;
- };
-
- PG_RETURN_INT32(pos);
-}
-
-/*-------------------------------------------------------------
- * byteaGetByte
- *
- * this routine treats "bytea" as an array of bytes.
- * It returns the Nth byte (a number between 0 and 255).
- *-------------------------------------------------------------
- */
-Datum
-byteaGetByte(PG_FUNCTION_ARGS)
-{
- bytea *v = PG_GETARG_BYTEA_PP(0);
- int32 n = PG_GETARG_INT32(1);
- int len;
- int byte;
-
- len = VARSIZE_ANY_EXHDR(v);
-
- if (n < 0 || n >= len)
- ereport(ERROR,
- (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("index %d out of valid range, 0..%d",
- n, len - 1)));
-
- byte = ((unsigned char *) VARDATA_ANY(v))[n];
-
- PG_RETURN_INT32(byte);
-}
-
-/*-------------------------------------------------------------
- * byteaGetBit
- *
- * This routine treats a "bytea" type like an array of bits.
- * It returns the value of the Nth bit (0 or 1).
- *
- *-------------------------------------------------------------
- */
-Datum
-byteaGetBit(PG_FUNCTION_ARGS)
-{
- bytea *v = PG_GETARG_BYTEA_PP(0);
- int64 n = PG_GETARG_INT64(1);
- int byteNo,
- bitNo;
- int len;
- int byte;
-
- len = VARSIZE_ANY_EXHDR(v);
-
- if (n < 0 || n >= (int64) len * 8)
- ereport(ERROR,
- (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("index %" PRId64 " out of valid range, 0..%" PRId64,
- n, (int64) len * 8 - 1)));
-
- /* n/8 is now known < len, so safe to cast to int */
- byteNo = (int) (n / 8);
- bitNo = (int) (n % 8);
-
- byte = ((unsigned char *) VARDATA_ANY(v))[byteNo];
-
- if (byte & (1 << bitNo))
- PG_RETURN_INT32(1);
- else
- PG_RETURN_INT32(0);
-}
-
-/*-------------------------------------------------------------
- * byteaSetByte
- *
- * Given an instance of type 'bytea' creates a new one with
- * the Nth byte set to the given value.
- *
- *-------------------------------------------------------------
- */
-Datum
-byteaSetByte(PG_FUNCTION_ARGS)
-{
- bytea *res = PG_GETARG_BYTEA_P_COPY(0);
- int32 n = PG_GETARG_INT32(1);
- int32 newByte = PG_GETARG_INT32(2);
- int len;
-
- len = VARSIZE(res) - VARHDRSZ;
-
- if (n < 0 || n >= len)
- ereport(ERROR,
- (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("index %d out of valid range, 0..%d",
- n, len - 1)));
-
- /*
- * Now set the byte.
- */
- ((unsigned char *) VARDATA(res))[n] = newByte;
-
- PG_RETURN_BYTEA_P(res);
-}
-
-/*-------------------------------------------------------------
- * byteaSetBit
- *
- * Given an instance of type 'bytea' creates a new one with
- * the Nth bit set to the given value.
- *
- *-------------------------------------------------------------
- */
-Datum
-byteaSetBit(PG_FUNCTION_ARGS)
-{
- bytea *res = PG_GETARG_BYTEA_P_COPY(0);
- int64 n = PG_GETARG_INT64(1);
- int32 newBit = PG_GETARG_INT32(2);
- int len;
- int oldByte,
- newByte;
- int byteNo,
- bitNo;
-
- len = VARSIZE(res) - VARHDRSZ;
-
- if (n < 0 || n >= (int64) len * 8)
- ereport(ERROR,
- (errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("index %" PRId64 " out of valid range, 0..%" PRId64,
- n, (int64) len * 8 - 1)));
-
- /* n/8 is now known < len, so safe to cast to int */
- byteNo = (int) (n / 8);
- bitNo = (int) (n % 8);
-
- /*
- * sanity check!
- */
- if (newBit != 0 && newBit != 1)
- ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("new bit must be 0 or 1")));
-
- /*
- * Update the byte.
- */
- oldByte = ((unsigned char *) VARDATA(res))[byteNo];
-
- if (newBit == 0)
- newByte = oldByte & (~(1 << bitNo));
- else
- newByte = oldByte | (1 << bitNo);
-
- ((unsigned char *) VARDATA(res))[byteNo] = newByte;
-
- PG_RETURN_BYTEA_P(res);
-}
-
-/*
- * Return reversed bytea
- */
-Datum
-bytea_reverse(PG_FUNCTION_ARGS)
-{
- bytea *v = PG_GETARG_BYTEA_PP(0);
- const char *p = VARDATA_ANY(v);
- int len = VARSIZE_ANY_EXHDR(v);
- const char *endp = p + len;
- bytea *result = palloc(len + VARHDRSZ);
- char *dst = (char *) VARDATA(result) + len;
-
- SET_VARSIZE(result, len + VARHDRSZ);
-
- while (p < endp)
- *(--dst) = *p++;
-
- PG_RETURN_BYTEA_P(result);
-}
-
-
/* text_name()
* Converts a text type to a Name type.
*/
@@ -3849,331 +3076,6 @@ SplitGUCList(char *rawstring, char separator,
return true;
}
-
-/*****************************************************************************
- * Comparison Functions used for bytea
- *
- * Note: btree indexes need these routines not to leak memory; therefore,
- * be careful to free working copies of toasted datums. Most places don't
- * need to be so careful.
- *****************************************************************************/
-
-Datum
-byteaeq(PG_FUNCTION_ARGS)
-{
- Datum arg1 = PG_GETARG_DATUM(0);
- Datum arg2 = PG_GETARG_DATUM(1);
- bool result;
- Size len1,
- len2;
-
- /*
- * We can use a fast path for unequal lengths, which might save us from
- * having to detoast one or both values.
- */
- len1 = toast_raw_datum_size(arg1);
- len2 = toast_raw_datum_size(arg2);
- if (len1 != len2)
- result = false;
- else
- {
- bytea *barg1 = DatumGetByteaPP(arg1);
- bytea *barg2 = DatumGetByteaPP(arg2);
-
- result = (memcmp(VARDATA_ANY(barg1), VARDATA_ANY(barg2),
- len1 - VARHDRSZ) == 0);
-
- PG_FREE_IF_COPY(barg1, 0);
- PG_FREE_IF_COPY(barg2, 1);
- }
-
- PG_RETURN_BOOL(result);
-}
-
-Datum
-byteane(PG_FUNCTION_ARGS)
-{
- Datum arg1 = PG_GETARG_DATUM(0);
- Datum arg2 = PG_GETARG_DATUM(1);
- bool result;
- Size len1,
- len2;
-
- /*
- * We can use a fast path for unequal lengths, which might save us from
- * having to detoast one or both values.
- */
- len1 = toast_raw_datum_size(arg1);
- len2 = toast_raw_datum_size(arg2);
- if (len1 != len2)
- result = true;
- else
- {
- bytea *barg1 = DatumGetByteaPP(arg1);
- bytea *barg2 = DatumGetByteaPP(arg2);
-
- result = (memcmp(VARDATA_ANY(barg1), VARDATA_ANY(barg2),
- len1 - VARHDRSZ) != 0);
-
- PG_FREE_IF_COPY(barg1, 0);
- PG_FREE_IF_COPY(barg2, 1);
- }
-
- PG_RETURN_BOOL(result);
-}
-
-Datum
-bytealt(PG_FUNCTION_ARGS)
-{
- bytea *arg1 = PG_GETARG_BYTEA_PP(0);
- bytea *arg2 = PG_GETARG_BYTEA_PP(1);
- int len1,
- len2;
- int cmp;
-
- len1 = VARSIZE_ANY_EXHDR(arg1);
- len2 = VARSIZE_ANY_EXHDR(arg2);
-
- cmp = memcmp(VARDATA_ANY(arg1), VARDATA_ANY(arg2), Min(len1, len2));
-
- PG_FREE_IF_COPY(arg1, 0);
- PG_FREE_IF_COPY(arg2, 1);
-
- PG_RETURN_BOOL((cmp < 0) || ((cmp == 0) && (len1 < len2)));
-}
-
-Datum
-byteale(PG_FUNCTION_ARGS)
-{
- bytea *arg1 = PG_GETARG_BYTEA_PP(0);
- bytea *arg2 = PG_GETARG_BYTEA_PP(1);
- int len1,
- len2;
- int cmp;
-
- len1 = VARSIZE_ANY_EXHDR(arg1);
- len2 = VARSIZE_ANY_EXHDR(arg2);
-
- cmp = memcmp(VARDATA_ANY(arg1), VARDATA_ANY(arg2), Min(len1, len2));
-
- PG_FREE_IF_COPY(arg1, 0);
- PG_FREE_IF_COPY(arg2, 1);
-
- PG_RETURN_BOOL((cmp < 0) || ((cmp == 0) && (len1 <= len2)));
-}
-
-Datum
-byteagt(PG_FUNCTION_ARGS)
-{
- bytea *arg1 = PG_GETARG_BYTEA_PP(0);
- bytea *arg2 = PG_GETARG_BYTEA_PP(1);
- int len1,
- len2;
- int cmp;
-
- len1 = VARSIZE_ANY_EXHDR(arg1);
- len2 = VARSIZE_ANY_EXHDR(arg2);
-
- cmp = memcmp(VARDATA_ANY(arg1), VARDATA_ANY(arg2), Min(len1, len2));
-
- PG_FREE_IF_COPY(arg1, 0);
- PG_FREE_IF_COPY(arg2, 1);
-
- PG_RETURN_BOOL((cmp > 0) || ((cmp == 0) && (len1 > len2)));
-}
-
-Datum
-byteage(PG_FUNCTION_ARGS)
-{
- bytea *arg1 = PG_GETARG_BYTEA_PP(0);
- bytea *arg2 = PG_GETARG_BYTEA_PP(1);
- int len1,
- len2;
- int cmp;
-
- len1 = VARSIZE_ANY_EXHDR(arg1);
- len2 = VARSIZE_ANY_EXHDR(arg2);
-
- cmp = memcmp(VARDATA_ANY(arg1), VARDATA_ANY(arg2), Min(len1, len2));
-
- PG_FREE_IF_COPY(arg1, 0);
- PG_FREE_IF_COPY(arg2, 1);
-
- PG_RETURN_BOOL((cmp > 0) || ((cmp == 0) && (len1 >= len2)));
-}
-
-Datum
-byteacmp(PG_FUNCTION_ARGS)
-{
- bytea *arg1 = PG_GETARG_BYTEA_PP(0);
- bytea *arg2 = PG_GETARG_BYTEA_PP(1);
- int len1,
- len2;
- int cmp;
-
- len1 = VARSIZE_ANY_EXHDR(arg1);
- len2 = VARSIZE_ANY_EXHDR(arg2);
-
- cmp = memcmp(VARDATA_ANY(arg1), VARDATA_ANY(arg2), Min(len1, len2));
- if ((cmp == 0) && (len1 != len2))
- cmp = (len1 < len2) ? -1 : 1;
-
- PG_FREE_IF_COPY(arg1, 0);
- PG_FREE_IF_COPY(arg2, 1);
-
- PG_RETURN_INT32(cmp);
-}
-
-Datum
-bytea_larger(PG_FUNCTION_ARGS)
-{
- bytea *arg1 = PG_GETARG_BYTEA_PP(0);
- bytea *arg2 = PG_GETARG_BYTEA_PP(1);
- bytea *result;
- int len1,
- len2;
- int cmp;
-
- len1 = VARSIZE_ANY_EXHDR(arg1);
- len2 = VARSIZE_ANY_EXHDR(arg2);
-
- cmp = memcmp(VARDATA_ANY(arg1), VARDATA_ANY(arg2), Min(len1, len2));
- result = ((cmp > 0) || ((cmp == 0) && (len1 > len2)) ? arg1 : arg2);
-
- PG_RETURN_BYTEA_P(result);
-}
-
-Datum
-bytea_smaller(PG_FUNCTION_ARGS)
-{
- bytea *arg1 = PG_GETARG_BYTEA_PP(0);
- bytea *arg2 = PG_GETARG_BYTEA_PP(1);
- bytea *result;
- int len1,
- len2;
- int cmp;
-
- len1 = VARSIZE_ANY_EXHDR(arg1);
- len2 = VARSIZE_ANY_EXHDR(arg2);
-
- cmp = memcmp(VARDATA_ANY(arg1), VARDATA_ANY(arg2), Min(len1, len2));
- result = ((cmp < 0) || ((cmp == 0) && (len1 < len2)) ? arg1 : arg2);
-
- PG_RETURN_BYTEA_P(result);
-}
-
-Datum
-bytea_sortsupport(PG_FUNCTION_ARGS)
-{
- SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
- MemoryContext oldcontext;
-
- oldcontext = MemoryContextSwitchTo(ssup->ssup_cxt);
-
- /* Use generic string SortSupport, forcing "C" collation */
- varstr_sortsupport(ssup, BYTEAOID, C_COLLATION_OID);
-
- MemoryContextSwitchTo(oldcontext);
-
- PG_RETURN_VOID();
-}
-
-/* Cast bytea -> int2 */
-Datum
-bytea_int2(PG_FUNCTION_ARGS)
-{
- bytea *v = PG_GETARG_BYTEA_PP(0);
- int len = VARSIZE_ANY_EXHDR(v);
- uint16 result;
-
- /* Check that the byte array is not too long */
- if (len > sizeof(result))
- ereport(ERROR,
- errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("smallint out of range"));
-
- /* Convert it to an integer; most significant bytes come first */
- result = 0;
- for (int i = 0; i < len; i++)
- {
- result <<= BITS_PER_BYTE;
- result |= ((unsigned char *) VARDATA_ANY(v))[i];
- }
-
- PG_RETURN_INT16(result);
-}
-
-/* Cast bytea -> int4 */
-Datum
-bytea_int4(PG_FUNCTION_ARGS)
-{
- bytea *v = PG_GETARG_BYTEA_PP(0);
- int len = VARSIZE_ANY_EXHDR(v);
- uint32 result;
-
- /* Check that the byte array is not too long */
- if (len > sizeof(result))
- ereport(ERROR,
- errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("integer out of range"));
-
- /* Convert it to an integer; most significant bytes come first */
- result = 0;
- for (int i = 0; i < len; i++)
- {
- result <<= BITS_PER_BYTE;
- result |= ((unsigned char *) VARDATA_ANY(v))[i];
- }
-
- PG_RETURN_INT32(result);
-}
-
-/* Cast bytea -> int8 */
-Datum
-bytea_int8(PG_FUNCTION_ARGS)
-{
- bytea *v = PG_GETARG_BYTEA_PP(0);
- int len = VARSIZE_ANY_EXHDR(v);
- uint64 result;
-
- /* Check that the byte array is not too long */
- if (len > sizeof(result))
- ereport(ERROR,
- errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("bigint out of range"));
-
- /* Convert it to an integer; most significant bytes come first */
- result = 0;
- for (int i = 0; i < len; i++)
- {
- result <<= BITS_PER_BYTE;
- result |= ((unsigned char *) VARDATA_ANY(v))[i];
- }
-
- PG_RETURN_INT64(result);
-}
-
-/* Cast int2 -> bytea; can just use int2send() */
-Datum
-int2_bytea(PG_FUNCTION_ARGS)
-{
- return int2send(fcinfo);
-}
-
-/* Cast int4 -> bytea; can just use int4send() */
-Datum
-int4_bytea(PG_FUNCTION_ARGS)
-{
- return int4send(fcinfo);
-}
-
-/* Cast int8 -> bytea; can just use int8send() */
-Datum
-int8_bytea(PG_FUNCTION_ARGS)
-{
- return int8send(fcinfo);
-}
-
/*
* appendStringInfoText
*
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index a4150bff2ea..182e8f75db7 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -529,14 +529,36 @@ xmltext(PG_FUNCTION_ARGS)
#ifdef USE_LIBXML
text *arg = PG_GETARG_TEXT_PP(0);
text *result;
- xmlChar *xmlbuf = NULL;
+ volatile xmlChar *xmlbuf = NULL;
+ PgXmlErrorContext *xmlerrcxt;
+
+ /* First we gotta spin up some error handling. */
+ xmlerrcxt = pg_xml_init(PG_XML_STRICTNESS_ALL);
+
+ PG_TRY();
+ {
+ xmlbuf = xmlEncodeSpecialChars(NULL, xml_text2xmlChar(arg));
+
+ if (xmlbuf == NULL || xmlerrcxt->err_occurred)
+ xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
+ "could not allocate xmlChar");
- xmlbuf = xmlEncodeSpecialChars(NULL, xml_text2xmlChar(arg));
+ result = cstring_to_text_with_len((const char *) xmlbuf,
+ xmlStrlen((const xmlChar *) xmlbuf));
+ }
+ PG_CATCH();
+ {
+ if (xmlbuf)
+ xmlFree((xmlChar *) xmlbuf);
- Assert(xmlbuf);
+ pg_xml_done(xmlerrcxt, true);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ xmlFree((xmlChar *) xmlbuf);
+ pg_xml_done(xmlerrcxt, false);
- result = cstring_to_text_with_len((const char *) xmlbuf, xmlStrlen(xmlbuf));
- xmlFree(xmlbuf);
PG_RETURN_XML_P(result);
#else
NO_XML_SUPPORT();
@@ -663,7 +685,7 @@ xmltotext_with_options(xmltype *data, XmlOptionType xmloption_arg, bool indent)
volatile xmlBufferPtr buf = NULL;
volatile xmlSaveCtxtPtr ctxt = NULL;
ErrorSaveContext escontext = {T_ErrorSaveContext};
- PgXmlErrorContext *xmlerrcxt;
+ PgXmlErrorContext *volatile xmlerrcxt = NULL;
#endif
if (xmloption_arg != XMLOPTION_DOCUMENT && !indent)
@@ -704,13 +726,18 @@ xmltotext_with_options(xmltype *data, XmlOptionType xmloption_arg, bool indent)
return (text *) data;
}
- /* Otherwise, we gotta spin up some error handling. */
- xmlerrcxt = pg_xml_init(PG_XML_STRICTNESS_ALL);
-
+ /*
+ * Otherwise, we gotta spin up some error handling. Unlike most other
+ * routines in this module, we already have a libxml "doc" structure to
+ * free, so we need to call pg_xml_init() inside the PG_TRY and be
+ * prepared for it to fail (typically due to palloc OOM).
+ */
PG_TRY();
{
size_t decl_len = 0;
+ xmlerrcxt = pg_xml_init(PG_XML_STRICTNESS_ALL);
+
/* The serialized data will go into this buffer. */
buf = xmlBufferCreate();
@@ -770,7 +797,10 @@ xmltotext_with_options(xmltype *data, XmlOptionType xmloption_arg, bool indent)
if (oldroot != NULL)
xmlFreeNode(oldroot);
- xmlAddChildList(root, content_nodes);
+ if (xmlAddChildList(root, content_nodes) == NULL ||
+ xmlerrcxt->err_occurred)
+ xml_ereport(xmlerrcxt, ERROR, ERRCODE_INTERNAL_ERROR,
+ "could not append xml node list");
/*
* We use this node to insert newlines in the dump. Note: in at
@@ -838,10 +868,10 @@ xmltotext_with_options(xmltype *data, XmlOptionType xmloption_arg, bool indent)
xmlSaveClose(ctxt);
if (buf)
xmlBufferFree(buf);
- if (doc)
- xmlFreeDoc(doc);
+ xmlFreeDoc(doc);
- pg_xml_done(xmlerrcxt, true);
+ if (xmlerrcxt)
+ pg_xml_done(xmlerrcxt, true);
PG_RE_THROW();
}
@@ -931,7 +961,10 @@ xmlelement(XmlExpr *xexpr,
xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
"could not allocate xmlTextWriter");
- xmlTextWriterStartElement(writer, (xmlChar *) xexpr->name);
+ if (xmlTextWriterStartElement(writer, (xmlChar *) xexpr->name) < 0 ||
+ xmlerrcxt->err_occurred)
+ xml_ereport(xmlerrcxt, ERROR, ERRCODE_INTERNAL_ERROR,
+ "could not start xml element");
forboth(arg, named_arg_strings, narg, xexpr->arg_names)
{
@@ -939,19 +972,30 @@ xmlelement(XmlExpr *xexpr,
char *argname = strVal(lfirst(narg));
if (str)
- xmlTextWriterWriteAttribute(writer,
- (xmlChar *) argname,
- (xmlChar *) str);
+ {
+ if (xmlTextWriterWriteAttribute(writer,
+ (xmlChar *) argname,
+ (xmlChar *) str) < 0 ||
+ xmlerrcxt->err_occurred)
+ xml_ereport(xmlerrcxt, ERROR, ERRCODE_INTERNAL_ERROR,
+ "could not write xml attribute");
+ }
}
foreach(arg, arg_strings)
{
char *str = (char *) lfirst(arg);
- xmlTextWriterWriteRaw(writer, (xmlChar *) str);
+ if (xmlTextWriterWriteRaw(writer, (xmlChar *) str) < 0 ||
+ xmlerrcxt->err_occurred)
+ xml_ereport(xmlerrcxt, ERROR, ERRCODE_INTERNAL_ERROR,
+ "could not write raw xml text");
}
- xmlTextWriterEndElement(writer);
+ if (xmlTextWriterEndElement(writer) < 0 ||
+ xmlerrcxt->err_occurred)
+ xml_ereport(xmlerrcxt, ERROR, ERRCODE_INTERNAL_ERROR,
+ "could not end xml element");
/* we MUST do this now to flush data out to the buffer ... */
xmlFreeTextWriter(writer);
@@ -1725,7 +1769,7 @@ xml_doctype_in_content(const xmlChar *str)
* xmloption_arg, but a DOCTYPE node in the input can force DOCUMENT mode).
*
* If parsed_nodes isn't NULL and we parse in CONTENT mode, the list
- * of parsed nodes from the xmlParseInNodeContext call will be returned
+ * of parsed nodes from the xmlParseBalancedChunkMemory call will be returned
* to *parsed_nodes. (It is caller's responsibility to free that.)
*
* Errors normally result in ereport(ERROR), but if escontext is an
@@ -1751,6 +1795,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
PgXmlErrorContext *xmlerrcxt;
volatile xmlParserCtxtPtr ctxt = NULL;
volatile xmlDocPtr doc = NULL;
+ volatile int save_keep_blanks = -1;
/*
* This step looks annoyingly redundant, but we must do it to have a
@@ -1778,7 +1823,6 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
PG_TRY();
{
bool parse_as_document = false;
- int options;
int res_code;
size_t count = 0;
xmlChar *version = NULL;
@@ -1809,18 +1853,6 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
parse_as_document = true;
}
- /*
- * Select parse options.
- *
- * Note that here we try to apply DTD defaults (XML_PARSE_DTDATTR)
- * according to SQL/XML:2008 GR 10.16.7.d: 'Default values defined by
- * internal DTD are applied'. As for external DTDs, we try to support
- * them too (see SQL/XML:2008 GR 10.16.7.e), but that doesn't really
- * happen because xmlPgEntityLoader prevents it.
- */
- options = XML_PARSE_NOENT | XML_PARSE_DTDATTR
- | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS);
-
/* initialize output parameters */
if (parsed_xmloptiontype != NULL)
*parsed_xmloptiontype = parse_as_document ? XMLOPTION_DOCUMENT :
@@ -1830,11 +1862,26 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
if (parse_as_document)
{
+ int options;
+
+ /* set up parser context used by xmlCtxtReadDoc */
ctxt = xmlNewParserCtxt();
if (ctxt == NULL || xmlerrcxt->err_occurred)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
"could not allocate parser context");
+ /*
+ * Select parse options.
+ *
+ * Note that here we try to apply DTD defaults (XML_PARSE_DTDATTR)
+ * according to SQL/XML:2008 GR 10.16.7.d: 'Default values defined
+ * by internal DTD are applied'. As for external DTDs, we try to
+ * support them too (see SQL/XML:2008 GR 10.16.7.e), but that
+ * doesn't really happen because xmlPgEntityLoader prevents it.
+ */
+ options = XML_PARSE_NOENT | XML_PARSE_DTDATTR
+ | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS);
+
doc = xmlCtxtReadDoc(ctxt, utf8string,
NULL, /* no URL */
"UTF-8",
@@ -1856,10 +1903,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
}
else
{
- xmlNodePtr root;
- xmlNodePtr oldroot PG_USED_FOR_ASSERTS_ONLY;
-
- /* set up document with empty root node to be the context node */
+ /* set up document that xmlParseBalancedChunkMemory will add to */
doc = xmlNewDoc(version);
if (doc == NULL || xmlerrcxt->err_occurred)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
@@ -1872,43 +1916,22 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
"could not allocate XML document");
doc->standalone = standalone;
- root = xmlNewNode(NULL, (const xmlChar *) "content-root");
- if (root == NULL || xmlerrcxt->err_occurred)
- xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
- "could not allocate xml node");
-
- /*
- * This attaches root to doc, so we need not free it separately;
- * and there can't yet be any old root to free.
- */
- oldroot = xmlDocSetRootElement(doc, root);
- Assert(oldroot == NULL);
+ /* set parse options --- have to do this the ugly way */
+ save_keep_blanks = xmlKeepBlanksDefault(preserve_whitespace ? 1 : 0);
/* allow empty content */
if (*(utf8string + count))
{
- xmlNodePtr node_list = NULL;
- xmlParserErrors res;
-
- res = xmlParseInNodeContext(root,
- (char *) utf8string + count,
- strlen((char *) utf8string + count),
- options,
- &node_list);
-
- if (res != XML_ERR_OK || xmlerrcxt->err_occurred)
+ res_code = xmlParseBalancedChunkMemory(doc, NULL, NULL, 0,
+ utf8string + count,
+ parsed_nodes);
+ if (res_code != 0 || xmlerrcxt->err_occurred)
{
- xmlFreeNodeList(node_list);
xml_errsave(escontext, xmlerrcxt,
ERRCODE_INVALID_XML_CONTENT,
"invalid XML content");
goto fail;
}
-
- if (parsed_nodes != NULL)
- *parsed_nodes = node_list;
- else
- xmlFreeNodeList(node_list);
}
}
@@ -1917,6 +1940,8 @@ fail:
}
PG_CATCH();
{
+ if (save_keep_blanks != -1)
+ xmlKeepBlanksDefault(save_keep_blanks);
if (doc != NULL)
xmlFreeDoc(doc);
if (ctxt != NULL)
@@ -1928,6 +1953,9 @@ fail:
}
PG_END_TRY();
+ if (save_keep_blanks != -1)
+ xmlKeepBlanksDefault(save_keep_blanks);
+
if (ctxt != NULL)
xmlFreeParserCtxt(ctxt);
@@ -4220,20 +4248,27 @@ xml_xmlnodetoxmltype(xmlNodePtr cur, PgXmlErrorContext *xmlerrcxt)
}
else
{
- xmlChar *str;
+ volatile xmlChar *str = NULL;
- str = xmlXPathCastNodeToString(cur);
PG_TRY();
{
+ char *escaped;
+
+ str = xmlXPathCastNodeToString(cur);
+ if (str == NULL || xmlerrcxt->err_occurred)
+ xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
+ "could not allocate xmlChar");
+
/* Here we rely on XML having the same representation as TEXT */
- char *escaped = escape_xml((char *) str);
+ escaped = escape_xml((char *) str);
result = (xmltype *) cstring_to_text(escaped);
pfree(escaped);
}
PG_FINALLY();
{
- xmlFree(str);
+ if (str)
+ xmlFree((xmlChar *) str);
}
PG_END_TRY();
}
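All of the xml.c changes above follow one shape: every libxml2 resource acquired inside PG_TRY() lives in a pointer declared volatile (so its value is well defined after a longjmp), each libxml2 call is checked together with xmlerrcxt->err_occurred, and PG_CATCH() releases whatever was acquired before re-throwing. A stripped-down sketch of that shape, with do_work() standing in for the real processing:

    extern void do_work(xmlBufferPtr buf);  /* stand-in for the real work */

    static void
    with_xml_buffer(void)
    {
        volatile xmlBufferPtr buf = NULL;
        PgXmlErrorContext *volatile xmlerrcxt = NULL;

        PG_TRY();
        {
            xmlerrcxt = pg_xml_init(PG_XML_STRICTNESS_ALL);

            buf = xmlBufferCreate();
            if (buf == NULL || xmlerrcxt->err_occurred)
                xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
                            "could not allocate xmlBuffer");

            do_work(buf);       /* any step here may ereport(ERROR) */
        }
        PG_CATCH();
        {
            if (buf)
                xmlBufferFree(buf);
            if (xmlerrcxt)
                pg_xml_done(xmlerrcxt, true);
            PG_RE_THROW();
        }
        PG_END_TRY();

        xmlBufferFree(buf);
        pg_xml_done(xmlerrcxt, false);
    }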
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 657648996c2..e2cd3feaf81 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -213,7 +213,7 @@ namehashfast(Datum datum)
{
char *key = NameStr(*DatumGetName(datum));
- return hash_any((unsigned char *) key, strlen(key));
+ return hash_bytes((unsigned char *) key, strlen(key));
}
static bool
@@ -317,6 +317,7 @@ GetCCHashEqFuncs(Oid keytype, CCHashFN *hashfunc, RegProcedure *eqfunc, CCFastEq
case REGDICTIONARYOID:
case REGROLEOID:
case REGNAMESPACEOID:
+ case REGDATABASEOID:
*hashfunc = int4hashfast;
*fasteqfunc = int4eqfast;
*eqfunc = F_OIDEQ;
diff --git a/src/backend/utils/cache/evtcache.c b/src/backend/utils/cache/evtcache.c
index ce596bf5638..b9d5a5998be 100644
--- a/src/backend/utils/cache/evtcache.c
+++ b/src/backend/utils/cache/evtcache.c
@@ -78,7 +78,6 @@ BuildEventTriggerCache(void)
{
HASHCTL ctl;
HTAB *cache;
- MemoryContext oldcontext;
Relation rel;
Relation irel;
SysScanDesc scan;
@@ -110,9 +109,6 @@ BuildEventTriggerCache(void)
(Datum) 0);
}
- /* Switch to correct memory context. */
- oldcontext = MemoryContextSwitchTo(EventTriggerCacheContext);
-
/* Prevent the memory context from being nuked while we're rebuilding. */
EventTriggerCacheState = ETCS_REBUILD_STARTED;
@@ -145,6 +141,7 @@ BuildEventTriggerCache(void)
bool evttags_isnull;
EventTriggerCacheEntry *entry;
bool found;
+ MemoryContext oldcontext;
/* Get next tuple. */
tup = systable_getnext_ordered(scan, ForwardScanDirection);
@@ -171,6 +168,9 @@ BuildEventTriggerCache(void)
else
continue;
+ /* Switch to correct memory context. */
+ oldcontext = MemoryContextSwitchTo(EventTriggerCacheContext);
+
/* Allocate new cache item. */
item = palloc0(sizeof(EventTriggerCacheItem));
item->fnoid = form->evtfoid;
@@ -188,6 +188,9 @@ BuildEventTriggerCache(void)
entry->triggerlist = lappend(entry->triggerlist, item);
else
entry->triggerlist = list_make1(item);
+
+ /* Restore previous memory context. */
+ MemoryContextSwitchTo(oldcontext);
}
/* Done with pg_event_trigger scan. */
@@ -195,9 +198,6 @@ BuildEventTriggerCache(void)
index_close(irel, AccessShareLock);
relation_close(rel, AccessShareLock);
- /* Restore previous memory context. */
- MemoryContextSwitchTo(oldcontext);
-
/* Install new cache. */
EventTriggerCache = cache;
@@ -240,6 +240,8 @@ DecodeTextArrayToBitmapset(Datum array)
}
pfree(elems);
+ if ((Pointer) arr != DatumGetPointer(array))
+ pfree(arr);
return bms;
}
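The added pfree() applies the standard detoasting rule: the detoast macros return the original pointer when the input needed no detoasting, so the working copy is freed only when the pointers differ. The general shape, with decode_elements() as a hypothetical decoder:

    extern Bitmapset *decode_elements(ArrayType *arr);  /* hypothetical */

    static Bitmapset *
    decode_and_free(Datum array)
    {
        ArrayType  *arr = DatumGetArrayTypeP(array);    /* may detoast a copy */
        Bitmapset  *result = decode_elements(arr);

        /* Free the working copy only if detoasting actually made one */
        if ((Pointer) arr != DatumGetPointer(array))
            pfree(arr);
        return result;
    }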
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 89a1c79e984..6661d2c6b73 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -463,8 +463,7 @@ CompleteCachedPlan(CachedPlanSource *plansource,
/*
* Save the final parameter types (or other parameter specification data)
- * into the source_context, as well as our other parameters. Also save
- * the result tuple descriptor.
+ * into the source_context, as well as our other parameters.
*/
MemoryContextSwitchTo(source_context);
@@ -480,9 +479,25 @@ CompleteCachedPlan(CachedPlanSource *plansource,
plansource->parserSetupArg = parserSetupArg;
plansource->cursor_options = cursor_options;
plansource->fixed_result = fixed_result;
- plansource->resultDesc = PlanCacheComputeResultDesc(querytree_list);
+ /*
+ * Also save the result tuple descriptor. PlanCacheComputeResultDesc may
+ * leak some cruft; normally we just accept that to save a copy step, but
+ * in USE_VALGRIND mode be tidy by running it in the caller's context.
+ */
+#ifdef USE_VALGRIND
+ MemoryContextSwitchTo(oldcxt);
+ plansource->resultDesc = PlanCacheComputeResultDesc(querytree_list);
+ if (plansource->resultDesc)
+ {
+ MemoryContextSwitchTo(source_context);
+ plansource->resultDesc = CreateTupleDescCopy(plansource->resultDesc);
+ MemoryContextSwitchTo(oldcxt);
+ }
+#else
+ plansource->resultDesc = PlanCacheComputeResultDesc(querytree_list);
MemoryContextSwitchTo(oldcxt);
+#endif
plansource->is_complete = true;
plansource->is_valid = true;
@@ -1283,6 +1298,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
CachedPlan *plan = NULL;
List *qlist;
bool customplan;
+ ListCell *lc;
/* Assert caller is doing things in a sane order */
Assert(plansource->magic == CACHEDPLANSOURCE_MAGIC);
@@ -1385,6 +1401,13 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
plan->is_saved = true;
}
+ foreach(lc, plan->stmt_list)
+ {
+ PlannedStmt *pstmt = (PlannedStmt *) lfirst(lc);
+
+ pstmt->planOrigin = customplan ? PLAN_STMT_CACHE_CUSTOM : PLAN_STMT_CACHE_GENERIC;
+ }
+
return plan;
}
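With each returned statement now stamped as generic or custom, downstream code can report which kind of cached plan it is executing. A hypothetical consumer of the new field (not part of this patch):

    /* Hypothetical: label a statement by its plan-cache origin */
    static const char *
    plan_origin_label(const PlannedStmt *pstmt)
    {
        switch (pstmt->planOrigin)
        {
            case PLAN_STMT_CACHE_GENERIC:
                return "generic plan";
            case PLAN_STMT_CACHE_CUSTOM:
                return "custom plan";
            default:
                return "not from plan cache";
        }
    }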
diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c
index 18cccd778fd..e8ae53238d0 100644
--- a/src/backend/utils/cache/ts_cache.c
+++ b/src/backend/utils/cache/ts_cache.c
@@ -321,7 +321,9 @@ lookup_ts_dictionary_cache(Oid dictId)
/*
* Init method runs in dictionary's private memory context, and we
- * make sure the options are stored there too
+ * make sure the options are stored there too. This typically
+ * results in a small amount of memory leakage, but it's not worth
+ * complicating the API for tmplinit functions to avoid it.
*/
oldcontext = MemoryContextSwitchTo(entry->dictCtx);
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index f9aec38a11f..6a347698edf 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -1171,9 +1171,6 @@ load_domaintype_info(TypeCacheEntry *typentry)
elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
NameStr(typTup->typname), NameStr(c->conname));
- /* Convert conbin to C string in caller context */
- constring = TextDatumGetCString(val);
-
/* Create the DomainConstraintCache object and context if needed */
if (dcc == NULL)
{
@@ -1189,9 +1186,8 @@ load_domaintype_info(TypeCacheEntry *typentry)
dcc->dccRefCount = 0;
}
- /* Create node trees in DomainConstraintCache's context */
- oldcxt = MemoryContextSwitchTo(dcc->dccContext);
-
+ /* Convert conbin to a node tree, still in caller's context */
+ constring = TextDatumGetCString(val);
check_expr = (Expr *) stringToNode(constring);
/*
@@ -1206,10 +1202,13 @@ load_domaintype_info(TypeCacheEntry *typentry)
*/
check_expr = expression_planner(check_expr);
+ /* Create only the minimally needed stuff in dccContext */
+ oldcxt = MemoryContextSwitchTo(dcc->dccContext);
+
r = makeNode(DomainConstraintState);
r->constrainttype = DOM_CONSTRAINT_CHECK;
r->name = pstrdup(NameStr(c->conname));
- r->check_expr = check_expr;
+ r->check_expr = copyObject(check_expr);
r->check_exprstate = NULL;
MemoryContextSwitchTo(oldcxt);
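The reordering keeps planner scratch allocations out of the long-lived constraint cache: the expression is read and planned in the caller's context, and only a self-contained copyObject() of the finished tree lands in dccContext. A sketch of that shape in isolation:

    /* Sketch: plan in a short-lived context, copy only the result out. */
    static Expr *
    plan_into_context(char *constring, MemoryContext longlived)
    {
        Expr       *check_expr = (Expr *) stringToNode(constring);
        MemoryContext oldcxt;
        Expr       *kept;

        /* planner cruft stays behind in the caller's context */
        check_expr = expression_planner(check_expr);

        oldcxt = MemoryContextSwitchTo(longlived);
        kept = copyObject(check_expr);  /* self-contained copy survives */
        MemoryContextSwitchTo(oldcxt);
        return kept;
    }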
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 47af743990f..afce1a8e1f0 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -1128,12 +1128,15 @@ set_backtrace(ErrorData *edata, int num_skip)
nframes = backtrace(buf, lengthof(buf));
strfrms = backtrace_symbols(buf, nframes);
- if (strfrms == NULL)
- return;
-
- for (int i = num_skip; i < nframes; i++)
- appendStringInfo(&errtrace, "\n%s", strfrms[i]);
- free(strfrms);
+ if (strfrms != NULL)
+ {
+ for (int i = num_skip; i < nframes; i++)
+ appendStringInfo(&errtrace, "\n%s", strfrms[i]);
+ free(strfrms);
+ }
+ else
+ appendStringInfoString(&errtrace,
+ "insufficient memory for backtrace generation");
}
#else
appendStringInfoString(&errtrace,
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index 603632581d0..4bb84ff7087 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -99,6 +99,14 @@ load_external_function(const char *filename, const char *funcname,
void *lib_handle;
void *retval;
+ /*
+ * If the value starts with "$libdir/", strip that. This is because many
+ * extensions have hardcoded '$libdir/foo' as their library name, which
+ * prevents using the path.
+ */
+ if (strncmp(filename, "$libdir/", 8) == 0)
+ filename += 8;
+
/* Expand the possibly-abbreviated filename to an exact path name */
fullname = expand_dynamic_library_name(filename);
@@ -456,14 +464,6 @@ expand_dynamic_library_name(const char *name)
Assert(name);
- /*
- * If the value starts with "$libdir/", strip that. This is because many
- * extensions have hardcoded '$libdir/foo' as their library name, which
- * prevents using the path.
- */
- if (strncmp(name, "$libdir/", 8) == 0)
- name += 8;
-
have_slash = (first_dir_separator(name) != NULL);
if (!have_slash)
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 1ad155d446e..81da03629f0 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -22,10 +22,11 @@
* lookup key's hash value as a partition number --- this will work because
* of the way calc_bucket() maps hash values to bucket numbers.
*
- * For hash tables in shared memory, the memory allocator function should
- * match malloc's semantics of returning NULL on failure. For hash tables
- * in local memory, we typically use palloc() which will throw error on
- * failure. The code in this file has to cope with both cases.
+ * The memory allocator function should match malloc's semantics of returning
+ * NULL on failure. (This is essential for hash tables in shared memory.
+ * For hash tables in local memory, we used to use palloc() which will throw
+ * error on failure; but we no longer do, so it's untested whether this
+ * module will still cope with that behavior.)
*
* dynahash.c provides support for these types of lookup keys:
*
@@ -98,6 +99,7 @@
#include "access/xact.h"
#include "common/hashfn.h"
+#include "lib/ilist.h"
#include "port/pg_bitutils.h"
#include "storage/shmem.h"
#include "storage/spin.h"
@@ -195,6 +197,7 @@ struct HASHHDR
long ssize; /* segment size --- must be power of 2 */
int sshift; /* segment shift = log2(ssize) */
int nelem_alloc; /* number of entries to allocate at once */
+ bool isfixed; /* if true, don't enlarge */
#ifdef HASH_STATISTICS
@@ -227,7 +230,6 @@ struct HTAB
MemoryContext hcxt; /* memory context if default allocator used */
char *tabname; /* table name (for error messages) */
bool isshared; /* true if table is in shared memory */
- bool isfixed; /* if true, don't enlarge */
/* freezing a shared table isn't allowed, so we can keep state here */
bool frozen; /* true = no more inserts allowed */
@@ -236,6 +238,16 @@ struct HTAB
Size keysize; /* hash key length in bytes */
long ssize; /* segment size --- must be power of 2 */
int sshift; /* segment shift = log2(ssize) */
+
+ /*
+ * In a USE_VALGRIND build, non-shared hashtables keep an slist chain of
+ * all the element blocks they have allocated. This pacifies Valgrind,
+ * which would otherwise often claim that the element blocks are "possibly
+ * lost" for lack of any non-interior pointers to their starts.
+ */
+#ifdef USE_VALGRIND
+ slist_head element_blocks;
+#endif
};
/*
@@ -618,8 +630,10 @@ hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
}
}
+ /* Set isfixed if requested, but not till after we build initial entries */
if (flags & HASH_FIXED_SIZE)
- hashp->isfixed = true;
+ hctl->isfixed = true;
+
return hashp;
}
@@ -644,6 +658,8 @@ hdefault(HTAB *hashp)
hctl->ssize = DEF_SEGSIZE;
hctl->sshift = DEF_SEGSIZE_SHIFT;
+ hctl->isfixed = false; /* can be enlarged */
+
#ifdef HASH_STATISTICS
hctl->accesses = hctl->collisions = 0;
#endif
@@ -1708,23 +1724,51 @@ element_alloc(HTAB *hashp, int nelem, int freelist_idx)
{
HASHHDR *hctl = hashp->hctl;
Size elementSize;
+ Size requestSize;
+ char *allocedBlock;
HASHELEMENT *firstElement;
HASHELEMENT *tmpElement;
HASHELEMENT *prevElement;
int i;
- if (hashp->isfixed)
+ if (hctl->isfixed)
return false;
/* Each element has a HASHELEMENT header plus user data. */
elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(hctl->entrysize);
+ requestSize = nelem * elementSize;
+
+ /* Add space for slist_node list link if we need one. */
+#ifdef USE_VALGRIND
+ if (!hashp->isshared)
+ requestSize += MAXALIGN(sizeof(slist_node));
+#endif
+
+ /* Allocate the memory. */
CurrentDynaHashCxt = hashp->hcxt;
- firstElement = (HASHELEMENT *) hashp->alloc(nelem * elementSize);
- if (!firstElement)
+ allocedBlock = hashp->alloc(requestSize);
+ if (!allocedBlock)
return false;
+ /*
+ * If USE_VALGRIND, each allocated block of elements of a non-shared
+ * hashtable is chained into a list, so that Valgrind won't think it's
+ * been leaked.
+ */
+#ifdef USE_VALGRIND
+ if (hashp->isshared)
+ firstElement = (HASHELEMENT *) allocedBlock;
+ else
+ {
+ slist_push_head(&hashp->element_blocks, (slist_node *) allocedBlock);
+ firstElement = (HASHELEMENT *) (allocedBlock + MAXALIGN(sizeof(slist_node)));
+ }
+#else
+ firstElement = (HASHELEMENT *) allocedBlock;
+#endif
+
/* prepare to link all the new entries into the freelist */
prevElement = NULL;
tmpElement = firstElement;
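With isfixed moved into the shared HASHHDR and set only after the initial entries are built, HASH_FIXED_SIZE is now enforced consistently for shared tables in every attached backend. A sketch of creating such a table with the existing dynahash API (the table name, sizes, and MyEntry type are illustrative):

    #include "postgres.h"
    #include "utils/hsearch.h"

    HASHCTL     ctl;
    HTAB       *htab;

    ctl.keysize = sizeof(uint32);
    ctl.entrysize = sizeof(MyEntry);    /* MyEntry is hypothetical */

    /*
     * With HASH_FIXED_SIZE, element_alloc() refuses to enlarge the table
     * once the initial allocation exists, via the new hctl->isfixed flag.
     */
    htab = hash_create("my fixed table", 1024, &ctl,
                       HASH_ELEM | HASH_BLOBS | HASH_FIXED_SIZE);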
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index 43b4dbccc3d..65d8cbfaed5 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -1183,7 +1183,6 @@ UnlinkLockFiles(int status, Datum arg)
/* Should we complain if the unlink fails? */
}
/* Since we're about to exit, no need to reclaim storage */
- lock_files = NIL;
/*
* Lock file removal should always be the last externally visible action
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index c86ceefda94..641e535a73c 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -417,12 +417,11 @@ CheckMyDatabase(const char *name, bool am_superuser, bool override_allow_connect
datum = SysCacheGetAttrNotNull(DATABASEOID, tup, Anum_pg_database_datctype);
ctype = TextDatumGetCString(datum);
- if (pg_perm_setlocale(LC_COLLATE, collate) == NULL)
- ereport(FATAL,
- (errmsg("database locale is incompatible with operating system"),
- errdetail("The database was initialized with LC_COLLATE \"%s\", "
- " which is not recognized by setlocale().", collate),
- errhint("Recreate the database with another locale or install the missing locale.")));
+ /*
+ * Historically, we set LC_COLLATE from datcollate, as well. That's no
+ * longer necessary because all collation behavior is handled through
+ * pg_locale_t.
+ */
if (pg_perm_setlocale(LC_CTYPE, ctype) == NULL)
ereport(FATAL,
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 667df448732..e404c345e6e 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -249,6 +249,7 @@ static void reapply_stacked_values(struct config_generic *variable,
const char *curvalue,
GucContext curscontext, GucSource cursource,
Oid cursrole);
+static void free_placeholder(struct config_string *pHolder);
static bool validate_option_array_item(const char *name, const char *value,
bool skipIfNoPermissions);
static void write_auto_conf_file(int fd, const char *filename, ConfigVariable *head);
@@ -4722,8 +4723,13 @@ AlterSystemSetConfigFile(AlterSystemStmt *altersysstmt)
* the config file cannot cause postmaster start to fail, so we
* don't have to be too tense about possibly installing a bad
* value.)
+ *
+ * As an exception, we skip this check if this is a RESET command
+ * for an unknown custom GUC, else there'd be no way for users to
+ * remove such settings with reserved prefixes.
*/
- (void) assignable_custom_variable_name(name, false, ERROR);
+ if (value || !valid_custom_variable_name(name))
+ (void) assignable_custom_variable_name(name, false, ERROR);
}
/*
@@ -5018,16 +5024,8 @@ define_custom_variable(struct config_generic *variable)
set_config_sourcefile(name, pHolder->gen.sourcefile,
pHolder->gen.sourceline);
- /*
- * Free up as much as we conveniently can of the placeholder structure.
- * (This neglects any stack items, so it's possible for some memory to be
- * leaked. Since this can only happen once per session per variable, it
- * doesn't seem worth spending much code on.)
- */
- set_string_field(pHolder, pHolder->variable, NULL);
- set_string_field(pHolder, &pHolder->reset_val, NULL);
-
- guc_free(pHolder);
+ /* Now we can free the no-longer-referenced placeholder variable */
+ free_placeholder(pHolder);
}
/*
@@ -5127,6 +5125,25 @@ reapply_stacked_values(struct config_generic *variable,
}
/*
+ * Free up a no-longer-referenced placeholder GUC variable.
+ *
+ * This neglects any stack items, so it's possible for some memory to be
+ * leaked. Since this can only happen once per session per variable, it
+ * doesn't seem worth spending much code on.
+ */
+static void
+free_placeholder(struct config_string *pHolder)
+{
+ /* Placeholders are always STRING type, so free their values */
+ Assert(pHolder->gen.vartype == PGC_STRING);
+ set_string_field(pHolder, pHolder->variable, NULL);
+ set_string_field(pHolder, &pHolder->reset_val, NULL);
+
+ guc_free(unconstify(char *, pHolder->gen.name));
+ guc_free(pHolder);
+}
+
+/*
* Functions for extensions to call to define their custom GUC variables.
*/
void
@@ -5286,9 +5303,7 @@ MarkGUCPrefixReserved(const char *className)
/*
* Check for existing placeholders. We must actually remove invalid
- * placeholders, else future parallel worker startups will fail. (We
- * don't bother trying to free associated memory, since this shouldn't
- * happen often.)
+ * placeholders, else future parallel worker startups will fail.
*/
hash_seq_init(&status, guc_hashtab);
while ((hentry = (GUCHashEntry *) hash_seq_search(&status)) != NULL)
@@ -5312,6 +5327,8 @@ MarkGUCPrefixReserved(const char *className)
NULL);
/* Remove it from any lists it's in, too */
RemoveGUCFromLists(var);
+ /* And free it */
+ free_placeholder((struct config_string *) var);
}
}
@@ -6711,6 +6728,7 @@ validate_option_array_item(const char *name, const char *value,
{
struct config_generic *gconf;
+ bool reset_custom;
/*
* There are three cases to consider:
@@ -6729,16 +6747,21 @@ validate_option_array_item(const char *name, const char *value,
* it's assumed to be fully validated.)
*
* name is not known and can't be created as a placeholder. Throw error,
- * unless skipIfNoPermissions is true, in which case return false.
+ * unless skipIfNoPermissions or reset_custom is true. If reset_custom is
+ * true, this is a RESET or RESET ALL operation for an unknown custom GUC
+ * with a reserved prefix, in which case we want to fall through to the
+ * placeholder case described in the preceding paragraph (else there'd be
+ * no way for users to remove them). Otherwise, return false.
*/
- gconf = find_option(name, true, skipIfNoPermissions, ERROR);
- if (!gconf)
+ reset_custom = (!value && valid_custom_variable_name(name));
+ gconf = find_option(name, true, skipIfNoPermissions || reset_custom, ERROR);
+ if (!gconf && !reset_custom)
{
/* not known, failed to make a placeholder */
return false;
}
- if (gconf->flags & GUC_CUSTOM_PLACEHOLDER)
+ if (!gconf || gconf->flags & GUC_CUSTOM_PLACEHOLDER)
{
/*
* We cannot do any meaningful check on the value, so only permissions
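Both guc.c hunks implement the same decision: validation is skipped only when the operation is a RESET (value is NULL) and the name at least looks like a custom GUC, so that leftovers under reserved prefixes can still be removed. A minimal sketch of that predicate, with stand-ins for guc.c internals:

    #include <stdbool.h>

    /*
     * Stand-in for guc.c's valid_custom_variable_name(): a custom GUC
     * name must contain a dot separator (the real check is stricter).
     */
    static bool
    looks_like_custom_name(const char *name)
    {
        for (const char *p = name; *p; p++)
            if (*p == '.')
                return true;
        return false;
    }

    /*
     * SET (value != NULL) always validates; RESET of a plausible custom
     * name is allowed through unvalidated.
     */
    static bool
    must_validate(const char *name, const char *value)
    {
        return value != NULL || !looks_like_custom_name(name);
    }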
diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c
index 2f8cbd86759..d14b1678e7f 100644
--- a/src/backend/utils/misc/guc_tables.c
+++ b/src/backend/utils/misc/guc_tables.c
@@ -1028,7 +1028,7 @@ struct config_bool ConfigureNamesBool[] =
},
{
{"enable_distinct_reordering", PGC_USERSET, QUERY_TUNING_METHOD,
- gettext_noop("Enables reordering of DISTINCT pathkeys."),
+ gettext_noop("Enables reordering of DISTINCT keys."),
NULL,
GUC_EXPLAIN
},
@@ -1602,11 +1602,11 @@ struct config_bool ConfigureNamesBool[] =
NULL, NULL, NULL
},
{
- {"log_lock_failure", PGC_SUSET, LOGGING_WHAT,
+ {"log_lock_failures", PGC_SUSET, LOGGING_WHAT,
gettext_noop("Logs lock failures."),
NULL
},
- &log_lock_failure,
+ &log_lock_failures,
false,
NULL, NULL, NULL
},
@@ -3081,7 +3081,7 @@ struct config_int ConfigureNamesInt[] =
},
&max_slot_wal_keep_size_mb,
-1, -1, MAX_KILOBYTES,
- check_max_slot_wal_keep_size, NULL, NULL
+ NULL, NULL, NULL
},
{
@@ -3100,11 +3100,11 @@ struct config_int ConfigureNamesInt[] =
gettext_noop("Sets the duration a replication slot can remain idle before "
"it is invalidated."),
NULL,
- GUC_UNIT_MIN
+ GUC_UNIT_S
},
- &idle_replication_slot_timeout_mins,
- 0, 0, INT_MAX / SECS_PER_MINUTE,
- check_idle_replication_slot_timeout, NULL, NULL
+ &idle_replication_slot_timeout_secs,
+ 0, 0, INT_MAX,
+ NULL, NULL, NULL
},
{
@@ -4837,7 +4837,7 @@ struct config_string ConfigureNamesString[] =
{
{"ssl_groups", PGC_SIGHUP, CONN_AUTH_SSL,
gettext_noop("Sets the group(s) to use for Diffie-Hellman key exchange."),
- gettext_noop("Multiple groups can be specified using colon-separated list."),
+ gettext_noop("Multiple groups can be specified using a colon-separated list."),
GUC_SUPERUSER_ONLY
},
&SSLECDHCurve,
diff --git a/src/backend/utils/misc/injection_point.c b/src/backend/utils/misc/injection_point.c
index f58ebc8ee52..83b887b6978 100644
--- a/src/backend/utils/misc/injection_point.c
+++ b/src/backend/utils/misc/injection_point.c
@@ -584,3 +584,49 @@ IsInjectionPointAttached(const char *name)
return false; /* silence compiler */
#endif
}
+
+/*
+ * Retrieve a list of all the injection points currently attached.
+ *
+ * This list is palloc'd in the current memory context.
+ */
+List *
+InjectionPointList(void)
+{
+#ifdef USE_INJECTION_POINTS
+ List *inj_points = NIL;
+ uint32 max_inuse;
+
+ LWLockAcquire(InjectionPointLock, LW_SHARED);
+
+ max_inuse = pg_atomic_read_u32(&ActiveInjectionPoints->max_inuse);
+
+ for (uint32 idx = 0; idx < max_inuse; idx++)
+ {
+ InjectionPointEntry *entry;
+ InjectionPointData *inj_point;
+ uint64 generation;
+
+ entry = &ActiveInjectionPoints->entries[idx];
+ generation = pg_atomic_read_u64(&entry->generation);
+
+ /* skip free slots */
+ if (generation % 2 == 0)
+ continue;
+
+ inj_point = (InjectionPointData *) palloc0(sizeof(InjectionPointData));
+ inj_point->name = pstrdup(entry->name);
+ inj_point->library = pstrdup(entry->library);
+ inj_point->function = pstrdup(entry->function);
+ inj_points = lappend(inj_points, inj_point);
+ }
+
+ LWLockRelease(InjectionPointLock);
+
+ return inj_points;
+
+#else
+ elog(ERROR, "Injection points are not supported by this build");
+ return NIL; /* keep compiler quiet */
+#endif
+}
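InjectionPointList() depends on the slot-liveness convention used by the shared entry array: the generation counter is bumped on both attach and detach, so an odd value marks a slot as in use. The parity test in isolation, assuming that convention:

    #include <stdbool.h>
    #include <stdint.h>

    /* odd generation == slot occupied, even == free */
    static inline bool
    slot_in_use(uint64_t generation)
    {
        return (generation % 2) == 1;
    }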
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index 87ce76b18f4..a9d8293474a 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -342,7 +342,7 @@
# (change requires restart)
#wal_keep_size = 0 # in megabytes; 0 disables
#max_slot_wal_keep_size = -1 # in megabytes; -1 disables
-#idle_replication_slot_timeout = 0 # in minutes; 0 disables
+#idle_replication_slot_timeout = 0 # in seconds; 0 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)
@@ -624,7 +624,7 @@
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
-#log_lock_failure = off # log lock failures
+#log_lock_failures = off # log lock failures
#log_recovery_conflict_waits = off # log standby recovery conflict waits
# >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c
index e08b26e8c14..4df25944deb 100644
--- a/src/backend/utils/misc/ps_status.c
+++ b/src/backend/utils/misc/ps_status.c
@@ -100,6 +100,17 @@ static void flush_ps_display(void);
static int save_argc;
static char **save_argv;
+/*
+ * Valgrind seems not to consider the global "environ" variable as a valid
+ * root pointer; so when we allocate a new environment array, it claims that
+ * data is leaked. To fix that, keep our own statically-allocated copy of the
+ * pointer. (Oddly, this doesn't seem to be a problem for "argv".)
+ */
+#if defined(PS_USE_CLOBBER_ARGV) && defined(USE_VALGRIND)
+extern char **ps_status_new_environ;
+char **ps_status_new_environ;
+#endif
+
/*
* Call this early in startup to save the original argc/argv values.
@@ -206,6 +217,11 @@ save_ps_display_args(int argc, char **argv)
}
new_environ[i] = NULL;
environ = new_environ;
+
+ /* See notes about Valgrind above. */
+#ifdef USE_VALGRIND
+ ps_status_new_environ = new_environ;
+#endif
}
/*
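The fix here is simply to keep one extra copy of the pointer in a location Valgrind scans as a root (a global variable), making the replacement environment array reachable. A standalone illustration of the same suppression (names are illustrative):

    #include <stdlib.h>
    #include <string.h>

    extern char **environ;

    /*
     * This global copy gives Valgrind a root pointer to the new array;
     * without it, the allocation can be reported as "possibly lost".
     */
    char **saved_new_environ;

    static void
    replace_environ(int nvars)
    {
        char **new_environ = calloc(nvars + 1, sizeof(char *));

        for (int i = 0; i < nvars && environ[i] != NULL; i++)
            new_environ[i] = strdup(environ[i]);

        environ = new_environ;
        saved_new_environ = new_environ;    /* see comment above */
    }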
diff --git a/src/backend/utils/mmgr/alignedalloc.c b/src/backend/utils/mmgr/alignedalloc.c
index 7eea695de62..b1be7426914 100644
--- a/src/backend/utils/mmgr/alignedalloc.c
+++ b/src/backend/utils/mmgr/alignedalloc.c
@@ -45,6 +45,15 @@ AlignedAllocFree(void *pointer)
GetMemoryChunkContext(unaligned)->name, chunk);
#endif
+ /*
+ * Create a dummy vchunk covering the start of the unaligned chunk, but
+ * not overlapping the aligned chunk. This will be freed while pfree'ing
+ * the unaligned chunk, keeping Valgrind happy. Then when we return to
+ * the outer pfree, that will clean up the vchunk for the aligned chunk.
+ */
+ VALGRIND_MEMPOOL_ALLOC(GetMemoryChunkContext(unaligned), unaligned,
+ (char *) pointer - (char *) unaligned);
+
/* Recursively pfree the unaligned chunk */
pfree(unaligned);
}
@@ -123,6 +132,15 @@ AlignedAllocRealloc(void *pointer, Size size, int flags)
VALGRIND_MAKE_MEM_DEFINED(pointer, old_size);
memcpy(newptr, pointer, Min(size, old_size));
+ /*
+ * Create a dummy vchunk covering the start of the old unaligned chunk,
+ * but not overlapping the aligned chunk. This will be freed while
+ * pfree'ing the old unaligned chunk, keeping Valgrind happy. Then when
+ * we return to repalloc, it will move the vchunk for the aligned chunk.
+ */
+ VALGRIND_MEMPOOL_ALLOC(ctx, unaligned,
+ (char *) pointer - (char *) unaligned);
+
pfree(unaligned);
return newptr;
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index 666ecd8f78d..9ef109ca586 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -103,6 +103,8 @@
#define ALLOC_BLOCKHDRSZ MAXALIGN(sizeof(AllocBlockData))
#define ALLOC_CHUNKHDRSZ sizeof(MemoryChunk)
+#define FIRST_BLOCKHDRSZ (MAXALIGN(sizeof(AllocSetContext)) + \
+ ALLOC_BLOCKHDRSZ)
typedef struct AllocBlockData *AllocBlock; /* forward reference */
@@ -458,6 +460,21 @@ AllocSetContextCreateInternal(MemoryContext parent,
* we'd leak the header/initial block if we ereport in this stretch.
*/
+ /* Create a vpool associated with the context */
+ VALGRIND_CREATE_MEMPOOL(set, 0, false);
+
+ /*
+ * Create a vchunk covering both the AllocSetContext struct and the keeper
+ * block's header. (Perhaps it would be more sensible for these to be two
+ * separate vchunks, but doing that seems to tickle bugs in some versions
+ * of Valgrind.) We must have these vchunks, and also a vchunk for each
+ * subsequently-added block header, so that Valgrind considers the
+ * pointers within them while checking for leaked memory. Note that
+ * Valgrind doesn't distinguish between these vchunks and those created by
+ * mcxt.c for the user-accessible-data chunks we allocate.
+ */
+ VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ);
+
/* Fill in the initial block's block header */
block = KeeperBlock(set);
block->aset = set;
@@ -585,6 +602,14 @@ AllocSetReset(MemoryContext context)
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(block, block->freeptr - ((char *) block));
#endif
+
+ /*
+ * We need to free the block header's vchunk explicitly, although
+ * the user-data vchunks within will go away in the TRIM below.
+ * Otherwise Valgrind complains about leaked allocations.
+ */
+ VALGRIND_MEMPOOL_FREE(set, block);
+
free(block);
}
block = next;
@@ -592,6 +617,14 @@ AllocSetReset(MemoryContext context)
Assert(context->mem_allocated == keepersize);
+ /*
+ * Instruct Valgrind to throw away all the vchunks associated with this
+ * context, except for the one covering the AllocSetContext and
+ * keeper-block header. This gets rid of the vchunks for whatever user
+ * data is getting discarded by the context reset.
+ */
+ VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ);
+
/* Reset block size allocation sequence, too */
set->nextBlockSize = set->initBlockSize;
}
@@ -648,6 +681,9 @@ AllocSetDelete(MemoryContext context)
freelist->first_free = (AllocSetContext *) oldset->header.nextchild;
freelist->num_free--;
+ /* Destroy the context's vpool --- see notes below */
+ VALGRIND_DESTROY_MEMPOOL(oldset);
+
/* All that remains is to free the header/initial block */
free(oldset);
}
@@ -675,13 +711,24 @@ AllocSetDelete(MemoryContext context)
#endif
if (!IsKeeperBlock(set, block))
+ {
+ /* As in AllocSetReset, free block-header vchunks explicitly */
+ VALGRIND_MEMPOOL_FREE(set, block);
free(block);
+ }
block = next;
}
Assert(context->mem_allocated == keepersize);
+ /*
+ * Destroy the vpool. We don't seem to need to explicitly free the
+ * initial block's header vchunk, nor any user-data vchunks that Valgrind
+ * still knows about; they'll all go away automatically.
+ */
+ VALGRIND_DESTROY_MEMPOOL(set);
+
/* Finally, free the context header, including the keeper block */
free(set);
}
@@ -716,6 +763,9 @@ AllocSetAllocLarge(MemoryContext context, Size size, int flags)
if (block == NULL)
return MemoryContextAllocationFailure(context, size, flags);
+ /* Make a vchunk covering the new block's header */
+ VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ);
+
context->mem_allocated += blksize;
block->aset = set;
@@ -922,6 +972,9 @@ AllocSetAllocFromNewBlock(MemoryContext context, Size size, int flags,
if (block == NULL)
return MemoryContextAllocationFailure(context, size, flags);
+ /* Make a vchunk covering the new block's header */
+ VALGRIND_MEMPOOL_ALLOC(set, block, ALLOC_BLOCKHDRSZ);
+
context->mem_allocated += blksize;
block->aset = set;
@@ -1104,6 +1157,10 @@ AllocSetFree(void *pointer)
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(block, block->freeptr - ((char *) block));
#endif
+
+ /* As in AllocSetReset, free block-header vchunks explicitly */
+ VALGRIND_MEMPOOL_FREE(set, block);
+
free(block);
}
else
@@ -1184,6 +1241,7 @@ AllocSetRealloc(void *pointer, Size size, int flags)
* realloc() to make the containing block bigger, or smaller, with
* minimum space wastage.
*/
+ AllocBlock newblock;
Size chksize;
Size blksize;
Size oldblksize;
@@ -1223,14 +1281,21 @@ AllocSetRealloc(void *pointer, Size size, int flags)
blksize = chksize + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
oldblksize = block->endptr - ((char *) block);
- block = (AllocBlock) realloc(block, blksize);
- if (block == NULL)
+ newblock = (AllocBlock) realloc(block, blksize);
+ if (newblock == NULL)
{
/* Disallow access to the chunk header. */
VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
return MemoryContextAllocationFailure(&set->header, size, flags);
}
+ /*
+ * Move the block-header vchunk explicitly. (mcxt.c will take care of
+ * moving the vchunk for the user data.)
+ */
+ VALGRIND_MEMPOOL_CHANGE(set, block, newblock, ALLOC_BLOCKHDRSZ);
+ block = newblock;
+
/* updated separately, not to underflow when (oldblksize > blksize) */
set->header.mem_allocated -= oldblksize;
set->header.mem_allocated += blksize;
@@ -1294,7 +1359,7 @@ AllocSetRealloc(void *pointer, Size size, int flags)
/* Ensure any padding bytes are marked NOACCESS. */
VALGRIND_MAKE_MEM_NOACCESS((char *) pointer + size, chksize - size);
- /* Disallow access to the chunk header . */
+ /* Disallow access to the chunk header. */
VALGRIND_MAKE_MEM_NOACCESS(chunk, ALLOC_CHUNKHDRSZ);
return pointer;
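Taken together, the aset.c hunks implement the vpool/vchunk scheme described in the mcxt.c header comment further down: one vpool per context, a vchunk per block header, plus the user-chunk vchunks that mcxt.c manages. A compilable toy showing the Memcheck client requests involved (layout and sizes are illustrative; the requests are no-ops when not running under Valgrind):

    #include <stdbool.h>
    #include <stdlib.h>
    #include <valgrind/memcheck.h>

    int
    main(void)
    {
        char   *block = malloc(8192);   /* plays the role of a keeper block */

        /* one vpool per context; the handle is just a distinctive address */
        VALGRIND_CREATE_MEMPOOL(block, 0, false);

        /* vchunk for the block "header", then one for a user chunk */
        VALGRIND_MEMPOOL_ALLOC(block, block, 64);
        VALGRIND_MEMPOOL_ALLOC(block, block + 64, 128);

        /* context reset: discard every vchunk except the header's */
        VALGRIND_MEMPOOL_TRIM(block, block, 64);

        /* context delete: remaining vchunks vanish with the vpool */
        VALGRIND_DESTROY_MEMPOOL(block);
        free(block);
        return 0;
    }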
diff --git a/src/backend/utils/mmgr/bump.c b/src/backend/utils/mmgr/bump.c
index f7a37d1b3e8..2805d55a2ec 100644
--- a/src/backend/utils/mmgr/bump.c
+++ b/src/backend/utils/mmgr/bump.c
@@ -45,7 +45,9 @@
#include "utils/memutils_memorychunk.h"
#include "utils/memutils_internal.h"
-#define Bump_BLOCKHDRSZ MAXALIGN(sizeof(BumpBlock))
+#define Bump_BLOCKHDRSZ MAXALIGN(sizeof(BumpBlock))
+#define FIRST_BLOCKHDRSZ (MAXALIGN(sizeof(BumpContext)) + \
+ Bump_BLOCKHDRSZ)
/* No chunk header unless built with MEMORY_CONTEXT_CHECKING */
#ifdef MEMORY_CONTEXT_CHECKING
@@ -189,6 +191,12 @@ BumpContextCreate(MemoryContext parent, const char *name, Size minContextSize,
* Avoid writing code that can fail between here and MemoryContextCreate;
* we'd leak the header and initial block if we ereport in this stretch.
*/
+
+ /* See comments about Valgrind interactions in aset.c */
+ VALGRIND_CREATE_MEMPOOL(set, 0, false);
+ /* This vchunk covers the BumpContext and the keeper block header */
+ VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ);
+
dlist_init(&set->blocks);
/* Fill in the initial block's block header */
@@ -262,6 +270,14 @@ BumpReset(MemoryContext context)
BumpBlockFree(set, block);
}
+ /*
+ * Instruct Valgrind to throw away all the vchunks associated with this
+ * context, except for the one covering the BumpContext and keeper-block
+ * header. This gets rid of the vchunks for whatever user data is getting
+ * discarded by the context reset.
+ */
+ VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ);
+
/* Reset block size allocation sequence, too */
set->nextBlockSize = set->initBlockSize;
@@ -279,6 +295,10 @@ BumpDelete(MemoryContext context)
{
/* Reset to release all releasable BumpBlocks */
BumpReset(context);
+
+ /* Destroy the vpool -- see notes in aset.c */
+ VALGRIND_DESTROY_MEMPOOL(context);
+
/* And free the context header and keeper block */
free(context);
}
@@ -318,6 +338,9 @@ BumpAllocLarge(MemoryContext context, Size size, int flags)
if (block == NULL)
return MemoryContextAllocationFailure(context, size, flags);
+ /* Make a vchunk covering the new block's header */
+ VALGRIND_MEMPOOL_ALLOC(set, block, Bump_BLOCKHDRSZ);
+
context->mem_allocated += blksize;
/* the block is completely full */
@@ -455,6 +478,9 @@ BumpAllocFromNewBlock(MemoryContext context, Size size, int flags,
if (block == NULL)
return MemoryContextAllocationFailure(context, size, flags);
+ /* Make a vchunk covering the new block's header */
+ VALGRIND_MEMPOOL_ALLOC(set, block, Bump_BLOCKHDRSZ);
+
context->mem_allocated += blksize;
/* initialize the new block */
@@ -606,6 +632,9 @@ BumpBlockFree(BumpContext *set, BumpBlock *block)
wipe_mem(block, ((char *) block->endptr - (char *) block));
#endif
+ /* As in aset.c, free block-header vchunks explicitly */
+ VALGRIND_MEMPOOL_FREE(set, block);
+
free(block);
}
diff --git a/src/backend/utils/mmgr/dsa.c b/src/backend/utils/mmgr/dsa.c
index 17d4f7a7a06..be43e9351c3 100644
--- a/src/backend/utils/mmgr/dsa.c
+++ b/src/backend/utils/mmgr/dsa.c
@@ -532,6 +532,21 @@ dsa_attach(dsa_handle handle)
}
/*
+ * Returns whether the area with the given handle was already attached by the
+ * current process. The area must have been created with dsa_create (not
+ * dsa_create_in_place).
+ */
+bool
+dsa_is_attached(dsa_handle handle)
+{
+ /*
+ * An area handle is really a DSM segment handle for the first segment, so
+ * we can just search for that.
+ */
+ return dsm_find_mapping(handle) != NULL;
+}
+
+/*
* Attach to an area that was created with dsa_create_in_place. The caller
* must somehow know the location in memory that was used when the area was
* created, though it may be mapped at a different virtual address in this
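Because dsa_attach() ultimately calls dsm_attach(), which raises an error if the same segment is attached twice in one process, dsa_is_attached() lets callers make attachment idempotent. A hedged usage sketch (the cached pointer and helper are hypothetical):

    static dsa_area *my_area = NULL;    /* hypothetical per-backend cache */

    static dsa_area *
    attach_area_once(dsa_handle handle)
    {
        /* a duplicate dsm_attach() would error out, so test first */
        if (!dsa_is_attached(handle))
            my_area = dsa_attach(handle);
        return my_area;
    }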
diff --git a/src/backend/utils/mmgr/generation.c b/src/backend/utils/mmgr/generation.c
index 18679ad4f1e..cfafc9bf082 100644
--- a/src/backend/utils/mmgr/generation.c
+++ b/src/backend/utils/mmgr/generation.c
@@ -45,6 +45,8 @@
#define Generation_BLOCKHDRSZ MAXALIGN(sizeof(GenerationBlock))
#define Generation_CHUNKHDRSZ sizeof(MemoryChunk)
+#define FIRST_BLOCKHDRSZ (MAXALIGN(sizeof(GenerationContext)) + \
+ Generation_BLOCKHDRSZ)
#define Generation_CHUNK_FRACTION 8
@@ -221,6 +223,12 @@ GenerationContextCreate(MemoryContext parent,
* Avoid writing code that can fail between here and MemoryContextCreate;
* we'd leak the header if we ereport in this stretch.
*/
+
+ /* See comments about Valgrind interactions in aset.c */
+ VALGRIND_CREATE_MEMPOOL(set, 0, false);
+ /* This vchunk covers the GenerationContext and the keeper block header */
+ VALGRIND_MEMPOOL_ALLOC(set, set, FIRST_BLOCKHDRSZ);
+
dlist_init(&set->blocks);
/* Fill in the initial block's block header */
@@ -309,6 +317,14 @@ GenerationReset(MemoryContext context)
GenerationBlockFree(set, block);
}
+ /*
+ * Instruct Valgrind to throw away all the vchunks associated with this
+ * context, except for the one covering the GenerationContext and
+ * keeper-block header. This gets rid of the vchunks for whatever user
+ * data is getting discarded by the context reset.
+ */
+ VALGRIND_MEMPOOL_TRIM(set, set, FIRST_BLOCKHDRSZ);
+
/* set it so new allocations to make use of the keeper block */
set->block = KeeperBlock(set);
@@ -329,6 +345,10 @@ GenerationDelete(MemoryContext context)
{
/* Reset to release all releasable GenerationBlocks */
GenerationReset(context);
+
+ /* Destroy the vpool -- see notes in aset.c */
+ VALGRIND_DESTROY_MEMPOOL(context);
+
/* And free the context header and keeper block */
free(context);
}
@@ -365,6 +385,9 @@ GenerationAllocLarge(MemoryContext context, Size size, int flags)
if (block == NULL)
return MemoryContextAllocationFailure(context, size, flags);
+ /* Make a vchunk covering the new block's header */
+ VALGRIND_MEMPOOL_ALLOC(set, block, Generation_BLOCKHDRSZ);
+
context->mem_allocated += blksize;
/* block with a single (used) chunk */
@@ -487,6 +510,9 @@ GenerationAllocFromNewBlock(MemoryContext context, Size size, int flags,
if (block == NULL)
return MemoryContextAllocationFailure(context, size, flags);
+ /* Make a vchunk covering the new block's header */
+ VALGRIND_MEMPOOL_ALLOC(set, block, Generation_BLOCKHDRSZ);
+
context->mem_allocated += blksize;
/* initialize the new block */
@@ -677,6 +703,9 @@ GenerationBlockFree(GenerationContext *set, GenerationBlock *block)
wipe_mem(block, block->blksize);
#endif
+ /* As in aset.c, free block-header vchunks explicitly */
+ VALGRIND_MEMPOOL_FREE(set, block);
+
free(block);
}
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index 15fa4d0a55e..47fd774c7d2 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -8,6 +8,23 @@
* context-type-specific operations via the function pointers in a
* context's MemoryContextMethods struct.
*
+ * A note about Valgrind support: when USE_VALGRIND is defined, we provide
+ * support for memory leak tracking at the allocation-unit level. Valgrind
+ * does leak detection by tracking allocated "chunks", which can be grouped
+ * into "pools". The "chunk" terminology is overloaded, since we use that
+ * word for our allocation units, and it's sometimes important to distinguish
+ * those from the Valgrind objects that describe them. To reduce confusion,
+ * let's use the terms "vchunk" and "vpool" for the Valgrind objects.
+ *
+ * We use a separate vpool for each memory context. The context-type-specific
+ * code is responsible for creating and deleting the vpools, and also for
+ * creating vchunks to cover its management data structures such as block
+ * headers. (There must be a vchunk that includes every pointer we want
+ * Valgrind to consider for leak-tracking purposes.) This module creates
+ * and deletes the vchunks that cover the caller-visible allocated chunks.
+ * However, the context-type-specific code must handle cleaning up those
+ * vchunks too during memory context reset operations.
+ *
*
* Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
@@ -418,8 +435,6 @@ MemoryContextResetOnly(MemoryContext context)
context->methods->reset(context);
context->isReset = true;
- VALGRIND_DESTROY_MEMPOOL(context);
- VALGRIND_CREATE_MEMPOOL(context, 0, false);
}
}
@@ -526,8 +541,6 @@ MemoryContextDeleteOnly(MemoryContext context)
context->ident = NULL;
context->methods->delete_context(context);
-
- VALGRIND_DESTROY_MEMPOOL(context);
}
/*
@@ -560,9 +573,7 @@ MemoryContextDeleteChildren(MemoryContext context)
* the specified context, since that means it will automatically be freed
* when no longer needed.
*
- * There is no API for deregistering a callback once registered. If you
- * want it to not do anything anymore, adjust the state pointed to by its
- * "arg" to indicate that.
+ * Note that callers can assume this cannot fail.
*/
void
MemoryContextRegisterResetCallback(MemoryContext context,
@@ -578,6 +589,41 @@ MemoryContextRegisterResetCallback(MemoryContext context,
}
/*
+ * MemoryContextUnregisterResetCallback
+ * Undo the effects of MemoryContextRegisterResetCallback.
+ *
+ * This can be used if a callback's effects are no longer required
+ * at some point before the context has been reset/deleted. It is the
+ * caller's responsibility to pfree the callback struct (if needed).
+ *
+ * An assertion failure occurs if the callback was not registered.
+ * We could alternatively define that case as a no-op, but that seems too
+ * likely to mask programming errors such as passing the wrong context.
+ */
+void
+MemoryContextUnregisterResetCallback(MemoryContext context,
+ MemoryContextCallback *cb)
+{
+ MemoryContextCallback *prev,
+ *cur;
+
+ Assert(MemoryContextIsValid(context));
+
+ for (prev = NULL, cur = context->reset_cbs; cur != NULL;
+ prev = cur, cur = cur->next)
+ {
+ if (cur != cb)
+ continue;
+ if (prev)
+ prev->next = cur->next;
+ else
+ context->reset_cbs = cur->next;
+ return;
+ }
+ Assert(false);
+}
+
+/*
* MemoryContextCallResetCallbacks
* Internal function to call all registered callbacks for context.
*/
@@ -1137,8 +1183,6 @@ MemoryContextCreate(MemoryContext node,
node->nextchild = NULL;
node->allowInCritSection = false;
}
-
- VALGRIND_CREATE_MEMPOOL(node, 0, false);
}
/*
@@ -1421,7 +1465,13 @@ MemoryContextAllocAligned(MemoryContext context,
void *unaligned;
void *aligned;
- /* wouldn't make much sense to waste that much space */
+ /*
+ * Restrict alignto to ensure that it can fit into the "value" field of
+ * the redirection MemoryChunk, and that the distance back to the start of
+ * the unaligned chunk will fit into the space available for that. This
+ * isn't a limitation in practice, since it wouldn't make much sense to
+ * waste that much space.
+ */
Assert(alignto < (128 * 1024 * 1024));
/* ensure alignto is a power of 2 */
@@ -1458,10 +1508,15 @@ MemoryContextAllocAligned(MemoryContext context,
alloc_size += 1;
#endif
- /* perform the actual allocation */
- unaligned = MemoryContextAllocExtended(context, alloc_size, flags);
+ /*
+ * Perform the actual allocation, but do not pass down MCXT_ALLOC_ZERO.
+ * This ensures that wasted bytes beyond the aligned chunk do not become
+ * DEFINED.
+ */
+ unaligned = MemoryContextAllocExtended(context, alloc_size,
+ flags & ~MCXT_ALLOC_ZERO);
- /* set the aligned pointer */
+ /* compute the aligned pointer */
aligned = (void *) TYPEALIGN(alignto, (char *) unaligned +
sizeof(MemoryChunk));
@@ -1489,12 +1544,23 @@ MemoryContextAllocAligned(MemoryContext context,
set_sentinel(aligned, size);
#endif
- /* Mark the bytes before the redirection header as noaccess */
- VALGRIND_MAKE_MEM_NOACCESS(unaligned,
- (char *) alignedchunk - (char *) unaligned);
+ /*
+ * MemoryContextAllocExtended marked the whole unaligned chunk as a
+ * vchunk. Undo that, instead making just the aligned chunk be a vchunk.
+ * This prevents Valgrind from complaining that the vchunk is possibly
+ * leaked, since only pointers to the aligned chunk will exist.
+ *
+ * After these calls, the aligned chunk will be marked UNDEFINED, and all
+ * the rest of the unaligned chunk (the redirection chunk header, the
+ * padding bytes before it, and any wasted trailing bytes) will be marked
+ * NOACCESS, which is what we want.
+ */
+ VALGRIND_MEMPOOL_FREE(context, unaligned);
+ VALGRIND_MEMPOOL_ALLOC(context, aligned, size);
- /* Disallow access to the redirection chunk header. */
- VALGRIND_MAKE_MEM_NOACCESS(alignedchunk, sizeof(MemoryChunk));
+ /* Now zero (and make DEFINED) just the aligned chunk, if requested */
+ if ((flags & MCXT_ALLOC_ZERO) != 0)
+ MemSetAligned(aligned, 0, size);
return aligned;
}
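The zeroing change is visible to callers such as palloc_aligned() with MCXT_ALLOC_ZERO: only the aligned, caller-visible bytes are zeroed and become DEFINED, while the surrounding padding stays NOACCESS. A one-line usage sketch (size and alignment illustrative):

    /* 8192 zeroed bytes aligned to 4096; padding around them stays NOACCESS */
    char *buf = palloc_aligned(8192, 4096, MCXT_ALLOC_ZERO);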
@@ -1528,16 +1594,12 @@ void
pfree(void *pointer)
{
#ifdef USE_VALGRIND
- MemoryContextMethodID method = GetMemoryChunkMethodID(pointer);
MemoryContext context = GetMemoryChunkContext(pointer);
#endif
MCXT_METHOD(pointer, free_p) (pointer);
-#ifdef USE_VALGRIND
- if (method != MCTX_ALIGNED_REDIRECT_ID)
- VALGRIND_MEMPOOL_FREE(context, pointer);
-#endif
+ VALGRIND_MEMPOOL_FREE(context, pointer);
}
/*
@@ -1547,9 +1609,6 @@ pfree(void *pointer)
void *
repalloc(void *pointer, Size size)
{
-#ifdef USE_VALGRIND
- MemoryContextMethodID method = GetMemoryChunkMethodID(pointer);
-#endif
#if defined(USE_ASSERT_CHECKING) || defined(USE_VALGRIND)
MemoryContext context = GetMemoryChunkContext(pointer);
#endif
@@ -1572,10 +1631,7 @@ repalloc(void *pointer, Size size)
*/
ret = MCXT_METHOD(pointer, realloc) (pointer, size, 0);
-#ifdef USE_VALGRIND
- if (method != MCTX_ALIGNED_REDIRECT_ID)
- VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size);
-#endif
+ VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size);
return ret;
}
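MemoryContextUnregisterResetCallback() pairs with the long-standing registration API; a caller that releases its resource early can now retract the callback rather than flagging it inert through "arg". A hedged usage sketch (the callback, state, and context names are hypothetical):

    /* hypothetical cleanup for a resource that can also be released early */
    static void
    release_my_resource(void *arg)
    {
        /* ... free the non-memory resource described by arg ... */
    }

    /* registration, in some setup path */
    MemoryContextCallback *cb =
        MemoryContextAlloc(ctx, sizeof(MemoryContextCallback));

    cb->func = release_my_resource;
    cb->arg = my_state;
    MemoryContextRegisterResetCallback(ctx, cb);

    /* later, after releasing the resource by hand: retract the callback
     * (an assertion fires if cb was never registered with ctx) */
    MemoryContextUnregisterResetCallback(ctx, cb);
    pfree(cb);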
diff --git a/src/backend/utils/mmgr/slab.c b/src/backend/utils/mmgr/slab.c
index d32c0d318fb..0e35abcf5a0 100644
--- a/src/backend/utils/mmgr/slab.c
+++ b/src/backend/utils/mmgr/slab.c
@@ -377,6 +377,11 @@ SlabContextCreate(MemoryContext parent,
* we'd leak the header if we ereport in this stretch.
*/
+ /* See comments about Valgrind interactions in aset.c */
+ VALGRIND_CREATE_MEMPOOL(slab, 0, false);
+ /* This vchunk covers the SlabContext only */
+ VALGRIND_MEMPOOL_ALLOC(slab, slab, sizeof(SlabContext));
+
/* Fill in SlabContext-specific header fields */
slab->chunkSize = (uint32) chunkSize;
slab->fullChunkSize = (uint32) fullChunkSize;
@@ -451,6 +456,10 @@ SlabReset(MemoryContext context)
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(block, slab->blockSize);
#endif
+
+ /* As in aset.c, free block-header vchunks explicitly */
+ VALGRIND_MEMPOOL_FREE(slab, block);
+
free(block);
context->mem_allocated -= slab->blockSize;
}
@@ -467,11 +476,23 @@ SlabReset(MemoryContext context)
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(block, slab->blockSize);
#endif
+
+ /* As in aset.c, free block-header vchunks explicitly */
+ VALGRIND_MEMPOOL_FREE(slab, block);
+
free(block);
context->mem_allocated -= slab->blockSize;
}
}
+ /*
+ * Instruct Valgrind to throw away all the vchunks associated with this
+ * context, except for the one covering the SlabContext. This gets rid of
+ * the vchunks for whatever user data is getting discarded by the context
+ * reset.
+ */
+ VALGRIND_MEMPOOL_TRIM(slab, slab, sizeof(SlabContext));
+
slab->curBlocklistIndex = 0;
Assert(context->mem_allocated == 0);
@@ -486,6 +507,10 @@ SlabDelete(MemoryContext context)
{
/* Reset to release all the SlabBlocks */
SlabReset(context);
+
+ /* Destroy the vpool -- see notes in aset.c */
+ VALGRIND_DESTROY_MEMPOOL(context);
+
/* And free the context header */
free(context);
}
@@ -567,6 +592,9 @@ SlabAllocFromNewBlock(MemoryContext context, Size size, int flags)
if (unlikely(block == NULL))
return MemoryContextAllocationFailure(context, size, flags);
+ /* Make a vchunk covering the new block's header */
+ VALGRIND_MEMPOOL_ALLOC(slab, block, Slab_BLOCKHDRSZ);
+
block->slab = slab;
context->mem_allocated += slab->blockSize;
@@ -795,6 +823,10 @@ SlabFree(void *pointer)
#ifdef CLOBBER_FREED_MEMORY
wipe_mem(block, slab->blockSize);
#endif
+
+ /* As in aset.c, free block-header vchunks explicitly */
+ VALGRIND_MEMPOOL_FREE(slab, block);
+
free(block);
slab->header.mem_allocated -= slab->blockSize;
}