-rw-r--r--  .git-blame-ignore-revs | 6
-rw-r--r--  config/c-compiler.m4 | 1
-rwxr-xr-x  configure | 34
-rw-r--r--  configure.ac | 5
-rw-r--r--  contrib/amcheck/expected/check_gin.out | 12
-rw-r--r--  contrib/amcheck/meson.build | 1
-rw-r--r--  contrib/amcheck/sql/check_gin.sql | 10
-rw-r--r--  contrib/amcheck/t/006_verify_gin.pl | 293
-rw-r--r--  contrib/amcheck/verify_gin.c | 79
-rw-r--r--  contrib/auto_explain/auto_explain.c | 16
-rw-r--r--  contrib/btree_gist/btree_gist--1.7--1.8.sql | 54
-rw-r--r--  contrib/btree_gist/btree_gist.c | 4
-rw-r--r--  contrib/btree_gist/expected/stratnum.out | 18
-rw-r--r--  contrib/btree_gist/sql/stratnum.sql | 6
-rw-r--r--  contrib/dblink/dblink.c | 78
-rw-r--r--  contrib/file_fdw/expected/file_fdw.out | 4
-rw-r--r--  contrib/file_fdw/sql/file_fdw.sql | 2
-rw-r--r--  contrib/pg_overexplain/expected/pg_overexplain.out | 4
-rw-r--r--  contrib/pg_prewarm/Makefile | 2
-rw-r--r--  contrib/pg_prewarm/autoprewarm.c | 11
-rw-r--r--  contrib/pg_prewarm/expected/pg_prewarm.out | 10
-rw-r--r--  contrib/pg_prewarm/meson.build | 5
-rw-r--r--  contrib/pg_prewarm/pg_prewarm.c | 8
-rw-r--r--  contrib/pg_prewarm/sql/pg_prewarm.sql | 10
-rw-r--r--  contrib/pg_stat_statements/expected/extended.out | 94
-rw-r--r--  contrib/pg_stat_statements/expected/level_tracking.out | 233
-rw-r--r--  contrib/pg_stat_statements/expected/planning.out | 10
-rw-r--r--  contrib/pg_stat_statements/expected/select.out | 61
-rw-r--r--  contrib/pg_stat_statements/expected/squashing.out | 543
-rw-r--r--  contrib/pg_stat_statements/expected/utility.out | 2
-rw-r--r--  contrib/pg_stat_statements/pg_stat_statements.c | 153
-rw-r--r--  contrib/pg_stat_statements/sql/extended.sql | 25
-rw-r--r--  contrib/pg_stat_statements/sql/level_tracking.sql | 26
-rw-r--r--  contrib/pg_stat_statements/sql/planning.sql | 4
-rw-r--r--  contrib/pg_stat_statements/sql/select.sql | 16
-rw-r--r--  contrib/pg_stat_statements/sql/squashing.sql | 187
-rw-r--r--  contrib/postgres_fdw/postgres_fdw.c | 62
-rw-r--r--  contrib/sepgsql/t/001_sepgsql.pl | 2
-rw-r--r--  contrib/test_decoding/expected/invalidation_distribution.out | 23
-rw-r--r--  contrib/test_decoding/specs/invalidation_distribution.spec | 11
-rw-r--r--  doc/src/sgml/catalogs.sgml | 8
-rw-r--r--  doc/src/sgml/config.sgml | 73
-rw-r--r--  doc/src/sgml/ddl.sgml | 15
-rw-r--r--  doc/src/sgml/docguide.sgml | 4
-rw-r--r--  doc/src/sgml/filelist.sgml | 2
-rw-r--r--  doc/src/sgml/func.sgml | 197
-rw-r--r--  doc/src/sgml/gist.sgml | 25
-rw-r--r--  doc/src/sgml/libpq.sgml | 38
-rw-r--r--  doc/src/sgml/logical-replication.sgml | 461
-rw-r--r--  doc/src/sgml/logicaldecoding.sgml | 61
-rw-r--r--  doc/src/sgml/pgstattuple.sgml | 9
-rw-r--r--  doc/src/sgml/plpython.sgml | 2
-rw-r--r--  doc/src/sgml/protocol.sgml | 14
-rw-r--r--  doc/src/sgml/ref/alter_database.sgml | 2
-rw-r--r--  doc/src/sgml/ref/alter_table.sgml | 16
-rw-r--r--  doc/src/sgml/ref/create_database.sgml | 2
-rw-r--r--  doc/src/sgml/ref/create_foreign_table.sgml | 2
-rw-r--r--  doc/src/sgml/ref/create_index.sgml | 2
-rw-r--r--  doc/src/sgml/ref/create_operator.sgml | 6
-rw-r--r--  doc/src/sgml/ref/create_table.sgml | 29
-rw-r--r--  doc/src/sgml/ref/merge.sgml | 39
-rw-r--r--  doc/src/sgml/ref/pg_createsubscriber.sgml | 59
-rw-r--r--  doc/src/sgml/ref/pg_dump.sgml | 31
-rw-r--r--  doc/src/sgml/ref/pg_dumpall.sgml | 30
-rw-r--r--  doc/src/sgml/ref/pg_recvlogical.sgml | 9
-rw-r--r--  doc/src/sgml/ref/pg_restore.sgml | 13
-rw-r--r--  doc/src/sgml/ref/psql-ref.sgml | 32
-rw-r--r--  doc/src/sgml/ref/reindex.sgml | 7
-rw-r--r--  doc/src/sgml/ref/security_label.sgml | 4
-rw-r--r--  doc/src/sgml/ref/update.sgml | 3
-rw-r--r--  doc/src/sgml/release-18.sgml | 3554
-rw-r--r--  doc/src/sgml/release-19.sgml | 16
-rw-r--r--  doc/src/sgml/release.sgml | 2
-rw-r--r--  doc/src/sgml/system-views.sgml | 2
-rw-r--r--  doc/src/sgml/textsearch.sgml | 2
-rw-r--r--  doc/src/sgml/trigger.sgml | 7
-rw-r--r--  doc/src/sgml/xindex.sgml | 2
-rw-r--r--  doc/src/sgml/xoper.sgml | 2
-rw-r--r--  meson.build | 19
-rw-r--r--  src/Makefile.shlib | 4
-rw-r--r--  src/backend/access/brin/brin.c | 2
-rw-r--r--  src/backend/access/common/reloptions.c | 17
-rw-r--r--  src/backend/access/common/tupdesc.c | 15
-rw-r--r--  src/backend/access/gist/gistutil.c | 14
-rw-r--r--  src/backend/access/gist/gistvalidate.c | 6
-rw-r--r--  src/backend/access/heap/heapam.c | 35
-rw-r--r--  src/backend/access/heap/heapam_handler.c | 2
-rw-r--r--  src/backend/access/heap/heapam_xlog.c | 7
-rw-r--r--  src/backend/access/heap/vacuumlazy.c | 129
-rw-r--r--  src/backend/access/nbtree/nbtree.c | 32
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c | 81
-rw-r--r--  src/backend/access/nbtree/nbtsort.c | 2
-rw-r--r--  src/backend/access/nbtree/nbtutils.c | 97
-rw-r--r--  src/backend/access/rmgrdesc/xactdesc.c | 2
-rw-r--r--  src/backend/access/transam/xlog.c | 4
-rw-r--r--  src/backend/catalog/heap.c | 96
-rw-r--r--  src/backend/catalog/index.c | 2
-rw-r--r--  src/backend/catalog/system_views.sql | 5
-rw-r--r--  src/backend/commands/analyze.c | 26
-rw-r--r--  src/backend/commands/cluster.c | 2
-rw-r--r--  src/backend/commands/copyto.c | 5
-rw-r--r--  src/backend/commands/createas.c | 5
-rw-r--r--  src/backend/commands/dbcommands.c | 41
-rw-r--r--  src/backend/commands/explain.c | 34
-rw-r--r--  src/backend/commands/extension.c | 4
-rw-r--r--  src/backend/commands/foreigncmds.c | 15
-rw-r--r--  src/backend/commands/indexcmds.c | 44
-rw-r--r--  src/backend/commands/matview.c | 8
-rw-r--r--  src/backend/commands/portalcmds.c | 1
-rw-r--r--  src/backend/commands/prepare.c | 9
-rw-r--r--  src/backend/commands/publicationcmds.c | 4
-rw-r--r--  src/backend/commands/subscriptioncmds.c | 6
-rw-r--r--  src/backend/commands/tablecmds.c | 109
-rw-r--r--  src/backend/commands/trigger.c | 15
-rw-r--r--  src/backend/commands/vacuum.c | 121
-rw-r--r--  src/backend/commands/vacuumparallel.c | 2
-rw-r--r--  src/backend/executor/README | 35
-rw-r--r--  src/backend/executor/execGrouping.c | 4
-rw-r--r--  src/backend/executor/execMain.c | 127
-rw-r--r--  src/backend/executor/execParallel.c | 12
-rw-r--r--  src/backend/executor/execPartition.c | 66
-rw-r--r--  src/backend/executor/execUtils.c | 1
-rw-r--r--  src/backend/executor/functions.c | 5
-rw-r--r--  src/backend/executor/nodeModifyTable.c | 140
-rw-r--r--  src/backend/executor/nodeTidrangescan.c | 6
-rw-r--r--  src/backend/executor/spi.c | 29
-rw-r--r--  src/backend/jit/README | 2
-rw-r--r--  src/backend/lib/README | 4
-rw-r--r--  src/backend/libpq/be-secure-gssapi.c | 67
-rw-r--r--  src/backend/libpq/be-secure-openssl.c | 4
-rw-r--r--  src/backend/nodes/gen_node_support.pl | 7
-rw-r--r--  src/backend/nodes/outfuncs.c | 8
-rw-r--r--  src/backend/nodes/queryjumblefuncs.c | 302
-rw-r--r--  src/backend/nodes/readfuncs.c | 8
-rw-r--r--  src/backend/optimizer/path/allpaths.c | 12
-rw-r--r--  src/backend/optimizer/path/joinpath.c | 9
-rw-r--r--  src/backend/optimizer/path/joinrels.c | 60
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 70
-rw-r--r--  src/backend/optimizer/plan/planner.c | 4
-rw-r--r--  src/backend/optimizer/plan/setrefs.c | 10
-rw-r--r--  src/backend/optimizer/util/paramassign.c | 109
-rw-r--r--  src/backend/optimizer/util/placeholder.c | 40
-rw-r--r--  src/backend/parser/analyze.c | 95
-rw-r--r--  src/backend/parser/gram.y | 184
-rw-r--r--  src/backend/parser/parse_expr.c | 4
-rw-r--r--  src/backend/parser/parse_utilcmd.c | 22
-rw-r--r--  src/backend/postmaster/autovacuum.c | 63
-rw-r--r--  src/backend/postmaster/checkpointer.c | 4
-rw-r--r--  src/backend/postmaster/interrupt.c | 4
-rw-r--r--  src/backend/postmaster/pgarch.c | 4
-rw-r--r--  src/backend/postmaster/startup.c | 4
-rw-r--r--  src/backend/postmaster/walsummarizer.c | 4
-rw-r--r--  src/backend/replication/logical/launcher.c | 44
-rw-r--r--  src/backend/replication/logical/logical.c | 41
-rw-r--r--  src/backend/replication/logical/reorderbuffer.c | 196
-rw-r--r--  src/backend/replication/logical/slotsync.c | 6
-rw-r--r--  src/backend/replication/logical/snapbuild.c | 12
-rw-r--r--  src/backend/replication/logical/tablesync.c | 19
-rw-r--r--  src/backend/replication/logical/worker.c | 31
-rw-r--r--  src/backend/replication/pgoutput/pgoutput.c | 2
-rw-r--r--  src/backend/replication/slot.c | 64
-rw-r--r--  src/backend/replication/walsender.c | 10
-rw-r--r--  src/backend/rewrite/rewriteHandler.c | 2
-rw-r--r--  src/backend/storage/aio/aio.c | 140
-rw-r--r--  src/backend/storage/aio/aio_callback.c | 7
-rw-r--r--  src/backend/storage/aio/aio_io.c | 4
-rw-r--r--  src/backend/storage/aio/method_io_uring.c | 8
-rw-r--r--  src/backend/storage/aio/method_worker.c | 7
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 2
-rw-r--r--  src/backend/storage/buffer/localbuf.c | 2
-rw-r--r--  src/backend/storage/ipc/ipci.c | 3
-rw-r--r--  src/backend/storage/ipc/procsignal.c | 3
-rw-r--r--  src/backend/storage/lmgr/lmgr.c | 6
-rw-r--r--  src/backend/storage/lmgr/lock.c | 2
-rw-r--r--  src/backend/storage/lmgr/lwlock.c | 2
-rw-r--r--  src/backend/storage/lmgr/proc.c | 1
-rw-r--r--  src/backend/tcop/backend_startup.c | 6
-rw-r--r--  src/backend/tcop/postgres.c | 17
-rw-r--r--  src/backend/tcop/pquery.c | 51
-rw-r--r--  src/backend/utils/activity/backend_status.c | 22
-rw-r--r--  src/backend/utils/activity/pgstat_shmem.c | 5
-rw-r--r--  src/backend/utils/activity/wait_event_names.txt | 2
-rw-r--r--  src/backend/utils/adt/datetime.c | 44
-rw-r--r--  src/backend/utils/adt/float.c | 4
-rw-r--r--  src/backend/utils/adt/formatting.c | 5
-rw-r--r--  src/backend/utils/adt/mcxtfuncs.c | 426
-rw-r--r--  src/backend/utils/adt/network.c | 2
-rw-r--r--  src/backend/utils/adt/network_spgist.c | 1
-rw-r--r--  src/backend/utils/adt/numeric.c | 7
-rw-r--r--  src/backend/utils/adt/pg_locale.c | 2
-rw-r--r--  src/backend/utils/adt/pg_locale_builtin.c | 1
-rw-r--r--  src/backend/utils/adt/pgstatfuncs.c | 6
-rw-r--r--  src/backend/utils/adt/regexp.c | 38
-rw-r--r--  src/backend/utils/adt/ri_triggers.c | 2
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 10
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 1
-rw-r--r--  src/backend/utils/adt/xml.c | 21
-rw-r--r--  src/backend/utils/cache/funccache.c | 38
-rw-r--r--  src/backend/utils/cache/plancache.c | 197
-rw-r--r--  src/backend/utils/cache/syscache.c | 2
-rw-r--r--  src/backend/utils/fmgr/dfmgr.c | 16
-rw-r--r--  src/backend/utils/init/globals.c | 1
-rw-r--r--  src/backend/utils/init/postinit.c | 7
-rw-r--r--  src/backend/utils/mb/mbutils.c | 1
-rw-r--r--  src/backend/utils/misc/guc_tables.c | 8
-rw-r--r--  src/backend/utils/misc/postgresql.conf.sample | 12
-rw-r--r--  src/backend/utils/mmgr/alignedalloc.c | 29
-rw-r--r--  src/backend/utils/mmgr/mcxt.c | 646
-rw-r--r--  src/backend/utils/mmgr/portalmem.c | 4
-rw-r--r--  src/bin/initdb/t/001_initdb.pl | 3
-rw-r--r--  src/bin/pg_basebackup/pg_createsubscriber.c | 42
-rw-r--r--  src/bin/pg_basebackup/pg_recvlogical.c | 20
-rw-r--r--  src/bin/pg_basebackup/t/030_pg_recvlogical.pl | 7
-rw-r--r--  src/bin/pg_basebackup/t/040_pg_createsubscriber.pl | 10
-rw-r--r--  src/bin/pg_combinebackup/t/010_hardlink.pl | 92
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.c | 4
-rw-r--r--  src/bin/pg_dump/pg_backup_directory.c | 11
-rw-r--r--  src/bin/pg_dump/pg_dump.c | 142
-rw-r--r--  src/bin/pg_dump/pg_dump.h | 1
-rw-r--r--  src/bin/pg_dump/pg_dumpall.c | 10
-rw-r--r--  src/bin/pg_dump/pg_restore.c | 49
-rw-r--r--  src/bin/pg_dump/t/001_basic.pl | 13
-rw-r--r--  src/bin/pg_dump/t/002_pg_dump.pl | 100
-rw-r--r--  src/bin/pg_dump/t/006_pg_dumpall.pl | 53
-rw-r--r--  src/bin/pg_rewind/t/RewindTest.pm | 2
-rw-r--r--  src/bin/pg_upgrade/check.c | 4
-rw-r--r--  src/bin/pg_upgrade/dump.c | 2
-rw-r--r--  src/bin/pg_upgrade/relfilenumber.c | 12
-rw-r--r--  src/bin/pg_upgrade/t/004_subscription.pl | 6
-rw-r--r--  src/bin/pg_upgrade/t/005_char_signedness.pl | 2
-rw-r--r--  src/bin/pg_upgrade/t/006_transfer_modes.pl | 28
-rw-r--r--  src/bin/pg_upgrade/task.c | 5
-rw-r--r--  src/bin/pgbench/t/002_pgbench_no_server.pl | 18
-rw-r--r--  src/bin/psql/command.c | 39
-rw-r--r--  src/bin/psql/common.c | 29
-rw-r--r--  src/bin/psql/describe.c | 9
-rw-r--r--  src/bin/psql/help.c | 14
-rw-r--r--  src/bin/psql/t/001_basic.pl | 38
-rw-r--r--  src/bin/psql/tab-complete.in.c | 19
-rw-r--r--  src/bin/psql/variables.c | 10
-rw-r--r--  src/bin/scripts/t/100_vacuumdb.pl | 79
-rw-r--r--  src/include/access/gin_tuple.h | 2
-rw-r--r--  src/include/access/gist.h | 2
-rw-r--r--  src/include/access/heapam.h | 6
-rw-r--r--  src/include/access/nbtree.h | 5
-rw-r--r--  src/include/access/tableam.h | 6
-rw-r--r--  src/include/catalog/catversion.h | 2
-rw-r--r--  src/include/catalog/pg_amproc.dat | 12
-rw-r--r--  src/include/catalog/pg_authid.dat | 2
-rw-r--r--  src/include/catalog/pg_collation.dat | 3
-rw-r--r--  src/include/catalog/pg_index.h | 2
-rw-r--r--  src/include/catalog/pg_proc.dat | 255
-rw-r--r--  src/include/commands/explain.h | 6
-rw-r--r--  src/include/commands/trigger.h | 1
-rw-r--r--  src/include/commands/vacuum.h | 6
-rw-r--r--  src/include/executor/execdesc.h | 2
-rw-r--r--  src/include/executor/executor.h | 34
-rw-r--r--  src/include/executor/nodeAgg.h | 2
-rw-r--r--  src/include/miscadmin.h | 1
-rw-r--r--  src/include/nodes/execnodes.h | 3
-rw-r--r--  src/include/nodes/parsenodes.h | 27
-rw-r--r--  src/include/nodes/pathnodes.h | 3
-rw-r--r--  src/include/nodes/plannodes.h | 11
-rw-r--r--  src/include/nodes/primnodes.h | 10
-rw-r--r--  src/include/nodes/queryjumble.h | 19
-rw-r--r--  src/include/optimizer/paramassign.h | 3
-rw-r--r--  src/include/optimizer/paths.h | 2
-rw-r--r--  src/include/optimizer/placeholder.h | 2
-rw-r--r--  src/include/parser/parse_node.h | 16
-rw-r--r--  src/include/pg_config.h.in | 7
-rw-r--r--  src/include/replication/reorderbuffer.h | 16
-rw-r--r--  src/include/replication/slot.h | 8
-rw-r--r--  src/include/storage/aio.h | 2
-rw-r--r--  src/include/storage/copydir.h | 2
-rw-r--r--  src/include/storage/lock.h | 2
-rw-r--r--  src/include/storage/lwlock.h | 2
-rw-r--r--  src/include/storage/procsignal.h | 1
-rw-r--r--  src/include/storage/sinval.h | 2
-rw-r--r--  src/include/storage/waiteventset.h | 2
-rw-r--r--  src/include/tcop/backend_startup.h | 2
-rw-r--r--  src/include/utils/backend_status.h | 12
-rw-r--r--  src/include/utils/elog.h | 2
-rw-r--r--  src/include/utils/memutils.h | 82
-rw-r--r--  src/include/utils/plancache.h | 46
-rw-r--r--  src/include/utils/portal.h | 4
-rw-r--r--  src/include/utils/skipsupport.h | 2
-rw-r--r--  src/interfaces/ecpg/preproc/meson.build | 2
-rw-r--r--  src/interfaces/libpq-oauth/.gitignore | 1
-rw-r--r--  src/interfaces/libpq-oauth/oauth-curl.c | 26
-rw-r--r--  src/interfaces/libpq/Makefile | 11
-rw-r--r--  src/interfaces/libpq/fe-auth-oauth.c | 25
-rw-r--r--  src/interfaces/libpq/fe-cancel.c | 2
-rw-r--r--  src/interfaces/libpq/fe-connect.c | 46
-rw-r--r--  src/interfaces/libpq/fe-misc.c | 28
-rw-r--r--  src/interfaces/libpq/fe-protocol3.c | 7
-rw-r--r--  src/interfaces/libpq/fe-secure-gssapi.c | 74
-rw-r--r--  src/interfaces/libpq/fe-secure-openssl.c | 4
-rw-r--r--  src/interfaces/libpq/t/005_negotiate_encryption.pl | 2
-rw-r--r--  src/makefiles/pgxs.mk | 3
-rw-r--r--  src/pl/plpgsql/src/pl_comp.c | 12
-rw-r--r--  src/pl/plpython/expected/README | 3
-rw-r--r--  src/pl/plpython/expected/plpython_error.out | 2
-rw-r--r--  src/pl/plpython/expected/plpython_error_5.out | 460
-rw-r--r--  src/pl/plpython/plpy_cursorobject.c | 6
-rw-r--r--  src/pl/plpython/plpy_elog.c | 345
-rw-r--r--  src/pl/plpython/plpy_planobject.c | 6
-rw-r--r--  src/pl/plpython/plpy_resultobject.c | 6
-rw-r--r--  src/pl/plpython/plpy_subxactobject.c | 6
-rw-r--r--  src/port/explicit_bzero.c | 4
-rw-r--r--  src/port/pg_crc32c_sse42.c | 2
-rw-r--r--  src/port/pg_localeconv_r.c | 2
-rw-r--r--  src/test/authentication/t/001_password.pl | 23
-rw-r--r--  src/test/authentication/t/003_peer.pl | 2
-rw-r--r--  src/test/authentication/t/005_sspi.pl | 2
-rw-r--r--  src/test/authentication/t/007_pre_auth.pl | 2
-rw-r--r--  src/test/kerberos/t/001_auth.pl | 2
-rw-r--r--  src/test/ldap/t/001_auth.pl | 2
-rw-r--r--  src/test/ldap/t/002_bindpasswd.pl | 2
-rw-r--r--  src/test/modules/injection_points/Makefile | 2
-rw-r--r--  src/test/modules/injection_points/expected/vacuum.out | 122
-rw-r--r--  src/test/modules/injection_points/meson.build | 1
-rw-r--r--  src/test/modules/injection_points/sql/vacuum.sql | 47
-rw-r--r--  src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl | 3
-rw-r--r--  src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl | 6
-rw-r--r--  src/test/modules/oauth_validator/t/001_server.pl | 22
-rw-r--r--  src/test/modules/oauth_validator/t/002_client.pl | 2
-rwxr-xr-x  src/test/modules/oauth_validator/t/oauth_server.py | 31
-rw-r--r--  src/test/modules/test_aio/t/001_aio.pl | 46
-rw-r--r--  src/test/modules/test_aio/test_aio.c | 4
-rw-r--r--  src/test/modules/test_dsm_registry/test_dsm_registry.c | 4
-rw-r--r--  src/test/modules/test_shm_mq/worker.c | 2
-rw-r--r--  src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm | 1
-rw-r--r--  src/test/perl/PostgreSQL/Test/Cluster.pm | 2
-rw-r--r--  src/test/postmaster/t/002_connection_limits.pl | 6
-rw-r--r--  src/test/postmaster/t/003_start_stop.pl | 3
-rw-r--r--  src/test/recovery/meson.build | 2
-rw-r--r--  src/test/recovery/t/013_crash_restart.pl | 2
-rw-r--r--  src/test/recovery/t/022_crash_temp_files.pl | 2
-rw-r--r--  src/test/recovery/t/032_relfilenode_reuse.pl | 2
-rw-r--r--  src/test/recovery/t/037_invalid_database.pl | 2
-rw-r--r--  src/test/recovery/t/040_standby_failover_slots_sync.pl | 3
-rw-r--r--  src/test/recovery/t/047_checkpoint_physical_slot.pl | 132
-rw-r--r--  src/test/recovery/t/048_vacuum_horizon_floor.pl | 288
-rw-r--r--  src/test/regress/expected/alter_table.out | 7
-rw-r--r--  src/test/regress/expected/constraints.out | 2
-rw-r--r--  src/test/regress/expected/create_table_like.out | 30
-rw-r--r--  src/test/regress/expected/foreign_key.out | 79
-rw-r--r--  src/test/regress/expected/generated_virtual.out | 89
-rw-r--r--  src/test/regress/expected/horology.out | 9
-rw-r--r--  src/test/regress/expected/inherit.out | 4
-rw-r--r--  src/test/regress/expected/join.out | 242
-rw-r--r--  src/test/regress/expected/limit.out | 20
-rw-r--r--  src/test/regress/expected/matview.out | 2
-rw-r--r--  src/test/regress/expected/merge.out | 70
-rw-r--r--  src/test/regress/expected/misc_functions.out | 18
-rw-r--r--  src/test/regress/expected/partition_join.out | 18
-rw-r--r--  src/test/regress/expected/partition_prune.out | 47
-rw-r--r--  src/test/regress/expected/psql.out | 14
-rw-r--r--  src/test/regress/expected/psql_pipeline.out | 194
-rw-r--r--  src/test/regress/expected/publication.out | 17
-rw-r--r--  src/test/regress/expected/strings.out | 67
-rw-r--r--  src/test/regress/expected/sysviews.out | 19
-rw-r--r--  src/test/regress/expected/triggers.out | 26
-rw-r--r--  src/test/regress/expected/without_overlaps.out | 4
-rw-r--r--  src/test/regress/sql/alter_table.sql | 9
-rw-r--r--  src/test/regress/sql/constraints.sql | 3
-rw-r--r--  src/test/regress/sql/create_table_like.sql | 16
-rw-r--r--  src/test/regress/sql/foreign_key.sql | 36
-rw-r--r--  src/test/regress/sql/generated_virtual.sql | 39
-rw-r--r--  src/test/regress/sql/horology.sql | 4
-rw-r--r--  src/test/regress/sql/join.sql | 75
-rw-r--r--  src/test/regress/sql/limit.sql | 5
-rw-r--r--  src/test/regress/sql/merge.sql | 49
-rw-r--r--  src/test/regress/sql/misc_functions.sql | 6
-rw-r--r--  src/test/regress/sql/partition_join.sql | 3
-rw-r--r--  src/test/regress/sql/partition_prune.sql | 6
-rw-r--r--  src/test/regress/sql/psql.sql | 12
-rw-r--r--  src/test/regress/sql/psql_pipeline.sql | 106
-rw-r--r--  src/test/regress/sql/publication.sql | 5
-rw-r--r--  src/test/regress/sql/strings.sql | 20
-rw-r--r--  src/test/regress/sql/sysviews.sql | 18
-rw-r--r--  src/test/regress/sql/triggers.sql | 18
-rw-r--r--  src/test/ssl/t/SSL/Server.pm | 5
-rw-r--r--  src/test/subscription/t/007_ddl.pl | 35
-rw-r--r--  src/test/subscription/t/013_partition.pl | 3
-rw-r--r--  src/test/subscription/t/021_twophase.pl | 7
-rw-r--r--  src/test/subscription/t/024_add_drop_pub.pl | 14
-rw-r--r--  src/test/subscription/t/035_conflicts.pl | 3
-rw-r--r--  src/tools/RELEASE_CHANGES | 3
-rw-r--r--  src/tools/ci/pg_ci_base.conf | 4
-rwxr-xr-x  src/tools/git_changelog | 1
-rwxr-xr-x  src/tools/pgflex | 4
-rwxr-xr-x  src/tools/pgindent/pgindent | 9
-rw-r--r--  src/tools/pgindent/typedefs.list | 149
-rwxr-xr-x  src/tools/version_stamp.pl | 2
395 files changed, 7336 insertions(+), 9386 deletions(-)
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index d132a32b975..8048afd1a80 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -14,6 +14,12 @@
#
# $ git log --pretty=format:"%H # %cd%n# %s" $PGINDENTGITHASH -1 --date=iso
+b27644bade0348d0dafd3036c47880a349fe9332 # 2025-06-15 13:04:24 -0400
+# Sync typedefs.list with the buildfarm.
+
+4672b6223910687b2aab075bcd2dd54ce90d5171 # 2025-06-01 14:55:24 -0400
+# Run pgindent on the previous commit.
+
918e7287ed20eb1fe280ab6c4056ccf94dcd53a8 # 2025-04-30 19:18:30 +1200
# Fix broken indentation
diff --git a/config/c-compiler.m4 b/config/c-compiler.m4
index 5f3e1d1faf9..da40bd6a647 100644
--- a/config/c-compiler.m4
+++ b/config/c-compiler.m4
@@ -602,6 +602,7 @@ AC_CACHE_CHECK([for _mm512_clmulepi64_epi128], [Ac_cachevar],
{
__m128i z;
+ x = _mm512_xor_si512(_mm512_zextsi128_si512(_mm_cvtsi32_si128(0)), x);
y = _mm512_clmulepi64_epi128(x, y, 0);
z = _mm_ternarylogic_epi64(
_mm512_castsi512_si128(y),
diff --git a/configure b/configure
index 275c67ee67c..16ef5b58d1a 100755
--- a/configure
+++ b/configure
@@ -1,6 +1,6 @@
#! /bin/sh
# Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for PostgreSQL 18beta1.
+# Generated by GNU Autoconf 2.69 for PostgreSQL 19devel.
#
# Report bugs to <pgsql-bugs@lists.postgresql.org>.
#
@@ -582,8 +582,8 @@ MAKEFLAGS=
# Identity of this package.
PACKAGE_NAME='PostgreSQL'
PACKAGE_TARNAME='postgresql'
-PACKAGE_VERSION='18beta1'
-PACKAGE_STRING='PostgreSQL 18beta1'
+PACKAGE_VERSION='19devel'
+PACKAGE_STRING='PostgreSQL 19devel'
PACKAGE_BUGREPORT='pgsql-bugs@lists.postgresql.org'
PACKAGE_URL='https://www.postgresql.org/'
@@ -1468,7 +1468,7 @@ if test "$ac_init_help" = "long"; then
# Omit some internal or obsolete options to make the list less imposing.
# This message is too long to be a string in the A/UX 3.1 sh.
cat <<_ACEOF
-\`configure' configures PostgreSQL 18beta1 to adapt to many kinds of systems.
+\`configure' configures PostgreSQL 19devel to adapt to many kinds of systems.
Usage: $0 [OPTION]... [VAR=VALUE]...
@@ -1533,7 +1533,7 @@ fi
if test -n "$ac_init_help"; then
case $ac_init_help in
- short | recursive ) echo "Configuration of PostgreSQL 18beta1:";;
+ short | recursive ) echo "Configuration of PostgreSQL 19devel:";;
esac
cat <<\_ACEOF
@@ -1724,7 +1724,7 @@ fi
test -n "$ac_init_help" && exit $ac_status
if $ac_init_version; then
cat <<\_ACEOF
-PostgreSQL configure 18beta1
+PostgreSQL configure 19devel
generated by GNU Autoconf 2.69
Copyright (C) 2012 Free Software Foundation, Inc.
@@ -2477,7 +2477,7 @@ cat >config.log <<_ACEOF
This file contains any messages produced by compilers while
running configure, to aid debugging if configure makes a mistake.
-It was created by PostgreSQL $as_me 18beta1, which was
+It was created by PostgreSQL $as_me 19devel, which was
generated by GNU Autoconf 2.69. Invocation command line was
$ $0 $@
@@ -15616,7 +15616,7 @@ fi
LIBS_including_readline="$LIBS"
LIBS=`echo "$LIBS" | sed -e 's/-ledit//g' -e 's/-lreadline//g'`
-for ac_func in backtrace_symbols copyfile copy_file_range elf_aux_info getauxval getifaddrs getpeerucred inet_pton kqueue localeconv_l mbstowcs_l memset_s posix_fallocate ppoll pthread_is_threaded_np setproctitle setproctitle_fast strsignal syncfs sync_file_range uselocale wcstombs_l
+for ac_func in backtrace_symbols copyfile copy_file_range elf_aux_info getauxval getifaddrs getpeerucred inet_pton kqueue localeconv_l mbstowcs_l posix_fallocate ppoll pthread_is_threaded_np setproctitle setproctitle_fast strsignal syncfs sync_file_range uselocale wcstombs_l
do :
as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh`
ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var"
@@ -16192,6 +16192,19 @@ cat >>confdefs.h <<_ACEOF
#define HAVE_DECL_STRCHRNUL $ac_have_decl
_ACEOF
+ac_fn_c_check_decl "$LINENO" "memset_s" "ac_cv_have_decl_memset_s" "#define __STDC_WANT_LIB_EXT1__ 1
+#include <string.h>
+"
+if test "x$ac_cv_have_decl_memset_s" = xyes; then :
+ ac_have_decl=1
+else
+ ac_have_decl=0
+fi
+
+cat >>confdefs.h <<_ACEOF
+#define HAVE_DECL_MEMSET_S $ac_have_decl
+_ACEOF
+
# This is probably only present on macOS, but may as well check always
ac_fn_c_check_decl "$LINENO" "F_FULLFSYNC" "ac_cv_have_decl_F_FULLFSYNC" "#include <fcntl.h>
@@ -18214,6 +18227,7 @@ else
{
__m128i z;
+ x = _mm512_xor_si512(_mm512_zextsi128_si512(_mm_cvtsi32_si128(0)), x);
y = _mm512_clmulepi64_epi128(x, y, 0);
z = _mm_ternarylogic_epi64(
_mm512_castsi512_si128(y),
@@ -20049,7 +20063,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
# report actual input values of CONFIG_FILES etc. instead of their
# values after options handling.
ac_log="
-This file was extended by PostgreSQL $as_me 18beta1, which was
+This file was extended by PostgreSQL $as_me 19devel, which was
generated by GNU Autoconf 2.69. Invocation command line was
CONFIG_FILES = $CONFIG_FILES
@@ -20120,7 +20134,7 @@ _ACEOF
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
ac_cs_version="\\
-PostgreSQL config.status 18beta1
+PostgreSQL config.status 19devel
configured by $0, generated by GNU Autoconf 2.69,
with options \\"\$ac_cs_config\\"
diff --git a/configure.ac b/configure.ac
index 7ea91d56adb..b3efc49c97a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -17,7 +17,7 @@ dnl Read the Autoconf manual for details.
dnl
m4_pattern_forbid(^PGAC_)dnl to catch undefined macros
-AC_INIT([PostgreSQL], [18beta1], [pgsql-bugs@lists.postgresql.org], [], [https://www.postgresql.org/])
+AC_INIT([PostgreSQL], [19devel], [pgsql-bugs@lists.postgresql.org], [], [https://www.postgresql.org/])
m4_if(m4_defn([m4_PACKAGE_VERSION]), [2.69], [], [m4_fatal([Autoconf version 2.69 is required.
Untested combinations of 'autoconf' and PostgreSQL versions are not
@@ -1792,7 +1792,6 @@ AC_CHECK_FUNCS(m4_normalize([
kqueue
localeconv_l
mbstowcs_l
- memset_s
posix_fallocate
ppoll
pthread_is_threaded_np
@@ -1838,6 +1837,8 @@ AC_CHECK_DECLS([strlcat, strlcpy, strnlen, strsep, timingsafe_bcmp])
AC_CHECK_DECLS([preadv], [], [], [#include <sys/uio.h>])
AC_CHECK_DECLS([pwritev], [], [], [#include <sys/uio.h>])
AC_CHECK_DECLS([strchrnul], [], [], [#include <string.h>])
+AC_CHECK_DECLS([memset_s], [], [], [#define __STDC_WANT_LIB_EXT1__ 1
+#include <string.h>])
# This is probably only present on macOS, but may as well check always
AC_CHECK_DECLS(F_FULLFSYNC, [], [], [#include <fcntl.h>])
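For context on the two memset_s hunks above: memset_s() comes from C11's optional Annex K, and <string.h> declares it only when __STDC_WANT_LIB_EXT1__ is defined before inclusion, which is why the probe moves from a link-level AC_CHECK_FUNCS test to an AC_CHECK_DECLS test carrying that define. A minimal consumer-side sketch of how the resulting HAVE_DECL_MEMSET_S macro is used; the wipe() helper is hypothetical, and PostgreSQL's real fallback logic lives in src/port/explicit_bzero.c:

#define __STDC_WANT_LIB_EXT1__ 1	/* must precede <string.h> to expose memset_s() */
#include <string.h>

static void
wipe(void *buf, size_t len)
{
#if HAVE_DECL_MEMSET_S
	/* memset_s() may not be optimized away, per Annex K */
	(void) memset_s(buf, len, 0, len);
#else
	/* sketch-only fallback; a real implementation needs an optimization barrier */
	memset(buf, 0, len);
#endif
}

AC_CHECK_DECLS always defines HAVE_DECL_MEMSET_S to 1 or 0, so a plain #if test is safe here.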
diff --git a/contrib/amcheck/expected/check_gin.out b/contrib/amcheck/expected/check_gin.out
index b4f0b110747..8dd01ced8d1 100644
--- a/contrib/amcheck/expected/check_gin.out
+++ b/contrib/amcheck/expected/check_gin.out
@@ -76,3 +76,15 @@ SELECT gin_index_check('gin_check_jsonb_idx');
-- cleanup
DROP TABLE gin_check_jsonb;
+-- Test GIN multicolumn index
+CREATE TABLE "gin_check_multicolumn"(a text[], b text[]);
+INSERT INTO gin_check_multicolumn (a,b) values ('{a,c,e}','{b,d,f}');
+CREATE INDEX "gin_check_multicolumn_idx" on gin_check_multicolumn USING GIN(a,b);
+SELECT gin_index_check('gin_check_multicolumn_idx');
+ gin_index_check
+-----------------
+
+(1 row)
+
+-- cleanup
+DROP TABLE gin_check_multicolumn;
diff --git a/contrib/amcheck/meson.build b/contrib/amcheck/meson.build
index b33e8c9b062..1f0c347ed54 100644
--- a/contrib/amcheck/meson.build
+++ b/contrib/amcheck/meson.build
@@ -49,6 +49,7 @@ tests += {
't/003_cic_2pc.pl',
't/004_verify_nbtree_unique.pl',
't/005_pitr.pl',
+ 't/006_verify_gin.pl',
],
},
}
diff --git a/contrib/amcheck/sql/check_gin.sql b/contrib/amcheck/sql/check_gin.sql
index 66f42c34311..11caed3d6a8 100644
--- a/contrib/amcheck/sql/check_gin.sql
+++ b/contrib/amcheck/sql/check_gin.sql
@@ -50,3 +50,13 @@ SELECT gin_index_check('gin_check_jsonb_idx');
-- cleanup
DROP TABLE gin_check_jsonb;
+
+-- Test GIN multicolumn index
+CREATE TABLE "gin_check_multicolumn"(a text[], b text[]);
+INSERT INTO gin_check_multicolumn (a,b) values ('{a,c,e}','{b,d,f}');
+CREATE INDEX "gin_check_multicolumn_idx" on gin_check_multicolumn USING GIN(a,b);
+
+SELECT gin_index_check('gin_check_multicolumn_idx');
+
+-- cleanup
+DROP TABLE gin_check_multicolumn;
diff --git a/contrib/amcheck/t/006_verify_gin.pl b/contrib/amcheck/t/006_verify_gin.pl
new file mode 100644
index 00000000000..5be0bee3218
--- /dev/null
+++ b/contrib/amcheck/t/006_verify_gin.pl
@@ -0,0 +1,293 @@
+
+# Copyright (c) 2021-2025, PostgreSQL Global Development Group
+
+use strict;
+use warnings FATAL => 'all';
+
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+
+use Test::More;
+
+my $node;
+my $blksize;
+
+# To get a split fast, we want tuples to be as large as possible, but at the same time we don't want them to be toasted.
+my $filler_size = 1900;
+
+#
+# Test set-up
+#
+$node = PostgreSQL::Test::Cluster->new('test');
+$node->init(no_data_checksums => 1);
+$node->append_conf('postgresql.conf', 'autovacuum=off');
+$node->start;
+$blksize = int($node->safe_psql('postgres', 'SHOW block_size;'));
+$node->safe_psql('postgres', q(CREATE EXTENSION amcheck));
+$node->safe_psql(
+ 'postgres', q(
+ CREATE OR REPLACE FUNCTION random_string( INT ) RETURNS text AS $$
+ SELECT string_agg(substring('0123456789abcdefghijklmnopqrstuvwxyz', ceil(random() * 36)::integer, 1), '') from generate_series(1, $1);
+ $$ LANGUAGE SQL;));
+
+# Tests
+invalid_entry_order_leaf_page_test();
+invalid_entry_order_inner_page_test();
+invalid_entry_columns_order_test();
+inconsistent_with_parent_key__parent_key_corrupted_test();
+inconsistent_with_parent_key__child_key_corrupted_test();
+inconsistent_with_parent_key__parent_key_corrupted_posting_tree_test();
+
+sub invalid_entry_order_leaf_page_test
+{
+ my $relname = "test";
+ my $indexname = "test_gin_idx";
+
+ $node->safe_psql(
+ 'postgres', qq(
+ DROP TABLE IF EXISTS $relname;
+ CREATE TABLE $relname (a text[]);
+ INSERT INTO $relname (a) VALUES ('{aaaaa,bbbbb}');
+ CREATE INDEX $indexname ON $relname USING gin (a);
+ ));
+ my $relpath = relation_filepath($indexname);
+
+ $node->stop;
+
+ my $blkno = 1; # root
+
+ # produce wrong order by replacing aaaaa with ccccc
+ string_replace_block($relpath, 'aaaaa', 'ccccc', $blkno);
+
+ $node->start;
+
+ my ($result, $stdout, $stderr) =
+ $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+ my $expected =
+ "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";
+ like($stderr, qr/$expected/);
+}
+
+sub invalid_entry_order_inner_page_test
+{
+ my $relname = "test";
+ my $indexname = "test_gin_idx";
+
+ # to break the order in the inner page we need at least 3 items (the rightmost key at the inner level is not checked for order),
+ # so fill the table until we have 2 splits
+ $node->safe_psql(
+ 'postgres', qq(
+ DROP TABLE IF EXISTS $relname;
+ CREATE TABLE $relname (a text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'pppppppppp' || random_string($filler_size) ||'}')::text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'qqqqqqqqqq' || random_string($filler_size) ||'}')::text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'rrrrrrrrrr' || random_string($filler_size) ||'}')::text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'ssssssssss' || random_string($filler_size) ||'}')::text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'tttttttttt' || random_string($filler_size) ||'}')::text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'uuuuuuuuuu' || random_string($filler_size) ||'}')::text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'vvvvvvvvvv' || random_string($filler_size) ||'}')::text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'wwwwwwwwww' || random_string($filler_size) ||'}')::text[]);
+ CREATE INDEX $indexname ON $relname USING gin (a);
+ ));
+ my $relpath = relation_filepath($indexname);
+
+ $node->stop;
+
+ my $blkno = 1; # root
+
+ # we have rrrrrrrrrr... and tttttttttt... as keys in the root, so produce a wrong order by replacing rrrrrrrrrr...
+ string_replace_block($relpath, 'rrrrrrrrrr', 'zzzzzzzzzz', $blkno);
+
+ $node->start;
+
+ my ($result, $stdout, $stderr) =
+ $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+ my $expected =
+ "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";
+ like($stderr, qr/$expected/);
+}
+
+sub invalid_entry_columns_order_test
+{
+ my $relname = "test";
+ my $indexname = "test_gin_idx";
+
+ $node->safe_psql(
+ 'postgres', qq(
+ DROP TABLE IF EXISTS $relname;
+ CREATE TABLE $relname (a text[],b text[]);
+ INSERT INTO $relname (a,b) VALUES ('{aaa}','{bbb}');
+ CREATE INDEX $indexname ON $relname USING gin (a,b);
+ ));
+ my $relpath = relation_filepath($indexname);
+
+ $node->stop;
+
+ my $blkno = 1; # root
+
+ # mess column numbers
+ # root items order before: (1,aaa), (2,bbb)
+ # root items order after: (2,aaa), (1,bbb)
+ my $attrno_1 = pack('s', 1);
+ my $attrno_2 = pack('s', 2);
+
+ my $find = qr/($attrno_1)(.)(aaa)/s;
+ my $replace = $attrno_2 . '$2$3';
+ string_replace_block($relpath, $find, $replace, $blkno);
+
+ $find = qr/($attrno_2)(.)(bbb)/s;
+ $replace = $attrno_1 . '$2$3';
+ string_replace_block($relpath, $find, $replace, $blkno);
+
+ $node->start;
+
+ my ($result, $stdout, $stderr) =
+ $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+ my $expected =
+ "index \"$indexname\" has wrong tuple order on entry tree page, block 1, offset 2, rightlink 4294967295";
+ like($stderr, qr/$expected/);
+}
+
+sub inconsistent_with_parent_key__parent_key_corrupted_test
+{
+ my $relname = "test";
+ my $indexname = "test_gin_idx";
+
+ # fill the table until we have a split
+ $node->safe_psql(
+ 'postgres', qq(
+ DROP TABLE IF EXISTS $relname;
+ CREATE TABLE $relname (a text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'llllllllll' || random_string($filler_size) ||'}')::text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'mmmmmmmmmm' || random_string($filler_size) ||'}')::text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'nnnnnnnnnn' || random_string($filler_size) ||'}')::text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'xxxxxxxxxx' || random_string($filler_size) ||'}')::text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'yyyyyyyyyy' || random_string($filler_size) ||'}')::text[]);
+ CREATE INDEX $indexname ON $relname USING gin (a);
+ ));
+ my $relpath = relation_filepath($indexname);
+
+ $node->stop;
+
+ my $blkno = 1; # root
+
+ # we have nnnnnnnnnn... as the parent key in the root, so replace it with something smaller than the child's keys
+ string_replace_block($relpath, 'nnnnnnnnnn', 'aaaaaaaaaa', $blkno);
+
+ $node->start;
+
+ my ($result, $stdout, $stderr) =
+ $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+ my $expected =
+ "index \"$indexname\" has inconsistent records on page 3 offset 3";
+ like($stderr, qr/$expected/);
+}
+
+sub inconsistent_with_parent_key__child_key_corrupted_test
+{
+ my $relname = "test";
+ my $indexname = "test_gin_idx";
+
+ # fill the table until we have a split
+ $node->safe_psql(
+ 'postgres', qq(
+ DROP TABLE IF EXISTS $relname;
+ CREATE TABLE $relname (a text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'llllllllll' || random_string($filler_size) ||'}')::text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'mmmmmmmmmm' || random_string($filler_size) ||'}')::text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'nnnnnnnnnn' || random_string($filler_size) ||'}')::text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'xxxxxxxxxx' || random_string($filler_size) ||'}')::text[]);
+ INSERT INTO $relname (a) VALUES (('{' || 'yyyyyyyyyy' || random_string($filler_size) ||'}')::text[]);
+ CREATE INDEX $indexname ON $relname USING gin (a);
+ ));
+ my $relpath = relation_filepath($indexname);
+
+ $node->stop;
+
+ my $blkno = 3; # leaf
+
+ # we have nnnnnnnnnn... as parent key in the root, so replace child key with something bigger
+ string_replace_block($relpath, 'nnnnnnnnnn', 'pppppppppp', $blkno);
+
+ $node->start;
+
+ my ($result, $stdout, $stderr) =
+ $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+ my $expected =
+ "index \"$indexname\" has inconsistent records on page 3 offset 3";
+ like($stderr, qr/$expected/);
+}
+
+sub inconsistent_with_parent_key__parent_key_corrupted_posting_tree_test
+{
+ my $relname = "test";
+ my $indexname = "test_gin_idx";
+
+ $node->safe_psql(
+ 'postgres', qq(
+ DROP TABLE IF EXISTS $relname;
+ CREATE TABLE $relname (a text[]);
+ INSERT INTO $relname (a) select ('{aaaaa}') from generate_series(1,10000);
+ CREATE INDEX $indexname ON $relname USING gin (a);
+ ));
+ my $relpath = relation_filepath($indexname);
+
+ $node->stop;
+
+ my $blkno = 2; # posting tree root
+
+ # we have a posting tree for the 'aaaaa' key with the root at the 2nd block
+ # and two leaf pages, 3 and 4. Replace the 4th page's high key with (1,1)
+ # so that there are TIDs in the leaf page that are larger than the new high key.
+ my $find = pack('S*', 0, 4, 0) . '....';
+ my $replace = pack('S*', 0, 4, 0, 1, 1);
+ string_replace_block($relpath, $find, $replace, $blkno);
+
+ $node->start;
+
+ my ($result, $stdout, $stderr) =
+ $node->psql('postgres', qq(SELECT gin_index_check('$indexname')));
+ my $expected =
+ "index \"$indexname\": tid exceeds parent's high key in postingTree leaf on block 4";
+ like($stderr, qr/$expected/);
+}
+
+
+# Returns the filesystem path for the named relation.
+sub relation_filepath
+{
+ my ($relname) = @_;
+
+ my $pgdata = $node->data_dir;
+ my $rel = $node->safe_psql('postgres',
+ qq(SELECT pg_relation_filepath('$relname')));
+ die "path not found for relation $relname" unless defined $rel;
+ return "$pgdata/$rel";
+}
+
+# Substitute pattern 'find' with 'replace' within block number 'blkno' of file 'filename'.
+sub string_replace_block
+{
+ my ($filename, $find, $replace, $blkno) = @_;
+
+ my $fh;
+ open($fh, '+<', $filename) or BAIL_OUT("open failed: $!");
+ binmode $fh;
+
+ my $offset = $blkno * $blksize;
+ my $buffer;
+
+ sysseek($fh, $offset, 0) or BAIL_OUT("seek failed: $!");
+ sysread($fh, $buffer, $blksize) or BAIL_OUT("read failed: $!");
+
+ $buffer =~ s/$find/'"' . $replace . '"'/gee;
+
+ sysseek($fh, $offset, 0) or BAIL_OUT("seek failed: $!");
+ syswrite($fh, $buffer) or BAIL_OUT("write failed: $!");
+
+ close($fh) or BAIL_OUT("close failed: $!");
+
+ return;
+}
+
+done_testing();
diff --git a/contrib/amcheck/verify_gin.c b/contrib/amcheck/verify_gin.c
index b5f363562e3..c615d950736 100644
--- a/contrib/amcheck/verify_gin.c
+++ b/contrib/amcheck/verify_gin.c
@@ -38,7 +38,6 @@ typedef struct GinScanItem
int depth;
IndexTuple parenttup;
BlockNumber parentblk;
- XLogRecPtr parentlsn;
BlockNumber blkno;
struct GinScanItem *next;
} GinScanItem;
@@ -346,7 +345,7 @@ gin_check_posting_tree_parent_keys_consistency(Relation rel, BlockNumber posting
* Check if this tuple is consistent with the downlink in the
* parent.
*/
- if (stack->parentblk != InvalidBlockNumber && i == maxoff &&
+ if (i == maxoff && ItemPointerIsValid(&stack->parentkey) &&
ItemPointerCompare(&stack->parentkey, &posting_item->key) < 0)
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
@@ -359,14 +358,10 @@ gin_check_posting_tree_parent_keys_consistency(Relation rel, BlockNumber posting
ptr->depth = stack->depth + 1;
/*
- * Set rightmost parent key to invalid item pointer. Its value
- * is 'Infinity' and not explicitly stored.
+ * The rightmost parent key is always an invalid item pointer.
+ * Its value is 'Infinity' and not explicitly stored.
*/
- if (rightlink == InvalidBlockNumber)
- ItemPointerSetInvalid(&ptr->parentkey);
- else
- ptr->parentkey = posting_item->key;
-
+ ptr->parentkey = posting_item->key;
ptr->parentblk = stack->blkno;
ptr->blkno = BlockIdGetBlockNumber(&posting_item->child_blkno);
ptr->next = stack->next;
@@ -421,7 +416,6 @@ gin_check_parent_keys_consistency(Relation rel,
stack->depth = 0;
stack->parenttup = NULL;
stack->parentblk = InvalidBlockNumber;
- stack->parentlsn = InvalidXLogRecPtr;
stack->blkno = GIN_ROOT_BLKNO;
while (stack)
@@ -432,7 +426,6 @@ gin_check_parent_keys_consistency(Relation rel,
OffsetNumber i,
maxoff,
prev_attnum;
- XLogRecPtr lsn;
IndexTuple prev_tuple;
BlockNumber rightlink;
@@ -442,7 +435,6 @@ gin_check_parent_keys_consistency(Relation rel,
RBM_NORMAL, strategy);
LockBuffer(buffer, GIN_SHARE);
page = (Page) BufferGetPage(buffer);
- lsn = BufferGetLSNAtomic(buffer);
maxoff = PageGetMaxOffsetNumber(page);
rightlink = GinPageGetOpaque(page)->rightlink;
@@ -463,17 +455,18 @@ gin_check_parent_keys_consistency(Relation rel,
Datum parent_key = gintuple_get_key(&state,
stack->parenttup,
&parent_key_category);
+ OffsetNumber parent_key_attnum = gintuple_get_attrnum(&state, stack->parenttup);
ItemId iid = PageGetItemIdCareful(rel, stack->blkno,
page, maxoff);
IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
- OffsetNumber attnum = gintuple_get_attrnum(&state, idxtuple);
+ OffsetNumber page_max_key_attnum = gintuple_get_attrnum(&state, idxtuple);
GinNullCategory page_max_key_category;
Datum page_max_key = gintuple_get_key(&state, idxtuple, &page_max_key_category);
if (rightlink != InvalidBlockNumber &&
- ginCompareEntries(&state, attnum, page_max_key,
- page_max_key_category, parent_key,
- parent_key_category) > 0)
+ ginCompareAttEntries(&state, page_max_key_attnum, page_max_key,
+ page_max_key_category, parent_key_attnum,
+ parent_key, parent_key_category) < 0)
{
/* split page detected, install right link to the stack */
GinScanItem *ptr;
@@ -484,7 +477,6 @@ gin_check_parent_keys_consistency(Relation rel,
ptr->depth = stack->depth;
ptr->parenttup = CopyIndexTuple(stack->parenttup);
ptr->parentblk = stack->parentblk;
- ptr->parentlsn = stack->parentlsn;
ptr->blkno = rightlink;
ptr->next = stack->next;
stack->next = ptr;
@@ -513,9 +505,7 @@ gin_check_parent_keys_consistency(Relation rel,
{
ItemId iid = PageGetItemIdCareful(rel, stack->blkno, page, i);
IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
- OffsetNumber attnum = gintuple_get_attrnum(&state, idxtuple);
- GinNullCategory prev_key_category;
- Datum prev_key;
+ OffsetNumber current_attnum = gintuple_get_attrnum(&state, idxtuple);
GinNullCategory current_key_category;
Datum current_key;
@@ -528,20 +518,24 @@ gin_check_parent_keys_consistency(Relation rel,
current_key = gintuple_get_key(&state, idxtuple, &current_key_category);
/*
- * First block is metadata, skip order check. Also, never check
- * for high key on rightmost page, as this key is not really
- * stored explicitly.
+ * Compare the entry to the preceding one.
+ *
+ * Don't check for high key on the rightmost inner page, as this
+ * key is not really stored explicitly.
*
- * Also make sure to not compare entries for different attnums,
- * which may be stored on the same page.
+ * The entries may be for different attributes, so make sure to
+ * use ginCompareAttEntries for comparison.
*/
- if (i != FirstOffsetNumber && attnum == prev_attnum && stack->blkno != GIN_ROOT_BLKNO &&
- !(i == maxoff && rightlink == InvalidBlockNumber))
+ if ((i != FirstOffsetNumber) &&
+ !(i == maxoff && rightlink == InvalidBlockNumber && !GinPageIsLeaf(page)))
{
+ Datum prev_key;
+ GinNullCategory prev_key_category;
+
prev_key = gintuple_get_key(&state, prev_tuple, &prev_key_category);
- if (ginCompareEntries(&state, attnum, prev_key,
- prev_key_category, current_key,
- current_key_category) >= 0)
+ if (ginCompareAttEntries(&state, prev_attnum, prev_key,
+ prev_key_category, current_attnum,
+ current_key, current_key_category) >= 0)
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("index \"%s\" has wrong tuple order on entry tree page, block %u, offset %u, rightlink %u",
@@ -556,13 +550,14 @@ gin_check_parent_keys_consistency(Relation rel,
i == maxoff)
{
GinNullCategory parent_key_category;
+ OffsetNumber parent_key_attnum = gintuple_get_attrnum(&state, stack->parenttup);
Datum parent_key = gintuple_get_key(&state,
stack->parenttup,
&parent_key_category);
- if (ginCompareEntries(&state, attnum, current_key,
- current_key_category, parent_key,
- parent_key_category) > 0)
+ if (ginCompareAttEntries(&state, current_attnum, current_key,
+ current_key_category, parent_key_attnum,
+ parent_key, parent_key_category) > 0)
{
/*
* There was a discrepancy between parent and child
@@ -581,6 +576,7 @@ gin_check_parent_keys_consistency(Relation rel,
stack->blkno, stack->parentblk);
else
{
+ parent_key_attnum = gintuple_get_attrnum(&state, stack->parenttup);
parent_key = gintuple_get_key(&state,
stack->parenttup,
&parent_key_category);
@@ -589,9 +585,9 @@ gin_check_parent_keys_consistency(Relation rel,
* Check if it is properly adjusted. If succeed,
* proceed to the next key.
*/
- if (ginCompareEntries(&state, attnum, current_key,
- current_key_category, parent_key,
- parent_key_category) > 0)
+ if (ginCompareAttEntries(&state, current_attnum, current_key,
+ current_key_category, parent_key_attnum,
+ parent_key, parent_key_category) > 0)
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("index \"%s\" has inconsistent records on page %u offset %u",
@@ -608,13 +604,12 @@ gin_check_parent_keys_consistency(Relation rel,
ptr = (GinScanItem *) palloc(sizeof(GinScanItem));
ptr->depth = stack->depth + 1;
/* last tuple in layer has no high key */
- if (i != maxoff && !GinPageGetOpaque(page)->rightlink)
- ptr->parenttup = CopyIndexTuple(idxtuple);
- else
+ if (i == maxoff && rightlink == InvalidBlockNumber)
ptr->parenttup = NULL;
+ else
+ ptr->parenttup = CopyIndexTuple(idxtuple);
ptr->parentblk = stack->blkno;
ptr->blkno = GinGetDownlink(idxtuple);
- ptr->parentlsn = lsn;
ptr->next = stack->next;
stack->next = ptr;
}
@@ -644,7 +639,7 @@ gin_check_parent_keys_consistency(Relation rel,
}
prev_tuple = CopyIndexTuple(idxtuple);
- prev_attnum = attnum;
+ prev_attnum = current_attnum;
}
LockBuffer(buffer, GIN_UNLOCK);
@@ -749,7 +744,7 @@ gin_refind_parent(Relation rel, BlockNumber parentblkno,
ItemId p_iid = PageGetItemIdCareful(rel, parentblkno, parentpage, o);
IndexTuple itup = (IndexTuple) PageGetItem(parentpage, p_iid);
- if (ItemPointerGetBlockNumber(&(itup->t_tid)) == childblkno)
+ if (GinGetDownlink(itup) == childblkno)
{
/* Found it! Make copy and return it */
result = CopyIndexTuple(itup);
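The switch from ginCompareEntries() to ginCompareAttEntries() throughout the hunks above reflects how a multicolumn GIN index stores its entry tree: entries are ordered first by attribute number and then by key, so neighboring tuples on a page may belong to different columns and must be compared as (attnum, key) pairs. A sketch of that ordering rule, assuming ginCompareEntries() covers the same-attribute case as in the GIN code proper:

/* Sketch: the (attnum, key) ordering that ginCompareAttEntries() enforces. */
static int
compare_att_entries(GinState *ginstate,
					OffsetNumber attnum_a, Datum key_a, GinNullCategory cat_a,
					OffsetNumber attnum_b, Datum key_b, GinNullCategory cat_b)
{
	/* the attribute number is the major sort key... */
	if (attnum_a != attnum_b)
		return (attnum_a < attnum_b) ? -1 : 1;
	/* ...and the entry key within an attribute is the minor one */
	return ginCompareEntries(ginstate, attnum_a, key_a, cat_a, key_b, cat_b);
}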
diff --git a/contrib/auto_explain/auto_explain.c b/contrib/auto_explain/auto_explain.c
index cd6625020a7..1f4badb4928 100644
--- a/contrib/auto_explain/auto_explain.c
+++ b/contrib/auto_explain/auto_explain.c
@@ -81,7 +81,7 @@ static ExecutorRun_hook_type prev_ExecutorRun = NULL;
static ExecutorFinish_hook_type prev_ExecutorFinish = NULL;
static ExecutorEnd_hook_type prev_ExecutorEnd = NULL;
-static bool explain_ExecutorStart(QueryDesc *queryDesc, int eflags);
+static void explain_ExecutorStart(QueryDesc *queryDesc, int eflags);
static void explain_ExecutorRun(QueryDesc *queryDesc,
ScanDirection direction,
uint64 count);
@@ -261,11 +261,9 @@ _PG_init(void)
/*
* ExecutorStart hook: start up logging if needed
*/
-static bool
+static void
explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
- bool plan_valid;
-
/*
* At the beginning of each top-level statement, decide whether we'll
* sample this statement. If nested-statement explaining is enabled,
@@ -301,13 +299,9 @@ explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
}
if (prev_ExecutorStart)
- plan_valid = prev_ExecutorStart(queryDesc, eflags);
+ prev_ExecutorStart(queryDesc, eflags);
else
- plan_valid = standard_ExecutorStart(queryDesc, eflags);
-
- /* The plan may have become invalid during standard_ExecutorStart() */
- if (!plan_valid)
- return false;
+ standard_ExecutorStart(queryDesc, eflags);
if (auto_explain_enabled())
{
@@ -325,8 +319,6 @@ explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
MemoryContextSwitchTo(oldcxt);
}
}
-
- return true;
}
/*
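The auto_explain hunks above track a change in the ExecutorStart_hook contract: the hook returns void again rather than reporting plan validity, so wrappers no longer propagate a bool result. A minimal sketch of the shape such a hook now takes; my_ExecutorStart is a hypothetical name, not auto_explain's code:

static void
my_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	/* always delegate first, to the previous hook or to the default */
	if (prev_ExecutorStart)
		prev_ExecutorStart(queryDesc, eflags);
	else
		standard_ExecutorStart(queryDesc, eflags);

	/* extension-specific setup goes here, after startup has succeeded */
}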
diff --git a/contrib/btree_gist/btree_gist--1.7--1.8.sql b/contrib/btree_gist/btree_gist--1.7--1.8.sql
index 4ff9c43a8eb..8f79365a461 100644
--- a/contrib/btree_gist/btree_gist--1.7--1.8.sql
+++ b/contrib/btree_gist/btree_gist--1.7--1.8.sql
@@ -3,85 +3,85 @@
-- complain if script is sourced in psql, rather than via CREATE EXTENSION
\echo Use "ALTER EXTENSION btree_gist UPDATE TO '1.8'" to load this file. \quit
-CREATE FUNCTION gist_stratnum_btree(int)
+CREATE FUNCTION gist_translate_cmptype_btree(int)
RETURNS smallint
AS 'MODULE_PATHNAME'
LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT;
ALTER OPERATOR FAMILY gist_oid_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_int2_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_int4_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_int8_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_float4_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_float8_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_timestamp_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_timestamptz_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_time_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_date_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_interval_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_cash_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_macaddr_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_text_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_bpchar_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_bytea_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_numeric_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_bit_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_vbit_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_inet_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_cidr_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_timetz_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_uuid_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_macaddr8_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_enum_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
ALTER OPERATOR FAMILY gist_bool_ops USING gist ADD
- FUNCTION 12 ("any", "any") gist_stratnum_btree (int) ;
+ FUNCTION 12 ("any", "any") gist_translate_cmptype_btree (int) ;
diff --git a/contrib/btree_gist/btree_gist.c b/contrib/btree_gist/btree_gist.c
index 280ce808456..39fcbdad334 100644
--- a/contrib/btree_gist/btree_gist.c
+++ b/contrib/btree_gist/btree_gist.c
@@ -15,7 +15,7 @@ PG_MODULE_MAGIC_EXT(
PG_FUNCTION_INFO_V1(gbt_decompress);
PG_FUNCTION_INFO_V1(gbtreekey_in);
PG_FUNCTION_INFO_V1(gbtreekey_out);
-PG_FUNCTION_INFO_V1(gist_stratnum_btree);
+PG_FUNCTION_INFO_V1(gist_translate_cmptype_btree);
/**************************************************
* In/Out for keys
@@ -62,7 +62,7 @@ gbt_decompress(PG_FUNCTION_ARGS)
* Returns the btree number for supported operators, otherwise invalid.
*/
Datum
-gist_stratnum_btree(PG_FUNCTION_ARGS)
+gist_translate_cmptype_btree(PG_FUNCTION_ARGS)
{
CompareType cmptype = PG_GETARG_INT32(0);
diff --git a/contrib/btree_gist/expected/stratnum.out b/contrib/btree_gist/expected/stratnum.out
index dd0edaf4a20..8222b661538 100644
--- a/contrib/btree_gist/expected/stratnum.out
+++ b/contrib/btree_gist/expected/stratnum.out
@@ -1,13 +1,13 @@
--- test stratnum support func
-SELECT gist_stratnum_btree(7);
- gist_stratnum_btree
----------------------
- 0
+-- test stratnum translation support func
+SELECT gist_translate_cmptype_btree(7);
+ gist_translate_cmptype_btree
+------------------------------
+ 0
(1 row)
-SELECT gist_stratnum_btree(3);
- gist_stratnum_btree
----------------------
- 3
+SELECT gist_translate_cmptype_btree(3);
+ gist_translate_cmptype_btree
+------------------------------
+ 3
(1 row)
diff --git a/contrib/btree_gist/sql/stratnum.sql b/contrib/btree_gist/sql/stratnum.sql
index 75adddad849..da8bbf883b0 100644
--- a/contrib/btree_gist/sql/stratnum.sql
+++ b/contrib/btree_gist/sql/stratnum.sql
@@ -1,3 +1,3 @@
--- test stratnum support func
-SELECT gist_stratnum_btree(7);
-SELECT gist_stratnum_btree(3);
+-- test stratnum translation support func
+SELECT gist_translate_cmptype_btree(7);
+SELECT gist_translate_cmptype_btree(3);
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index 98d4e3d7dac..8a0b112a7ff 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -105,7 +105,7 @@ static PGresult *storeQueryResult(volatile storeInfo *sinfo, PGconn *conn, const
static void storeRow(volatile storeInfo *sinfo, PGresult *res, bool first);
static remoteConn *getConnectionByName(const char *name);
static HTAB *createConnHash(void);
-static void createNewConnection(const char *name, remoteConn *rconn);
+static remoteConn *createNewConnection(const char *name);
static void deleteConnection(const char *name);
static char **get_pkey_attnames(Relation rel, int16 *indnkeyatts);
static char **get_text_array_contents(ArrayType *array, int *numitems);
@@ -119,7 +119,8 @@ static Relation get_rel_from_relname(text *relname_text, LOCKMODE lockmode, AclM
static char *generate_relation_name(Relation rel);
static void dblink_connstr_check(const char *connstr);
static bool dblink_connstr_has_pw(const char *connstr);
-static void dblink_security_check(PGconn *conn, remoteConn *rconn, const char *connstr);
+static void dblink_security_check(PGconn *conn, const char *connname,
+ const char *connstr);
static void dblink_res_error(PGconn *conn, const char *conname, PGresult *res,
bool fail, const char *fmt,...) pg_attribute_printf(5, 6);
static char *get_connect_string(const char *servername);
@@ -147,16 +148,22 @@ static uint32 dblink_we_get_conn = 0;
static uint32 dblink_we_get_result = 0;
/*
- * Following is list that holds multiple remote connections.
+ * Following is a hash that holds multiple remote connections.
* Calling convention of each dblink function changes to accept
- * connection name as the first parameter. The connection list is
+ * connection name as the first parameter. The connection hash is
* much like ecpg e.g. a mapping between a name and a PGconn object.
+ *
+ * To avoid potentially leaking a PGconn object in case of out-of-memory
+ * errors, we first create the hash entry, then open the PGconn.
+ * Hence, a hash entry whose rconn.conn pointer is NULL must be
+ * understood as a leftover from a failed create; it should be ignored
+ * by lookup operations, and silently replaced by create operations.
*/
typedef struct remoteConnHashEnt
{
char name[NAMEDATALEN];
- remoteConn *rconn;
+ remoteConn rconn;
} remoteConnHashEnt;
/* initial number of connection hashes */
@@ -233,7 +240,7 @@ dblink_get_conn(char *conname_or_str,
errmsg("could not establish connection"),
errdetail_internal("%s", msg)));
}
- dblink_security_check(conn, rconn, connstr);
+ dblink_security_check(conn, NULL, connstr);
if (PQclientEncoding(conn) != GetDatabaseEncoding())
PQsetClientEncoding(conn, GetDatabaseEncodingName());
freeconn = true;
@@ -296,15 +303,6 @@ dblink_connect(PG_FUNCTION_ARGS)
else if (PG_NARGS() == 1)
conname_or_str = text_to_cstring(PG_GETARG_TEXT_PP(0));
- if (connname)
- {
- rconn = (remoteConn *) MemoryContextAlloc(TopMemoryContext,
- sizeof(remoteConn));
- rconn->conn = NULL;
- rconn->openCursorCount = 0;
- rconn->newXactForCursor = false;
- }
-
/* first check for valid foreign data server */
connstr = get_connect_string(conname_or_str);
if (connstr == NULL)
@@ -317,6 +315,13 @@ dblink_connect(PG_FUNCTION_ARGS)
if (dblink_we_connect == 0)
dblink_we_connect = WaitEventExtensionNew("DblinkConnect");
+ /* if we need a hashtable entry, make that first, since it might fail */
+ if (connname)
+ {
+ rconn = createNewConnection(connname);
+ Assert(rconn->conn == NULL);
+ }
+
/* OK to make connection */
conn = libpqsrv_connect(connstr, dblink_we_connect);
@@ -324,8 +329,8 @@ dblink_connect(PG_FUNCTION_ARGS)
{
msg = pchomp(PQerrorMessage(conn));
libpqsrv_disconnect(conn);
- if (rconn)
- pfree(rconn);
+ if (connname)
+ deleteConnection(connname);
ereport(ERROR,
(errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
@@ -334,16 +339,16 @@ dblink_connect(PG_FUNCTION_ARGS)
}
/* check password actually used if not superuser */
- dblink_security_check(conn, rconn, connstr);
+ dblink_security_check(conn, connname, connstr);
/* attempt to set client encoding to match server encoding, if needed */
if (PQclientEncoding(conn) != GetDatabaseEncoding())
PQsetClientEncoding(conn, GetDatabaseEncodingName());
+ /* all OK, save away the conn */
if (connname)
{
rconn->conn = conn;
- createNewConnection(connname, rconn);
}
else
{
@@ -383,10 +388,7 @@ dblink_disconnect(PG_FUNCTION_ARGS)
libpqsrv_disconnect(conn);
if (rconn)
- {
deleteConnection(conname);
- pfree(rconn);
- }
else
pconn->conn = NULL;
@@ -1304,6 +1306,9 @@ dblink_get_connections(PG_FUNCTION_ARGS)
hash_seq_init(&status, remoteConnHash);
while ((hentry = (remoteConnHashEnt *) hash_seq_search(&status)) != NULL)
{
+ /* ignore it if it's not an open connection */
+ if (hentry->rconn.conn == NULL)
+ continue;
/* stash away current value */
astate = accumArrayResult(astate,
CStringGetTextDatum(hentry->name),
@@ -2539,8 +2544,8 @@ getConnectionByName(const char *name)
hentry = (remoteConnHashEnt *) hash_search(remoteConnHash,
key, HASH_FIND, NULL);
- if (hentry)
- return hentry->rconn;
+ if (hentry && hentry->rconn.conn != NULL)
+ return &hentry->rconn;
return NULL;
}
@@ -2557,8 +2562,8 @@ createConnHash(void)
HASH_ELEM | HASH_STRINGS);
}
-static void
-createNewConnection(const char *name, remoteConn *rconn)
+static remoteConn *
+createNewConnection(const char *name)
{
remoteConnHashEnt *hentry;
bool found;
@@ -2572,17 +2577,15 @@ createNewConnection(const char *name, remoteConn *rconn)
hentry = (remoteConnHashEnt *) hash_search(remoteConnHash, key,
HASH_ENTER, &found);
- if (found)
- {
- libpqsrv_disconnect(rconn->conn);
- pfree(rconn);
-
+ if (found && hentry->rconn.conn != NULL)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("duplicate connection name")));
- }
- hentry->rconn = rconn;
+ /* New, or reusable, so initialize the rconn struct to zeroes */
+ memset(&hentry->rconn, 0, sizeof(remoteConn));
+
+ return &hentry->rconn;
}
static void
@@ -2671,9 +2674,12 @@ dblink_connstr_has_required_scram_options(const char *connstr)
* We need to make sure that the connection made used credentials
* which were provided by the user, so check what credentials were
* used to connect and then make sure that they came from the user.
+ *
+ * On failure, we close "conn" and also delete the hashtable entry
+ * identified by "connname" (if that's not NULL).
*/
static void
-dblink_security_check(PGconn *conn, remoteConn *rconn, const char *connstr)
+dblink_security_check(PGconn *conn, const char *connname, const char *connstr)
{
/* Superuser bypasses security check */
if (superuser())
@@ -2703,8 +2709,8 @@ dblink_security_check(PGconn *conn, remoteConn *rconn, const char *connstr)
/* Otherwise, fail out */
libpqsrv_disconnect(conn);
- if (rconn)
- pfree(rconn);
+ if (connname)
+ deleteConnection(connname);
ereport(ERROR,
(errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED),
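The net effect of the entry-first ordering is visible at the SQL level: a failed dblink_connect() no longer strands the connection name, and half-created hash entries stay invisible to lookups. A minimal sketch, assuming the dblink extension is installed; the connection name and connection strings are illustrative:

-- The hash entry is created before the connect; on failure it is deleted
-- again, so the same name is immediately reusable.
SELECT dblink_connect('myconn', 'host=nonexistent.invalid');  -- fails, no leak
SELECT dblink_connect('myconn', 'dbname=postgres');           -- now succeeds
SELECT dblink_connect('myconn', 'dbname=postgres');           -- ERROR: duplicate connection name
-- Only entries with an open PGconn are reported:
SELECT dblink_get_connections();                              -- {myconn}
SELECT dblink_disconnect('myconn');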
diff --git a/contrib/file_fdw/expected/file_fdw.out b/contrib/file_fdw/expected/file_fdw.out
index df8d43b3749..246e3d3e566 100644
--- a/contrib/file_fdw/expected/file_fdw.out
+++ b/contrib/file_fdw/expected/file_fdw.out
@@ -48,6 +48,10 @@ SET ROLE regress_file_fdw_superuser;
CREATE USER MAPPING FOR regress_file_fdw_superuser SERVER file_server;
CREATE USER MAPPING FOR regress_no_priv_user SERVER file_server;
-- validator tests
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (foo 'bar'); -- ERROR
+ERROR: invalid option "foo"
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS ("a=b" 'true'); -- ERROR
+ERROR: invalid option name "a=b": must not contain "="
CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'xml'); -- ERROR
ERROR: COPY format "xml" not recognized
CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', quote ':'); -- ERROR
diff --git a/contrib/file_fdw/sql/file_fdw.sql b/contrib/file_fdw/sql/file_fdw.sql
index 2cdbe7a8a4c..1a397ad4bd1 100644
--- a/contrib/file_fdw/sql/file_fdw.sql
+++ b/contrib/file_fdw/sql/file_fdw.sql
@@ -55,6 +55,8 @@ CREATE USER MAPPING FOR regress_file_fdw_superuser SERVER file_server;
CREATE USER MAPPING FOR regress_no_priv_user SERVER file_server;
-- validator tests
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (foo 'bar'); -- ERROR
+CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS ("a=b" 'true'); -- ERROR
CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'xml'); -- ERROR
CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', quote ':'); -- ERROR
CREATE FOREIGN TABLE tbl () SERVER file_server OPTIONS (format 'text', escape ':'); -- ERROR
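For contrast with the rejected options above, a definition the validator accepts; the filename is illustrative:

CREATE FOREIGN TABLE tbl_ok (a int, b text) SERVER file_server
    OPTIONS (format 'csv', header 'true', filename '/tmp/data.csv');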
diff --git a/contrib/pg_overexplain/expected/pg_overexplain.out b/contrib/pg_overexplain/expected/pg_overexplain.out
index 44120c388af..6de02323d7c 100644
--- a/contrib/pg_overexplain/expected/pg_overexplain.out
+++ b/contrib/pg_overexplain/expected/pg_overexplain.out
@@ -37,7 +37,7 @@ EXPLAIN (DEBUG) SELECT 1;
Subplans Needing Rewind: none
Relation OIDs: none
Executor Parameter Types: none
- Parse Location: 16 for 8 bytes
+ Parse Location: 0 to end
(11 rows)
EXPLAIN (RANGE_TABLE) SELECT 1;
@@ -436,7 +436,7 @@ $$);
Subplans Needing Rewind: none
Relation OIDs: NNN...
Executor Parameter Types: 23
- Parse Location: 75 for 62 bytes
+ Parse Location: 0 to end
(47 rows)
RESET enable_hashjoin;
diff --git a/contrib/pg_prewarm/Makefile b/contrib/pg_prewarm/Makefile
index 9cfde8c4e4f..617ac8e09b2 100644
--- a/contrib/pg_prewarm/Makefile
+++ b/contrib/pg_prewarm/Makefile
@@ -10,6 +10,8 @@ EXTENSION = pg_prewarm
DATA = pg_prewarm--1.1--1.2.sql pg_prewarm--1.1.sql pg_prewarm--1.0--1.1.sql
PGFILEDESC = "pg_prewarm - preload relation data into system buffer cache"
+REGRESS = pg_prewarm
+
TAP_TESTS = 1
ifdef USE_PGXS
diff --git a/contrib/pg_prewarm/autoprewarm.c b/contrib/pg_prewarm/autoprewarm.c
index c52f4d4dc9e..c01b9c7e6a4 100644
--- a/contrib/pg_prewarm/autoprewarm.c
+++ b/contrib/pg_prewarm/autoprewarm.c
@@ -693,8 +693,15 @@ apw_dump_now(bool is_bgworker, bool dump_unlogged)
return 0;
}
- block_info_array =
- (BlockInfoRecord *) palloc(sizeof(BlockInfoRecord) * NBuffers);
+ /*
+ * With sufficiently large shared_buffers, allocation will exceed 1GB, so
+ * allow for a huge allocation to prevent outright failure.
+ *
+ * (In the future, it might be a good idea to redesign this to use a more
+ * memory-efficient data structure.)
+ */
+ block_info_array = (BlockInfoRecord *)
+ palloc_extended((sizeof(BlockInfoRecord) * NBuffers), MCXT_ALLOC_HUGE);
for (num_blocks = 0, i = 0; i < NBuffers; i++)
{
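The array holds one BlockInfoRecord per shared buffer, so the allocation grows linearly with shared_buffers. A rough sizing query, assuming about 20 bytes per BlockInfoRecord (an approximation, not the exact struct size); under that assumption the old 1GB palloc limit was hit somewhere above 400GB of shared_buffers:

SELECT pg_size_pretty(20 * setting::bigint) AS approx_dump_array_size
FROM pg_settings
WHERE name = 'shared_buffers';  -- setting counts 8kB buffers, one record per buffer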
diff --git a/contrib/pg_prewarm/expected/pg_prewarm.out b/contrib/pg_prewarm/expected/pg_prewarm.out
new file mode 100644
index 00000000000..94e4fa1a9d2
--- /dev/null
+++ b/contrib/pg_prewarm/expected/pg_prewarm.out
@@ -0,0 +1,10 @@
+-- Test pg_prewarm extension
+CREATE EXTENSION pg_prewarm;
+-- pg_prewarm() should fail if the target relation has no storage.
+CREATE TABLE test (c1 int) PARTITION BY RANGE (c1);
+SELECT pg_prewarm('test', 'buffer');
+ERROR: relation "test" does not have storage
+DETAIL: This operation is not supported for partitioned tables.
+-- Cleanup
+DROP TABLE test;
+DROP EXTENSION pg_prewarm;
diff --git a/contrib/pg_prewarm/meson.build b/contrib/pg_prewarm/meson.build
index 82b9851303c..f24c47ef6a5 100644
--- a/contrib/pg_prewarm/meson.build
+++ b/contrib/pg_prewarm/meson.build
@@ -29,6 +29,11 @@ tests += {
'name': 'pg_prewarm',
'sd': meson.current_source_dir(),
'bd': meson.current_build_dir(),
+ 'regress': {
+ 'sql': [
+ 'pg_prewarm',
+ ],
+ },
'tap': {
'tests': [
't/001_basic.pl',
diff --git a/contrib/pg_prewarm/pg_prewarm.c b/contrib/pg_prewarm/pg_prewarm.c
index 50808569bd7..b968933ea8b 100644
--- a/contrib/pg_prewarm/pg_prewarm.c
+++ b/contrib/pg_prewarm/pg_prewarm.c
@@ -112,6 +112,14 @@ pg_prewarm(PG_FUNCTION_ARGS)
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind), get_rel_name(relOid));
+ /* Check that the relation has storage. */
+ if (!RELKIND_HAS_STORAGE(rel->rd_rel->relkind))
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("relation \"%s\" does not have storage",
+ RelationGetRelationName(rel)),
+ errdetail_relkind_not_supported(rel->rd_rel->relkind)));
+
/* Check that the fork exists. */
if (!smgrexists(RelationGetSmgr(rel), forkNumber))
ereport(ERROR,
diff --git a/contrib/pg_prewarm/sql/pg_prewarm.sql b/contrib/pg_prewarm/sql/pg_prewarm.sql
new file mode 100644
index 00000000000..c76f2c79164
--- /dev/null
+++ b/contrib/pg_prewarm/sql/pg_prewarm.sql
@@ -0,0 +1,10 @@
+-- Test pg_prewarm extension
+CREATE EXTENSION pg_prewarm;
+
+-- pg_prewarm() should fail if the target relation has no storage.
+CREATE TABLE test (c1 int) PARTITION BY RANGE (c1);
+SELECT pg_prewarm('test', 'buffer');
+
+-- Cleanup
+DROP TABLE test;
+DROP EXTENSION pg_prewarm;
diff --git a/contrib/pg_stat_statements/expected/extended.out b/contrib/pg_stat_statements/expected/extended.out
index 04a05943372..1bfd0c1ca24 100644
--- a/contrib/pg_stat_statements/expected/extended.out
+++ b/contrib/pg_stat_statements/expected/extended.out
@@ -68,3 +68,97 @@ SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C";
1 | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
(4 rows)
+-- Various parameter numbering patterns
+-- Unique query IDs with parameter numbers switched.
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+SELECT WHERE ($1::int, 7) IN ((8, $2::int), ($3::int, 9)) \bind '1' '2' '3' \g
+--
+(0 rows)
+
+SELECT WHERE ($2::int, 10) IN ((11, $3::int), ($1::int, 12)) \bind '1' '2' '3' \g
+--
+(0 rows)
+
+SELECT WHERE $1::int IN ($2::int, $3::int) \bind '1' '2' '3' \g
+--
+(0 rows)
+
+SELECT WHERE $2::int IN ($3::int, $1::int) \bind '1' '2' '3' \g
+--
+(0 rows)
+
+SELECT WHERE $3::int IN ($1::int, $2::int) \bind '1' '2' '3' \g
+--
+(0 rows)
+
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls
+--------------------------------------------------------------+-------
+ SELECT WHERE $1::int IN ($2 /*, ... */) | 1
+ SELECT WHERE $1::int IN ($2 /*, ... */) | 1
+ SELECT WHERE $1::int IN ($2 /*, ... */) | 1
+ SELECT WHERE ($1::int, $4) IN (($5, $2::int), ($3::int, $6)) | 1
+ SELECT WHERE ($2::int, $4) IN (($5, $3::int), ($1::int, $6)) | 1
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+(6 rows)
+
+-- Two groups of two queries with the same query ID.
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+SELECT WHERE '1'::int IN ($1::int, '2'::int) \bind '1' \g
+--
+(1 row)
+
+SELECT WHERE '4'::int IN ($1::int, '5'::int) \bind '2' \g
+--
+(0 rows)
+
+SELECT WHERE $2::int IN ($1::int, '1'::int) \bind '1' '2' \g
+--
+(0 rows)
+
+SELECT WHERE $2::int IN ($1::int, '2'::int) \bind '3' '4' \g
+--
+(0 rows)
+
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls
+----------------------------------------------------+-------
+ SELECT WHERE $1::int IN ($2 /*, ... */) | 2
+ SELECT WHERE $1::int IN ($2 /*, ... */) | 2
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+(3 rows)
+
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+-- no squashable list, the parameter IDs are kept as-is
+SELECT WHERE $3 = $1 AND $2 = $4 \bind 1 2 1 2 \g
+--
+(1 row)
+
+-- squashable list, so the parameter IDs will be re-assigned
+SELECT WHERE 1 IN (1, 2, 3) AND $3 = $1 AND $2 = $4 \bind 1 2 1 2 \g
+--
+(1 row)
+
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls
+------------------------------------------------------------+-------
+ SELECT WHERE $1 IN ($2 /*, ... */) AND $3 = $4 AND $5 = $6 | 1
+ SELECT WHERE $3 = $1 AND $2 = $4 | 1
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+(3 rows)
+
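These tests lean on psql's \bind metacommand, which sends the query through the extended protocol so that the $n placeholders arrive as genuine external parameters rather than inlined constants. A minimal sketch:

-- Parameters are numbered in the text but bound positionally:
SELECT $2::int - $1::int AS diff \bind 2 42 \g
-- diff = 40; pg_stat_statements keeps the $n symbols, renumbering them
-- only when a squashable list forces reassignment, as above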
diff --git a/contrib/pg_stat_statements/expected/level_tracking.out b/contrib/pg_stat_statements/expected/level_tracking.out
index 03bea14d5da..8213fcd2e61 100644
--- a/contrib/pg_stat_statements/expected/level_tracking.out
+++ b/contrib/pg_stat_statements/expected/level_tracking.out
@@ -206,37 +206,37 @@ EXPLAIN (COSTS OFF) SELECT 1 UNION SELECT 2;
SELECT toplevel, calls, query FROM pg_stat_statements
ORDER BY query COLLATE "C";
- toplevel | calls | query
-----------+-------+--------------------------------------------------------------------
- f | 1 | DELETE FROM stats_track_tab
+ toplevel | calls | query
+----------+-------+---------------------------------------------------------------------
t | 1 | EXPLAIN (COSTS OFF) (SELECT $1, $2)
+ f | 1 | EXPLAIN (COSTS OFF) (SELECT $1, $2);
t | 1 | EXPLAIN (COSTS OFF) (TABLE test_table)
+ f | 1 | EXPLAIN (COSTS OFF) (TABLE test_table);
t | 1 | EXPLAIN (COSTS OFF) (VALUES ($1, $2))
+ f | 1 | EXPLAIN (COSTS OFF) (VALUES ($1, $2));
t | 1 | EXPLAIN (COSTS OFF) DELETE FROM stats_track_tab
+ f | 1 | EXPLAIN (COSTS OFF) DELETE FROM stats_track_tab;
t | 1 | EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES (($1))
- t | 1 | EXPLAIN (COSTS OFF) MERGE INTO stats_track_tab +
- | | USING (SELECT id FROM generate_series($1, $2) id) ON x = id +
- | | WHEN MATCHED THEN UPDATE SET x = id +
+ f | 1 | EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES (($1));
+ t | 1 | EXPLAIN (COSTS OFF) MERGE INTO stats_track_tab +
+ | | USING (SELECT id FROM generate_series($1, $2) id) ON x = id +
+ | | WHEN MATCHED THEN UPDATE SET x = id +
| | WHEN NOT MATCHED THEN INSERT (x) VALUES (id)
+ f | 1 | EXPLAIN (COSTS OFF) MERGE INTO stats_track_tab +
+ | | USING (SELECT id FROM generate_series($1, $2) id) ON x = id +
+ | | WHEN MATCHED THEN UPDATE SET x = id +
+ | | WHEN NOT MATCHED THEN INSERT (x) VALUES (id);
t | 1 | EXPLAIN (COSTS OFF) SELECT $1
t | 1 | EXPLAIN (COSTS OFF) SELECT $1 UNION SELECT $2
+ f | 1 | EXPLAIN (COSTS OFF) SELECT $1 UNION SELECT $2;
+ f | 1 | EXPLAIN (COSTS OFF) SELECT $1;
t | 1 | EXPLAIN (COSTS OFF) TABLE stats_track_tab
+ f | 1 | EXPLAIN (COSTS OFF) TABLE stats_track_tab;
t | 1 | EXPLAIN (COSTS OFF) UPDATE stats_track_tab SET x = $1 WHERE x = $2
+ f | 1 | EXPLAIN (COSTS OFF) UPDATE stats_track_tab SET x = $1 WHERE x = $2;
t | 1 | EXPLAIN (COSTS OFF) VALUES ($1)
- f | 1 | INSERT INTO stats_track_tab VALUES (($1))
- f | 1 | MERGE INTO stats_track_tab +
- | | USING (SELECT id FROM generate_series($1, $2) id) ON x = id +
- | | WHEN MATCHED THEN UPDATE SET x = id +
- | | WHEN NOT MATCHED THEN INSERT (x) VALUES (id)
- f | 1 | SELECT $1
- f | 1 | SELECT $1 UNION SELECT $2
- f | 1 | SELECT $1, $2
+ f | 1 | EXPLAIN (COSTS OFF) VALUES ($1);
t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
- f | 1 | TABLE stats_track_tab
- f | 1 | TABLE test_table
- f | 1 | UPDATE stats_track_tab SET x = $1 WHERE x = $2
- f | 1 | VALUES ($1)
- f | 1 | VALUES ($1, $2)
(23 rows)
-- EXPLAIN - top-level tracking.
@@ -405,20 +405,20 @@ EXPLAIN (COSTS OFF) SELECT 1, 2 UNION SELECT 3, 4\; EXPLAIN (COSTS OFF) (SELECT
SELECT toplevel, calls, query FROM pg_stat_statements
ORDER BY query COLLATE "C";
- toplevel | calls | query
-----------+-------+-----------------------------------------------------------------
- f | 1 | (SELECT $1, $2, $3) UNION SELECT $4, $5, $6
+ toplevel | calls | query
+----------+-------+---------------------------------------------------------------------------------------------------------------------
t | 1 | EXPLAIN (COSTS OFF) (SELECT $1, $2, $3)
t | 1 | EXPLAIN (COSTS OFF) (SELECT $1, $2, $3) UNION SELECT $4, $5, $6
+ f | 1 | EXPLAIN (COSTS OFF) (SELECT $1, $2, $3); EXPLAIN (COSTS OFF) (SELECT 1, 2, 3, 4);
t | 1 | EXPLAIN (COSTS OFF) (SELECT $1, $2, $3, $4)
+ f | 1 | EXPLAIN (COSTS OFF) (SELECT 1, 2, 3); EXPLAIN (COSTS OFF) (SELECT $1, $2, $3, $4);
t | 1 | EXPLAIN (COSTS OFF) SELECT $1
t | 1 | EXPLAIN (COSTS OFF) SELECT $1, $2
t | 1 | EXPLAIN (COSTS OFF) SELECT $1, $2 UNION SELECT $3, $4
- f | 1 | SELECT $1
- f | 1 | SELECT $1, $2
- f | 1 | SELECT $1, $2 UNION SELECT $3, $4
- f | 1 | SELECT $1, $2, $3
- f | 1 | SELECT $1, $2, $3, $4
+ f | 1 | EXPLAIN (COSTS OFF) SELECT $1, $2 UNION SELECT $3, $4; EXPLAIN (COSTS OFF) (SELECT 1, 2, 3) UNION SELECT 3, 4, 5;
+ f | 1 | EXPLAIN (COSTS OFF) SELECT $1; EXPLAIN (COSTS OFF) SELECT 1, 2;
+ f | 1 | EXPLAIN (COSTS OFF) SELECT 1, 2 UNION SELECT 3, 4; EXPLAIN (COSTS OFF) (SELECT $1, $2, $3) UNION SELECT $4, $5, $6;
+ f | 1 | EXPLAIN (COSTS OFF) SELECT 1; EXPLAIN (COSTS OFF) SELECT $1, $2;
t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
(13 rows)
@@ -494,29 +494,29 @@ EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES ((1))\; EXPLAIN (COSTS OF
SELECT toplevel, calls, query FROM pg_stat_statements
ORDER BY query COLLATE "C";
- toplevel | calls | query
-----------+-------+--------------------------------------------------------------------
- f | 1 | DELETE FROM stats_track_tab
- f | 1 | DELETE FROM stats_track_tab WHERE x = $1
+ toplevel | calls | query
+----------+-------+----------------------------------------------------------------------------------------------------------------------------------
t | 1 | EXPLAIN (COSTS OFF) (TABLE test_table)
t | 1 | EXPLAIN (COSTS OFF) (VALUES ($1, $2))
t | 1 | EXPLAIN (COSTS OFF) DELETE FROM stats_track_tab
t | 1 | EXPLAIN (COSTS OFF) DELETE FROM stats_track_tab WHERE x = $1
+ f | 1 | EXPLAIN (COSTS OFF) DELETE FROM stats_track_tab; EXPLAIN (COSTS OFF) DELETE FROM stats_track_tab WHERE x = $1;
+ f | 1 | EXPLAIN (COSTS OFF) DELETE FROM stats_track_tab; EXPLAIN (COSTS OFF) DELETE FROM stats_track_tab WHERE x = 1;
t | 1 | EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES ($1), ($2)
t | 1 | EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES (($1))
+ f | 1 | EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES (($1)); EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES (1), (2);
+ f | 1 | EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES ((1)); EXPLAIN (COSTS OFF) INSERT INTO stats_track_tab VALUES ($1), ($2);
t | 1 | EXPLAIN (COSTS OFF) TABLE stats_track_tab
+ f | 1 | EXPLAIN (COSTS OFF) TABLE stats_track_tab; EXPLAIN (COSTS OFF) (TABLE test_table);
+ f | 1 | EXPLAIN (COSTS OFF) TABLE stats_track_tab; EXPLAIN (COSTS OFF) (TABLE test_table);
t | 1 | EXPLAIN (COSTS OFF) UPDATE stats_track_tab SET x = $1
t | 1 | EXPLAIN (COSTS OFF) UPDATE stats_track_tab SET x = $1 WHERE x = $2
+ f | 1 | EXPLAIN (COSTS OFF) UPDATE stats_track_tab SET x = $1 WHERE x = $2; EXPLAIN (COSTS OFF) UPDATE stats_track_tab SET x = 1;
+ f | 1 | EXPLAIN (COSTS OFF) UPDATE stats_track_tab SET x = 1 WHERE x = 1; EXPLAIN (COSTS OFF) UPDATE stats_track_tab SET x = $1;
t | 1 | EXPLAIN (COSTS OFF) VALUES ($1)
- f | 1 | INSERT INTO stats_track_tab VALUES ($1), ($2)
- f | 1 | INSERT INTO stats_track_tab VALUES (($1))
+ f | 1 | EXPLAIN (COSTS OFF) VALUES ($1); EXPLAIN (COSTS OFF) (VALUES (1, 2));
+ f | 1 | EXPLAIN (COSTS OFF) VALUES (1); EXPLAIN (COSTS OFF) (VALUES ($1, $2));
t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
- f | 1 | TABLE stats_track_tab
- f | 1 | TABLE test_table
- f | 1 | UPDATE stats_track_tab SET x = $1
- f | 1 | UPDATE stats_track_tab SET x = $1 WHERE x = $2
- f | 1 | VALUES ($1)
- f | 1 | VALUES ($1, $2)
(21 rows)
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
@@ -547,18 +547,21 @@ EXPLAIN (COSTS OFF) MERGE INTO stats_track_tab
SELECT toplevel, calls, query FROM pg_stat_statements
ORDER BY query COLLATE "C";
- toplevel | calls | query
-----------+-------+---------------------------------------------------------------
- t | 1 | EXPLAIN (COSTS OFF) MERGE INTO stats_track_tab +
- | | USING (SELECT id FROM generate_series($1, $2) id) ON x = id+
- | | WHEN MATCHED THEN UPDATE SET x = id +
+ toplevel | calls | query
+----------+-------+------------------------------------------------------------------------------------------------
+ t | 1 | EXPLAIN (COSTS OFF) MERGE INTO stats_track_tab +
+ | | USING (SELECT id FROM generate_series($1, $2) id) ON x = id +
+ | | WHEN MATCHED THEN UPDATE SET x = id +
| | WHEN NOT MATCHED THEN INSERT (x) VALUES (id)
+ f | 1 | EXPLAIN (COSTS OFF) MERGE INTO stats_track_tab +
+ | | USING (SELECT id FROM generate_series($1, $2) id) ON x = id +
+ | | WHEN MATCHED THEN UPDATE SET x = id +
+ | | WHEN NOT MATCHED THEN INSERT (x) VALUES (id); EXPLAIN (COSTS OFF) SELECT 1, 2, 3, 4, 5;
+ f | 1 | EXPLAIN (COSTS OFF) MERGE INTO stats_track_tab +
+ | | USING (SELECT id FROM generate_series(1, 10) id) ON x = id +
+ | | WHEN MATCHED THEN UPDATE SET x = id +
+ | | WHEN NOT MATCHED THEN INSERT (x) VALUES (id); EXPLAIN (COSTS OFF) SELECT $1, $2, $3, $4, $5;
t | 1 | EXPLAIN (COSTS OFF) SELECT $1, $2, $3, $4, $5
- f | 1 | MERGE INTO stats_track_tab +
- | | USING (SELECT id FROM generate_series($1, $2) id) ON x = id+
- | | WHEN MATCHED THEN UPDATE SET x = id +
- | | WHEN NOT MATCHED THEN INSERT (x) VALUES (id)
- f | 1 | SELECT $1, $2, $3, $4, $5
t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
(5 rows)
@@ -786,29 +789,29 @@ EXPLAIN (COSTS OFF) WITH a AS (select 4) SELECT 1 UNION SELECT 2;
SELECT toplevel, calls, query FROM pg_stat_statements
ORDER BY query COLLATE "C";
- toplevel | calls | query
-----------+-------+------------------------------------------------------------------------------------------
+ toplevel | calls | query
+----------+-------+-------------------------------------------------------------------------------------------
t | 1 | EXPLAIN (COSTS OFF) (WITH a AS (SELECT $1) (SELECT $2, $3))
+ f | 1 | EXPLAIN (COSTS OFF) (WITH a AS (SELECT $1) (SELECT $2, $3));
t | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) DELETE FROM stats_track_tab
+ f | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) DELETE FROM stats_track_tab;
t | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) INSERT INTO stats_track_tab VALUES (($2))
- t | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) MERGE INTO stats_track_tab +
- | | USING (SELECT id FROM generate_series($2, $3) id) ON x = id +
- | | WHEN MATCHED THEN UPDATE SET x = id +
+ f | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) INSERT INTO stats_track_tab VALUES (($2));
+ t | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) MERGE INTO stats_track_tab +
+ | | USING (SELECT id FROM generate_series($2, $3) id) ON x = id +
+ | | WHEN MATCHED THEN UPDATE SET x = id +
| | WHEN NOT MATCHED THEN INSERT (x) VALUES (id)
+ f | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) MERGE INTO stats_track_tab +
+ | | USING (SELECT id FROM generate_series($2, $3) id) ON x = id +
+ | | WHEN MATCHED THEN UPDATE SET x = id +
+ | | WHEN NOT MATCHED THEN INSERT (x) VALUES (id);
t | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) SELECT $2
+ f | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) SELECT $2;
t | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) UPDATE stats_track_tab SET x = $2 WHERE x = $3
+ f | 1 | EXPLAIN (COSTS OFF) WITH a AS (SELECT $1) UPDATE stats_track_tab SET x = $2 WHERE x = $3;
t | 1 | EXPLAIN (COSTS OFF) WITH a AS (select $1) SELECT $2 UNION SELECT $3
+ f | 1 | EXPLAIN (COSTS OFF) WITH a AS (select $1) SELECT $2 UNION SELECT $3;
t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
- f | 1 | WITH a AS (SELECT $1) (SELECT $2, $3)
- f | 1 | WITH a AS (SELECT $1) DELETE FROM stats_track_tab
- f | 1 | WITH a AS (SELECT $1) INSERT INTO stats_track_tab VALUES (($2))
- f | 1 | WITH a AS (SELECT $1) MERGE INTO stats_track_tab +
- | | USING (SELECT id FROM generate_series($2, $3) id) ON x = id +
- | | WHEN MATCHED THEN UPDATE SET x = id +
- | | WHEN NOT MATCHED THEN INSERT (x) VALUES (id)
- f | 1 | WITH a AS (SELECT $1) SELECT $2
- f | 1 | WITH a AS (SELECT $1) UPDATE stats_track_tab SET x = $2 WHERE x = $3
- f | 1 | WITH a AS (select $1) SELECT $2 UNION SELECT $3
(15 rows)
-- EXPLAIN with CTEs - top-level tracking
@@ -918,13 +921,14 @@ EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF)
SELECT toplevel, calls, query FROM pg_stat_statements
ORDER BY query COLLATE "C";
- toplevel | calls | query
-----------+-------+------------------------------------------------------------------------------
- t | 1 | EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) +
+ toplevel | calls | query
+----------+-------+-------------------------------------------------------------------------------
+ t | 1 | EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) +
| | DECLARE foocur CURSOR FOR SELECT * FROM stats_track_tab
+ f | 1 | EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) +
+ | | DECLARE foocur CURSOR FOR SELECT * FROM stats_track_tab;
t | 1 | EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT $1
- f | 1 | SELECT $1
- f | 1 | SELECT * FROM stats_track_tab
+ f | 1 | EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF, BUFFERS OFF) SELECT $1;
t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
(5 rows)
@@ -1047,10 +1051,10 @@ SELECT toplevel, calls, query FROM pg_stat_statements
toplevel | calls | query
----------+-------+-----------------------------------------------------------------
t | 1 | CREATE TEMPORARY TABLE pgss_ctas_1 AS SELECT $1
+ f | 1 | CREATE TEMPORARY TABLE pgss_ctas_1 AS SELECT $1;
t | 1 | CREATE TEMPORARY TABLE pgss_ctas_2 AS EXECUTE test_prepare_pgss
- f | 1 | SELECT $1
+ f | 1 | PREPARE test_prepare_pgss AS select generate_series($1, $2)
t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
- f | 1 | select generate_series($1, $2)
(5 rows)
-- CREATE TABLE AS, top-level tracking.
@@ -1088,10 +1092,10 @@ EXPLAIN (COSTS OFF) CREATE TEMPORARY TABLE pgss_explain_ctas AS SELECT 1;
SELECT toplevel, calls, query FROM pg_stat_statements
ORDER BY query COLLATE "C";
- toplevel | calls | query
-----------+-------+---------------------------------------------------------------------------
+ toplevel | calls | query
+----------+-------+----------------------------------------------------------------------------
t | 1 | EXPLAIN (COSTS OFF) CREATE TEMPORARY TABLE pgss_explain_ctas AS SELECT $1
- f | 1 | SELECT $1
+ f | 1 | EXPLAIN (COSTS OFF) CREATE TEMPORARY TABLE pgss_explain_ctas AS SELECT $1;
t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
(3 rows)
@@ -1136,14 +1140,14 @@ CLOSE foocur;
COMMIT;
SELECT toplevel, calls, query FROM pg_stat_statements
ORDER BY query COLLATE "C";
- toplevel | calls | query
-----------+-------+---------------------------------------------------------
+ toplevel | calls | query
+----------+-------+----------------------------------------------------------
t | 1 | BEGIN
t | 1 | CLOSE foocur
t | 1 | COMMIT
t | 1 | DECLARE FOOCUR CURSOR FOR SELECT * from stats_track_tab
+ f | 1 | DECLARE FOOCUR CURSOR FOR SELECT * from stats_track_tab;
t | 1 | FETCH FORWARD 1 FROM foocur
- f | 1 | SELECT * from stats_track_tab
t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
(7 rows)
@@ -1203,25 +1207,25 @@ COPY (DELETE FROM stats_track_tab WHERE x = 2 RETURNING x) TO stdout;
2
SELECT toplevel, calls, query FROM pg_stat_statements
ORDER BY query COLLATE "C";
- toplevel | calls | query
-----------+-------+---------------------------------------------------------------------------
+ toplevel | calls | query
+----------+-------+-----------------------------------------------------------------------------
+ f | 1 | COPY (DELETE FROM stats_track_tab WHERE x = $1 RETURNING x) TO stdout
t | 1 | COPY (DELETE FROM stats_track_tab WHERE x = 2 RETURNING x) TO stdout
+ f | 1 | COPY (INSERT INTO stats_track_tab (x) VALUES ($1) RETURNING x) TO stdout
t | 1 | COPY (INSERT INTO stats_track_tab (x) VALUES (1) RETURNING x) TO stdout
- t | 1 | COPY (MERGE INTO stats_track_tab USING (SELECT 1 id) ON x = id +
- | | WHEN MATCHED THEN UPDATE SET x = id +
+ f | 1 | COPY (MERGE INTO stats_track_tab USING (SELECT $1 id) ON x = id +
+ | | WHEN MATCHED THEN UPDATE SET x = id +
+ | | WHEN NOT MATCHED THEN INSERT (x) VALUES (id) RETURNING x) TO stdout
+ t | 1 | COPY (MERGE INTO stats_track_tab USING (SELECT 1 id) ON x = id +
+ | | WHEN MATCHED THEN UPDATE SET x = id +
| | WHEN NOT MATCHED THEN INSERT (x) VALUES (id) RETURNING x) TO stdout
+ f | 1 | COPY (SELECT $1 UNION SELECT $2) TO stdout
+ f | 1 | COPY (SELECT $1) TO stdout
t | 1 | COPY (SELECT 1 UNION SELECT 2) TO stdout
t | 1 | COPY (SELECT 1) TO stdout
+ f | 1 | COPY (UPDATE stats_track_tab SET x = $1 WHERE x = $2 RETURNING x) TO stdout
t | 1 | COPY (UPDATE stats_track_tab SET x = 2 WHERE x = 1 RETURNING x) TO stdout
- f | 1 | DELETE FROM stats_track_tab WHERE x = $1 RETURNING x
- f | 1 | INSERT INTO stats_track_tab (x) VALUES ($1) RETURNING x
- f | 1 | MERGE INTO stats_track_tab USING (SELECT $1 id) ON x = id +
- | | WHEN MATCHED THEN UPDATE SET x = id +
- | | WHEN NOT MATCHED THEN INSERT (x) VALUES (id) RETURNING x
- f | 1 | SELECT $1
- f | 1 | SELECT $1 UNION SELECT $2
t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
- f | 1 | UPDATE stats_track_tab SET x = $1 WHERE x = $2 RETURNING x
(13 rows)
-- COPY - top-level tracking.
@@ -1319,6 +1323,57 @@ SELECT toplevel, calls, query FROM pg_stat_statements
t | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
(4 rows)
+-- DO block --- multiple inner queries with separators
+SET pg_stat_statements.track = 'all';
+SET pg_stat_statements.track_utility = TRUE;
+CREATE TABLE pgss_do_util_tab_1 (a int);
+CREATE TABLE pgss_do_util_tab_2 (a int);
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+DO $$
+DECLARE BEGIN
+ EXECUTE 'CREATE TABLE pgss_do_table (id INT); DROP TABLE pgss_do_table';
+ EXECUTE 'SELECT a FROM pgss_do_util_tab_1; SELECT a FROM pgss_do_util_tab_2';
+END $$;
+SELECT toplevel, calls, rows, query FROM pg_stat_statements
+ WHERE toplevel IS FALSE
+ ORDER BY query COLLATE "C";
+ toplevel | calls | rows | query
+----------+-------+------+-------------------------------------
+ f | 1 | 0 | CREATE TABLE pgss_do_table (id INT)
+ f | 1 | 0 | DROP TABLE pgss_do_table
+ f | 1 | 0 | SELECT a FROM pgss_do_util_tab_1
+ f | 1 | 0 | SELECT a FROM pgss_do_util_tab_2
+(4 rows)
+
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+-- Note the extra semicolon at the end of the query.
+DO $$
+DECLARE BEGIN
+ EXECUTE 'CREATE TABLE pgss_do_table (id INT); DROP TABLE pgss_do_table;';
+ EXECUTE 'SELECT a FROM pgss_do_util_tab_1; SELECT a FROM pgss_do_util_tab_2;';
+END $$;
+SELECT toplevel, calls, rows, query FROM pg_stat_statements
+ WHERE toplevel IS FALSE
+ ORDER BY query COLLATE "C";
+ toplevel | calls | rows | query
+----------+-------+------+-------------------------------------
+ f | 1 | 0 | CREATE TABLE pgss_do_table (id INT)
+ f | 1 | 0 | DROP TABLE pgss_do_table
+ f | 1 | 0 | SELECT a FROM pgss_do_util_tab_1
+ f | 1 | 0 | SELECT a FROM pgss_do_util_tab_2
+(4 rows)
+
+DROP TABLE pgss_do_util_tab_1, pgss_do_util_tab_2;
-- PL/pgSQL function - top-level tracking.
SET pg_stat_statements.track = 'top';
SET pg_stat_statements.track_utility = FALSE;
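With track = 'all', a statement executed inside a function body is recorded with toplevel = f, separately from any identical top-level query. A minimal sketch, with an illustrative function name:

SET pg_stat_statements.track = 'all';
CREATE FUNCTION pgss_demo() RETURNS bigint LANGUAGE plpgsql
AS $$ BEGIN RETURN (SELECT count(*) FROM pg_class); END $$;
SELECT pgss_demo();
SELECT toplevel, calls, query FROM pg_stat_statements
  ORDER BY query COLLATE "C";
-- the inner SELECT count(*) FROM pg_class appears with toplevel = f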
diff --git a/contrib/pg_stat_statements/expected/planning.out b/contrib/pg_stat_statements/expected/planning.out
index 3ee1928cbe9..9effd11fdc8 100644
--- a/contrib/pg_stat_statements/expected/planning.out
+++ b/contrib/pg_stat_statements/expected/planning.out
@@ -58,7 +58,7 @@ SELECT 42;
(1 row)
SELECT plans, calls, rows, query FROM pg_stat_statements
- WHERE query NOT LIKE 'SELECT COUNT%' ORDER BY query COLLATE "C";
+ WHERE query NOT LIKE 'PREPARE%' ORDER BY query COLLATE "C";
plans | calls | rows | query
-------+-------+------+----------------------------------------------------------
0 | 1 | 0 | ALTER TABLE stats_plan_test ADD COLUMN x int
@@ -72,10 +72,10 @@ SELECT plans, calls, rows, query FROM pg_stat_statements
-- for the prepared statement we expect at least one replan, but cache
-- invalidations could force more
SELECT plans >= 2 AND plans <= calls AS plans_ok, calls, rows, query FROM pg_stat_statements
- WHERE query LIKE 'SELECT COUNT%' ORDER BY query COLLATE "C";
- plans_ok | calls | rows | query
-----------+-------+------+--------------------------------------
- t | 4 | 4 | SELECT COUNT(*) FROM stats_plan_test
+ WHERE query LIKE 'PREPARE%' ORDER BY query COLLATE "C";
+ plans_ok | calls | rows | query
+----------+-------+------+-------------------------------------------------------
+ t | 4 | 4 | PREPARE prep1 AS SELECT COUNT(*) FROM stats_plan_test
(1 row)
-- Cleanup
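Switching the filter from the SELECT text to the PREPARE text follows from the same representation change: the entry is now stored under the statement's full source. A sketch of the invariant the test asserts (statement name illustrative):

PREPARE prep_demo AS SELECT count(*) FROM stats_plan_test;
EXECUTE prep_demo;
EXECUTE prep_demo;
-- planned on first use, possibly replanned after invalidations, so plans <= calls:
SELECT plans <= calls AS plans_ok, calls, query
FROM pg_stat_statements
WHERE query LIKE 'PREPARE prep_demo%';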
diff --git a/contrib/pg_stat_statements/expected/select.out b/contrib/pg_stat_statements/expected/select.out
index 09476a7b699..75c896f3885 100644
--- a/contrib/pg_stat_statements/expected/select.out
+++ b/contrib/pg_stat_statements/expected/select.out
@@ -208,6 +208,7 @@ DEALLOCATE pgss_test;
SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C";
calls | rows | query
-------+------+------------------------------------------------------------------------------
+ 1 | 1 | PREPARE pgss_test (int) AS SELECT $1, $2 LIMIT $3
4 | 4 | SELECT $1 +
| | -- but this one will appear +
| | AS "text"
@@ -221,7 +222,6 @@ SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C";
2 | 2 | SELECT $1 AS "int" ORDER BY 1
1 | 2 | SELECT $1 AS i UNION SELECT $2 ORDER BY i
1 | 1 | SELECT $1 || $2
- 1 | 1 | SELECT $1, $2 LIMIT $3
2 | 2 | SELECT DISTINCT $1 AS "int"
0 | 0 | SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C"
1 | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
@@ -238,6 +238,65 @@ SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
(1 row)
+-- normalization of constants and parameters, with constant locations
+-- recorded one or more times.
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+SELECT WHERE '1' IN ('1'::int, '3'::int::text);
+--
+(1 row)
+
+SELECT WHERE (1, 2) IN ((1, 2), (2, 3));
+--
+(1 row)
+
+SELECT WHERE (3, 4) IN ((5, 6), (8, 7));
+--
+(0 rows)
+
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls
+------------------------------------------------------------------------+-------
+ SELECT WHERE $1 IN ($2::int, $3::int::text) | 1
+ SELECT WHERE ($1, $2) IN (($3, $4), ($5, $6)) | 2
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+ SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C" | 0
+(4 rows)
+
+-- with the last element being an explicit function call with an argument, ensure
+-- the normalization of the squashing interval is correct.
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+SELECT WHERE 1 IN (1, int4(1), int4(2));
+--
+(1 row)
+
+SELECT WHERE 1 = ANY (ARRAY[1, int4(1), int4(2)]);
+--
+(1 row)
+
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls
+------------------------------------------------------------------------+-------
+ SELECT WHERE $1 IN ($2 /*, ... */) | 2
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+ SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C" | 0
+(3 rows)
+
--
-- queries with locking clauses
--
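In these outputs the trailing $2 /*, ... */ marks a squashed list: however many constants the IN list carried, they collapse into one parameter slot, so differently sized lists land in the same entry. A minimal sketch:

SELECT pg_stat_statements_reset();
SELECT WHERE 1 IN (1, 2);
SELECT WHERE 1 IN (1, 2, 3, 4, 5, 6, 7, 8);
SELECT query, calls FROM pg_stat_statements WHERE query LIKE 'SELECT WHERE%';
-- query: SELECT WHERE $1 IN ($2 /*, ... */)   calls: 2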
diff --git a/contrib/pg_stat_statements/expected/squashing.out b/contrib/pg_stat_statements/expected/squashing.out
index 7b138af098c..f952f47ef7b 100644
--- a/contrib/pg_stat_statements/expected/squashing.out
+++ b/contrib/pg_stat_statements/expected/squashing.out
@@ -2,9 +2,11 @@
-- Const squashing functionality
--
CREATE EXTENSION pg_stat_statements;
+--
+-- Simple Lists
+--
CREATE TABLE test_squash (id int, data int);
--- IN queries
--- Normal scenario, too many simple constants for an IN query
+-- single element will not be squashed
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
@@ -16,42 +18,150 @@ SELECT * FROM test_squash WHERE id IN (1);
----+------
(0 rows)
+SELECT ARRAY[1];
+ array
+-------
+ {1}
+(1 row)
+
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls
+----------------------------------------------------+-------
+ SELECT * FROM test_squash WHERE id IN ($1) | 1
+ SELECT ARRAY[$1] | 1
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+(3 rows)
+
+-- more than 1 element in a list will be squashed
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
SELECT * FROM test_squash WHERE id IN (1, 2, 3);
id | data
----+------
(0 rows)
+SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4);
+ id | data
+----+------
+(0 rows)
+
+SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5);
+ id | data
+----+------
+(0 rows)
+
+SELECT ARRAY[1, 2, 3];
+ array
+---------
+ {1,2,3}
+(1 row)
+
+SELECT ARRAY[1, 2, 3, 4];
+ array
+-----------
+ {1,2,3,4}
+(1 row)
+
+SELECT ARRAY[1, 2, 3, 4, 5];
+ array
+-------------
+ {1,2,3,4,5}
+(1 row)
+
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
-------------------------------------------------------+-------
- SELECT * FROM test_squash WHERE id IN ($1 /*, ... */) | 1
- SELECT * FROM test_squash WHERE id IN ($1) | 1
+ SELECT * FROM test_squash WHERE id IN ($1 /*, ... */) | 3
+ SELECT ARRAY[$1 /*, ... */] | 3
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(3 rows)
-SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9);
+-- built-in functions will be squashed
+-- the IN and ARRAY forms of this statement will have the same queryId
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+SELECT WHERE 1 IN (1, int4(1), int4(2), 2);
+--
+(1 row)
+
+SELECT WHERE 1 = ANY (ARRAY[1, int4(1), int4(2), 2]);
+--
+(1 row)
+
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls
+----------------------------------------------------+-------
+ SELECT WHERE $1 IN ($2 /*, ... */) | 2
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+(2 rows)
+
+-- external parameters will be squashed
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+SELECT * FROM test_squash WHERE id IN ($1, $2, $3, $4, $5) \bind 1 2 3 4 5
+;
id | data
----+------
(0 rows)
-SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
+SELECT * FROM test_squash WHERE id::text = ANY(ARRAY[$1, $2, $3, $4, $5]) \bind 1 2 3 4 5
+;
id | data
----+------
(0 rows)
-SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls
+----------------------------------------------------------------------+-------
+ SELECT * FROM test_squash WHERE id IN ($1 /*, ... */) | 1
+ SELECT * FROM test_squash WHERE id::text = ANY(ARRAY[$1 /*, ... */]) | 1
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+(3 rows)
+
+-- prepared statements will also be squashed
+-- the IN and ARRAY forms of this statement will have the same queryId
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+PREPARE p1(int, int, int, int, int) AS
+SELECT * FROM test_squash WHERE id IN ($1, $2, $3, $4, $5);
+EXECUTE p1(1, 2, 3, 4, 5);
id | data
----+------
(0 rows)
+DEALLOCATE p1;
+PREPARE p1(int, int, int, int, int) AS
+SELECT * FROM test_squash WHERE id = ANY(ARRAY[$1, $2, $3, $4, $5]);
+EXECUTE p1(1, 2, 3, 4, 5);
+ id | data
+----+------
+(0 rows)
+
+DEALLOCATE p1;
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
- query | calls
-------------------------------------------------------------------------+-------
- SELECT * FROM test_squash WHERE id IN ($1 /*, ... */) | 4
- SELECT * FROM test_squash WHERE id IN ($1) | 1
- SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
- SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C" | 1
-(4 rows)
+ query | calls
+-------------------------------------------------------+-------
+ DEALLOCATE $1 | 2
+ PREPARE p1(int, int, int, int, int) AS +| 2
+ SELECT * FROM test_squash WHERE id IN ($1 /*, ... */) |
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+(3 rows)
-- More conditions in the query
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
@@ -75,10 +185,25 @@ SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11) AND da
----+------
(0 rows)
+SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9]) AND data = 2;
+ id | data
+----+------
+(0 rows)
+
+SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) AND data = 2;
+ id | data
+----+------
+(0 rows)
+
+SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) AND data = 2;
+ id | data
+----+------
+(0 rows)
+
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
---------------------------------------------------------------------+-------
- SELECT * FROM test_squash WHERE id IN ($1 /*, ... */) AND data = $2 | 3
+ SELECT * FROM test_squash WHERE id IN ($1 /*, ... */) AND data = $2 | 6
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)
@@ -107,24 +232,46 @@ SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)
----+------
(0 rows)
+SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9])
+ AND data = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9]);
+ id | data
+----+------
+(0 rows)
+
+SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+ AND data = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ id | data
+----+------
+(0 rows)
+
+SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
+ AND data = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
+ id | data
+----+------
+(0 rows)
+
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
-------------------------------------------------------+-------
- SELECT * FROM test_squash WHERE id IN ($1 /*, ... */)+| 3
+ SELECT * FROM test_squash WHERE id IN ($1 /*, ... */)+| 6
AND data IN ($2 /*, ... */) |
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)
--- No constants simplification for OpExpr
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
t
(1 row)
--- In the following two queries the operator expressions (+) and (@) have
--- different oppno, and will be given different query_id if squashed, even though
--- the normalized query will be the same
+-- No constants squashing for OpExpr
+-- The IN and ARRAY forms of this statement will have the same queryId
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
SELECT * FROM test_squash WHERE id IN
(1 + 1, 2 + 2, 3 + 3, 4 + 4, 5 + 5, 6 + 6, 7 + 7, 8 + 8, 9 + 9);
id | data
@@ -137,19 +284,35 @@ SELECT * FROM test_squash WHERE id IN
----+------
(0 rows)
+SELECT * FROM test_squash WHERE id = ANY(ARRAY
+ [1 + 1, 2 + 2, 3 + 3, 4 + 4, 5 + 5, 6 + 6, 7 + 7, 8 + 8, 9 + 9]);
+ id | data
+----+------
+(0 rows)
+
+SELECT * FROM test_squash WHERE id = ANY(ARRAY
+ [@ '-1', @ '-2', @ '-3', @ '-4', @ '-5', @ '-6', @ '-7', @ '-8', @ '-9']);
+ id | data
+----+------
+(0 rows)
+
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
----------------------------------------------------------------------------------------------------+-------
- SELECT * FROM test_squash WHERE id IN +| 1
+ SELECT * FROM test_squash WHERE id IN +| 2
($1 + $2, $3 + $4, $5 + $6, $7 + $8, $9 + $10, $11 + $12, $13 + $14, $15 + $16, $17 + $18) |
- SELECT * FROM test_squash WHERE id IN +| 1
+ SELECT * FROM test_squash WHERE id IN +| 2
(@ $1, @ $2, @ $3, @ $4, @ $5, @ $6, @ $7, @ $8, @ $9) |
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(3 rows)
+--
-- FuncExpr
+--
-- Verify multiple type representation end up with the same query_id
CREATE TABLE test_float (data float);
+-- The cast ARRAY expressions will have the same queryId as the IN clause
+-- form of the query
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
@@ -181,12 +344,38 @@ SELECT data FROM test_float WHERE data IN (1.0, 1.0);
------
(0 rows)
+SELECT data FROM test_float WHERE data = ANY(ARRAY['1'::double precision, '2'::double precision]);
+ data
+------
+(0 rows)
+
+SELECT data FROM test_float WHERE data = ANY(ARRAY[1.0::double precision, 1.0::double precision]);
+ data
+------
+(0 rows)
+
+SELECT data FROM test_float WHERE data = ANY(ARRAY[1, 2]);
+ data
+------
+(0 rows)
+
+SELECT data FROM test_float WHERE data = ANY(ARRAY[1, '2']);
+ data
+------
+(0 rows)
+
+SELECT data FROM test_float WHERE data = ANY(ARRAY['1', 2]);
+ data
+------
+(0 rows)
+
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
- query | calls
------------------------------------------------------------+-------
- SELECT data FROM test_float WHERE data IN ($1 /*, ... */) | 5
- SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
-(2 rows)
+ query | calls
+--------------------------------------------------------------------+-------
+ SELECT data FROM test_float WHERE data = ANY(ARRAY[$1 /*, ... */]) | 3
+ SELECT data FROM test_float WHERE data IN ($1 /*, ... */) | 7
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+(3 rows)
-- Numeric type, implicit cast is squashed
CREATE TABLE test_squash_numeric (id int, data numeric(5, 2));
@@ -201,12 +390,18 @@ SELECT * FROM test_squash_numeric WHERE data IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
----+------
(0 rows)
+SELECT * FROM test_squash_numeric WHERE data = ANY(ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
+ id | data
+----+------
+(0 rows)
+
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
- query | calls
------------------------------------------------------------------+-------
- SELECT * FROM test_squash_numeric WHERE data IN ($1 /*, ... */) | 1
- SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
-(2 rows)
+ query | calls
+--------------------------------------------------------------------------+-------
+ SELECT * FROM test_squash_numeric WHERE data = ANY(ARRAY[$1 /*, ... */]) | 1
+ SELECT * FROM test_squash_numeric WHERE data IN ($1 /*, ... */) | 1
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+(3 rows)
-- Bigint, implicit cast is squashed
CREATE TABLE test_squash_bigint (id int, data bigint);
@@ -221,14 +416,20 @@ SELECT * FROM test_squash_bigint WHERE data IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1
----+------
(0 rows)
+SELECT * FROM test_squash_bigint WHERE data = ANY(ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
+ id | data
+----+------
+(0 rows)
+
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
- query | calls
-----------------------------------------------------------------+-------
- SELECT * FROM test_squash_bigint WHERE data IN ($1 /*, ... */) | 1
- SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
-(2 rows)
+ query | calls
+-------------------------------------------------------------------------+-------
+ SELECT * FROM test_squash_bigint WHERE data = ANY(ARRAY[$1 /*, ... */]) | 1
+ SELECT * FROM test_squash_bigint WHERE data IN ($1 /*, ... */) | 1
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+(3 rows)
--- Bigint, explicit cast is not squashed
+-- Bigint, explicit cast is squashed
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
@@ -242,15 +443,22 @@ SELECT * FROM test_squash_bigint WHERE data IN
----+------
(0 rows)
+SELECT * FROM test_squash_bigint WHERE data = ANY(ARRAY[
+ 1::bigint, 2::bigint, 3::bigint, 4::bigint, 5::bigint, 6::bigint,
+ 7::bigint, 8::bigint, 9::bigint, 10::bigint, 11::bigint]);
+ id | data
+----+------
+(0 rows)
+
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
----------------------------------------------------+-------
- SELECT * FROM test_squash_bigint WHERE data IN +| 1
- ($1 /*, ... */::bigint) |
+ SELECT * FROM test_squash_bigint WHERE data IN +| 2
+ ($1 /*, ... */) |
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)
--- Bigint, long tokens with parenthesis
+-- Bigint, long tokens with parentheses, will not squash
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
@@ -264,44 +472,47 @@ SELECT * FROM test_squash_bigint WHERE id IN
----+------
(0 rows)
+SELECT * FROM test_squash_bigint WHERE id = ANY(ARRAY[
+ abs(100), abs(200), abs(300), abs(400), abs(500), abs(600), abs(700),
+ abs(800), abs(900), abs(1000), ((abs(1100)))]);
+ id | data
+----+------
+(0 rows)
+
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
-------------------------------------------------------------------------+-------
- SELECT * FROM test_squash_bigint WHERE id IN +| 1
+ SELECT * FROM test_squash_bigint WHERE id IN +| 2
(abs($1), abs($2), abs($3), abs($4), abs($5), abs($6), abs($7),+|
abs($8), abs($9), abs($10), ((abs($11)))) |
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)
--- CoerceViaIO, SubLink instead of a Const
-CREATE TABLE test_squash_jsonb (id int, data jsonb);
+-- Multiple FuncExprs. Will not squash
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
t
(1 row)
-SELECT * FROM test_squash_jsonb WHERE data IN
- ((SELECT '"1"')::jsonb, (SELECT '"2"')::jsonb, (SELECT '"3"')::jsonb,
- (SELECT '"4"')::jsonb, (SELECT '"5"')::jsonb, (SELECT '"6"')::jsonb,
- (SELECT '"7"')::jsonb, (SELECT '"8"')::jsonb, (SELECT '"9"')::jsonb,
- (SELECT '"10"')::jsonb);
- id | data
-----+------
-(0 rows)
+SELECT WHERE 1 IN (1::int::bigint::int, 2::int::bigint::int);
+--
+(1 row)
+
+SELECT WHERE 1 = ANY(ARRAY[1::int::bigint::int, 2::int::bigint::int]);
+--
+(1 row)
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
- query | calls
-----------------------------------------------------------------------+-------
- SELECT * FROM test_squash_jsonb WHERE data IN +| 1
- ((SELECT $1)::jsonb, (SELECT $2)::jsonb, (SELECT $3)::jsonb,+|
- (SELECT $4)::jsonb, (SELECT $5)::jsonb, (SELECT $6)::jsonb,+|
- (SELECT $7)::jsonb, (SELECT $8)::jsonb, (SELECT $9)::jsonb,+|
- (SELECT $10)::jsonb) |
- SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+ query | calls
+----------------------------------------------------+-------
+ SELECT WHERE $1 IN ($2 /*, ... */) | 2
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)
+--
-- CoerceViaIO
+--
-- Create some dummy type to force CoerceViaIO
CREATE TYPE casttesttype;
CREATE FUNCTION casttesttype_in(cstring)
@@ -349,15 +560,25 @@ SELECT * FROM test_squash_cast WHERE data IN
----+------
(0 rows)
+SELECT * FROM test_squash_cast WHERE data = ANY (ARRAY
+ [1::int4::casttesttype, 2::int4::casttesttype, 3::int4::casttesttype,
+ 4::int4::casttesttype, 5::int4::casttesttype, 6::int4::casttesttype,
+ 7::int4::casttesttype, 8::int4::casttesttype, 9::int4::casttesttype,
+ 10::int4::casttesttype, 11::int4::casttesttype]);
+ id | data
+----+------
+(0 rows)
+
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
----------------------------------------------------+-------
- SELECT * FROM test_squash_cast WHERE data IN +| 1
- ($1 /*, ... */::int4::casttesttype) |
+ SELECT * FROM test_squash_cast WHERE data IN +| 2
+ ($1 /*, ... */) |
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)
-- Some casting expression are simplified to Const
+CREATE TABLE test_squash_jsonb (id int, data jsonb);
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
@@ -366,8 +587,16 @@ SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT * FROM test_squash_jsonb WHERE data IN
(('"1"')::jsonb, ('"2"')::jsonb, ('"3"')::jsonb, ('"4"')::jsonb,
- ( '"5"')::jsonb, ( '"6"')::jsonb, ( '"7"')::jsonb, ( '"8"')::jsonb,
- ( '"9"')::jsonb, ( '"10"')::jsonb);
+ ('"5"')::jsonb, ('"6"')::jsonb, ('"7"')::jsonb, ('"8"')::jsonb,
+ ('"9"')::jsonb, ('"10"')::jsonb);
+ id | data
+----+------
+(0 rows)
+
+SELECT * FROM test_squash_jsonb WHERE data = ANY (ARRAY
+ [('"1"')::jsonb, ('"2"')::jsonb, ('"3"')::jsonb, ('"4"')::jsonb,
+ ('"5"')::jsonb, ('"6"')::jsonb, ('"7"')::jsonb, ('"8"')::jsonb,
+ ('"9"')::jsonb, ('"10"')::jsonb]);
id | data
----+------
(0 rows)
@@ -375,28 +604,152 @@ SELECT * FROM test_squash_jsonb WHERE data IN
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
----------------------------------------------------+-------
- SELECT * FROM test_squash_jsonb WHERE data IN +| 1
- (($1 /*, ... */)::jsonb) |
+ SELECT * FROM test_squash_jsonb WHERE data IN +| 2
+ ($1 /*, ... */) |
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)
+-- CoerceViaIO, SubLink instead of a Const. Will not squash
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+SELECT * FROM test_squash_jsonb WHERE data IN
+ ((SELECT '"1"')::jsonb, (SELECT '"2"')::jsonb, (SELECT '"3"')::jsonb,
+ (SELECT '"4"')::jsonb, (SELECT '"5"')::jsonb, (SELECT '"6"')::jsonb,
+ (SELECT '"7"')::jsonb, (SELECT '"8"')::jsonb, (SELECT '"9"')::jsonb,
+ (SELECT '"10"')::jsonb);
+ id | data
+----+------
+(0 rows)
+
+SELECT * FROM test_squash_jsonb WHERE data = ANY(ARRAY
+ [(SELECT '"1"')::jsonb, (SELECT '"2"')::jsonb, (SELECT '"3"')::jsonb,
+ (SELECT '"4"')::jsonb, (SELECT '"5"')::jsonb, (SELECT '"6"')::jsonb,
+ (SELECT '"7"')::jsonb, (SELECT '"8"')::jsonb, (SELECT '"9"')::jsonb,
+ (SELECT '"10"')::jsonb]);
+ id | data
+----+------
+(0 rows)
+
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls
+----------------------------------------------------------------------+-------
+ SELECT * FROM test_squash_jsonb WHERE data IN +| 2
+ ((SELECT $1)::jsonb, (SELECT $2)::jsonb, (SELECT $3)::jsonb,+|
+ (SELECT $4)::jsonb, (SELECT $5)::jsonb, (SELECT $6)::jsonb,+|
+ (SELECT $7)::jsonb, (SELECT $8)::jsonb, (SELECT $9)::jsonb,+|
+ (SELECT $10)::jsonb) |
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+(2 rows)
+
+-- Multiple CoerceViaIO are squashed
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+SELECT WHERE 1 IN (1::text::int::text::int, 1::text::int::text::int);
+--
+(1 row)
+
+SELECT WHERE 1 = ANY(ARRAY[1::text::int::text::int, 1::text::int::text::int]);
+--
+(1 row)
+
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls
+----------------------------------------------------+-------
+ SELECT WHERE $1 IN ($2 /*, ... */) | 2
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+(2 rows)
+
+--
-- RelabelType
+--
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
t
(1 row)
-SELECT * FROM test_squash WHERE id IN (1::oid, 2::oid, 3::oid, 4::oid, 5::oid, 6::oid, 7::oid, 8::oid, 9::oid);
+-- However many layers of RelabelType there are, the list will be squashable.
+SELECT * FROM test_squash WHERE id IN
+ (1::oid, 2::oid, 3::oid, 4::oid, 5::oid, 6::oid, 7::oid, 8::oid, 9::oid);
+ id | data
+----+------
+(0 rows)
+
+SELECT ARRAY[1::oid, 2::oid, 3::oid, 4::oid, 5::oid, 6::oid, 7::oid, 8::oid, 9::oid];
+ array
+---------------------
+ {1,2,3,4,5,6,7,8,9}
+(1 row)
+
+SELECT * FROM test_squash WHERE id IN (1::oid, 2::oid::int::oid);
+ id | data
+----+------
+(0 rows)
+
+SELECT * FROM test_squash WHERE id = ANY(ARRAY[1::oid, 2::oid::int::oid]);
+ id | data
+----+------
+(0 rows)
+
+-- RelabelType together with CoerceViaIO is also squashable
+SELECT * FROM test_squash WHERE id = ANY(ARRAY[1::oid::text::int::oid, 2::oid::int::oid]);
+ id | data
+----+------
+(0 rows)
+
+SELECT * FROM test_squash WHERE id = ANY(ARRAY[1::text::int::oid, 2::oid::int::oid]);
id | data
----+------
(0 rows)
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
- query | calls
-------------------------------------------------------------+-------
- SELECT * FROM test_squash WHERE id IN ($1 /*, ... */::oid) | 1
- SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+ query | calls
+----------------------------------------------------+-------
+ SELECT * FROM test_squash WHERE id IN +| 5
+ ($1 /*, ... */) |
+ SELECT ARRAY[$1 /*, ... */] | 1
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+(3 rows)
+
+--
+-- edge cases
+--
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+-- for nested arrays, only constants are squashed
+SELECT ARRAY[
+ ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ ];
+ array
+-----------------------------------------------------------------------------------------------
+ {{1,2,3,4,5,6,7,8,9,10},{1,2,3,4,5,6,7,8,9,10},{1,2,3,4,5,6,7,8,9,10},{1,2,3,4,5,6,7,8,9,10}}
+(1 row)
+
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls
+----------------------------------------------------+-------
+ SELECT ARRAY[ +| 1
+ ARRAY[$1 /*, ... */], +|
+ ARRAY[$2 /*, ... */], +|
+ ARRAY[$3 /*, ... */], +|
+ ARRAY[$4 /*, ... */] +|
+ ] |
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
(2 rows)
-- Test constants evaluation in a CTE, which was causing issues in the past
@@ -409,23 +762,59 @@ FROM cte;
--------
(0 rows)
--- Simple array would be squashed as well
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
t
---
t
(1 row)
-SELECT ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
- array
-------------------------
- {1,2,3,4,5,6,7,8,9,10}
+-- Rewritten as an OpExpr, so it will not be squashed
+select where '1' IN ('1'::int, '2'::int::text);
+--
+(1 row)
+
+-- Rewritten as an ArrayExpr, so it will be squashed
+select where '1' IN ('1'::int, '2'::int);
+--
+(1 row)
+
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+ query | calls
+----------------------------------------------------+-------
+ SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+ select where $1 IN ($2 /*, ... */) | 1
+ select where $1 IN ($2::int, $3::int::text) | 1
+(3 rows)
+
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+ t
+---
+ t
+(1 row)
+
+-- Both of these queries will be rewritten as an ArrayExpr, so they
+-- will be squashed, and have the same queryId
+select where '1' IN ('1'::int::text, '2'::int::text);
+--
+(1 row)
+
+select where '1' = ANY (array['1'::int::text, '2'::int::text]);
+--
(1 row)
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
query | calls
----------------------------------------------------+-------
- SELECT ARRAY[$1 /*, ... */] | 1
SELECT pg_stat_statements_reset() IS NOT NULL AS t | 1
+ select where $1 IN ($2 /*, ... */) | 2
(2 rows)
+--
+-- cleanup
+--
+DROP TABLE test_squash;
+DROP TABLE test_float;
+DROP TABLE test_squash_numeric;
+DROP TABLE test_squash_bigint;
+DROP TABLE test_squash_cast CASCADE;
+DROP TABLE test_squash_jsonb;
diff --git a/contrib/pg_stat_statements/expected/utility.out b/contrib/pg_stat_statements/expected/utility.out
index aa4f0f7e628..060d4416dd7 100644
--- a/contrib/pg_stat_statements/expected/utility.out
+++ b/contrib/pg_stat_statements/expected/utility.out
@@ -540,7 +540,7 @@ SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C";
-------+------+----------------------------------------------------
2 | 0 | DEALLOCATE $1
2 | 0 | DEALLOCATE ALL
- 2 | 2 | SELECT $1 AS a
+ 2 | 2 | PREPARE stat_select AS SELECT $1 AS a
1 | 1 | SELECT $1 as a
1 | 1 | SELECT pg_stat_statements_reset() IS NOT NULL AS t
(5 rows)
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 9778407cba3..e7857f81ec0 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -144,7 +144,7 @@ typedef struct pgssHashKey
{
Oid userid; /* user OID */
Oid dbid; /* database OID */
- uint64 queryid; /* query identifier */
+ int64 queryid; /* query identifier */
bool toplevel; /* query executed at top level */
} pgssHashKey;
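
The uint64-to-int64 switch is worth a note: query IDs reach users as SQL bigint, both in the pg_stat_statements view and in pg_stat_statements_reset(), and bigint is signed, so carrying one signed type end to end removes the casts at every boundary. A small standalone illustration of the difference (the hash value is hypothetical, and the out-of-range conversion relies on the usual two's-complement behavior):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int
main(void)
{
	/* hypothetical 64-bit query hash with the top bit set */
	uint64_t	hash = UINT64_C(0xDEADBEEFCAFEBABE);

	/*
	 * Printed as unsigned this is a huge positive number; reinterpreted
	 * as the signed bigint that the SQL level exposes, it prints as a
	 * negative one.  Using int64 internally matches the latter.
	 */
	printf("as uint64: %" PRIu64 "\n", hash);
	printf("as int64:  %" PRId64 "\n", (int64_t) hash);
	return 0;
}
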
@@ -335,7 +335,7 @@ static PlannedStmt *pgss_planner(Query *parse,
const char *query_string,
int cursorOptions,
ParamListInfo boundParams);
-static bool pgss_ExecutorStart(QueryDesc *queryDesc, int eflags);
+static void pgss_ExecutorStart(QueryDesc *queryDesc, int eflags);
static void pgss_ExecutorRun(QueryDesc *queryDesc,
ScanDirection direction,
uint64 count);
@@ -346,7 +346,7 @@ static void pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
ProcessUtilityContext context, ParamListInfo params,
QueryEnvironment *queryEnv,
DestReceiver *dest, QueryCompletion *qc);
-static void pgss_store(const char *query, uint64 queryId,
+static void pgss_store(const char *query, int64 queryId,
int query_location, int query_len,
pgssStoreKind kind,
double total_time, uint64 rows,
@@ -370,7 +370,7 @@ static char *qtext_fetch(Size query_offset, int query_len,
char *buffer, Size buffer_size);
static bool need_gc_qtexts(void);
static void gc_qtexts(void);
-static TimestampTz entry_reset(Oid userid, Oid dbid, uint64 queryid, bool minmax_only);
+static TimestampTz entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only);
static char *generate_normalized_query(JumbleState *jstate, const char *query,
int query_loc, int *query_len_p);
static void fill_in_constant_lengths(JumbleState *jstate, const char *query,
@@ -852,7 +852,7 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query, JumbleState *jstate)
{
if (pgss_track_utility && IsA(query->utilityStmt, ExecuteStmt))
{
- query->queryId = UINT64CONST(0);
+ query->queryId = INT64CONST(0);
return;
}
}
@@ -899,7 +899,7 @@ pgss_planner(Query *parse,
*/
if (pgss_enabled(nesting_level)
&& pgss_track_planning && query_string
- && parse->queryId != UINT64CONST(0))
+ && parse->queryId != INT64CONST(0))
{
instr_time start;
instr_time duration;
@@ -989,26 +989,20 @@ pgss_planner(Query *parse,
/*
* ExecutorStart hook: start up tracking if needed
*/
-static bool
+static void
pgss_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
- bool plan_valid;
-
if (prev_ExecutorStart)
- plan_valid = prev_ExecutorStart(queryDesc, eflags);
+ prev_ExecutorStart(queryDesc, eflags);
else
- plan_valid = standard_ExecutorStart(queryDesc, eflags);
-
- /* The plan may have become invalid during standard_ExecutorStart() */
- if (!plan_valid)
- return false;
+ standard_ExecutorStart(queryDesc, eflags);
/*
* If query has queryId zero, don't track it. This prevents double
* counting of optimizable statements that are directly contained in
* utility statements.
*/
- if (pgss_enabled(nesting_level) && queryDesc->plannedstmt->queryId != UINT64CONST(0))
+ if (pgss_enabled(nesting_level) && queryDesc->plannedstmt->queryId != INT64CONST(0))
{
/*
* Set up to track total elapsed time in ExecutorRun. Make sure the
@@ -1024,8 +1018,6 @@ pgss_ExecutorStart(QueryDesc *queryDesc, int eflags)
MemoryContextSwitchTo(oldcxt);
}
}
-
- return true;
}
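
For extension authors who also hook ExecutorStart, the new contract shown above is simply a void return with no plan-validity signaling. A minimal skeleton under that assumption (a sketch only; the module and function names are invented):

#include "postgres.h"
#include "fmgr.h"
#include "executor/executor.h"

PG_MODULE_MAGIC;

static ExecutorStart_hook_type prev_ExecutorStart = NULL;

static void
demo_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	/* Chain to the previous hook, or to the standard routine. */
	if (prev_ExecutorStart)
		prev_ExecutorStart(queryDesc, eflags);
	else
		standard_ExecutorStart(queryDesc, eflags);

	/*
	 * Module-specific startup work goes here; there is no longer a
	 * plan-validity result to propagate back to the caller.
	 */
}

void
_PG_init(void)
{
	prev_ExecutorStart = ExecutorStart_hook;
	ExecutorStart_hook = demo_ExecutorStart;
}
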
/*
@@ -1076,9 +1068,9 @@ pgss_ExecutorFinish(QueryDesc *queryDesc)
static void
pgss_ExecutorEnd(QueryDesc *queryDesc)
{
- uint64 queryId = queryDesc->plannedstmt->queryId;
+ int64 queryId = queryDesc->plannedstmt->queryId;
- if (queryId != UINT64CONST(0) && queryDesc->totaltime &&
+ if (queryId != INT64CONST(0) && queryDesc->totaltime &&
pgss_enabled(nesting_level))
{
/*
@@ -1119,7 +1111,7 @@ pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
DestReceiver *dest, QueryCompletion *qc)
{
Node *parsetree = pstmt->utilityStmt;
- uint64 saved_queryId = pstmt->queryId;
+ int64 saved_queryId = pstmt->queryId;
int saved_stmt_location = pstmt->stmt_location;
int saved_stmt_len = pstmt->stmt_len;
bool enabled = pgss_track_utility && pgss_enabled(nesting_level);
@@ -1139,7 +1131,7 @@ pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
* only.
*/
if (enabled)
- pstmt->queryId = UINT64CONST(0);
+ pstmt->queryId = INT64CONST(0);
/*
* If it's an EXECUTE statement, we don't track it and don't increment the
@@ -1286,7 +1278,7 @@ pgss_ProcessUtility(PlannedStmt *pstmt, const char *queryString,
* for the arrays in the Counters field.
*/
static void
-pgss_store(const char *query, uint64 queryId,
+pgss_store(const char *query, int64 queryId,
int query_location, int query_len,
pgssStoreKind kind,
double total_time, uint64 rows,
@@ -1312,7 +1304,7 @@ pgss_store(const char *query, uint64 queryId,
* Nothing to do if compute_query_id isn't enabled and no other module
* computed a query identifier.
*/
- if (queryId == UINT64CONST(0))
+ if (queryId == INT64CONST(0))
return;
/*
@@ -1522,11 +1514,11 @@ pg_stat_statements_reset_1_7(PG_FUNCTION_ARGS)
{
Oid userid;
Oid dbid;
- uint64 queryid;
+ int64 queryid;
userid = PG_GETARG_OID(0);
dbid = PG_GETARG_OID(1);
- queryid = (uint64) PG_GETARG_INT64(2);
+ queryid = PG_GETARG_INT64(2);
entry_reset(userid, dbid, queryid, false);
@@ -1538,12 +1530,12 @@ pg_stat_statements_reset_1_11(PG_FUNCTION_ARGS)
{
Oid userid;
Oid dbid;
- uint64 queryid;
+ int64 queryid;
bool minmax_only;
userid = PG_GETARG_OID(0);
dbid = PG_GETARG_OID(1);
- queryid = (uint64) PG_GETARG_INT64(2);
+ queryid = PG_GETARG_INT64(2);
minmax_only = PG_GETARG_BOOL(3);
PG_RETURN_TIMESTAMPTZ(entry_reset(userid, dbid, queryid, minmax_only));
@@ -2679,7 +2671,7 @@ if (e) { \
* Reset entries corresponding to parameters passed.
*/
static TimestampTz
-entry_reset(Oid userid, Oid dbid, uint64 queryid, bool minmax_only)
+entry_reset(Oid userid, Oid dbid, int64 queryid, bool minmax_only)
{
HASH_SEQ_STATUS hash_seq;
pgssEntry *entry;
@@ -2699,7 +2691,7 @@ entry_reset(Oid userid, Oid dbid, uint64 queryid, bool minmax_only)
stats_reset = GetCurrentTimestamp();
- if (userid != 0 && dbid != 0 && queryid != UINT64CONST(0))
+ if (userid != 0 && dbid != 0 && queryid != INT64CONST(0))
{
/* If all the parameters are available, use the fast path. */
memset(&key, 0, sizeof(pgssHashKey));
@@ -2722,7 +2714,7 @@ entry_reset(Oid userid, Oid dbid, uint64 queryid, bool minmax_only)
SINGLE_ENTRY_RESET(entry);
}
- else if (userid != 0 || dbid != 0 || queryid != UINT64CONST(0))
+ else if (userid != 0 || dbid != 0 || queryid != INT64CONST(0))
{
/* Reset entries corresponding to valid parameters. */
hash_seq_init(&hash_seq, pgss_hash);
@@ -2818,17 +2810,13 @@ generate_normalized_query(JumbleState *jstate, const char *query,
{
char *norm_query;
int query_len = *query_len_p;
- int i,
- norm_query_buflen, /* Space allowed for norm_query */
+ int norm_query_buflen, /* Space allowed for norm_query */
len_to_wrt, /* Length (in bytes) to write */
quer_loc = 0, /* Source query byte location */
n_quer_loc = 0, /* Normalized query byte location */
last_off = 0, /* Offset from start for previous tok */
last_tok_len = 0; /* Length (in bytes) of that tok */
- bool in_squashed = false; /* in a run of squashed consts? */
- int skipped_constants = 0; /* Position adjustment of later
- * constants after squashed ones */
-
+ int num_constants_replaced = 0;
/*
* Get constants' lengths (core system only gives us locations). Note
@@ -2842,20 +2830,27 @@ generate_normalized_query(JumbleState *jstate, const char *query,
* certainly isn't more than 11 bytes, even if n reaches INT_MAX. We
* could refine that limit based on the max value of n for the current
* query, but it hardly seems worth any extra effort to do so.
- *
- * Note this also gives enough room for the commented-out ", ..." list
- * syntax used by constant squashing.
*/
norm_query_buflen = query_len + jstate->clocations_count * 10;
/* Allocate result buffer */
norm_query = palloc(norm_query_buflen + 1);
- for (i = 0; i < jstate->clocations_count; i++)
+ for (int i = 0; i < jstate->clocations_count; i++)
{
int off, /* Offset from start for cur tok */
tok_len; /* Length (in bytes) of that tok */
+ /*
+ * If we have an external param at this location, but no lists are
+ * being squashed across the query, we skip it here; this will make
+ * us print the characters found in the original query that represent
+ * the parameter in the next iteration (or after the loop is done),
+ * which is a bit odd but seems to work okay in most cases.
+ */
+ if (jstate->clocations[i].extern_param && !jstate->has_squashed_lists)
+ continue;
+
off = jstate->clocations[i].location;
/* Adjust recorded location if we're dealing with partial string */
@@ -2866,67 +2861,24 @@ generate_normalized_query(JumbleState *jstate, const char *query,
if (tok_len < 0)
continue; /* ignore any duplicates */
+ /* Copy next chunk (what precedes the next constant) */
+ len_to_wrt = off - last_off;
+ len_to_wrt -= last_tok_len;
+ Assert(len_to_wrt >= 0);
+ memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
+ n_quer_loc += len_to_wrt;
+
/*
- * What to do next depends on whether we're squashing constant lists,
- * and whether we're already in a run of such constants.
+ * Insert a param symbol in place of the constant token; if we have a
+ * squashable list, also insert a placeholder comment starting from the
+ * list's second value.
*/
- if (!jstate->clocations[i].squashed)
- {
- /*
- * This location corresponds to a constant not to be squashed.
- * Print what comes before the constant ...
- */
- len_to_wrt = off - last_off;
- len_to_wrt -= last_tok_len;
-
- Assert(len_to_wrt >= 0);
-
- memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
- n_quer_loc += len_to_wrt;
-
- /* ... and then a param symbol replacing the constant itself */
- n_quer_loc += sprintf(norm_query + n_quer_loc, "$%d",
- i + 1 + jstate->highest_extern_param_id - skipped_constants);
-
- /* In case previous constants were merged away, stop doing that */
- in_squashed = false;
- }
- else if (!in_squashed)
- {
- /*
- * This location is the start position of a run of constants to be
- * squashed, so we need to print the representation of starting a
- * group of stashed constants.
- *
- * Print what comes before the constant ...
- */
- len_to_wrt = off - last_off;
- len_to_wrt -= last_tok_len;
- Assert(len_to_wrt >= 0);
- Assert(i + 1 < jstate->clocations_count);
- Assert(jstate->clocations[i + 1].squashed);
- memcpy(norm_query + n_quer_loc, query + quer_loc, len_to_wrt);
- n_quer_loc += len_to_wrt;
-
- /* ... and then start a run of squashed constants */
- n_quer_loc += sprintf(norm_query + n_quer_loc, "$%d /*, ... */",
- i + 1 + jstate->highest_extern_param_id - skipped_constants);
-
- /* The next location will match the block below, to end the run */
- in_squashed = true;
-
- skipped_constants++;
- }
- else
- {
- /*
- * The second location of a run of squashable elements; this
- * indicates its end.
- */
- in_squashed = false;
- }
+ n_quer_loc += sprintf(norm_query + n_quer_loc, "$%d%s",
+ num_constants_replaced + 1 + jstate->highest_extern_param_id,
+ jstate->clocations[i].squashed ? " /*, ... */" : "");
+ num_constants_replaced++;
- /* Otherwise the constant is squashed away -- move forward */
+ /* move forward */
quer_loc = off + tok_len;
last_off = off;
last_tok_len = tok_len;
@@ -3017,6 +2969,9 @@ fill_in_constant_lengths(JumbleState *jstate, const char *query,
Assert(loc >= 0);
+ if (locs[i].squashed)
+ continue; /* squashable list, ignore */
+
if (loc <= last_loc)
continue; /* Duplicate constant, ignore */
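
Taken together, these changes reduce normalization to a single rule: each recorded constant becomes a sequentially numbered $n, offset past any parameter numbers the client already used, and the commented-out list marker is appended only at squashed locations. A toy standalone rendering of just that numbering rule (not the server code; the recorded locations are made up):

#include <stdio.h>

/*
 * Toy model: one entry per recorded constant location; "squashed" marks
 * a merged IN-list, as in the real clocations array.
 */
struct cloc
{
	int			squashed;
};

int
main(void)
{
	/* hypothetical locations for: WHERE a = 1 AND b IN (2, 3, 4) */
	struct cloc locs[] = {{0}, {1}};
	int			nlocs = 2;
	int			highest_extern_param_id = 0;	/* no client-side $n in use */
	int			num_constants_replaced = 0;

	for (int i = 0; i < nlocs; i++)
	{
		/* the same formula as the new code above */
		printf("$%d%s\n",
			   num_constants_replaced + 1 + highest_extern_param_id,
			   locs[i].squashed ? " /*, ... */" : "");
		num_constants_replaced++;
	}
	return 0;					/* prints "$1" then the squashed form */
}
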
diff --git a/contrib/pg_stat_statements/sql/extended.sql b/contrib/pg_stat_statements/sql/extended.sql
index 1af0711020c..9a6518e2f04 100644
--- a/contrib/pg_stat_statements/sql/extended.sql
+++ b/contrib/pg_stat_statements/sql/extended.sql
@@ -19,3 +19,28 @@ SELECT $1 \bind 'unnamed_val1' \g
\bind_named stmt1 'stmt1_val1' \g
SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+-- Various parameter numbering patterns
+-- Unique query IDs with parameter numbers switched.
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+SELECT WHERE ($1::int, 7) IN ((8, $2::int), ($3::int, 9)) \bind '1' '2' '3' \g
+SELECT WHERE ($2::int, 10) IN ((11, $3::int), ($1::int, 12)) \bind '1' '2' '3' \g
+SELECT WHERE $1::int IN ($2::int, $3::int) \bind '1' '2' '3' \g
+SELECT WHERE $2::int IN ($3::int, $1::int) \bind '1' '2' '3' \g
+SELECT WHERE $3::int IN ($1::int, $2::int) \bind '1' '2' '3' \g
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+-- Two groups of two queries with the same query ID.
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+SELECT WHERE '1'::int IN ($1::int, '2'::int) \bind '1' \g
+SELECT WHERE '4'::int IN ($1::int, '5'::int) \bind '2' \g
+SELECT WHERE $2::int IN ($1::int, '1'::int) \bind '1' '2' \g
+SELECT WHERE $2::int IN ($1::int, '2'::int) \bind '3' '4' \g
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+
+-- no squashable list, the parameter IDs are kept as-is
+SELECT WHERE $3 = $1 AND $2 = $4 \bind 1 2 1 2 \g
+-- squashable list, so the parameter IDs will be re-assigned
+SELECT WHERE 1 IN (1, 2, 3) AND $3 = $1 AND $2 = $4 \bind 1 2 1 2 \g
+
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
diff --git a/contrib/pg_stat_statements/sql/level_tracking.sql b/contrib/pg_stat_statements/sql/level_tracking.sql
index 6b81230f186..86f007e8552 100644
--- a/contrib/pg_stat_statements/sql/level_tracking.sql
+++ b/contrib/pg_stat_statements/sql/level_tracking.sql
@@ -334,6 +334,32 @@ END; $$;
SELECT toplevel, calls, query FROM pg_stat_statements
ORDER BY query COLLATE "C", toplevel;
+-- DO block --- multiple inner queries with separators
+SET pg_stat_statements.track = 'all';
+SET pg_stat_statements.track_utility = TRUE;
+CREATE TABLE pgss_do_util_tab_1 (a int);
+CREATE TABLE pgss_do_util_tab_2 (a int);
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+DO $$
+DECLARE BEGIN
+ EXECUTE 'CREATE TABLE pgss_do_table (id INT); DROP TABLE pgss_do_table';
+ EXECUTE 'SELECT a FROM pgss_do_util_tab_1; SELECT a FROM pgss_do_util_tab_2';
+END $$;
+SELECT toplevel, calls, rows, query FROM pg_stat_statements
+ WHERE toplevel IS FALSE
+ ORDER BY query COLLATE "C";
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+-- Note the extra semicolon at the end of the query.
+DO $$
+DECLARE BEGIN
+ EXECUTE 'CREATE TABLE pgss_do_table (id INT); DROP TABLE pgss_do_table;';
+ EXECUTE 'SELECT a FROM pgss_do_util_tab_1; SELECT a FROM pgss_do_util_tab_2;';
+END $$;
+SELECT toplevel, calls, rows, query FROM pg_stat_statements
+ WHERE toplevel IS FALSE
+ ORDER BY query COLLATE "C";
+DROP TABLE pgss_do_util_tab_1, pgss_do_util_tab_2;
+
-- PL/pgSQL function - top-level tracking.
SET pg_stat_statements.track = 'top';
SET pg_stat_statements.track_utility = FALSE;
diff --git a/contrib/pg_stat_statements/sql/planning.sql b/contrib/pg_stat_statements/sql/planning.sql
index 9cfe206b3b0..46f5d9b951c 100644
--- a/contrib/pg_stat_statements/sql/planning.sql
+++ b/contrib/pg_stat_statements/sql/planning.sql
@@ -20,11 +20,11 @@ SELECT 42;
SELECT 42;
SELECT 42;
SELECT plans, calls, rows, query FROM pg_stat_statements
- WHERE query NOT LIKE 'SELECT COUNT%' ORDER BY query COLLATE "C";
+ WHERE query NOT LIKE 'PREPARE%' ORDER BY query COLLATE "C";
-- for the prepared statement we expect at least one replan, but cache
-- invalidations could force more
SELECT plans >= 2 AND plans <= calls AS plans_ok, calls, rows, query FROM pg_stat_statements
- WHERE query LIKE 'SELECT COUNT%' ORDER BY query COLLATE "C";
+ WHERE query LIKE 'PREPARE%' ORDER BY query COLLATE "C";
-- Cleanup
DROP TABLE stats_plan_test;
diff --git a/contrib/pg_stat_statements/sql/select.sql b/contrib/pg_stat_statements/sql/select.sql
index c5e0b84ee5b..11662cde08c 100644
--- a/contrib/pg_stat_statements/sql/select.sql
+++ b/contrib/pg_stat_statements/sql/select.sql
@@ -79,6 +79,22 @@ DEALLOCATE pgss_test;
SELECT calls, rows, query FROM pg_stat_statements ORDER BY query COLLATE "C";
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+-- normalization of constants and parameters, with constant locations
+-- recorded one or more times.
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+SELECT WHERE '1' IN ('1'::int, '3'::int::text);
+SELECT WHERE (1, 2) IN ((1, 2), (2, 3));
+SELECT WHERE (3, 4) IN ((5, 6), (8, 7));
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+-- with the last element being an explicit function call with an argument,
+-- ensure that the normalization of the squashing interval is correct.
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+SELECT WHERE 1 IN (1, int4(1), int4(2));
+SELECT WHERE 1 = ANY (ARRAY[1, int4(1), int4(2)]);
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+
--
-- queries with locking clauses
--
diff --git a/contrib/pg_stat_statements/sql/squashing.sql b/contrib/pg_stat_statements/sql/squashing.sql
index 03efd4b40c8..53138d125a9 100644
--- a/contrib/pg_stat_statements/sql/squashing.sql
+++ b/contrib/pg_stat_statements/sql/squashing.sql
@@ -3,101 +3,160 @@
--
CREATE EXTENSION pg_stat_statements;
-CREATE TABLE test_squash (id int, data int);
+--
+-- Simple Lists
+--
--- IN queries
+CREATE TABLE test_squash (id int, data int);
--- Normal scenario, too many simple constants for an IN query
+-- single element will not be squashed
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT * FROM test_squash WHERE id IN (1);
+SELECT ARRAY[1];
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+-- more than 1 element in a list will be squashed
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT * FROM test_squash WHERE id IN (1, 2, 3);
+SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4);
+SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5);
+SELECT ARRAY[1, 2, 3];
+SELECT ARRAY[1, 2, 3, 4];
+SELECT ARRAY[1, 2, 3, 4, 5];
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
-SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9);
-SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
-SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
+-- built-in functions will be squashed
+-- the IN and ARRAY forms of this statement will have the same queryId
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+SELECT WHERE 1 IN (1, int4(1), int4(2), 2);
+SELECT WHERE 1 = ANY (ARRAY[1, int4(1), int4(2), 2]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
--- More conditions in the query
+-- external parameters will be squashed
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+SELECT * FROM test_squash WHERE id IN ($1, $2, $3, $4, $5) \bind 1 2 3 4 5
+;
+SELECT * FROM test_squash WHERE id::text = ANY(ARRAY[$1, $2, $3, $4, $5]) \bind 1 2 3 4 5
+;
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+-- prepared statements will also be squashed
+-- the IN and ARRAY forms of this statement will have the same queryId
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+PREPARE p1(int, int, int, int, int) AS
+SELECT * FROM test_squash WHERE id IN ($1, $2, $3, $4, $5);
+EXECUTE p1(1, 2, 3, 4, 5);
+DEALLOCATE p1;
+PREPARE p1(int, int, int, int, int) AS
+SELECT * FROM test_squash WHERE id = ANY(ARRAY[$1, $2, $3, $4, $5]);
+EXECUTE p1(1, 2, 3, 4, 5);
+DEALLOCATE p1;
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+-- More conditions in the query
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9) AND data = 2;
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10) AND data = 2;
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11) AND data = 2;
+SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9]) AND data = 2;
+SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) AND data = 2;
+SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) AND data = 2;
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
-- Multiple squashed intervals
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
-
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9)
AND data IN (1, 2, 3, 4, 5, 6, 7, 8, 9);
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
AND data IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
SELECT * FROM test_squash WHERE id IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11)
AND data IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
+SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9])
+ AND data = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9]);
+SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
+ AND data = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+SELECT * FROM test_squash WHERE id = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
+ AND data = ANY (ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
-
--- No constants simplification for OpExpr
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
--- In the following two queries the operator expressions (+) and (@) have
--- different oppno, and will be given different query_id if squashed, even though
--- the normalized query will be the same
+-- No constant squashing for OpExpr
+-- The IN and ARRAY forms of this statement will have the same queryId
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT * FROM test_squash WHERE id IN
(1 + 1, 2 + 2, 3 + 3, 4 + 4, 5 + 5, 6 + 6, 7 + 7, 8 + 8, 9 + 9);
SELECT * FROM test_squash WHERE id IN
(@ '-1', @ '-2', @ '-3', @ '-4', @ '-5', @ '-6', @ '-7', @ '-8', @ '-9');
+SELECT * FROM test_squash WHERE id = ANY(ARRAY
+ [1 + 1, 2 + 2, 3 + 3, 4 + 4, 5 + 5, 6 + 6, 7 + 7, 8 + 8, 9 + 9]);
+SELECT * FROM test_squash WHERE id = ANY(ARRAY
+ [@ '-1', @ '-2', @ '-3', @ '-4', @ '-5', @ '-6', @ '-7', @ '-8', @ '-9']);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+--
-- FuncExpr
+--
-- Verify multiple type representation end up with the same query_id
CREATE TABLE test_float (data float);
+-- The cast ARRAY expressions will have the same queryId as the IN clause
+-- form of the query
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT data FROM test_float WHERE data IN (1, 2);
SELECT data FROM test_float WHERE data IN (1, '2');
SELECT data FROM test_float WHERE data IN ('1', 2);
SELECT data FROM test_float WHERE data IN ('1', '2');
SELECT data FROM test_float WHERE data IN (1.0, 1.0);
+SELECT data FROM test_float WHERE data = ANY(ARRAY['1'::double precision, '2'::double precision]);
+SELECT data FROM test_float WHERE data = ANY(ARRAY[1.0::double precision, 1.0::double precision]);
+SELECT data FROM test_float WHERE data = ANY(ARRAY[1, 2]);
+SELECT data FROM test_float WHERE data = ANY(ARRAY[1, '2']);
+SELECT data FROM test_float WHERE data = ANY(ARRAY['1', 2]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
-- Numeric type, implicit cast is squashed
CREATE TABLE test_squash_numeric (id int, data numeric(5, 2));
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT * FROM test_squash_numeric WHERE data IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
+SELECT * FROM test_squash_numeric WHERE data = ANY(ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
-- Bigint, implicit cast is squashed
CREATE TABLE test_squash_bigint (id int, data bigint);
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT * FROM test_squash_bigint WHERE data IN (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11);
+SELECT * FROM test_squash_bigint WHERE data = ANY(ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
--- Bigint, explicit cast is not squashed
+-- Bigint, explicit cast is squashed
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT * FROM test_squash_bigint WHERE data IN
(1::bigint, 2::bigint, 3::bigint, 4::bigint, 5::bigint, 6::bigint,
7::bigint, 8::bigint, 9::bigint, 10::bigint, 11::bigint);
+SELECT * FROM test_squash_bigint WHERE data = ANY(ARRAY[
+ 1::bigint, 2::bigint, 3::bigint, 4::bigint, 5::bigint, 6::bigint,
+ 7::bigint, 8::bigint, 9::bigint, 10::bigint, 11::bigint]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
--- Bigint, long tokens with parenthesis
+-- Bigint, long tokens with parentheses, will not be squashed
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT * FROM test_squash_bigint WHERE id IN
(abs(100), abs(200), abs(300), abs(400), abs(500), abs(600), abs(700),
abs(800), abs(900), abs(1000), ((abs(1100))));
+SELECT * FROM test_squash_bigint WHERE id = ANY(ARRAY[
+ abs(100), abs(200), abs(300), abs(400), abs(500), abs(600), abs(700),
+ abs(800), abs(900), abs(1000), ((abs(1100)))]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
--- CoerceViaIO, SubLink instead of a Const
-CREATE TABLE test_squash_jsonb (id int, data jsonb);
+-- Multiple FuncExprs. Will not be squashed
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
-SELECT * FROM test_squash_jsonb WHERE data IN
- ((SELECT '"1"')::jsonb, (SELECT '"2"')::jsonb, (SELECT '"3"')::jsonb,
- (SELECT '"4"')::jsonb, (SELECT '"5"')::jsonb, (SELECT '"6"')::jsonb,
- (SELECT '"7"')::jsonb, (SELECT '"8"')::jsonb, (SELECT '"9"')::jsonb,
- (SELECT '"10"')::jsonb);
+SELECT WHERE 1 IN (1::int::bigint::int, 2::int::bigint::int);
+SELECT WHERE 1 = ANY(ARRAY[1::int::bigint::int, 2::int::bigint::int]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+--
-- CoerceViaIO
+--
-- Create some dummy type to force CoerceViaIO
CREATE TYPE casttesttype;
@@ -141,19 +200,74 @@ SELECT * FROM test_squash_cast WHERE data IN
4::int4::casttesttype, 5::int4::casttesttype, 6::int4::casttesttype,
7::int4::casttesttype, 8::int4::casttesttype, 9::int4::casttesttype,
10::int4::casttesttype, 11::int4::casttesttype);
+SELECT * FROM test_squash_cast WHERE data = ANY (ARRAY
+ [1::int4::casttesttype, 2::int4::casttesttype, 3::int4::casttesttype,
+ 4::int4::casttesttype, 5::int4::casttesttype, 6::int4::casttesttype,
+ 7::int4::casttesttype, 8::int4::casttesttype, 9::int4::casttesttype,
+ 10::int4::casttesttype, 11::int4::casttesttype]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
-- Some casting expression are simplified to Const
+CREATE TABLE test_squash_jsonb (id int, data jsonb);
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
SELECT * FROM test_squash_jsonb WHERE data IN
(('"1"')::jsonb, ('"2"')::jsonb, ('"3"')::jsonb, ('"4"')::jsonb,
- ( '"5"')::jsonb, ( '"6"')::jsonb, ( '"7"')::jsonb, ( '"8"')::jsonb,
- ( '"9"')::jsonb, ( '"10"')::jsonb);
+ ('"5"')::jsonb, ('"6"')::jsonb, ('"7"')::jsonb, ('"8"')::jsonb,
+ ('"9"')::jsonb, ('"10"')::jsonb);
+SELECT * FROM test_squash_jsonb WHERE data = ANY (ARRAY
+ [('"1"')::jsonb, ('"2"')::jsonb, ('"3"')::jsonb, ('"4"')::jsonb,
+ ('"5"')::jsonb, ('"6"')::jsonb, ('"7"')::jsonb, ('"8"')::jsonb,
+ ('"9"')::jsonb, ('"10"')::jsonb]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+-- CoerceViaIO, SubLink instead of a Const. Will not be squashed
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+SELECT * FROM test_squash_jsonb WHERE data IN
+ ((SELECT '"1"')::jsonb, (SELECT '"2"')::jsonb, (SELECT '"3"')::jsonb,
+ (SELECT '"4"')::jsonb, (SELECT '"5"')::jsonb, (SELECT '"6"')::jsonb,
+ (SELECT '"7"')::jsonb, (SELECT '"8"')::jsonb, (SELECT '"9"')::jsonb,
+ (SELECT '"10"')::jsonb);
+SELECT * FROM test_squash_jsonb WHERE data = ANY(ARRAY
+ [(SELECT '"1"')::jsonb, (SELECT '"2"')::jsonb, (SELECT '"3"')::jsonb,
+ (SELECT '"4"')::jsonb, (SELECT '"5"')::jsonb, (SELECT '"6"')::jsonb,
+ (SELECT '"7"')::jsonb, (SELECT '"8"')::jsonb, (SELECT '"9"')::jsonb,
+ (SELECT '"10"')::jsonb]);
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+-- Multiple CoerceViaIO casts are squashed
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+SELECT WHERE 1 IN (1::text::int::text::int, 1::text::int::text::int);
+SELECT WHERE 1 = ANY(ARRAY[1::text::int::text::int, 1::text::int::text::int]);
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
-- RelabelType
+--
+
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
-SELECT * FROM test_squash WHERE id IN (1::oid, 2::oid, 3::oid, 4::oid, 5::oid, 6::oid, 7::oid, 8::oid, 9::oid);
+-- However many layers of RelabelType there are, the list will be squashable.
+SELECT * FROM test_squash WHERE id IN
+ (1::oid, 2::oid, 3::oid, 4::oid, 5::oid, 6::oid, 7::oid, 8::oid, 9::oid);
+SELECT ARRAY[1::oid, 2::oid, 3::oid, 4::oid, 5::oid, 6::oid, 7::oid, 8::oid, 9::oid];
+SELECT * FROM test_squash WHERE id IN (1::oid, 2::oid::int::oid);
+SELECT * FROM test_squash WHERE id = ANY(ARRAY[1::oid, 2::oid::int::oid]);
+-- RelabelType together with CoerceViaIO is also squashable
+SELECT * FROM test_squash WHERE id = ANY(ARRAY[1::oid::text::int::oid, 2::oid::int::oid]);
+SELECT * FROM test_squash WHERE id = ANY(ARRAY[1::text::int::oid, 2::oid::int::oid]);
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- edge cases
+--
+
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+-- for nested arrays, only constants are squashed
+SELECT ARRAY[
+ ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
+ ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
+ ];
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
-- Test constants evaluation in a CTE, which was causing issues in the past
@@ -163,7 +277,26 @@ WITH cte AS (
SELECT ARRAY['a', 'b', 'c', const::varchar] AS result
FROM cte;
--- Simple array would be squashed as well
SELECT pg_stat_statements_reset() IS NOT NULL AS t;
-SELECT ARRAY[1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+-- Rewritten as an OpExpr, so it will not be squashed
+select where '1' IN ('1'::int, '2'::int::text);
+-- Rewritten as an ArrayExpr, so it will be squashed
+select where '1' IN ('1'::int, '2'::int);
+SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+SELECT pg_stat_statements_reset() IS NOT NULL AS t;
+-- Both of these queries will be rewritten as an ArrayExpr, so they
+-- will be squashed, and have the same queryId
+select where '1' IN ('1'::int::text, '2'::int::text);
+select where '1' = ANY (array['1'::int::text, '2'::int::text]);
SELECT query, calls FROM pg_stat_statements ORDER BY query COLLATE "C";
+
+--
+-- cleanup
+--
+DROP TABLE test_squash;
+DROP TABLE test_float;
+DROP TABLE test_squash_numeric;
+DROP TABLE test_squash_bigint;
+DROP TABLE test_squash_cast CASCADE;
+DROP TABLE test_squash_jsonb;
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index 331f3fc088d..4283ce9f962 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -240,6 +240,7 @@ typedef struct PgFdwDirectModifyState
PGresult *result; /* result for query */
int num_tuples; /* # of result tuples */
int next_tuple; /* index of next one to return */
+ MemoryContextCallback result_cb; /* ensures result will get freed */
Relation resultRel; /* relcache entry for the target relation */
AttrNumber *attnoMap; /* array of attnums of input user columns */
AttrNumber ctidAttno; /* attnum of input ctid column */
@@ -2671,6 +2672,17 @@ postgresBeginDirectModify(ForeignScanState *node, int eflags)
node->fdw_state = dmstate;
/*
+ * We use a memory context callback to ensure that the dmstate's PGresult
+ * (if any) will be released, even if the query fails somewhere that's
+ * outside our control. The callback is always armed for the duration of
+ * the query; this relies on PQclear(NULL) being a no-op.
+ */
+ dmstate->result_cb.func = (MemoryContextCallbackFunction) PQclear;
+ dmstate->result_cb.arg = NULL;
+ MemoryContextRegisterResetCallback(CurrentMemoryContext,
+ &dmstate->result_cb);
+
+ /*
* Identify which user to do the remote access as. This should match what
* ExecCheckPermissions() does.
*/
@@ -2817,7 +2829,13 @@ postgresEndDirectModify(ForeignScanState *node)
return;
/* Release PGresult */
- PQclear(dmstate->result);
+ if (dmstate->result)
+ {
+ PQclear(dmstate->result);
+ dmstate->result = NULL;
+ /* ... and don't forget to disable the callback */
+ dmstate->result_cb.arg = NULL;
+ }
/* Release remote connection */
ReleaseConnection(dmstate->conn);
@@ -4591,13 +4609,17 @@ execute_dml_stmt(ForeignScanState *node)
/*
* Get the result, and check for success.
*
- * We don't use a PG_TRY block here, so be careful not to throw error
- * without releasing the PGresult.
+ * We use a memory context callback to ensure that the PGresult will be
+ * released, even if the query fails somewhere that's outside our control.
+ * The callback is already registered; we just need to fill in its arg.
*/
+ Assert(dmstate->result == NULL);
dmstate->result = pgfdw_get_result(dmstate->conn);
+ dmstate->result_cb.arg = dmstate->result;
+
if (PQresultStatus(dmstate->result) !=
(dmstate->has_returning ? PGRES_TUPLES_OK : PGRES_COMMAND_OK))
- pgfdw_report_error(ERROR, dmstate->result, dmstate->conn, true,
+ pgfdw_report_error(ERROR, dmstate->result, dmstate->conn, false,
dmstate->query);
/* Get the number of rows affected. */
@@ -4641,30 +4663,16 @@ get_returning_data(ForeignScanState *node)
}
else
{
- /*
- * On error, be sure to release the PGresult on the way out. Callers
- * do not have PG_TRY blocks to ensure this happens.
- */
- PG_TRY();
- {
- HeapTuple newtup;
-
- newtup = make_tuple_from_result_row(dmstate->result,
- dmstate->next_tuple,
- dmstate->rel,
- dmstate->attinmeta,
- dmstate->retrieved_attrs,
- node,
- dmstate->temp_cxt);
- ExecStoreHeapTuple(newtup, slot, false);
- }
- PG_CATCH();
- {
- PQclear(dmstate->result);
- PG_RE_THROW();
- }
- PG_END_TRY();
+ HeapTuple newtup;
+ newtup = make_tuple_from_result_row(dmstate->result,
+ dmstate->next_tuple,
+ dmstate->rel,
+ dmstate->attinmeta,
+ dmstate->retrieved_attrs,
+ node,
+ dmstate->temp_cxt);
+ ExecStoreHeapTuple(newtup, slot, false);
/* Get the updated/deleted tuple. */
if (dmstate->rel)
resultSlot = slot;
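
The arm/disarm choreography is the part worth internalizing: the callback is registered once with a NULL arg, which is safe because PQclear(NULL) is a no-op; the arg points at the PGresult for exactly as long as it lives; and it is reset to NULL on the normal cleanup path so that a later memory context reset cannot double-free. A bare sketch of the pattern (backend-only code; the struct and function names are invented):

#include "postgres.h"
#include "libpq-fe.h"
#include "utils/memutils.h"

typedef struct DemoState
{
	PGresult   *result;
	MemoryContextCallback result_cb;
} DemoState;

static void
demo_begin(DemoState *st)
{
	st->result = NULL;
	st->result_cb.func = (MemoryContextCallbackFunction) PQclear;
	st->result_cb.arg = NULL;	/* registered, but pointing at nothing */
	MemoryContextRegisterResetCallback(CurrentMemoryContext, &st->result_cb);
}

static void
demo_got_result(DemoState *st, PGresult *res)
{
	st->result = res;
	st->result_cb.arg = res;	/* arm: an error thrown anywhere from here
								 * on still frees the PGresult */
}

static void
demo_end(DemoState *st)
{
	if (st->result)
	{
		PQclear(st->result);
		st->result = NULL;
		st->result_cb.arg = NULL;	/* disarm to avoid a double PQclear */
	}
}

The design choice is to replace PG_TRY/PG_CATCH guards with bookkeeping that survives an error thrown anywhere, not just inside explicitly guarded sections.
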
diff --git a/contrib/sepgsql/t/001_sepgsql.pl b/contrib/sepgsql/t/001_sepgsql.pl
index cd00e4963db..f5e4645e4e6 100644
--- a/contrib/sepgsql/t/001_sepgsql.pl
+++ b/contrib/sepgsql/t/001_sepgsql.pl
@@ -1,5 +1,5 @@
-# Copyright (c) 2024, PostgreSQL Global Development Group
+# Copyright (c) 2024-2025, PostgreSQL Global Development Group
use strict;
use warnings FATAL => 'all';
diff --git a/contrib/test_decoding/expected/invalidation_distribution.out b/contrib/test_decoding/expected/invalidation_distribution.out
index ad0a944cbf3..ae53b1e61de 100644
--- a/contrib/test_decoding/expected/invalidation_distribution.out
+++ b/contrib/test_decoding/expected/invalidation_distribution.out
@@ -1,4 +1,4 @@
-Parsed test spec with 2 sessions
+Parsed test spec with 3 sessions
starting permutation: s1_insert_tbl1 s1_begin s1_insert_tbl1 s2_alter_pub_add_tbl s1_commit s1_insert_tbl1 s2_get_binary_changes
step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
@@ -18,3 +18,24 @@ count
stop
(1 row)
+
+starting permutation: s1_begin s1_insert_tbl1 s3_begin s3_insert_tbl1 s2_alter_pub_add_tbl s1_insert_tbl1 s1_commit s3_commit s2_get_binary_changes
+step s1_begin: BEGIN;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s3_begin: BEGIN;
+step s3_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (2, 2);
+step s2_alter_pub_add_tbl: ALTER PUBLICATION pub ADD TABLE tbl1;
+step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1);
+step s1_commit: COMMIT;
+step s3_commit: COMMIT;
+step s2_get_binary_changes: SELECT count(data) FROM pg_logical_slot_get_binary_changes('isolation_slot', NULL, NULL, 'proto_version', '4', 'publication_names', 'pub') WHERE get_byte(data, 0) = 73;
+count
+-----
+ 1
+(1 row)
+
+?column?
+--------
+stop
+(1 row)
+
diff --git a/contrib/test_decoding/specs/invalidation_distribution.spec b/contrib/test_decoding/specs/invalidation_distribution.spec
index decbed627e3..67d41969ac1 100644
--- a/contrib/test_decoding/specs/invalidation_distribution.spec
+++ b/contrib/test_decoding/specs/invalidation_distribution.spec
@@ -28,5 +28,16 @@ setup { SET synchronous_commit=on; }
step "s2_alter_pub_add_tbl" { ALTER PUBLICATION pub ADD TABLE tbl1; }
step "s2_get_binary_changes" { SELECT count(data) FROM pg_logical_slot_get_binary_changes('isolation_slot', NULL, NULL, 'proto_version', '4', 'publication_names', 'pub') WHERE get_byte(data, 0) = 73; }
+session "s3"
+setup { SET synchronous_commit=on; }
+step "s3_begin" { BEGIN; }
+step "s3_insert_tbl1" { INSERT INTO tbl1 (val1, val2) VALUES (2, 2); }
+step "s3_commit" { COMMIT; }
+
# Expect to get one insert change. LOGICAL_REP_MSG_INSERT = 'I'
permutation "s1_insert_tbl1" "s1_begin" "s1_insert_tbl1" "s2_alter_pub_add_tbl" "s1_commit" "s1_insert_tbl1" "s2_get_binary_changes"
+
+# Expect to get one insert change with LOGICAL_REP_MSG_INSERT = 'I' from
+# the second "s1_insert_tbl1" executed after adding the table tbl1 to the
+# publication in "s2_alter_pub_add_tbl".
+permutation "s1_begin" "s1_insert_tbl1" "s3_begin" "s3_insert_tbl1" "s2_alter_pub_add_tbl" "s1_insert_tbl1" "s1_commit" "s3_commit" "s2_get_binary_changes"
diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml
index cbd4e40a320..fa86c569dc4 100644
--- a/doc/src/sgml/catalogs.sgml
+++ b/doc/src/sgml/catalogs.sgml
@@ -1582,7 +1582,7 @@
<structfield>rolpassword</structfield> <type>text</type>
</para>
<para>
- Password (possibly encrypted); null if none. The format depends
+ Encrypted password; null if none. The format depends
on the form of encryption used.
</para></entry>
</row>
@@ -1627,11 +1627,6 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
<replaceable>ServerKey</replaceable> are in Base64 encoded format. This format is
the same as that specified by <ulink url="https://datatracker.ietf.org/doc/html/rfc5803">RFC 5803</ulink>.
</para>
-
- <para>
- A password that does not follow either of those formats is assumed to be
- unencrypted.
- </para>
</sect1>
@@ -2629,7 +2624,6 @@ SCRAM-SHA-256$<replaceable>&lt;iteration count&gt;</replaceable>:<replaceable>&l
</para>
<para>
Has the constraint been validated?
- Currently, can be false only for foreign keys and CHECK constraints
</para></entry>
</row>
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index 23d2b1be424..59a0874528a 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -140,7 +140,7 @@
An example of what this file might look like is:
<programlisting>
# This is a comment
-log_connections = yes
+log_connections = all
log_destination = 'syslog'
search_path = '"$user", public'
shared_buffers = 128MB
@@ -337,7 +337,7 @@ UPDATE pg_settings SET setting = reset_val WHERE name = 'configuration_parameter
<option>-c name=value</option> command-line parameter, or its equivalent
<option>--name=value</option> variation. For example,
<programlisting>
-postgres -c log_connections=yes --log-destination='syslog'
+postgres -c log_connections=all --log-destination='syslog'
</programlisting>
Settings provided in this way override those set via
<filename>postgresql.conf</filename> or <command>ALTER SYSTEM</command>,
@@ -1155,6 +1155,22 @@ include_dir 'conf.d'
</listitem>
</varlistentry>
+ <varlistentry id="guc-md5-password-warnings" xreflabel="md5_password_warnings">
+ <term><varname>md5_password_warnings</varname> (<type>boolean</type>)
+ <indexterm>
+ <primary><varname>md5_password_warnings</varname> configuration parameter</primary>
+ </indexterm>
+ </term>
+ <listitem>
+ <para>
+ Controls whether a <literal>WARNING</literal> about MD5 password
+ deprecation is produced when a <command>CREATE ROLE</command> or
+ <command>ALTER ROLE</command> statement sets an MD5-encrypted password.
+ The default value is <literal>on</literal>.
+ </para>
+ </listitem>
+ </varlistentry>
+
<varlistentry id="guc-krb-server-keyfile" xreflabel="krb_server_keyfile">
<term><varname>krb_server_keyfile</varname> (<type>string</type>)
<indexterm>
@@ -2347,7 +2363,7 @@ include_dir 'conf.d'
</listitem>
</varlistentry>
- <varlistentry id="guc_file_copy_method" xreflabel="file_copy_method">
+ <varlistentry id="guc-file-copy-method" xreflabel="file_copy_method">
<term><varname>file_copy_method</varname> (<type>enum</type>)
<indexterm>
<primary><varname>file_copy_method</varname> configuration parameter</primary>
@@ -2763,6 +2779,7 @@ include_dir 'conf.d'
</para>
</listitem>
</itemizedlist>
+ The default is <literal>worker</literal>.
</para>
<para>
This parameter can only be set at server start.
@@ -2771,7 +2788,7 @@ include_dir 'conf.d'
</varlistentry>
<varlistentry id="guc-io-workers" xreflabel="io_workers">
- <term><varname>io_workers</varname> (<type>int</type>)
+ <term><varname>io_workers</varname> (<type>integer</type>)
<indexterm>
<primary><varname>io_workers</varname> configuration parameter</primary>
</indexterm>
@@ -2877,7 +2894,8 @@ include_dir 'conf.d'
Sets the maximum number of parallel workers that can be
started by a single utility command. Currently, the parallel
utility commands that support the use of parallel workers are
- <command>CREATE INDEX</command> when building a B-tree or BRIN index,
+ <command>CREATE INDEX</command> when building a B-tree,
+ GIN, or BRIN index,
and <command>VACUUM</command> without <literal>FULL</literal>
option. Parallel workers are taken from the pool of processes
established by <xref linkend="guc-max-worker-processes"/>, limited
@@ -5748,7 +5766,7 @@ ANY <replaceable class="parameter">num_sync</replaceable> ( <replaceable class="
</listitem>
</varlistentry>
- <varlistentry id="guc-enable_self_join_elimination" xreflabel="enable_self_join_elimination">
+ <varlistentry id="guc-enable-self-join-elimination" xreflabel="enable_self_join_elimination">
<term><varname>enable_self_join_elimination</varname> (<type>boolean</type>)
<indexterm>
<primary><varname>enable_self_join_elimination</varname> configuration parameter</primary>
@@ -7511,12 +7529,12 @@ local0.* /var/log/postgresql
<entry><literal>setup_durations</literal></entry>
<entry>
Logs the time spent establishing the connection and setting up the
- backend at the time the connection is ready to execute its first
- query. The log message includes the total setup duration, starting
- from the postmaster accepting the incoming connection and ending
- when the connection is ready for query. It also includes the time
- it took to fork the new backend and the time it took to
- authenticate the user.
+ backend until the connection is ready to execute its first
+ query. The log message includes three durations: the total
+ setup duration (starting from the postmaster accepting the
+ incoming connection and ending when the connection is ready
+ for query), the time it took to fork the new backend, and
+ the time it took to authenticate the user.
</entry>
</row>
@@ -7907,10 +7925,10 @@ log_line_prefix = '%m [%p] %q%u@%d/%a '
</listitem>
</varlistentry>
- <varlistentry id="guc-log-lock-failure" xreflabel="log_lock_failure">
- <term><varname>log_lock_failure</varname> (<type>boolean</type>)
+ <varlistentry id="guc-log-lock-failures" xreflabel="log_lock_failures">
+ <term><varname>log_lock_failures</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>log_lock_failure</varname> configuration parameter</primary>
+ <primary><varname>log_lock_failures</varname> configuration parameter</primary>
</indexterm>
</term>
<listitem>
@@ -8118,22 +8136,6 @@ log_line_prefix = '%m [%p] %q%u@%d/%a '
</listitem>
</varlistentry>
- <varlistentry id="guc-md5-password-warnings" xreflabel="md5_password_warnings">
- <term><varname>md5_password_warnings</varname> (<type>boolean</type>)
- <indexterm>
- <primary><varname>md5_password_warnings</varname> configuration parameter</primary>
- </indexterm>
- </term>
- <listitem>
- <para>
- Controls whether a <literal>WARNING</literal> about MD5 password
- deprecation is produced when a <command>CREATE ROLE</command> or
- <command>ALTER ROLE</command> statement sets an MD5-encrypted password.
- The default value is <literal>on</literal>.
- </para>
- </listitem>
- </varlistentry>
-
</variablelist>
</sect2>
<sect2 id="runtime-config-logging-csvlog">
@@ -8600,7 +8602,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
timing information is displayed in
<link linkend="vacuum-progress-reporting"><structname>pg_stat_progress_vacuum</structname></link>,
<link linkend="analyze-progress-reporting"><structname>pg_stat_progress_analyze</structname></link>,
- in the output of <xref linkend="sql-vacuum"/> when the
+ in the output of <xref linkend="sql-vacuum"/> and
+ <xref linkend="sql-analyze"/> when the
<literal>VERBOSE</literal> option is used, and by autovacuum for
auto-vacuums and auto-analyzes when
<xref linkend="guc-log-autovacuum-min-duration"/> is set.
@@ -9338,7 +9341,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-vacuum-truncate" xreflabel="vacuum_truncate">
<term><varname>vacuum_truncate</varname> (<type>boolean</type>)
<indexterm>
- <primary><varname>vacuum_truncate</varname> configuration parameter</primary>
+ <primary><varname>vacuum_truncate</varname></primary>
+ <secondary>configuration parameter</secondary>
</indexterm>
</term>
<listitem>
@@ -9542,7 +9546,8 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv;
<varlistentry id="guc-vacuum-max-eager-freeze-failure-rate" xreflabel="vacuum_max_eager_freeze_failure_rate">
<term><varname>vacuum_max_eager_freeze_failure_rate</varname> (<type>floating point</type>)
<indexterm>
- <primary><varname>vacuum_max_eager_freeze_failure_rate</varname> configuration parameter</primary>
+ <primary><varname>vacuum_max_eager_freeze_failure_rate</varname></primary>
+ <secondary>configuration parameter</secondary>
</indexterm>
</term>
<listitem>
diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml
index fcd1cb85352..65bc070d2e5 100644
--- a/doc/src/sgml/ddl.sgml
+++ b/doc/src/sgml/ddl.sgml
@@ -421,6 +421,16 @@ CREATE TABLE people (
</listitem>
<listitem>
<para>
+ A virtual generated column cannot have a user-defined type, and the
+ generation expression of a virtual generated column must not reference
+ user-defined functions or types, that is, it can only use built-in
+ functions or types. This applies also indirectly, such as for functions
+ or types that underlie operators or casts. (This restriction does not
+ exist for stored generated columns.)
+ </para>
+ </listitem>
+ <listitem>
+ <para>
A generated column cannot have a column default or an identity definition.
</para>
</listitem>
@@ -2223,8 +2233,9 @@ REVOKE ALL ON accounts FROM PUBLIC;
<para>
Allows <command>VACUUM</command>, <command>ANALYZE</command>,
<command>CLUSTER</command>, <command>REFRESH MATERIALIZED VIEW</command>,
- <command>REINDEX</command>, and <command>LOCK TABLE</command> on a
- relation.
+ <command>REINDEX</command>, <command>LOCK TABLE</command>,
+ and database object statistics manipulation functions
+ (see <xref linkend="functions-admin-statsmod"/>) on a relation.
</para>
</listitem>
</varlistentry>
diff --git a/doc/src/sgml/docguide.sgml b/doc/src/sgml/docguide.sgml
index db4bcce56ea..7b61b4841aa 100644
--- a/doc/src/sgml/docguide.sgml
+++ b/doc/src/sgml/docguide.sgml
@@ -60,9 +60,7 @@
maintained by the <ulink url="https://www.oasis-open.org">
OASIS group</ulink>. The <ulink url="https://www.oasis-open.org/docbook/">
official DocBook site</ulink> has good introductory and reference documentation and
- a complete O'Reilly book for your online reading pleasure. The
- <ulink url="http://newbiedoc.sourceforge.net/metadoc/docbook-guide.html">
- NewbieDoc Docbook Guide</ulink> is very helpful for beginners.
+ a complete O'Reilly book for your online reading pleasure.
The <ulink url="https://www.freebsd.org/docproj/">
FreeBSD Documentation Project</ulink> also uses DocBook and has some good
information, including a number of style guidelines that might be
diff --git a/doc/src/sgml/filelist.sgml b/doc/src/sgml/filelist.sgml
index fef9584f908..bcde3cfd037 100644
--- a/doc/src/sgml/filelist.sgml
+++ b/doc/src/sgml/filelist.sgml
@@ -180,7 +180,7 @@
<!ENTITY sourcerepo SYSTEM "sourcerepo.sgml">
<!ENTITY release SYSTEM "release.sgml">
-<!ENTITY release-18 SYSTEM "release-18.sgml">
+<!ENTITY release-19 SYSTEM "release-19.sgml">
<!ENTITY limits SYSTEM "limits.sgml">
<!ENTITY acronyms SYSTEM "acronyms.sgml">
diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml
index b405525a465..298791858be 100644
--- a/doc/src/sgml/func.sgml
+++ b/doc/src/sgml/func.sgml
@@ -1824,13 +1824,23 @@ SELECT NOT(ROW(table.*) IS NOT NULL) FROM TABLE; -- detect at least one null in
which <parameter>operand</parameter> falls in a histogram
having <parameter>count</parameter> equal-width buckets spanning the
range <parameter>low</parameter> to <parameter>high</parameter>.
- Returns <literal>0</literal>
+ The buckets have inclusive lower bounds and exclusive upper bounds.
+ Returns <literal>0</literal> for an input less
+ than <parameter>low</parameter>,
or <literal><parameter>count</parameter>+1</literal> for an input
- outside that range.
+ greater than or equal to <parameter>high</parameter>.
+ If <parameter>low</parameter> &gt; <parameter>high</parameter>,
+ the behavior is mirror-reversed, with bucket <literal>1</literal>
+ now being the one just below <parameter>low</parameter>, and the
+ inclusive bounds now being on the upper side.
</para>
<para>
<literal>width_bucket(5.35, 0.024, 10.06, 5)</literal>
<returnvalue>3</returnvalue>
+ </para>
+ <para>
+ <literal>width_bucket(9, 10, 0, 10)</literal>
+ <returnvalue>2</returnvalue>
</para></entry>
</row>
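
For readers who want the bucket arithmetic spelled out, a small C model of the rules as now documented (a sketch, not the server's implementation; rounding at bucket edges is glossed over, and equal bounds, an error in SQL, are not handled):

#include <stdio.h>

static int
width_bucket(double operand, double low, double high, int count)
{
	if (low < high)
	{
		/* inclusive lower bounds, exclusive upper bounds */
		if (operand < low)
			return 0;
		if (operand >= high)
			return count + 1;
		return (int) ((operand - low) / (high - low) * count) + 1;
	}

	/* low > high: mirror-reversed, bounds inclusive on the upper side */
	if (operand > low)
		return 0;
	if (operand <= high)
		return count + 1;
	return (int) ((low - operand) / (low - high) * count) + 1;
}

int
main(void)
{
	printf("%d\n", width_bucket(5.35, 0.024, 10.06, 5));	/* 3 */
	printf("%d\n", width_bucket(9, 10, 0, 10));				/* 2 */
	return 0;
}
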
@@ -1842,8 +1852,8 @@ SELECT NOT(ROW(table.*) IS NOT NULL) FROM TABLE; -- detect at least one null in
<para>
Returns the number of the bucket in
which <parameter>operand</parameter> falls given an array listing the
- lower bounds of the buckets. Returns <literal>0</literal> for an
- input less than the first lower
+ inclusive lower bounds of the buckets.
+ Returns <literal>0</literal> for an input less than the first lower
bound. <parameter>operand</parameter> and the array elements can be
of any type having standard comparison operators.
The <parameter>thresholds</parameter> array <emphasis>must be
@@ -14374,7 +14384,7 @@ CREATE TYPE rainbow AS ENUM ('red', 'orange', 'yellow', 'green', 'blue', 'purple
<sect1 id="functions-uuid">
<title>UUID Functions</title>
- <indexterm zone="datatype-uuid">
+ <indexterm zone="functions-uuid">
<primary>UUID</primary>
<secondary>generating</secondary>
</indexterm>
@@ -28666,143 +28676,6 @@ acl | {postgres=arwdDxtm/postgres,foo=r/postgres}
<row>
<entry role="func_table_entry"><para role="func_signature">
<indexterm>
- <primary>pg_get_process_memory_contexts</primary>
- </indexterm>
- <function>pg_get_process_memory_contexts</function> ( <parameter>pid</parameter> <type>integer</type>, <parameter>summary</parameter> <type>boolean</type>, <parameter>timeout</parameter> <type>float</type> )
- <returnvalue>setof record</returnvalue>
- ( <parameter>name</parameter> <type>text</type>,
- <parameter>ident</parameter> <type>text</type>,
- <parameter>type</parameter> <type>text</type>,
- <parameter>path</parameter> <type>integer[]</type>,
- <parameter>level</parameter> <type>integer</type>,
- <parameter>total_bytes</parameter> <type>bigint</type>,
- <parameter>total_nblocks</parameter> <type>bigint</type>,
- <parameter>free_bytes</parameter> <type>bigint</type>,
- <parameter>free_chunks</parameter> <type>bigint</type>,
- <parameter>used_bytes</parameter> <type>bigint</type>,
- <parameter>num_agg_contexts</parameter> <type>integer</type>,
- <parameter>stats_timestamp</parameter> <type>timestamptz</type> )
- </para>
- <para>
- This function handles requests to display the memory contexts of a
- <productname>PostgreSQL</productname> process with the specified
- process ID. The function can be used to send requests to backends as
- well as <glossterm linkend="glossary-auxiliary-proc">auxiliary processes</glossterm>.
- </para>
- <para>
- The returned record contains extended statistics per each memory
- context:
- <itemizedlist spacing="compact">
- <listitem>
- <para>
- <parameter>name</parameter> - The name of the memory context.
- </para>
- </listitem>
- <listitem>
- <para>
- <parameter>ident</parameter> - Memory context ID (if any).
- </para>
- </listitem>
- <listitem>
- <para>
- <parameter>type</parameter> - The type of memory context, possible
- values are: AllocSet, Generation, Slab and Bump.
- </para>
- </listitem>
- <listitem>
- <para>
- <parameter>path</parameter> - Memory contexts are organized in a
- tree model with TopMemoryContext as the root, and all other memory
- contexts as nodes in the tree. The <parameter>path</parameter>
- displays the path from the root to the current memory context. The
- path is limited to 100 children per node, which each node limited
- to a max depth of 100, to preserve memory during reporting. The
- printed path will also be limited to 100 nodes counting from the
- TopMemoryContext.
- </para>
- </listitem>
- <listitem>
- <para>
- <parameter>level</parameter> - The level in the tree of the current
- memory context.
- </para>
- </listitem>
- <listitem>
- <para>
- <parameter>total_bytes</parameter> - The total number of bytes
- allocated to this memory context.
- </para>
- </listitem>
- <listitem>
- <para>
- <parameter>total_nblocks</parameter> - The total number of blocks
- used for the allocated memory.
- </para>
- </listitem>
- <listitem>
- <para>
- <parameter>free_bytes</parameter> - The amount of free memory in
- this memory context.
- </para>
- </listitem>
- <listitem>
- <para>
- <parameter>free_chunks</parameter> - The number of chunks that
- <parameter>free_bytes</parameter> corresponds to.
- </para>
- </listitem>
- <listitem>
- <para>
- <parameter>used_bytes</parameter> - The total number of bytes
- currently occupied.
- </para>
- </listitem>
- <listitem>
- <para>
- <parameter>num_agg_contexts</parameter> - The number of memory
- contexts aggregated in the displayed statistics.
- </para>
- </listitem>
- <listitem>
- <para>
- <parameter>stats_timestamp</parameter> - When the statistics were
- extracted from the process.
- </para>
- </listitem>
- </itemizedlist>
- </para>
- <para>
- When <parameter>summary</parameter> is <literal>true</literal>, statistics
- for memory contexts at levels 1 and 2 are displayed, with level 1
- representing the root node (i.e., <literal>TopMemoryContext</literal>).
- Statistics for contexts on level 2 and below are aggregates of all
- child contexts' statistics, where <literal>num_agg_contexts</literal>
- indicate the number aggregated child contexts. When
- <parameter>summary</parameter> is <literal>false</literal>,
- <literal>the num_agg_contexts</literal> value is <literal>1</literal>,
- indicating that individual statistics are being displayed.
- </para>
- <para>
- Busy processes can delay reporting memory context statistics,
- <parameter>timeout</parameter> specifies the number of seconds
- to wait for updated statistics. <parameter>timeout</parameter> can be
- specified in fractions of a second.
- </para>
- <para>
- After receiving memory context statistics from the target process, it
- returns the results as one row per context. If all the contexts don't
- fit within the pre-determined size limit, the remaining context
- statistics are aggregated and a cumulative total is displayed. The
- <literal>num_agg_contexts</literal> column indicates the number of
- contexts aggregated in the displayed statistics. When
- <literal>num_agg_contexts</literal> is <literal>1</literal> it means
- that the context statistics are displayed separately.
- </para></entry>
- </row>
-
- <row>
- <entry role="func_table_entry"><para role="func_signature">
- <indexterm>
<primary>pg_log_backend_memory_contexts</primary>
</indexterm>
<function>pg_log_backend_memory_contexts</function> ( <parameter>pid</parameter> <type>integer</type> )
@@ -28939,40 +28812,6 @@ LOG: Grand total: 1651920 bytes in 201 blocks; 622360 free (88 chunks); 1029560
because it may generate a large number of log messages.
</para>
- <para>
- <function>pg_get_process_memory_contexts</function> can be used to request
- memory contexts statistics of any <productname>PostgreSQL</productname>
- process. For example:
-<programlisting>
-postgres=# SELECT * FROM pg_get_process_memory_contexts(
- (SELECT pid FROM pg_stat_activity
- WHERE backend_type = 'checkpointer'),
- false, 0.5) LIMIT 1;
--[ RECORD 1 ]----+------------------------------
-name | TopMemoryContext
-ident |
-type | AllocSet
-path | {1}
-level | 1
-total_bytes | 90304
-total_nblocks | 3
-free_bytes | 2880
-free_chunks | 1
-used_bytes | 87424
-num_agg_contexts | 1
-stats_timestamp | 2025-03-24 13:55:47.796698+01
-</programlisting>
- <note>
- <para>
- While <function>pg_get_process_memory_contexts</function> can be used to
- query memory contexts of the local backend,
- <structname>pg_backend_memory_contexts</structname>
- (see <xref linkend="view-pg-backend-memory-contexts"/> for more details)
- will be less resource intensive when only the local backend is of interest.
- </para>
- </note>
- </para>
-
</sect2>
<sect2 id="functions-admin-backup">
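A minimal usage sketch for <function>pg_log_backend_memory_contexts</function>, targeting the current backend via <function>pg_backend_pid()</function>; note the statistics go to the server log, not back to the client:
<programlisting>
-- Ask the server to log this backend's memory context statistics.
SELECT pg_log_backend_memory_contexts(pg_backend_pid());
</programlisting>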
@@ -29869,7 +29708,7 @@ postgres=# SELECT '0/0'::pg_lsn + pd.segment_number * ps.setting::int + :offset
</row>
<row>
- <entry role="func_table_entry"><para role="func_signature">
+ <entry id="pg-logical-slot-get-binary-changes" role="func_table_entry"><para role="func_signature">
<indexterm>
<primary>pg_logical_slot_get_binary_changes</primary>
</indexterm>
@@ -30141,7 +29980,9 @@ postgres=# SELECT '0/0'::pg_lsn + pd.segment_number * ps.setting::int + :offset
standby server. Temporary synced slots, if any, cannot be used for
logical decoding and must be dropped after promotion. See
<xref linkend="logicaldecoding-replication-slots-synchronization"/> for details.
- Note that this function cannot be executed if
+ Note that this function is primarily intended for testing and
+ debugging purposes and should be used with caution. Additionally,
+ this function cannot be executed if
<link linkend="guc-sync-replication-slots"><varname>
sync_replication_slots</varname></link> is enabled and the slotsync
worker is already running to perform the synchronization of slots.
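As a sketch of the testing/debugging usage cautioned about here, a one-off manual synchronization on a standby could look like this (assuming <varname>sync_replication_slots</varname> is disabled there, since the function cannot run while the slotsync worker is active):
<programlisting>
-- On the standby: perform a single, manual synchronization of failover slots.
SELECT pg_sync_replication_slots();
</programlisting>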
diff --git a/doc/src/sgml/gist.sgml b/doc/src/sgml/gist.sgml
index a373a8aa4b2..ee86e170055 100644
--- a/doc/src/sgml/gist.sgml
+++ b/doc/src/sgml/gist.sgml
@@ -1170,7 +1170,7 @@ my_sortsupport(PG_FUNCTION_ARGS)
</varlistentry>
<varlistentry>
- <term><function>stratnum</function></term>
+ <term><function>translate_cmptype</function></term>
<listitem>
<para>
Given a <literal>CompareType</literal> value from
@@ -1189,11 +1189,22 @@ my_sortsupport(PG_FUNCTION_ARGS)
</para>
<para>
+ This support function corresponds to the index access method callback
+ function <structfield>amtranslatecmptype</structfield> (see <xref
+ linkend="index-functions"/>). The
+ <structfield>amtranslatecmptype</structfield> callback function for
+ GiST indexes merely calls down to the
+ <function>translate_cmptype</function> support function of the
+ respective operator family, since the GiST index access method has no
+ fixed strategy numbers itself.
+ </para>
+
+ <para>
The <acronym>SQL</acronym> declaration of the function must look like
this:
<programlisting>
-CREATE OR REPLACE FUNCTION my_stratnum(integer)
+CREATE OR REPLACE FUNCTION my_translate_cmptype(integer)
RETURNS smallint
AS 'MODULE_PATHNAME'
LANGUAGE C STRICT;
@@ -1202,7 +1213,7 @@ LANGUAGE C STRICT;
And the operator family registration must look like this:
<programlisting>
ALTER OPERATOR FAMILY my_opfamily USING gist ADD
- FUNCTION 12 ("any", "any") my_stratnum(int);
+ FUNCTION 12 ("any", "any") my_translate_cmptype(int);
</programlisting>
</para>
@@ -1210,10 +1221,10 @@ ALTER OPERATOR FAMILY my_opfamily USING gist ADD
The matching code in the C module could then follow this skeleton:
<programlisting>
-PG_FUNCTION_INFO_V1(my_stratnum);
+PG_FUNCTION_INFO_V1(my_translate_cmptype);
Datum
-my_stratnum(PG_FUNCTION_ARGS)
+my_translate_cmptype(PG_FUNCTION_ARGS)
{
CompareType cmptype = PG_GETARG_INT32(0);
StrategyNumber ret = InvalidStrategy;
@@ -1232,11 +1243,11 @@ my_stratnum(PG_FUNCTION_ARGS)
<para>
One translation function is provided by
<productname>PostgreSQL</productname>:
- <literal>gist_stratnum_common</literal> is for operator classes that
+ <literal>gist_translate_cmptype_common</literal> is for operator classes that
use the <literal>RT*StrategyNumber</literal> constants.
The <literal>btree_gist</literal>
extension defines a second translation function,
- <literal>gist_stratnum_btree</literal>, for operator classes that use
+ <literal>gist_translate_cmptype_btree</literal>, for operator classes that use
the <literal>BT*StrategyNumber</literal> constants.
</para>
</listitem>
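For an operator class that uses the <literal>RT*StrategyNumber</literal> constants, the built-in translation function can be registered directly, with no C code needed; <literal>my_opfamily</literal> is a placeholder name:
<programlisting>
ALTER OPERATOR FAMILY my_opfamily USING gist ADD
    FUNCTION 12 ("any", "any") gist_translate_cmptype_common(int);
</programlisting>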
diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml
index 695fe958c3e..298c4b38ef9 100644
--- a/doc/src/sgml/libpq.sgml
+++ b/doc/src/sgml/libpq.sgml
@@ -1,6 +1,6 @@
<!-- doc/src/sgml/libpq.sgml -->
-<chapter id="libpq">
+<chapter id="libpq" xreflabel="libpq">
<title><application>libpq</application> &mdash; C Library</title>
<indexterm zone="libpq">
@@ -2168,6 +2168,24 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
</listitem>
</varlistentry>
+ <varlistentry id="libpq-connect-ssl-max-protocol-version" xreflabel="ssl_max_protocol_version">
+ <term><literal>ssl_max_protocol_version</literal></term>
+ <listitem>
+ <para>
+ This parameter specifies the maximum SSL/TLS protocol version to allow
+ for the connection. Valid values are <literal>TLSv1</literal>,
+ <literal>TLSv1.1</literal>, <literal>TLSv1.2</literal> and
+ <literal>TLSv1.3</literal>. The supported protocols depend on the
+ version of <productname>OpenSSL</productname> used, older versions
+ not supporting the most modern protocol versions. If not set, this
+ parameter is ignored and the connection will use the maximum bound
+ defined by the backend, if set. Setting the maximum protocol version
+ is mainly useful for testing or if some component has issues working
+ with a newer protocol.
+ </para>
+ </listitem>
+ </varlistentry>
+
<varlistentry id="libpq-connect-min-protocol-version" xreflabel="min_protocol_version">
<term><literal>min_protocol_version</literal></term>
<listitem>
@@ -2216,24 +2234,6 @@ postgresql://%2Fvar%2Flib%2Fpostgresql/dbname
</listitem>
</varlistentry>
- <varlistentry id="libpq-connect-ssl-max-protocol-version" xreflabel="ssl_max_protocol_version">
- <term><literal>ssl_max_protocol_version</literal></term>
- <listitem>
- <para>
- This parameter specifies the maximum SSL/TLS protocol version to allow
- for the connection. Valid values are <literal>TLSv1</literal>,
- <literal>TLSv1.1</literal>, <literal>TLSv1.2</literal> and
- <literal>TLSv1.3</literal>. The supported protocols depend on the
- version of <productname>OpenSSL</productname> used, older versions
- not supporting the most modern protocol versions. If not set, this
- parameter is ignored and the connection will use the maximum bound
- defined by the backend, if set. Setting the maximum protocol version
- is mainly useful for testing or if some component has issues working
- with a newer protocol.
- </para>
- </listitem>
- </varlistentry>
-
<varlistentry id="libpq-connect-krbsrvname" xreflabel="krbsrvname">
<term><literal>krbsrvname</literal></term>
<listitem>
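A hedged example of this parameter in a keyword/value connection string (the host name is illustrative); pinning both the minimum and maximum bound to the same version forces exactly one TLS protocol:
<programlisting>
host=db.example.com sslmode=require ssl_min_protocol_version=TLSv1.3 ssl_max_protocol_version=TLSv1.3
</programlisting>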
diff --git a/doc/src/sgml/logical-replication.sgml b/doc/src/sgml/logical-replication.sgml
index f288c049a5c..c32e6bc000d 100644
--- a/doc/src/sgml/logical-replication.sgml
+++ b/doc/src/sgml/logical-replication.sgml
@@ -363,34 +363,25 @@
<para>
Create some test tables on the publisher.
<programlisting>
-test_pub=# CREATE TABLE t1(a int, b text, PRIMARY KEY(a));
-CREATE TABLE
-test_pub=# CREATE TABLE t2(c int, d text, PRIMARY KEY(c));
-CREATE TABLE
-test_pub=# CREATE TABLE t3(e int, f text, PRIMARY KEY(e));
-CREATE TABLE
+/* pub # */ CREATE TABLE t1(a int, b text, PRIMARY KEY(a));
+/* pub # */ CREATE TABLE t2(c int, d text, PRIMARY KEY(c));
+/* pub # */ CREATE TABLE t3(e int, f text, PRIMARY KEY(e));
</programlisting></para>
<para>
Create the same tables on the subscriber.
<programlisting>
-test_sub=# CREATE TABLE t1(a int, b text, PRIMARY KEY(a));
-CREATE TABLE
-test_sub=# CREATE TABLE t2(c int, d text, PRIMARY KEY(c));
-CREATE TABLE
-test_sub=# CREATE TABLE t3(e int, f text, PRIMARY KEY(e));
-CREATE TABLE
+/* sub # */ CREATE TABLE t1(a int, b text, PRIMARY KEY(a));
+/* sub # */ CREATE TABLE t2(c int, d text, PRIMARY KEY(c));
+/* sub # */ CREATE TABLE t3(e int, f text, PRIMARY KEY(e));
</programlisting></para>
<para>
Insert data to the tables at the publisher side.
<programlisting>
-test_pub=# INSERT INTO t1 VALUES (1, 'one'), (2, 'two'), (3, 'three');
-INSERT 0 3
-test_pub=# INSERT INTO t2 VALUES (1, 'A'), (2, 'B'), (3, 'C');
-INSERT 0 3
-test_pub=# INSERT INTO t3 VALUES (1, 'i'), (2, 'ii'), (3, 'iii');
-INSERT 0 3
+/* pub # */ INSERT INTO t1 VALUES (1, 'one'), (2, 'two'), (3, 'three');
+/* pub # */ INSERT INTO t2 VALUES (1, 'A'), (2, 'B'), (3, 'C');
+/* pub # */ INSERT INTO t3 VALUES (1, 'i'), (2, 'ii'), (3, 'iii');
</programlisting></para>
<para>
@@ -399,41 +390,34 @@ INSERT 0 3
<link linkend="sql-createpublication-params-with-publish"><literal>publish</literal></link>
operations. The publication <literal>pub3b</literal> has a row filter (see
<xref linkend="logical-replication-row-filter"/>).
-<programlisting>
-test_pub=# CREATE PUBLICATION pub1 FOR TABLE t1;
-CREATE PUBLICATION
-test_pub=# CREATE PUBLICATION pub2 FOR TABLE t2 WITH (publish = 'truncate');
-CREATE PUBLICATION
-test_pub=# CREATE PUBLICATION pub3a FOR TABLE t3 WITH (publish = 'truncate');
-CREATE PUBLICATION
-test_pub=# CREATE PUBLICATION pub3b FOR TABLE t3 WHERE (e > 5);
-CREATE PUBLICATION
-</programlisting></para>
+<programlisting><![CDATA[
+/* pub # */ CREATE PUBLICATION pub1 FOR TABLE t1;
+/* pub # */ CREATE PUBLICATION pub2 FOR TABLE t2 WITH (publish = 'truncate');
+/* pub # */ CREATE PUBLICATION pub3a FOR TABLE t3 WITH (publish = 'truncate');
+/* pub # */ CREATE PUBLICATION pub3b FOR TABLE t3 WHERE (e > 5);
+]]></programlisting></para>
<para>
Create subscriptions for the publications. The subscription
<literal>sub3</literal> subscribes to both <literal>pub3a</literal> and
<literal>pub3b</literal>. All subscriptions will copy initial data by default.
<programlisting>
-test_sub=# CREATE SUBSCRIPTION sub1
-test_sub-# CONNECTION 'host=localhost dbname=test_pub application_name=sub1'
-test_sub-# PUBLICATION pub1;
-CREATE SUBSCRIPTION
-test_sub=# CREATE SUBSCRIPTION sub2
-test_sub-# CONNECTION 'host=localhost dbname=test_pub application_name=sub2'
-test_sub-# PUBLICATION pub2;
-CREATE SUBSCRIPTION
-test_sub=# CREATE SUBSCRIPTION sub3
-test_sub-# CONNECTION 'host=localhost dbname=test_pub application_name=sub3'
-test_sub-# PUBLICATION pub3a, pub3b;
-CREATE SUBSCRIPTION
+/* sub # */ CREATE SUBSCRIPTION sub1
+/* sub - */ CONNECTION 'host=localhost dbname=test_pub application_name=sub1'
+/* sub - */ PUBLICATION pub1;
+/* sub # */ CREATE SUBSCRIPTION sub2
+/* sub - */ CONNECTION 'host=localhost dbname=test_pub application_name=sub2'
+/* sub - */ PUBLICATION pub2;
+/* sub # */ CREATE SUBSCRIPTION sub3
+/* sub - */ CONNECTION 'host=localhost dbname=test_pub application_name=sub3'
+/* sub - */ PUBLICATION pub3a, pub3b;
</programlisting></para>
<para>
Observe that initial table data is copied, regardless of the
<literal>publish</literal> operation of the publication.
<programlisting>
-test_sub=# SELECT * FROM t1;
+/* sub # */ SELECT * FROM t1;
a | b
---+-------
1 | one
@@ -441,7 +425,7 @@ test_sub=# SELECT * FROM t1;
3 | three
(3 rows)
-test_sub=# SELECT * FROM t2;
+/* sub # */ SELECT * FROM t2;
c | d
---+---
1 | A
@@ -456,7 +440,7 @@ test_sub=# SELECT * FROM t2;
it means the copied table <literal>t3</literal> contains all rows even when
they do not match the row filter of publication <literal>pub3b</literal>.
<programlisting>
-test_sub=# SELECT * FROM t3;
+/* sub # */ SELECT * FROM t3;
e | f
---+-----
1 | i
@@ -468,18 +452,15 @@ test_sub=# SELECT * FROM t3;
<para>
Insert more data to the tables at the publisher side.
<programlisting>
-test_pub=# INSERT INTO t1 VALUES (4, 'four'), (5, 'five'), (6, 'six');
-INSERT 0 3
-test_pub=# INSERT INTO t2 VALUES (4, 'D'), (5, 'E'), (6, 'F');
-INSERT 0 3
-test_pub=# INSERT INTO t3 VALUES (4, 'iv'), (5, 'v'), (6, 'vi');
-INSERT 0 3
+/* pub # */ INSERT INTO t1 VALUES (4, 'four'), (5, 'five'), (6, 'six');
+/* pub # */ INSERT INTO t2 VALUES (4, 'D'), (5, 'E'), (6, 'F');
+/* pub # */ INSERT INTO t3 VALUES (4, 'iv'), (5, 'v'), (6, 'vi');
</programlisting></para>
<para>
Now the publisher side data looks like:
<programlisting>
-test_pub=# SELECT * FROM t1;
+/* pub # */ SELECT * FROM t1;
a | b
---+-------
1 | one
@@ -490,7 +471,7 @@ test_pub=# SELECT * FROM t1;
6 | six
(6 rows)
-test_pub=# SELECT * FROM t2;
+/* pub # */ SELECT * FROM t2;
c | d
---+---
1 | A
@@ -501,7 +482,7 @@ test_pub=# SELECT * FROM t2;
6 | F
(6 rows)
-test_pub=# SELECT * FROM t3;
+/* pub # */ SELECT * FROM t3;
e | f
---+-----
1 | i
@@ -521,7 +502,7 @@ test_pub=# SELECT * FROM t3;
only replicate data that matches the row filter of <literal>pub3b</literal>.
Now the subscriber side data looks like:
<programlisting>
-test_sub=# SELECT * FROM t1;
+/* sub # */ SELECT * FROM t1;
a | b
---+-------
1 | one
@@ -532,7 +513,7 @@ test_sub=# SELECT * FROM t1;
6 | six
(6 rows)
-test_sub=# SELECT * FROM t2;
+/* sub # */ SELECT * FROM t2;
c | d
---+---
1 | A
@@ -540,7 +521,7 @@ test_sub=# SELECT * FROM t2;
3 | C
(3 rows)
-test_sub=# SELECT * FROM t3;
+/* sub # */ SELECT * FROM t3;
e | f
---+-----
1 | i
@@ -567,8 +548,7 @@ test_sub=# SELECT * FROM t3;
<para>
First, create a publication for the examples to use.
<programlisting>
-test_pub=# CREATE PUBLICATION pub1 FOR ALL TABLES;
-CREATE PUBLICATION
+/* pub # */ CREATE PUBLICATION pub1 FOR ALL TABLES;
</programlisting></para>
<para>
Example 1: Where the subscription says <literal>connect = false</literal>
@@ -579,13 +559,12 @@ CREATE PUBLICATION
<para>
Create the subscription.
<programlisting>
-test_sub=# CREATE SUBSCRIPTION sub1
-test_sub-# CONNECTION 'host=localhost dbname=test_pub'
-test_sub-# PUBLICATION pub1
-test_sub-# WITH (connect=false);
+/* sub # */ CREATE SUBSCRIPTION sub1
+/* sub - */ CONNECTION 'host=localhost dbname=test_pub'
+/* sub - */ PUBLICATION pub1
+/* sub - */ WITH (connect=false);
WARNING: subscription was created, but is not connected
HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-CREATE SUBSCRIPTION
</programlisting></para>
</listitem>
<listitem>
@@ -594,7 +573,7 @@ CREATE SUBSCRIPTION
specified during <literal>CREATE SUBSCRIPTION</literal>, the name of the
slot to create is same as the subscription name, e.g. "sub1".
<programlisting>
-test_pub=# SELECT * FROM pg_create_logical_replication_slot('sub1', 'pgoutput');
+/* pub # */ SELECT * FROM pg_create_logical_replication_slot('sub1', 'pgoutput');
slot_name | lsn
-----------+-----------
sub1 | 0/19404D0
@@ -606,10 +585,8 @@ test_pub=# SELECT * FROM pg_create_logical_replication_slot('sub1', 'pgoutput');
On the subscriber, complete the activation of the subscription. After
this the tables of <literal>pub1</literal> will start replicating.
<programlisting>
-test_sub=# ALTER SUBSCRIPTION sub1 ENABLE;
-ALTER SUBSCRIPTION
-test_sub=# ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
-ALTER SUBSCRIPTION
+/* sub # */ ALTER SUBSCRIPTION sub1 ENABLE;
+/* sub # */ ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
</programlisting></para>
</listitem>
</itemizedlist>
@@ -625,13 +602,12 @@ ALTER SUBSCRIPTION
<para>
Create the subscription.
<programlisting>
-test_sub=# CREATE SUBSCRIPTION sub1
-test_sub-# CONNECTION 'host=localhost dbname=test_pub'
-test_sub-# PUBLICATION pub1
-test_sub-# WITH (connect=false, slot_name='myslot');
+/* sub # */ CREATE SUBSCRIPTION sub1
+/* sub - */ CONNECTION 'host=localhost dbname=test_pub'
+/* sub - */ PUBLICATION pub1
+/* sub - */ WITH (connect=false, slot_name='myslot');
WARNING: subscription was created, but is not connected
HINT: To initiate replication, you must manually create the replication slot, enable the subscription, and refresh the subscription.
-CREATE SUBSCRIPTION
</programlisting></para>
</listitem>
<listitem>
@@ -639,7 +615,7 @@ CREATE SUBSCRIPTION
On the publisher, manually create a slot using the same name that was
specified during <literal>CREATE SUBSCRIPTION</literal>, e.g. "myslot".
<programlisting>
-test_pub=# SELECT * FROM pg_create_logical_replication_slot('myslot', 'pgoutput');
+/* pub # */ SELECT * FROM pg_create_logical_replication_slot('myslot', 'pgoutput');
slot_name | lsn
-----------+-----------
myslot | 0/19059A0
@@ -651,10 +627,8 @@ test_pub=# SELECT * FROM pg_create_logical_replication_slot('myslot', 'pgoutput'
On the subscriber, the remaining subscription activation steps are the
same as before.
<programlisting>
-test_sub=# ALTER SUBSCRIPTION sub1 ENABLE;
-ALTER SUBSCRIPTION
-test_sub=# ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
-ALTER SUBSCRIPTION
+/* sub # */ ALTER SUBSCRIPTION sub1 ENABLE;
+/* sub # */ ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
</programlisting></para>
</listitem>
</itemizedlist>
@@ -669,18 +643,17 @@ ALTER SUBSCRIPTION
<literal>enabled = false</literal>, and
<literal>create_slot = false</literal> are also needed.
<programlisting>
-test_sub=# CREATE SUBSCRIPTION sub1
-test_sub-# CONNECTION 'host=localhost dbname=test_pub'
-test_sub-# PUBLICATION pub1
-test_sub-# WITH (slot_name=NONE, enabled=false, create_slot=false);
-CREATE SUBSCRIPTION
+/* sub # */ CREATE SUBSCRIPTION sub1
+/* sub - */ CONNECTION 'host=localhost dbname=test_pub'
+/* sub - */ PUBLICATION pub1
+/* sub - */ WITH (slot_name=NONE, enabled=false, create_slot=false);
</programlisting></para>
</listitem>
<listitem>
<para>
On the publisher, manually create a slot using any name, e.g. "myslot".
<programlisting>
-test_pub=# SELECT * FROM pg_create_logical_replication_slot('myslot', 'pgoutput');
+/* pub # */ SELECT * FROM pg_create_logical_replication_slot('myslot', 'pgoutput');
slot_name | lsn
-----------+-----------
myslot | 0/1905930
@@ -692,18 +665,15 @@ test_pub=# SELECT * FROM pg_create_logical_replication_slot('myslot', 'pgoutput'
On the subscriber, associate the subscription with the slot name just
created.
<programlisting>
-test_sub=# ALTER SUBSCRIPTION sub1 SET (slot_name='myslot');
-ALTER SUBSCRIPTION
+/* sub # */ ALTER SUBSCRIPTION sub1 SET (slot_name='myslot');
</programlisting></para>
</listitem>
<listitem>
<para>
The remaining subscription activation steps are same as before.
<programlisting>
-test_sub=# ALTER SUBSCRIPTION sub1 ENABLE;
-ALTER SUBSCRIPTION
-test_sub=# ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
-ALTER SUBSCRIPTION
+/* sub # */ ALTER SUBSCRIPTION sub1 ENABLE;
+/* sub # */ ALTER SUBSCRIPTION sub1 REFRESH PUBLICATION;
</programlisting></para>
</listitem>
</itemizedlist>
@@ -752,7 +722,7 @@ ALTER SUBSCRIPTION
will return the relevant replication slots associated with the
failover-enabled subscriptions.
<programlisting>
-test_sub=# SELECT
+/* sub # */ SELECT
array_agg(quote_literal(s.subslotname)) AS slots
FROM pg_subscription s
WHERE s.subfailover AND
@@ -775,7 +745,7 @@ test_sub=# SELECT
as they will either be dropped or re-created on the new primary server in those
cases.
<programlisting>
-test_sub=# SELECT
+/* sub # */ SELECT
array_agg(quote_literal(slot_name)) AS slots
FROM
(
@@ -794,7 +764,7 @@ test_sub=# SELECT
Check that the logical replication slots identified above exist on
the standby server and are ready for failover.
<programlisting>
-test_standby=# SELECT slot_name, (synced AND NOT temporary AND NOT conflicting) AS failover_ready
+/* standby # */ SELECT slot_name, (synced AND NOT temporary AND NOT conflicting) AS failover_ready
FROM pg_replication_slots
WHERE slot_name IN
('sub1','sub2','sub3', 'pg_16394_sync_16385_7394666715149055164');
@@ -1024,12 +994,9 @@ test_standby=# SELECT slot_name, (synced AND NOT temporary AND NOT conflicting)
<para>
Create some tables to be used in the following examples.
<programlisting>
-test_pub=# CREATE TABLE t1(a int, b int, c text, PRIMARY KEY(a,c));
-CREATE TABLE
-test_pub=# CREATE TABLE t2(d int, e int, f int, PRIMARY KEY(d));
-CREATE TABLE
-test_pub=# CREATE TABLE t3(g int, h int, i int, PRIMARY KEY(g));
-CREATE TABLE
+/* pub # */ CREATE TABLE t1(a int, b int, c text, PRIMARY KEY(a,c));
+/* pub # */ CREATE TABLE t2(d int, e int, f int, PRIMARY KEY(d));
+/* pub # */ CREATE TABLE t3(g int, h int, i int, PRIMARY KEY(g));
</programlisting></para>
<para>
@@ -1038,43 +1005,40 @@ CREATE TABLE
<literal>p2</literal> has two tables. Table <literal>t1</literal> has no row
filter, and table <literal>t2</literal> has a row filter. Publication
<literal>p3</literal> has two tables, and both of them have a row filter.
-<programlisting>
-test_pub=# CREATE PUBLICATION p1 FOR TABLE t1 WHERE (a > 5 AND c = 'NSW');
-CREATE PUBLICATION
-test_pub=# CREATE PUBLICATION p2 FOR TABLE t1, t2 WHERE (e = 99);
-CREATE PUBLICATION
-test_pub=# CREATE PUBLICATION p3 FOR TABLE t2 WHERE (d = 10), t3 WHERE (g = 10);
-CREATE PUBLICATION
-</programlisting></para>
+<programlisting><![CDATA[
+/* pub # */ CREATE PUBLICATION p1 FOR TABLE t1 WHERE (a > 5 AND c = 'NSW');
+/* pub # */ CREATE PUBLICATION p2 FOR TABLE t1, t2 WHERE (e = 99);
+/* pub # */ CREATE PUBLICATION p3 FOR TABLE t2 WHERE (d = 10), t3 WHERE (g = 10);
+]]></programlisting></para>
<para>
<command>psql</command> can be used to show the row filter expressions (if
defined) for each publication.
-<programlisting>
-test_pub=# \dRp+
- Publication p1
+<programlisting><![CDATA[
+/* pub # */ \dRp+
+ Publication p1
Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root
----------+------------+---------+---------+---------+-----------+----------
postgres | f | t | t | t | t | f
Tables:
- "public.t1" WHERE ((a > 5) AND (c = 'NSW'::text))
+ "public.t1" WHERE ((a > 5) AND (c = 'NSW'::text))
- Publication p2
+ Publication p2
Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root
----------+------------+---------+---------+---------+-----------+----------
postgres | f | t | t | t | t | f
Tables:
- "public.t1"
- "public.t2" WHERE (e = 99)
+ "public.t1"
+ "public.t2" WHERE (e = 99)
- Publication p3
+ Publication p3
Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root
----------+------------+---------+---------+---------+-----------+----------
postgres | f | t | t | t | t | f
Tables:
- "public.t2" WHERE (d = 10)
- "public.t3" WHERE (g = 10)
-</programlisting></para>
+ "public.t2" WHERE (d = 10)
+ "public.t3" WHERE (g = 10)
+]]></programlisting></para>
<para>
<command>psql</command> can be used to show the row filter expressions (if
@@ -1082,8 +1046,8 @@ Tables:
of two publications, but has a row filter only in <literal>p1</literal>.
See that table <literal>t2</literal> is a member of two publications, and
has a different row filter in each of them.
-<programlisting>
-test_pub=# \d t1
+<programlisting><![CDATA[
+/* pub # */ \d t1
Table "public.t1"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
@@ -1096,7 +1060,7 @@ Publications:
"p1" WHERE ((a > 5) AND (c = 'NSW'::text))
"p2"
-test_pub=# \d t2
+/* pub # */ \d t2
Table "public.t2"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
@@ -1109,7 +1073,7 @@ Publications:
"p2" WHERE (e = 99)
"p3" WHERE (d = 10)
-test_pub=# \d t3
+/* pub # */ \d t3
Table "public.t3"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
@@ -1120,43 +1084,33 @@ Indexes:
"t3_pkey" PRIMARY KEY, btree (g)
Publications:
"p3" WHERE (g = 10)
-</programlisting></para>
+]]></programlisting></para>
<para>
On the subscriber node, create a table <literal>t1</literal> with the same
definition as the one on the publisher, and also create the subscription
<literal>s1</literal> that subscribes to the publication <literal>p1</literal>.
<programlisting>
-test_sub=# CREATE TABLE t1(a int, b int, c text, PRIMARY KEY(a,c));
-CREATE TABLE
-test_sub=# CREATE SUBSCRIPTION s1
-test_sub-# CONNECTION 'host=localhost dbname=test_pub application_name=s1'
-test_sub-# PUBLICATION p1;
-CREATE SUBSCRIPTION
+/* sub # */ CREATE TABLE t1(a int, b int, c text, PRIMARY KEY(a,c));
+/* sub # */ CREATE SUBSCRIPTION s1
+/* sub - */ CONNECTION 'host=localhost dbname=test_pub application_name=s1'
+/* sub - */ PUBLICATION p1;
</programlisting></para>
<para>
Insert some rows. Only the rows satisfying the <literal>t1 WHERE</literal>
clause of publication <literal>p1</literal> are replicated.
<programlisting>
-test_pub=# INSERT INTO t1 VALUES (2, 102, 'NSW');
-INSERT 0 1
-test_pub=# INSERT INTO t1 VALUES (3, 103, 'QLD');
-INSERT 0 1
-test_pub=# INSERT INTO t1 VALUES (4, 104, 'VIC');
-INSERT 0 1
-test_pub=# INSERT INTO t1 VALUES (5, 105, 'ACT');
-INSERT 0 1
-test_pub=# INSERT INTO t1 VALUES (6, 106, 'NSW');
-INSERT 0 1
-test_pub=# INSERT INTO t1 VALUES (7, 107, 'NT');
-INSERT 0 1
-test_pub=# INSERT INTO t1 VALUES (8, 108, 'QLD');
-INSERT 0 1
-test_pub=# INSERT INTO t1 VALUES (9, 109, 'NSW');
-INSERT 0 1
-
-test_pub=# SELECT * FROM t1;
+/* pub # */ INSERT INTO t1 VALUES (2, 102, 'NSW');
+/* pub # */ INSERT INTO t1 VALUES (3, 103, 'QLD');
+/* pub # */ INSERT INTO t1 VALUES (4, 104, 'VIC');
+/* pub # */ INSERT INTO t1 VALUES (5, 105, 'ACT');
+/* pub # */ INSERT INTO t1 VALUES (6, 106, 'NSW');
+/* pub # */ INSERT INTO t1 VALUES (7, 107, 'NT');
+/* pub # */ INSERT INTO t1 VALUES (8, 108, 'QLD');
+/* pub # */ INSERT INTO t1 VALUES (9, 109, 'NSW');
+
+/* pub # */ SELECT * FROM t1;
a | b | c
---+-----+-----
2 | 102 | NSW
@@ -1170,7 +1124,7 @@ test_pub=# SELECT * FROM t1;
(8 rows)
</programlisting>
<programlisting>
-test_sub=# SELECT * FROM t1;
+/* sub # */ SELECT * FROM t1;
a | b | c
---+-----+-----
6 | 106 | NSW
@@ -1184,10 +1138,9 @@ test_sub=# SELECT * FROM t1;
<literal>p1</literal>. The <command>UPDATE</command> replicates
the change as normal.
<programlisting>
-test_pub=# UPDATE t1 SET b = 999 WHERE a = 6;
-UPDATE 1
+/* pub # */ UPDATE t1 SET b = 999 WHERE a = 6;
-test_pub=# SELECT * FROM t1;
+/* pub # */ SELECT * FROM t1;
a | b | c
---+-----+-----
2 | 102 | NSW
@@ -1201,7 +1154,7 @@ test_pub=# SELECT * FROM t1;
(8 rows)
</programlisting>
<programlisting>
-test_sub=# SELECT * FROM t1;
+/* sub # */ SELECT * FROM t1;
a | b | c
---+-----+-----
9 | 109 | NSW
@@ -1216,10 +1169,9 @@ test_sub=# SELECT * FROM t1;
transformed into an <command>INSERT</command> and the change is replicated.
See the new row on the subscriber.
<programlisting>
-test_pub=# UPDATE t1 SET a = 555 WHERE a = 2;
-UPDATE 1
+/* pub # */ UPDATE t1 SET a = 555 WHERE a = 2;
-test_pub=# SELECT * FROM t1;
+/* pub # */ SELECT * FROM t1;
a | b | c
-----+-----+-----
3 | 103 | QLD
@@ -1233,7 +1185,7 @@ test_pub=# SELECT * FROM t1;
(8 rows)
</programlisting>
<programlisting>
-test_sub=# SELECT * FROM t1;
+/* sub # */ SELECT * FROM t1;
a | b | c
-----+-----+-----
9 | 109 | NSW
@@ -1249,10 +1201,9 @@ test_sub=# SELECT * FROM t1;
transformed into a <command>DELETE</command> and the change is replicated.
See that the row is removed from the subscriber.
<programlisting>
-test_pub=# UPDATE t1 SET c = 'VIC' WHERE a = 9;
-UPDATE 1
+/* pub # */ UPDATE t1 SET c = 'VIC' WHERE a = 9;
-test_pub=# SELECT * FROM t1;
+/* pub # */ SELECT * FROM t1;
a | b | c
-----+-----+-----
3 | 103 | QLD
@@ -1266,7 +1217,7 @@ test_pub=# SELECT * FROM t1;
(8 rows)
</programlisting>
<programlisting>
-test_sub=# SELECT * FROM t1;
+/* sub # */ SELECT * FROM t1;
a | b | c
-----+-----+-----
6 | 999 | NSW
@@ -1284,17 +1235,13 @@ test_sub=# SELECT * FROM t1;
<para>
Create a partitioned table on the publisher.
<programlisting>
-test_pub=# CREATE TABLE parent(a int PRIMARY KEY) PARTITION BY RANGE(a);
-CREATE TABLE
-test_pub=# CREATE TABLE child PARTITION OF parent DEFAULT;
-CREATE TABLE
+/* pub # */ CREATE TABLE parent(a int PRIMARY KEY) PARTITION BY RANGE(a);
+/* pub # */ CREATE TABLE child PARTITION OF parent DEFAULT;
</programlisting>
Create the same tables on the subscriber.
<programlisting>
-test_sub=# CREATE TABLE parent(a int PRIMARY KEY) PARTITION BY RANGE(a);
-CREATE TABLE
-test_sub=# CREATE TABLE child PARTITION OF parent DEFAULT;
-CREATE TABLE
+/* sub # */ CREATE TABLE parent(a int PRIMARY KEY) PARTITION BY RANGE(a);
+/* sub # */ CREATE TABLE child PARTITION OF parent DEFAULT;
</programlisting></para>
<para>
@@ -1302,16 +1249,14 @@ CREATE TABLE
publication parameter <literal>publish_via_partition_root</literal> is set
as true. There are row filters defined on both the partitioned table
(<literal>parent</literal>), and on the partition (<literal>child</literal>).
-<programlisting>
-test_pub=# CREATE PUBLICATION p4 FOR TABLE parent WHERE (a &lt; 5), child WHERE (a >= 5)
-test_pub-# WITH (publish_via_partition_root=true);
-CREATE PUBLICATION
-</programlisting>
-<programlisting>
-test_sub=# CREATE SUBSCRIPTION s4
-test_sub-# CONNECTION 'host=localhost dbname=test_pub application_name=s4'
-test_sub-# PUBLICATION p4;
-CREATE SUBSCRIPTION
+<programlisting><![CDATA[
+/* pub # */ CREATE PUBLICATION p4 FOR TABLE parent WHERE (a < 5), child WHERE (a >= 5)
+/* pub - */ WITH (publish_via_partition_root=true);
+]]></programlisting>
+<programlisting>
+/* sub # */ CREATE SUBSCRIPTION s4
+/* sub - */ CONNECTION 'host=localhost dbname=test_pub application_name=s4'
+/* sub - */ PUBLICATION p4;
</programlisting></para>
<para>
@@ -1320,12 +1265,10 @@ CREATE SUBSCRIPTION
<literal>parent</literal> (because <literal>publish_via_partition_root</literal>
is true).
<programlisting>
-test_pub=# INSERT INTO parent VALUES (2), (4), (6);
-INSERT 0 3
-test_pub=# INSERT INTO child VALUES (3), (5), (7);
-INSERT 0 3
+/* pub # */ INSERT INTO parent VALUES (2), (4), (6);
+/* pub # */ INSERT INTO child VALUES (3), (5), (7);
-test_pub=# SELECT * FROM parent ORDER BY a;
+/* pub # */ SELECT * FROM parent ORDER BY a;
a
---
2
@@ -1337,7 +1280,7 @@ test_pub=# SELECT * FROM parent ORDER BY a;
(6 rows)
</programlisting>
<programlisting>
-test_sub=# SELECT * FROM parent ORDER BY a;
+/* sub # */ SELECT * FROM parent ORDER BY a;
a
---
2
@@ -1350,16 +1293,13 @@ test_sub=# SELECT * FROM parent ORDER BY a;
Repeat the same test, but with a different value for <literal>publish_via_partition_root</literal>.
The publication parameter <literal>publish_via_partition_root</literal> is
set as false. A row filter is defined on the partition (<literal>child</literal>).
+<programlisting><![CDATA[
+/* pub # */ DROP PUBLICATION p4;
+/* pub # */ CREATE PUBLICATION p4 FOR TABLE parent, child WHERE (a >= 5)
+/* pub - */ WITH (publish_via_partition_root=false);
+]]></programlisting>
<programlisting>
-test_pub=# DROP PUBLICATION p4;
-DROP PUBLICATION
-test_pub=# CREATE PUBLICATION p4 FOR TABLE parent, child WHERE (a >= 5)
-test_pub-# WITH (publish_via_partition_root=false);
-CREATE PUBLICATION
-</programlisting>
-<programlisting>
-test_sub=# ALTER SUBSCRIPTION s4 REFRESH PUBLICATION;
-ALTER SUBSCRIPTION
+/* sub # */ ALTER SUBSCRIPTION s4 REFRESH PUBLICATION;
</programlisting></para>
<para>
@@ -1367,14 +1307,11 @@ ALTER SUBSCRIPTION
row filter of <literal>child</literal> (because
<literal>publish_via_partition_root</literal> is false).
<programlisting>
-test_pub=# TRUNCATE parent;
-TRUNCATE TABLE
-test_pub=# INSERT INTO parent VALUES (2), (4), (6);
-INSERT 0 3
-test_pub=# INSERT INTO child VALUES (3), (5), (7);
-INSERT 0 3
+/* pub # */ TRUNCATE parent;
+/* pub # */ INSERT INTO parent VALUES (2), (4), (6);
+/* pub # */ INSERT INTO child VALUES (3), (5), (7);
-test_pub=# SELECT * FROM parent ORDER BY a;
+/* pub # */ SELECT * FROM parent ORDER BY a;
a
---
2
@@ -1386,7 +1323,7 @@ test_pub=# SELECT * FROM parent ORDER BY a;
(6 rows)
</programlisting>
<programlisting>
-test_sub=# SELECT * FROM child ORDER BY a;
+/* sub # */ SELECT * FROM child ORDER BY a;
a
---
5
@@ -1505,8 +1442,7 @@ test_sub=# SELECT * FROM child ORDER BY a;
<para>
Create a table <literal>t1</literal> to be used in the following example.
<programlisting>
-test_pub=# CREATE TABLE t1(id int, a text, b text, c text, d text, e text, PRIMARY KEY(id));
-CREATE TABLE
+/* pub # */ CREATE TABLE t1(id int, a text, b text, c text, d text, e text, PRIMARY KEY(id));
</programlisting></para>
<para>
@@ -1515,15 +1451,14 @@ CREATE TABLE
replicated. Notice that the order of column names in the column list does
not matter.
<programlisting>
-test_pub=# CREATE PUBLICATION p1 FOR TABLE t1 (id, b, a, d);
-CREATE PUBLICATION
+/* pub # */ CREATE PUBLICATION p1 FOR TABLE t1 (id, b, a, d);
</programlisting></para>
<para>
<literal>psql</literal> can be used to show the column lists (if defined)
for each publication.
<programlisting>
-test_pub=# \dRp+
+/* pub # */ \dRp+
Publication p1
Owner | All tables | Inserts | Updates | Deletes | Truncates | Via root
----------+------------+---------+---------+---------+-----------+----------
@@ -1536,7 +1471,7 @@ Tables:
<literal>psql</literal> can be used to show the column lists (if defined)
for each table.
<programlisting>
-test_pub=# \d t1
+/* pub # */ \d t1
Table "public.t1"
Column | Type | Collation | Nullable | Default
--------+---------+-----------+----------+---------
@@ -1559,24 +1494,19 @@ Publications:
<literal>s1</literal> that subscribes to the publication
<literal>p1</literal>.
<programlisting>
-test_sub=# CREATE TABLE t1(id int, b text, a text, d text, PRIMARY KEY(id));
-CREATE TABLE
-test_sub=# CREATE SUBSCRIPTION s1
-test_sub-# CONNECTION 'host=localhost dbname=test_pub application_name=s1'
-test_sub-# PUBLICATION p1;
-CREATE SUBSCRIPTION
+/* sub # */ CREATE TABLE t1(id int, b text, a text, d text, PRIMARY KEY(id));
+/* sub # */ CREATE SUBSCRIPTION s1
+/* sub - */ CONNECTION 'host=localhost dbname=test_pub application_name=s1'
+/* sub - */ PUBLICATION p1;
</programlisting></para>
<para>
On the publisher node, insert some rows to table <literal>t1</literal>.
<programlisting>
-test_pub=# INSERT INTO t1 VALUES(1, 'a-1', 'b-1', 'c-1', 'd-1', 'e-1');
-INSERT 0 1
-test_pub=# INSERT INTO t1 VALUES(2, 'a-2', 'b-2', 'c-2', 'd-2', 'e-2');
-INSERT 0 1
-test_pub=# INSERT INTO t1 VALUES(3, 'a-3', 'b-3', 'c-3', 'd-3', 'e-3');
-INSERT 0 1
-test_pub=# SELECT * FROM t1 ORDER BY id;
+/* pub # */ INSERT INTO t1 VALUES(1, 'a-1', 'b-1', 'c-1', 'd-1', 'e-1');
+/* pub # */ INSERT INTO t1 VALUES(2, 'a-2', 'b-2', 'c-2', 'd-2', 'e-2');
+/* pub # */ INSERT INTO t1 VALUES(3, 'a-3', 'b-3', 'c-3', 'd-3', 'e-3');
+/* pub # */ SELECT * FROM t1 ORDER BY id;
id | a | b | c | d | e
----+-----+-----+-----+-----+-----
1 | a-1 | b-1 | c-1 | d-1 | e-1
@@ -1589,7 +1519,7 @@ test_pub=# SELECT * FROM t1 ORDER BY id;
Only data from the column list of publication <literal>p1</literal> is
replicated.
<programlisting>
-test_sub=# SELECT * FROM t1 ORDER BY id;
+/* sub # */ SELECT * FROM t1 ORDER BY id;
id | b | a | d
----+-----+-----+-----
1 | b-1 | a-1 | d-1
@@ -1617,13 +1547,10 @@ test_sub=# SELECT * FROM t1 ORDER BY id;
For example, note below that subscriber table generated column value comes from the
subscriber column's calculation.
<programlisting>
-test_pub=# CREATE TABLE tab_gen_to_gen (a int, b int GENERATED ALWAYS AS (a + 1) STORED);
-CREATE TABLE
-test_pub=# INSERT INTO tab_gen_to_gen VALUES (1),(2),(3);
-INSERT 0 3
-test_pub=# CREATE PUBLICATION pub1 FOR TABLE tab_gen_to_gen;
-CREATE PUBLICATION
-test_pub=# SELECT * FROM tab_gen_to_gen;
+/* pub # */ CREATE TABLE tab_gen_to_gen (a int, b int GENERATED ALWAYS AS (a + 1) STORED);
+/* pub # */ INSERT INTO tab_gen_to_gen VALUES (1),(2),(3);
+/* pub # */ CREATE PUBLICATION pub1 FOR TABLE tab_gen_to_gen;
+/* pub # */ SELECT * FROM tab_gen_to_gen;
a | b
---+---
1 | 2
@@ -1631,11 +1558,9 @@ test_pub=# SELECT * FROM tab_gen_to_gen;
3 | 4
(3 rows)
-test_sub=# CREATE TABLE tab_gen_to_gen (a int, b int GENERATED ALWAYS AS (a * 100) STORED);
-CREATE TABLE
-test_sub=# CREATE SUBSCRIPTION sub1 CONNECTION 'dbname=test_pub' PUBLICATION pub1;
-CREATE SUBSCRIPTION
-test_sub=# SELECT * from tab_gen_to_gen;
+/* sub # */ CREATE TABLE tab_gen_to_gen (a int, b int GENERATED ALWAYS AS (a * 100) STORED);
+/* sub # */ CREATE SUBSCRIPTION sub1 CONNECTION 'dbname=test_pub' PUBLICATION pub1;
+/* sub # */ SELECT * from tab_gen_to_gen;
a | b
---+----
1 | 100
@@ -2488,7 +2413,7 @@ CONTEXT: processing remote data for replication origin "pg_16395" during "INSER
</para>
<sect2 id="prepare-publisher-upgrades">
- <title>Prepare for publisher upgrades</title>
+ <title>Prepare for Publisher Upgrades</title>
<para>
<application>pg_upgrade</application> attempts to migrate logical
@@ -2560,7 +2485,7 @@ CONTEXT: processing remote data for replication origin "pg_16395" during "INSER
</sect2>
<sect2 id="prepare-subscriber-upgrades">
- <title>Prepare for subscriber upgrades</title>
+ <title>Prepare for Subscriber Upgrades</title>
<para>
Setup the <link linkend="logical-replication-config-subscriber">
@@ -2610,7 +2535,7 @@ CONTEXT: processing remote data for replication origin "pg_16395" during "INSER
</sect2>
<sect2 id="upgrading-logical-replication-clusters">
- <title>Upgrading logical replication clusters</title>
+ <title>Upgrading Logical Replication Clusters</title>
<para>
While upgrading a subscriber, write operations can be performed in the
@@ -2674,7 +2599,7 @@ CONTEXT: processing remote data for replication origin "pg_16395" during "INSER
</para>
<sect3 id="steps-two-node-logical-replication-cluster">
- <title>Steps to upgrade a two-node logical replication cluster</title>
+ <title>Steps to Upgrade a Two-node Logical Replication Cluster</title>
<para>
Let's say publisher is in <literal>node1</literal> and subscriber is
in <literal>node2</literal>. The subscriber <literal>node2</literal> has
@@ -2690,8 +2615,7 @@ CONTEXT: processing remote data for replication origin "pg_16395" during "INSER
<link linkend="sql-altersubscription-params-disable"><command>ALTER SUBSCRIPTION ... DISABLE</command></link>,
e.g.:
<programlisting>
-node2=# ALTER SUBSCRIPTION sub1_node1_node2 DISABLE;
-ALTER SUBSCRIPTION
+/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 DISABLE;
</programlisting>
</para>
</step>
@@ -2780,8 +2704,7 @@ pg_ctl -D /opt/PostgreSQL/data2_upgraded start -l logfile
<xref linkend="two-node-cluster-disable-subscriptions-node2"/>
and now, e.g.:
<programlisting>
-node2=# CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
-CREATE TABLE
+/* node2 # */ CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
</programlisting>
</para>
</step>
@@ -2793,8 +2716,7 @@ CREATE TABLE
<link linkend="sql-altersubscription-params-enable"><command>ALTER SUBSCRIPTION ... ENABLE</command></link>,
e.g.:
<programlisting>
-node2=# ALTER SUBSCRIPTION sub1_node1_node2 ENABLE;
-ALTER SUBSCRIPTION
+/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 ENABLE;
</programlisting>
</para>
</step>
@@ -2805,8 +2727,7 @@ ALTER SUBSCRIPTION
<link linkend="sql-altersubscription-params-refresh-publication"><command>ALTER SUBSCRIPTION ... REFRESH PUBLICATION</command></link>,
e.g.:
<programlisting>
-node2=# ALTER SUBSCRIPTION sub1_node1_node2 REFRESH PUBLICATION;
-ALTER SUBSCRIPTION
+/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 REFRESH PUBLICATION;
</programlisting>
</para>
</step>
@@ -2822,7 +2743,7 @@ ALTER SUBSCRIPTION
</sect3>
<sect3 id="steps-cascaded-logical-replication-cluster">
- <title>Steps to upgrade a cascaded logical replication cluster</title>
+ <title>Steps to Upgrade a Cascaded Logical Replication Cluster</title>
<para>
Let's say we have a cascaded logical replication setup
<literal>node1</literal>-><literal>node2</literal>-><literal>node3</literal>.
@@ -2844,8 +2765,7 @@ ALTER SUBSCRIPTION
<link linkend="sql-altersubscription-params-disable"><command>ALTER SUBSCRIPTION ... DISABLE</command></link>,
e.g.:
<programlisting>
-node2=# ALTER SUBSCRIPTION sub1_node1_node2 DISABLE;
-ALTER SUBSCRIPTION
+/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 DISABLE;
</programlisting>
</para>
</step>
@@ -2896,8 +2816,7 @@ pg_ctl -D /opt/PostgreSQL/data1_upgraded start -l logfile
<link linkend="sql-altersubscription-params-disable"><command>ALTER SUBSCRIPTION ... DISABLE</command></link>,
e.g.:
<programlisting>
-node3=# ALTER SUBSCRIPTION sub1_node2_node3 DISABLE;
-ALTER SUBSCRIPTION
+/* node3 # */ ALTER SUBSCRIPTION sub1_node2_node3 DISABLE;
</programlisting>
</para>
</step>
@@ -2948,8 +2867,7 @@ pg_ctl -D /opt/PostgreSQL/data2_upgraded start -l logfile
<xref linkend="cascaded-cluster-disable-sub-node1-node2"/>
and now, e.g.:
<programlisting>
-node2=# CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
-CREATE TABLE
+/* node2 # */ CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
</programlisting>
</para>
</step>
@@ -2961,8 +2879,7 @@ CREATE TABLE
<link linkend="sql-altersubscription-params-enable"><command>ALTER SUBSCRIPTION ... ENABLE</command></link>,
e.g.:
<programlisting>
-node2=# ALTER SUBSCRIPTION sub1_node1_node2 ENABLE;
-ALTER SUBSCRIPTION
+/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 ENABLE;
</programlisting>
</para>
</step>
@@ -2973,8 +2890,7 @@ ALTER SUBSCRIPTION
<link linkend="sql-altersubscription-params-refresh-publication"><command>ALTER SUBSCRIPTION ... REFRESH PUBLICATION</command></link>,
e.g.:
<programlisting>
-node2=# ALTER SUBSCRIPTION sub1_node1_node2 REFRESH PUBLICATION;
-ALTER SUBSCRIPTION
+/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 REFRESH PUBLICATION;
</programlisting>
</para>
</step>
@@ -3025,8 +2941,7 @@ pg_ctl -D /opt/PostgreSQL/data3_upgraded start -l logfile
<xref linkend="cascaded-cluster-disable-sub-node2-node3"/> and now,
e.g.:
<programlisting>
-node3=# CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
-CREATE TABLE
+/* node3 # */ CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
</programlisting>
</para>
</step>
@@ -3038,8 +2953,7 @@ CREATE TABLE
<link linkend="sql-altersubscription-params-enable"><command>ALTER SUBSCRIPTION ... ENABLE</command></link>,
e.g.:
<programlisting>
-node3=# ALTER SUBSCRIPTION sub1_node2_node3 ENABLE;
-ALTER SUBSCRIPTION
+/* node3 # */ ALTER SUBSCRIPTION sub1_node2_node3 ENABLE;
</programlisting>
</para>
</step>
@@ -3050,8 +2964,7 @@ ALTER SUBSCRIPTION
<link linkend="sql-altersubscription-params-refresh-publication"><command>ALTER SUBSCRIPTION ... REFRESH PUBLICATION</command></link>,
e.g.:
<programlisting>
-node3=# ALTER SUBSCRIPTION sub1_node2_node3 REFRESH PUBLICATION;
-ALTER SUBSCRIPTION
+/* node3 # */ ALTER SUBSCRIPTION sub1_node2_node3 REFRESH PUBLICATION;
</programlisting>
</para>
</step>
@@ -3059,7 +2972,7 @@ ALTER SUBSCRIPTION
</sect3>
<sect3 id="steps-two-node-circular-logical-replication-cluster">
- <title>Steps to upgrade a two-node circular logical replication cluster</title>
+ <title>Steps to Upgrade a Two-node Circular Logical Replication Cluster</title>
<para>
Let's say we have a circular logical replication setup
<literal>node1</literal>-><literal>node2</literal> and
@@ -3082,8 +2995,7 @@ ALTER SUBSCRIPTION
<link linkend="sql-altersubscription-params-disable"><command>ALTER SUBSCRIPTION ... DISABLE</command></link>,
e.g.:
<programlisting>
-node2=# ALTER SUBSCRIPTION sub1_node1_node2 DISABLE;
-ALTER SUBSCRIPTION
+/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 DISABLE;
</programlisting>
</para>
</step>
@@ -3134,8 +3046,7 @@ pg_ctl -D /opt/PostgreSQL/data1_upgraded start -l logfile
<link linkend="sql-altersubscription-params-enable"><command>ALTER SUBSCRIPTION ... ENABLE</command></link>,
e.g.:
<programlisting>
-node2=# ALTER SUBSCRIPTION sub1_node1_node2 ENABLE;
-ALTER SUBSCRIPTION
+/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 ENABLE;
</programlisting>
</para>
</step>
@@ -3146,8 +3057,7 @@ ALTER SUBSCRIPTION
<literal>node2</literal> between <xref linkend="circular-cluster-disable-sub-node2"/>
and now, e.g.:
<programlisting>
-node1=# CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
-CREATE TABLE
+/* node1 # */ CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
</programlisting>
</para>
</step>
@@ -3160,8 +3070,7 @@ CREATE TABLE
<link linkend="sql-altersubscription-params-refresh-publication"><command>ALTER SUBSCRIPTION ... REFRESH PUBLICATION</command></link>,
e.g.:
<programlisting>
-node1=# ALTER SUBSCRIPTION sub1_node2_node1 REFRESH PUBLICATION;
-ALTER SUBSCRIPTION
+/* node1 # */ ALTER SUBSCRIPTION sub1_node2_node1 REFRESH PUBLICATION;
</programlisting>
</para>
</step>
@@ -3173,8 +3082,7 @@ ALTER SUBSCRIPTION
<link linkend="sql-altersubscription-params-disable"><command>ALTER SUBSCRIPTION ... DISABLE</command></link>,
e.g.:
<programlisting>
-node1=# ALTER SUBSCRIPTION sub1_node2_node1 DISABLE;
-ALTER SUBSCRIPTION
+/* node1 # */ ALTER SUBSCRIPTION sub1_node2_node1 DISABLE;
</programlisting>
</para>
</step>
@@ -3225,8 +3133,7 @@ pg_ctl -D /opt/PostgreSQL/data2_upgraded start -l logfile
<link linkend="sql-altersubscription-params-enable"><command>ALTER SUBSCRIPTION ... ENABLE</command></link>,
e.g.:
<programlisting>
-node1=# ALTER SUBSCRIPTION sub1_node2_node1 ENABLE;
-ALTER SUBSCRIPTION
+/* node1 # */ ALTER SUBSCRIPTION sub1_node2_node1 ENABLE;
</programlisting>
</para>
</step>
@@ -3237,8 +3144,7 @@ ALTER SUBSCRIPTION
the upgraded <literal>node1</literal> between <xref linkend="circular-cluster-disable-sub-node1"/>
and now, e.g.:
<programlisting>
-node2=# CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
-CREATE TABLE
+/* node2 # */ CREATE TABLE distributors (did integer PRIMARY KEY, name varchar(40));
</programlisting>
</para>
</step>
@@ -3250,8 +3156,7 @@ CREATE TABLE
<link linkend="sql-altersubscription-params-refresh-publication"><command>ALTER SUBSCRIPTION ... REFRESH PUBLICATION</command></link>,
e.g.:
<programlisting>
-node2=# ALTER SUBSCRIPTION sub1_node1_node2 REFRESH PUBLICATION;
-ALTER SUBSCRIPTION
+/* node2 # */ ALTER SUBSCRIPTION sub1_node1_node2 REFRESH PUBLICATION;
</programlisting>
</para>
</step>
diff --git a/doc/src/sgml/logicaldecoding.sgml b/doc/src/sgml/logicaldecoding.sgml
index 3f2bcd45a1e..fc288d691b9 100644
--- a/doc/src/sgml/logicaldecoding.sgml
+++ b/doc/src/sgml/logicaldecoding.sgml
@@ -169,7 +169,7 @@ COMMIT 693
$ pg_recvlogical -d postgres --slot=test --drop-slot
Example 2:
-$ pg_recvlogical -d postgres --slot=test --create-slot --two-phase
+$ pg_recvlogical -d postgres --slot=test --create-slot --enable-two-phase
$ pg_recvlogical -d postgres --slot=test --start -f -
<keycombo action="simul"><keycap>Control</keycap><keycap>Z</keycap></keycombo>
$ psql -d postgres -c "BEGIN;INSERT INTO data(data) VALUES('5');PREPARE TRANSACTION 'test';"
@@ -370,10 +370,10 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU
<function>pg_create_logical_replication_slot</function></link>, or by
using the <link linkend="sql-createsubscription-params-with-failover">
<literal>failover</literal></link> option of
- <command>CREATE SUBSCRIPTION</command> during slot creation, and then calling
- <link linkend="pg-sync-replication-slots">
- <function>pg_sync_replication_slots</function></link>
- on the standby. By setting <link linkend="guc-sync-replication-slots">
+      <command>CREATE SUBSCRIPTION</command> during slot creation.
+      Additionally, by enabling <link linkend="guc-sync-replication-slots">
<varname>sync_replication_slots</varname></link>
on the standby, the failover slots can be synchronized periodically in
the slotsync worker. For the synchronization to work, it is mandatory to
@@ -398,6 +398,52 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU
receiving the WAL up to the latest flushed position on the primary server.
</para>
+ <note>
+ <para>
+ While enabling <link linkend="guc-sync-replication-slots">
+ <varname>sync_replication_slots</varname></link> allows for automatic
+ periodic synchronization of failover slots, they can also be manually
+ synchronized using the <link linkend="pg-sync-replication-slots">
+ <function>pg_sync_replication_slots</function></link> function on the standby.
+ However, this function is primarily intended for testing and debugging and
+ should be used with caution. Unlike automatic synchronization, it does not
+ include cyclic retries, making it more prone to synchronization failures,
+ particularly during initial sync scenarios where the required WAL files
+ or catalog rows for the slot may have already been removed or are at risk
+ of being removed on the standby. In contrast, automatic synchronization
+ via <varname>sync_replication_slots</varname> provides continuous slot
+ updates, enabling seamless failover and supporting high availability.
+ Therefore, it is the recommended method for synchronizing slots.
+ </para>
+ </note>
+
+ <para>
+ When slot synchronization is configured as recommended,
+ and the initial synchronization is performed either automatically or
+      manually via <function>pg_sync_replication_slots</function>, the standby can persist the
+ synchronized slot only if the following condition is met: The logical
+ replication slot on the primary must retain WALs and system catalog
+ rows that are still available on the standby. This ensures data
+ integrity and allows logical replication to continue smoothly after
+ promotion.
+ If the required WALs or catalog rows have already been purged from the
+ standby, the slot will not be persisted to avoid data loss. In such
+ cases, the following log message may appear:
+<programlisting>
+ LOG: could not synchronize replication slot "failover_slot"
+ DETAIL: Synchronization could lead to data loss as the remote slot needs WAL at LSN 0/3003F28 and catalog xmin 754, but the standby has LSN 0/3003F28 and catalog xmin 756
+</programlisting>
+ If the logical replication slot is actively used by a consumer, no
+ manual intervention is needed; the slot will advance automatically,
+ and synchronization will resume in the next cycle. However, if no
+ consumer is configured, it is advisable to manually advance the slot
+ on the primary using <link linkend="pg-logical-slot-get-changes">
+ <function>pg_logical_slot_get_changes</function></link> or
+ <link linkend="pg-logical-slot-get-binary-changes">
+ <function>pg_logical_slot_get_binary_changes</function></link>,
+ allowing synchronization to proceed.
+ </para>
+
<para>
The ability to resume logical replication after failover depends upon the
<link linkend="view-pg-replication-slots">pg_replication_slots</link>.<structfield>synced</structfield>
@@ -455,9 +501,8 @@ postgres=# select * from pg_logical_slot_get_changes('regression_slot', NULL, NU
using the slot's contents without losing any changes.
</para>
<para>
- Creation of a snapshot is not always possible. In particular, it will
- fail when connected to a hot standby. Applications that do not require
- snapshot export may suppress it with the <literal>NOEXPORT_SNAPSHOT</literal>
+ Applications that do not require
+ snapshot export may suppress it with the <literal>SNAPSHOT 'nothing'</literal>
option.
</para>
</sect2>
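A sketch of the manual path described in the note and paragraph above; <literal>failover_slot</literal> is the example slot name from the log message, and the manual function is, as stated, intended for testing and debugging:
<programlisting>
-- On the standby: one-off synchronization attempt (no automatic retries).
SELECT pg_sync_replication_slots();

-- On the primary, if the slot has no active consumer: consume pending
-- changes so the slot advances and the next sync cycle can persist it.
SELECT * FROM pg_logical_slot_get_changes('failover_slot', NULL, NULL);
</programlisting>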
diff --git a/doc/src/sgml/pgstattuple.sgml b/doc/src/sgml/pgstattuple.sgml
index 4071da4ed94..c747a5818ab 100644
--- a/doc/src/sgml/pgstattuple.sgml
+++ b/doc/src/sgml/pgstattuple.sgml
@@ -270,6 +270,15 @@ leaf_fragmentation | 0
page than is accounted for by <literal>internal_pages + leaf_pages +
empty_pages + deleted_pages</literal>, because it also includes the
index's metapage.
+ <literal>avg_leaf_density</literal> is the fraction of the index size that
+ is taken up by user data. Since indexes have a default fillfactor of 90,
+ this should be around 90 for newly built indexes of non-negligible size,
+ but usually deteriorates over time.
+ <literal>leaf_fragmentation</literal> represents a measure of disorder.
+ A higher <literal>leaf_fragmentation</literal> indicates that the
+ physical order of the index leaf pages increasingly deviates from their
+ logical order. This can have a significant impact if a large part
+ of the index is read from disk.
</para>
<para>
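To read the two columns discussed above, assuming the <literal>pgstattuple</literal> extension is installed and an index named, for example, <literal>my_table_pkey</literal> exists:
<programlisting>
SELECT avg_leaf_density, leaf_fragmentation
FROM pgstatindex('my_table_pkey');
</programlisting>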
diff --git a/doc/src/sgml/plpython.sgml b/doc/src/sgml/plpython.sgml
index bee817ea822..cb065bf5f88 100644
--- a/doc/src/sgml/plpython.sgml
+++ b/doc/src/sgml/plpython.sgml
@@ -1,6 +1,6 @@
<!-- doc/src/sgml/plpython.sgml -->
-<chapter id="plpython">
+<chapter id="plpython" xreflabel="PL/Python">
<title>PL/Python &mdash; Python Procedural Language</title>
<indexterm zone="plpython"><primary>PL/Python</primary></indexterm>
diff --git a/doc/src/sgml/protocol.sgml b/doc/src/sgml/protocol.sgml
index c4d3853cbf2..82fe3f93761 100644
--- a/doc/src/sgml/protocol.sgml
+++ b/doc/src/sgml/protocol.sgml
@@ -189,7 +189,7 @@
</sect2>
<sect2 id="protocol-versions">
- <title>Protocol versions</title>
+ <title>Protocol Versions</title>
<para>
The current, latest version of the protocol is version 3.2. However, for
@@ -226,7 +226,7 @@
</para>
<table id="protocol-versions-table">
- <title>Protocol versions</title>
+ <title>Protocol Versions</title>
<tgroup cols="3">
<thead>
@@ -7292,8 +7292,8 @@ psql "dbname=postgres replication=database" -c "IDENTIFY_SYSTEM;"
<term>Int64 (XLogRecPtr)</term>
<listitem>
<para>
- The LSN of the abort. This field is available since protocol version
- 4.
+ The LSN of the abort operation, present only when streaming is set to parallel.
+ This field is available since protocol version 4.
</para>
</listitem>
</varlistentry>
@@ -7302,9 +7302,9 @@ psql "dbname=postgres replication=database" -c "IDENTIFY_SYSTEM;"
<term>Int64 (TimestampTz)</term>
<listitem>
<para>
- Abort timestamp of the transaction. The value is in number
- of microseconds since PostgreSQL epoch (2000-01-01). This field is
- available since protocol version 4.
+ Abort timestamp of the transaction, present only when streaming is set to
+ parallel. The value is in number of microseconds since PostgreSQL epoch (2000-01-01).
+ This field is available since protocol version 4.
</para>
</listitem>
</varlistentry>
diff --git a/doc/src/sgml/ref/alter_database.sgml b/doc/src/sgml/ref/alter_database.sgml
index 9da8920e12e..1fc051e11a3 100644
--- a/doc/src/sgml/ref/alter_database.sgml
+++ b/doc/src/sgml/ref/alter_database.sgml
@@ -83,7 +83,7 @@ ALTER DATABASE <replaceable class="parameter">name</replaceable> RESET ALL
must be empty for this database, and no one can be connected to
the database. Tables and indexes in non-default tablespaces are
unaffected. The method used to copy files to the new tablespace
- is affected by the <xref linkend="guc_file_copy_method"/> setting.
+ is affected by the <xref linkend="guc-file-copy-method"/> setting.
</para>
<para>
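A minimal sketch of the command this paragraph describes; the database and tablespace names are placeholders, and the file copying follows the <varname>file_copy_method</varname> setting:
<programlisting>
-- Run while connected to a different database; mydb must have no other sessions.
ALTER DATABASE mydb SET TABLESPACE fast_ssd;
</programlisting>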
diff --git a/doc/src/sgml/ref/alter_table.sgml b/doc/src/sgml/ref/alter_table.sgml
index d63f3a621ac..d1696991683 100644
--- a/doc/src/sgml/ref/alter_table.sgml
+++ b/doc/src/sgml/ref/alter_table.sgml
@@ -460,8 +460,8 @@ WITH ( MODULUS <replaceable class="parameter">numeric_literal</replaceable>, REM
<para>
This form adds a new constraint to a table using the same constraint
syntax as <link linkend="sql-createtable"><command>CREATE TABLE</command></link>, plus the option <literal>NOT
- VALID</literal>, which is currently only allowed for foreign key,
- <literal>CHECK</literal> constraints and not-null constraints.
+ VALID</literal>, which is currently only allowed for foreign-key,
+ <literal>CHECK</literal>, and not-null constraints.
</para>
<para>
@@ -469,7 +469,7 @@ WITH ( MODULUS <replaceable class="parameter">numeric_literal</replaceable>, REM
existing rows in the table satisfy the new constraint. But if
the <literal>NOT VALID</literal> option is used, this
potentially-lengthy scan is skipped. The constraint will still be
- enforced against subsequent inserts or updates (that is, they'll fail
+ applied against subsequent inserts or updates (that is, they'll fail
unless there is a matching row in the referenced table, in the case
of foreign keys, or they'll fail unless the new row matches the
specified check condition). But the
@@ -591,7 +591,7 @@ WITH ( MODULUS <replaceable class="parameter">numeric_literal</replaceable>, REM
This form validates a foreign key, check, or not-null constraint that was
previously created as <literal>NOT VALID</literal>, by scanning the
table to ensure there are no rows for which the constraint is not
- satisfied. If the constraint is not enforced, an error is thrown.
+ satisfied. If the constraint was set to <literal>NOT ENFORCED</literal>, an error is thrown.
Nothing happens if the constraint is already marked valid.
(See <xref linkend="sql-altertable-notes"/> below for an explanation
of the usefulness of this command.)
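A sketch of the workflow described above (table and constraint names are hypothetical):
<programlisting>
-- add the constraint without scanning existing rows
ALTER TABLE orders
    ADD CONSTRAINT orders_customer_fk
    FOREIGN KEY (customer_id) REFERENCES customers (id) NOT VALID;

-- later, validate the existing rows with a less disruptive scan
ALTER TABLE orders VALIDATE CONSTRAINT orders_customer_fk;
</programlisting>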
@@ -1466,11 +1466,11 @@ WITH ( MODULUS <replaceable class="parameter">numeric_literal</replaceable>, REM
</para>
<para>
- Adding an enforced <literal>CHECK</literal> or <literal>NOT NULL</literal>
+ Adding a <literal>CHECK</literal> or <literal>NOT NULL</literal>
constraint requires scanning the table to verify that existing rows meet the
constraint, but does not require a table rewrite. If a <literal>CHECK</literal>
- constraint is added as <literal>NOT ENFORCED</literal>, the validation will
- not be performed.
+ constraint is added as <literal>NOT ENFORCED</literal>, no verification will
+ be performed.
</para>
<para>
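A minimal sketch of adding a <literal>CHECK</literal> constraint as <literal>NOT ENFORCED</literal>, which skips the verification scan described above (names hypothetical):
<programlisting>
ALTER TABLE orders
    ADD CONSTRAINT orders_amount_check CHECK (amount > 0) NOT ENFORCED;
</programlisting>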
@@ -1485,7 +1485,7 @@ WITH ( MODULUS <replaceable class="parameter">numeric_literal</replaceable>, REM
</para>
<para>
- Scanning a large table to verify a new foreign key or check constraint
+ Scanning a large table to verify new foreign-key, check, or not-null constraints
can take a long time, and other updates to the table are locked out
until the <command>ALTER TABLE ADD CONSTRAINT</command> command is
committed. The main purpose of the <literal>NOT VALID</literal>
diff --git a/doc/src/sgml/ref/create_database.sgml b/doc/src/sgml/ref/create_database.sgml
index 640c0425fae..4da8aeebb50 100644
--- a/doc/src/sgml/ref/create_database.sgml
+++ b/doc/src/sgml/ref/create_database.sgml
@@ -140,7 +140,7 @@ CREATE DATABASE <replaceable class="parameter">name</replaceable>
after the creation of the new database. In some situations, this may
have a noticeable negative impact on overall system performance. The
<literal>FILE_COPY</literal> strategy is affected by the <xref
- linkend="guc_file_copy_method"/> setting.
+ linkend="guc-file-copy-method"/> setting.
</para>
</listitem>
</varlistentry>
diff --git a/doc/src/sgml/ref/create_foreign_table.sgml b/doc/src/sgml/ref/create_foreign_table.sgml
index d08834ac9d2..009fa46532b 100644
--- a/doc/src/sgml/ref/create_foreign_table.sgml
+++ b/doc/src/sgml/ref/create_foreign_table.sgml
@@ -232,7 +232,7 @@ WITH ( MODULUS <replaceable class="parameter">numeric_literal</replaceable>, REM
<term><literal>INCLUDING COMMENTS</literal></term>
<listitem>
<para>
- Comments for the copied columns, constraints, and indexes will be
+ Comments for the copied columns and constraints will be
copied. The default behavior is to exclude comments, resulting in
the copied columns and constraints in the new table having no
comments.
diff --git a/doc/src/sgml/ref/create_index.sgml b/doc/src/sgml/ref/create_index.sgml
index 147a8f7587c..b9c679c41e8 100644
--- a/doc/src/sgml/ref/create_index.sgml
+++ b/doc/src/sgml/ref/create_index.sgml
@@ -814,7 +814,7 @@ Indexes:
leveraging multiple CPUs in order to process the table rows faster.
This feature is known as <firstterm>parallel index
build</firstterm>. For index methods that support building indexes
- in parallel (currently, B-tree and BRIN),
+ in parallel (currently, B-tree, GIN, and BRIN),
<varname>maintenance_work_mem</varname> specifies the maximum
amount of memory that can be used by each index build operation as
a whole, regardless of how many worker processes were started.
diff --git a/doc/src/sgml/ref/create_operator.sgml b/doc/src/sgml/ref/create_operator.sgml
index 3553d364541..d2ffb1b2a50 100644
--- a/doc/src/sgml/ref/create_operator.sgml
+++ b/doc/src/sgml/ref/create_operator.sgml
@@ -23,7 +23,7 @@ PostgreSQL documentation
<synopsis>
CREATE OPERATOR <replaceable>name</replaceable> (
{FUNCTION|PROCEDURE} = <replaceable class="parameter">function_name</replaceable>
- [, LEFTARG = <replaceable class="parameter">left_type</replaceable> ] [, RIGHTARG = <replaceable class="parameter">right_type</replaceable> ]
+ [, LEFTARG = <replaceable class="parameter">left_type</replaceable> ] , RIGHTARG = <replaceable class="parameter">right_type</replaceable>
[, COMMUTATOR = <replaceable class="parameter">com_op</replaceable> ] [, NEGATOR = <replaceable class="parameter">neg_op</replaceable> ]
[, RESTRICT = <replaceable class="parameter">res_proc</replaceable> ] [, JOIN = <replaceable class="parameter">join_proc</replaceable> ]
[, HASHES ] [, MERGES ]
@@ -88,8 +88,8 @@ CREATE OPERATOR <replaceable>name</replaceable> (
<para>
For binary operators, both <literal>LEFTARG</literal> and
- <literal>RIGHTARG</literal> must be defined. For prefix operators only
- <literal>RIGHTARG</literal> should be defined.
+ <literal>RIGHTARG</literal> must be defined. For prefix operators, only
+ <literal>RIGHTARG</literal> must be defined.
The <replaceable class="parameter">function_name</replaceable>
function must have been previously defined using <command>CREATE
FUNCTION</command> and must be defined to accept the correct number
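To illustrate the revised synopsis, a sketch of one binary and one prefix operator built on existing built-in functions (the operator names are arbitrary):
<programlisting>
-- binary operator: both LEFTARG and RIGHTARG are defined
CREATE OPERATOR === (
    FUNCTION = int4eq,
    LEFTARG  = integer,
    RIGHTARG = integer
);

-- prefix operator: only RIGHTARG is defined
CREATE OPERATOR !! (
    FUNCTION = factorial,
    RIGHTARG = bigint
);
</programlisting>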
diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml
index 4a41b2f5530..dc000e913c1 100644
--- a/doc/src/sgml/ref/create_table.sgml
+++ b/doc/src/sgml/ref/create_table.sgml
@@ -448,11 +448,6 @@ WITH ( MODULUS <replaceable class="parameter">numeric_literal</replaceable>, REM
</para>
<para>
- Partitioned tables do not support <literal>EXCLUDE</literal> constraints;
- however, you can define these constraints on individual partitions.
- </para>
-
- <para>
See <xref linkend="ddl-partitioning"/> for more discussion on table
partitioning.
</para>
@@ -929,6 +924,15 @@ WITH ( MODULUS <replaceable class="parameter">numeric_literal</replaceable>, REM
not other generated columns. Any functions and operators used must be
immutable. References to other tables are not allowed.
</para>
+
+ <para>
+ A virtual generated column cannot have a user-defined type, and the
+ generation expression of a virtual generated column must not reference
+ user-defined functions or types, that is, it can only use built-in
+ functions or types. This restriction also applies indirectly, for example
+ to functions or types that underlie operators or casts. (This restriction does not
+ exist for stored generated columns.)
+ </para>
</listitem>
</varlistentry>
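A sketch of a virtual generated column that satisfies the restriction above, using only built-in types and operators (table name hypothetical):
<programlisting>
CREATE TABLE prices (
    net   numeric,
    tax   numeric,
    gross numeric GENERATED ALWAYS AS (net + tax) VIRTUAL
);
</programlisting>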
@@ -1162,6 +1166,18 @@ WITH ( MODULUS <replaceable class="parameter">numeric_literal</replaceable>, REM
exclusion constraint on a subset of the table; internally this creates a
partial index. Note that parentheses are required around the predicate.
</para>
+
+ <para>
+ When establishing an exclusion constraint for a multi-level partition
+ hierarchy, all the columns in the partition key of the target
+ partitioned table, as well as those of all its descendant partitioned
+ tables, must be included in the constraint definition. Additionally,
+ those columns must be compared using the equality operator. These
+ restrictions ensure that potentially-conflicting rows will exist in the
+ same partition. The constraint may also refer to other columns that are
+ not part of any partition key; those columns can be compared using any
+ appropriate operator.
+ </para>
</listitem>
</varlistentry>
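A sketch of such a constraint on a single-level partitioned table, assuming the btree_gist extension to provide GiST equality on the partition key column (names hypothetical):
<programlisting>
CREATE TABLE reservations (
    room   int,
    during tsrange,
    EXCLUDE USING gist (room WITH =, during WITH &&)
) PARTITION BY LIST (room);
</programlisting>
The partition key column room is compared with equality, so potentially-conflicting rows are guaranteed to land in the same partition; during may use any suitable operator.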
@@ -1687,7 +1703,8 @@ WITH ( MODULUS <replaceable class="parameter">numeric_literal</replaceable>, REM
<varlistentry id="reloption-vacuum-truncate" xreflabel="vacuum_truncate">
<term><literal>vacuum_truncate</literal>, <literal>toast.vacuum_truncate</literal> (<type>boolean</type>)
<indexterm>
- <primary><varname>vacuum_truncate</varname> storage parameter</primary>
+ <primary><varname>vacuum_truncate</varname></primary>
+ <secondary>storage parameter</secondary>
</indexterm>
</term>
<listitem>
diff --git a/doc/src/sgml/ref/merge.sgml b/doc/src/sgml/ref/merge.sgml
index ecbcd8345d8..c2e181066a4 100644
--- a/doc/src/sgml/ref/merge.sgml
+++ b/doc/src/sgml/ref/merge.sgml
@@ -23,37 +23,37 @@ PostgreSQL documentation
<synopsis>
[ WITH <replaceable class="parameter">with_query</replaceable> [, ...] ]
MERGE INTO [ ONLY ] <replaceable class="parameter">target_table_name</replaceable> [ * ] [ [ AS ] <replaceable class="parameter">target_alias</replaceable> ]
-USING <replaceable class="parameter">data_source</replaceable> ON <replaceable class="parameter">join_condition</replaceable>
-<replaceable class="parameter">when_clause</replaceable> [...]
-[ RETURNING [ WITH ( { OLD | NEW } AS <replaceable class="parameter">output_alias</replaceable> [, ...] ) ]
- { * | <replaceable class="parameter">output_expression</replaceable> [ [ AS ] <replaceable class="parameter">output_name</replaceable> ] } [, ...] ]
+ USING <replaceable class="parameter">data_source</replaceable> ON <replaceable class="parameter">join_condition</replaceable>
+ <replaceable class="parameter">when_clause</replaceable> [...]
+ [ RETURNING [ WITH ( { OLD | NEW } AS <replaceable class="parameter">output_alias</replaceable> [, ...] ) ]
+ { * | <replaceable class="parameter">output_expression</replaceable> [ [ AS ] <replaceable class="parameter">output_name</replaceable> ] } [, ...] ]
<phrase>where <replaceable class="parameter">data_source</replaceable> is:</phrase>
-{ [ ONLY ] <replaceable class="parameter">source_table_name</replaceable> [ * ] | ( <replaceable class="parameter">source_query</replaceable> ) } [ [ AS ] <replaceable class="parameter">source_alias</replaceable> ]
+ { [ ONLY ] <replaceable class="parameter">source_table_name</replaceable> [ * ] | ( <replaceable class="parameter">source_query</replaceable> ) } [ [ AS ] <replaceable class="parameter">source_alias</replaceable> ]
<phrase>and <replaceable class="parameter">when_clause</replaceable> is:</phrase>
-{ WHEN MATCHED [ AND <replaceable class="parameter">condition</replaceable> ] THEN { <replaceable class="parameter">merge_update</replaceable> | <replaceable class="parameter">merge_delete</replaceable> | DO NOTHING } |
- WHEN NOT MATCHED BY SOURCE [ AND <replaceable class="parameter">condition</replaceable> ] THEN { <replaceable class="parameter">merge_update</replaceable> | <replaceable class="parameter">merge_delete</replaceable> | DO NOTHING } |
- WHEN NOT MATCHED [ BY TARGET ] [ AND <replaceable class="parameter">condition</replaceable> ] THEN { <replaceable class="parameter">merge_insert</replaceable> | DO NOTHING } }
+ { WHEN MATCHED [ AND <replaceable class="parameter">condition</replaceable> ] THEN { <replaceable class="parameter">merge_update</replaceable> | <replaceable class="parameter">merge_delete</replaceable> | DO NOTHING } |
+ WHEN NOT MATCHED BY SOURCE [ AND <replaceable class="parameter">condition</replaceable> ] THEN { <replaceable class="parameter">merge_update</replaceable> | <replaceable class="parameter">merge_delete</replaceable> | DO NOTHING } |
+ WHEN NOT MATCHED [ BY TARGET ] [ AND <replaceable class="parameter">condition</replaceable> ] THEN { <replaceable class="parameter">merge_insert</replaceable> | DO NOTHING } }
<phrase>and <replaceable class="parameter">merge_insert</replaceable> is:</phrase>
-INSERT [( <replaceable class="parameter">column_name</replaceable> [, ...] )]
-[ OVERRIDING { SYSTEM | USER } VALUE ]
-{ VALUES ( { <replaceable class="parameter">expression</replaceable> | DEFAULT } [, ...] ) | DEFAULT VALUES }
+ INSERT [( <replaceable class="parameter">column_name</replaceable> [, ...] )]
+ [ OVERRIDING { SYSTEM | USER } VALUE ]
+ { VALUES ( { <replaceable class="parameter">expression</replaceable> | DEFAULT } [, ...] ) | DEFAULT VALUES }
<phrase>and <replaceable class="parameter">merge_update</replaceable> is:</phrase>
-UPDATE SET { <replaceable class="parameter">column_name</replaceable> = { <replaceable class="parameter">expression</replaceable> | DEFAULT } |
- ( <replaceable class="parameter">column_name</replaceable> [, ...] ) = [ ROW ] ( { <replaceable class="parameter">expression</replaceable> | DEFAULT } [, ...] ) |
- ( <replaceable class="parameter">column_name</replaceable> [, ...] ) = ( <replaceable class="parameter">sub-SELECT</replaceable> )
- } [, ...]
+ UPDATE SET { <replaceable class="parameter">column_name</replaceable> = { <replaceable class="parameter">expression</replaceable> | DEFAULT } |
+ ( <replaceable class="parameter">column_name</replaceable> [, ...] ) = [ ROW ] ( { <replaceable class="parameter">expression</replaceable> | DEFAULT } [, ...] ) |
+ ( <replaceable class="parameter">column_name</replaceable> [, ...] ) = ( <replaceable class="parameter">sub-SELECT</replaceable> )
+ } [, ...]
<phrase>and <replaceable class="parameter">merge_delete</replaceable> is:</phrase>
-DELETE
+ DELETE
</synopsis>
</refsynopsisdiv>
@@ -106,10 +106,11 @@ DELETE
to compute and return value(s) based on each row inserted, updated, or
deleted. Any expression using the source or target table's columns, or
the <link linkend="merge-action"><function>merge_action()</function></link>
- function can be computed. When an <command>INSERT</command> or
+ function can be computed. By default, when an <command>INSERT</command> or
<command>UPDATE</command> action is performed, the new values of the target
- table's columns are used. When a <command>DELETE</command> is performed,
- the old values of the target table's columns are used. The syntax of the
+ table's columns are used, and when a <command>DELETE</command> is performed,
+ the old values are used. It is also possible to explicitly request the old
+ and new values. The syntax of the
<literal>RETURNING</literal> list is identical to that of the output list
of <command>SELECT</command>.
</para>
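A sketch of a <command>MERGE</command> that explicitly requests old and new values in <literal>RETURNING</literal> (tables and columns hypothetical):
<programlisting>
MERGE INTO inventory AS t
USING incoming AS s ON t.item = s.item
WHEN MATCHED THEN
    UPDATE SET qty = t.qty + s.qty
WHEN NOT MATCHED THEN
    INSERT (item, qty) VALUES (s.item, s.qty)
RETURNING WITH (OLD AS o, NEW AS n)
    merge_action(), t.item, o.qty AS qty_before, n.qty AS qty_after;
</programlisting>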
diff --git a/doc/src/sgml/ref/pg_createsubscriber.sgml b/doc/src/sgml/ref/pg_createsubscriber.sgml
index 4b1d08d5f16..bb9cc72576c 100644
--- a/doc/src/sgml/ref/pg_createsubscriber.sgml
+++ b/doc/src/sgml/ref/pg_createsubscriber.sgml
@@ -170,36 +170,6 @@ PostgreSQL documentation
</varlistentry>
<varlistentry>
- <term><option>-R <replaceable class="parameter">objtype</replaceable></option></term>
- <term><option>--remove=<replaceable class="parameter">objtype</replaceable></option></term>
- <listitem>
- <para>
- Remove all objects of the specified type from specified databases on the
- target server.
- </para>
- <para>
- <itemizedlist>
- <listitem>
- <para>
- <literal>publications</literal>:
- The <literal>FOR ALL TABLES</literal> publications established for this
- subscriber are always removed; specifying this object type causes all
- other publications replicated from the source server to be dropped as
- well.
- </para>
- </listitem>
- </itemizedlist>
- </para>
- <para>
- The objects selected to be dropped are individually logged, including during
- a <option>--dry-run</option>. There is no opportunity to affect or stop the
- dropping of the selected objects, so consider taking a backup of them
- using <application>pg_dump</application>.
- </para>
- </listitem>
- </varlistentry>
-
- <varlistentry>
<term><option>-s <replaceable class="parameter">dir</replaceable></option></term>
<term><option>--socketdir=<replaceable class="parameter">dir</replaceable></option></term>
<listitem>
@@ -260,6 +230,35 @@ PostgreSQL documentation
</varlistentry>
<varlistentry>
+ <term><option>--clean=<replaceable class="parameter">objtype</replaceable></option></term>
+ <listitem>
+ <para>
+ Drop all objects of the specified type from specified databases on the
+ target server.
+ </para>
+ <para>
+ <itemizedlist>
+ <listitem>
+ <para>
+ <literal>publications</literal>:
+ The <literal>FOR ALL TABLES</literal> publications established for this
+ subscriber are always dropped; specifying this object type causes all
+ other publications replicated from the source server to be dropped as
+ well.
+ </para>
+ </listitem>
+ </itemizedlist>
+ </para>
+ <para>
+ The objects selected to be dropped are individually logged, including during
+ a <option>--dry-run</option>. There is no opportunity to affect or stop the
+ dropping of the selected objects, so consider taking a backup of them
+ using <application>pg_dump</application>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
<term><option>--config-file=<replaceable class="parameter">filename</replaceable></option></term>
<listitem>
<para>
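A sketch of the renamed <option>--clean</option> option in context; paths, connection strings, and database names are placeholders:
<programlisting>
pg_createsubscriber --dry-run --clean=publications \
    --pgdata=/path/to/standby/data \
    --publisher-server="host=primary dbname=appdb" \
    --database=appdb
</programlisting>
Running with <option>--dry-run</option> first logs which publications would be dropped without actually dropping them.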
diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml
index c10bca63e55..2ae084b5fa6 100644
--- a/doc/src/sgml/ref/pg_dump.sgml
+++ b/doc/src/sgml/ref/pg_dump.sgml
@@ -18,7 +18,7 @@ PostgreSQL documentation
<refname>pg_dump</refname>
<refpurpose>
- extract a <productname>PostgreSQL</productname> database into a script file or other archive file
+ export a <productname>PostgreSQL</productname> database as an SQL script or to other formats
</refpurpose>
</refnamediv>
@@ -1134,7 +1134,7 @@ PostgreSQL documentation
<term><option>--no-statistics</option></term>
<listitem>
<para>
- Do not dump statistics.
+ Do not dump statistics. This is the default.
</para>
</listitem>
</varlistentry>
@@ -1277,11 +1277,11 @@ PostgreSQL documentation
</para>
<para>
The data section contains actual table data, large-object
- contents, statistics for tables and materialized views and
- sequence values.
+ contents, sequence values, and statistics for tables,
+ materialized views, and foreign tables.
Post-data items include definitions of indexes, triggers, rules,
statistics for indexes, and constraints other than validated check
- constraints.
+ and not-null constraints.
Pre-data items include all other data definition items.
</para>
</listitem>
@@ -1359,7 +1359,8 @@ PostgreSQL documentation
<listitem>
<para>
Dump only the statistics, not the schema (data definitions) or data.
- Statistics for tables, materialized views, and indexes are dumped.
+ Statistics for tables, materialized views, foreign tables,
+ and indexes are dumped.
</para>
</listitem>
</varlistentry>
@@ -1461,7 +1462,7 @@ PostgreSQL documentation
<term><option>--with-statistics</option></term>
<listitem>
<para>
- Dump statistics. This is the default.
+ Dump statistics.
</para>
</listitem>
</varlistentry>
@@ -1681,14 +1682,14 @@ CREATE DATABASE foo WITH TEMPLATE template0;
</para>
<para>
- By default, <command>pg_dump</command> will include most optimizer
- statistics in the resulting dump file. However, some statistics may not be
- included, such as those created explicitly with <xref
- linkend="sql-createstatistics"/> or custom statistics added by an
- extension. Therefore, it may be useful to run <command>ANALYZE</command>
- after restoring from a dump file to ensure optimal performance; see <xref
- linkend="vacuum-for-statistics"/> and <xref linkend="autovacuum"/> for more
- information.
+ If <option>--with-statistics</option> is specified,
+ <command>pg_dump</command> will include most optimizer statistics in the
+ resulting dump file. However, some statistics may not be included, such as
+ those created explicitly with <xref linkend="sql-createstatistics"/> or
+ custom statistics added by an extension. Therefore, it may be useful to
+ run <command>ANALYZE</command> after restoring from a dump file to ensure
+ optimal performance; see <xref linkend="vacuum-for-statistics"/> and <xref
+ linkend="autovacuum"/> for more information.
</para>
<para>
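A sketch of dumping with statistics and analyzing after restore (database and file names are placeholders, and the target database is assumed to exist):
<programlisting>
pg_dump --format=custom --with-statistics --file=appdb.dump appdb
pg_restore --dbname=appdb_restored appdb.dump
psql -d appdb_restored -c 'ANALYZE;'
</programlisting>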
diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml
index 8c5141d036c..8ca68da5a55 100644
--- a/doc/src/sgml/ref/pg_dumpall.sgml
+++ b/doc/src/sgml/ref/pg_dumpall.sgml
@@ -16,7 +16,10 @@ PostgreSQL documentation
<refnamediv>
<refname>pg_dumpall</refname>
- <refpurpose>extract a <productname>PostgreSQL</productname> database cluster using a specified dump format</refpurpose>
+
+ <refpurpose>
+ export a <productname>PostgreSQL</productname> database cluster as an SQL script or to other formats
+ </refpurpose>
</refnamediv>
<refsynopsisdiv>
@@ -33,7 +36,7 @@ PostgreSQL documentation
<para>
<application>pg_dumpall</application> is a utility for writing out
(<quote>dumping</quote>) all <productname>PostgreSQL</productname> databases
- of a cluster into an archive. The archive contains
+ of a cluster into an SQL script file or an archive. The output contains
<acronym>SQL</acronym> commands that can be used as input to <xref
linkend="app-psql"/> to restore the databases. It does this by
calling <xref linkend="app-pgdump"/> for each database in the cluster.
@@ -567,7 +570,7 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
<term><option>--no-statistics</option></term>
<listitem>
<para>
- Do not dump statistics.
+ Do not dump statistics. This is the default.
</para>
</listitem>
</varlistentry>
@@ -690,7 +693,8 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
<listitem>
<para>
Dump only the statistics, not the schema (data definitions) or data.
- Statistics for tables, materialized views, and indexes are dumped.
+ Statistics for tables, materialized views, foreign tables,
+ and indexes are dumped.
</para>
</listitem>
</varlistentry>
@@ -741,7 +745,7 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
<term><option>--with-statistics</option></term>
<listitem>
<para>
- Dump statistics. This is the default.
+ Dump statistics.
</para>
</listitem>
</varlistentry>
@@ -957,14 +961,14 @@ exclude database <replaceable class="parameter">PATTERN</replaceable>
</para>
<para>
- By default, <command>pg_dumpall</command> will include most optimizer
- statistics in the resulting dump file. However, some statistics may not be
- included, such as those created explicitly with <xref
- linkend="sql-createstatistics"/> or custom statistics added by an
- extension. Therefore, it may be useful to run <command>ANALYZE</command>
- on each database after restoring from a dump file to ensure optimal
- performance. You can also run <command>vacuumdb -a -z</command> to analyze
- all databases.
+ If <option>--with-statistics</option> is specified,
+ <command>pg_dumpall</command> will include most optimizer statistics in the
+ resulting dump file. However, some statistics may not be included, such as
+ those created explicitly with <xref linkend="sql-createstatistics"/> or
+ custom statistics added by an extension. Therefore, it may be useful to
+ run <command>ANALYZE</command> on each database after restoring from a dump
+ file to ensure optimal performance. You can also run <command>vacuumdb -a
+ -z</command> to analyze all databases.
</para>
<para>
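A sketch combining the suggestions above (the file name is a placeholder):
<programlisting>
pg_dumpall --with-statistics -f cluster.sql
psql -f cluster.sql postgres
vacuumdb -a -z
</programlisting>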
diff --git a/doc/src/sgml/ref/pg_recvlogical.sgml b/doc/src/sgml/ref/pg_recvlogical.sgml
index 63a45c7018a..f68182266a9 100644
--- a/doc/src/sgml/ref/pg_recvlogical.sgml
+++ b/doc/src/sgml/ref/pg_recvlogical.sgml
@@ -79,8 +79,8 @@ PostgreSQL documentation
</para>
<para>
- The <option>--two-phase</option> and <option>--failover</option> options
- can be specified with <option>--create-slot</option>.
+ The <option>--enable-two-phase</option> and <option>--enable-failover</option>
+ options can be specified with <option>--create-slot</option>.
</para>
</listitem>
</varlistentry>
@@ -166,7 +166,7 @@ PostgreSQL documentation
</varlistentry>
<varlistentry>
- <term><option>--failover</option></term>
+ <term><option>--enable-failover</option></term>
<listitem>
<para>
Enables the slot to be synchronized to the standbys. This option may
@@ -300,7 +300,8 @@ PostgreSQL documentation
<varlistentry>
<term><option>-t</option></term>
- <term><option>--two-phase</option></term>
+ <term><option>--enable-two-phase</option></term>
+ <term><option>--two-phase</option> (deprecated)</term>
<listitem>
<para>
Enables decoding of prepared transactions. This option may only be specified with
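A sketch using the renamed options together with <option>--create-slot</option> (the slot name is a placeholder):
<programlisting>
pg_recvlogical -d postgres --slot=test_slot --create-slot \
    --enable-two-phase --enable-failover
</programlisting>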
diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml
index 2295df62d03..b649bd3a5ae 100644
--- a/doc/src/sgml/ref/pg_restore.sgml
+++ b/doc/src/sgml/ref/pg_restore.sgml
@@ -18,8 +18,8 @@ PostgreSQL documentation
<refname>pg_restore</refname>
<refpurpose>
- restore a <productname>PostgreSQL</productname> database or cluster
- from an archive created by <application>pg_dump</application> or
+ restore <productname>PostgreSQL</productname> databases from archives
+ created by <application>pg_dump</application> or
<application>pg_dumpall</application>
</refpurpose>
</refnamediv>
@@ -923,7 +923,8 @@ PostgreSQL documentation
<term><option>--with-data</option></term>
<listitem>
<para>
- Dump data. This is the default.
+ Output commands to restore data, if the archive contains them.
+ This is the default.
</para>
</listitem>
</varlistentry>
@@ -932,7 +933,8 @@ PostgreSQL documentation
<term><option>--with-schema</option></term>
<listitem>
<para>
- Dump schema (data definitions). This is the default.
+ Output commands to restore schema (data definitions), if the archive
+ contains them. This is the default.
</para>
</listitem>
</varlistentry>
@@ -941,7 +943,8 @@ PostgreSQL documentation
<term><option>--with-statistics</option></term>
<listitem>
<para>
- Dump statistics. This is the default.
+ Output commands to restore statistics, if the archive contains them.
+ This is the default.
</para>
</listitem>
</varlistentry>
diff --git a/doc/src/sgml/ref/psql-ref.sgml b/doc/src/sgml/ref/psql-ref.sgml
index 8f7d8758ca0..95f4cac2467 100644
--- a/doc/src/sgml/ref/psql-ref.sgml
+++ b/doc/src/sgml/ref/psql-ref.sgml
@@ -1067,8 +1067,8 @@ INSERT INTO tbls1 VALUES ($1, $2) \parse stmt1
</listitem>
</varlistentry>
- <varlistentry id="app-psql-meta-command-close">
- <term><literal>\close</literal> <replaceable class="parameter">prepared_statement_name</replaceable></term>
+ <varlistentry id="app-psql-meta-command-close-prepared">
+ <term><literal>\close_prepared</literal> <replaceable class="parameter">prepared_statement_name</replaceable></term>
<listitem>
<para>
@@ -1081,7 +1081,7 @@ INSERT INTO tbls1 VALUES ($1, $2) \parse stmt1
Example:
<programlisting>
SELECT $1 \parse stmt1
-\close stmt1
+\close_prepared stmt1
</programlisting>
</para>
@@ -1101,7 +1101,16 @@ SELECT $1 \parse stmt1
<listitem>
<para>
Outputs information about the current database connection,
- including TLS-related information if TLS is in use.
+ including SSL-related information if SSL is in use.
+ </para>
+ <para>
+ Note that the <structfield>Client User</structfield> field shows
+ the user at the time of connection, while the
+ <structfield>Superuser</structfield> field indicates whether
+ the current user (in the current execution context) has
+ superuser privileges. These users are usually the same, but they can
+ differ, for example, if the current user was changed with the
+ <command>SET ROLE</command> command.
</para>
</listitem>
</varlistentry>
@@ -3701,7 +3710,7 @@ testdb=&gt; <userinput>\setenv LESS -imx4F</userinput>
All queries executed while a pipeline is ongoing use the extended
query protocol. Queries are appended to the pipeline when ending with
a semicolon. The meta-commands <literal>\bind</literal>,
- <literal>\bind_named</literal>, <literal>\close</literal> or
+ <literal>\bind_named</literal>, <literal>\close_prepared</literal> or
<literal>\parse</literal> can be used in an ongoing pipeline. While
a pipeline is ongoing, <literal>\sendpipeline</literal> will append
the current query buffer to the pipeline. Other meta-commands like
@@ -3734,6 +3743,10 @@ testdb=&gt; <userinput>\setenv LESS -imx4F</userinput>
</para>
<para>
+ <command>COPY</command> is not supported while in pipeline mode.
+ </para>
+
+ <para>
Example:
<programlisting>
\startpipeline
@@ -3853,7 +3866,7 @@ SELECT 1 \bind \sendpipeline
(if given) is reached, or the query no longer returns the minimum number
of rows. Wait the specified number of seconds (default 2) between executions.
The default wait can be changed with the variable
- <xref linkend="app-psql-variables-watch-interval"/>).
+ <xref linkend="app-psql-variables-watch-interval"/>.
For backwards compatibility,
<replaceable class="parameter">seconds</replaceable> can be specified
with or without an <literal>interval=</literal> prefix.
@@ -4752,9 +4765,10 @@ bar
<term><varname>WATCH_INTERVAL</varname></term>
<listitem>
<para>
- This variable sets the default interval which <command>\watch</command>
- waits between executing the query. Specifying an interval in the
- command overrides this variable.
+ This variable sets the default interval, in seconds, that
+ <command>\watch</command> waits between executions of the query. The
+ default is 2 seconds. Specifying an interval in the command overrides
+ this variable.
</para>
</listitem>
</varlistentry>
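A brief psql sketch of the variable in use:
<programlisting>
\set WATCH_INTERVAL 5
SELECT count(*) FROM pg_stat_activity \watch
</programlisting>
The query now re-executes every 5 seconds instead of the default 2.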
diff --git a/doc/src/sgml/ref/reindex.sgml b/doc/src/sgml/ref/reindex.sgml
index 5b3c769800e..c4055397146 100644
--- a/doc/src/sgml/ref/reindex.sgml
+++ b/doc/src/sgml/ref/reindex.sgml
@@ -465,14 +465,17 @@ Indexes:
</programlisting>
If the index marked <literal>INVALID</literal> is suffixed
- <literal>ccnew</literal>, then it corresponds to the transient
+ <literal>_ccnew</literal>, then it corresponds to the transient
index created during the concurrent operation, and the recommended
recovery method is to drop it using <literal>DROP INDEX</literal>,
then attempt <command>REINDEX CONCURRENTLY</command> again.
- If the invalid index is instead suffixed <literal>ccold</literal>,
+ If the invalid index is instead suffixed <literal>_ccold</literal>,
it corresponds to the original index which could not be dropped;
the recommended recovery method is to just drop said index, since the
rebuild proper has been successful.
+ A nonzero number may be appended to the suffix of the invalid index
+ names to keep them unique, like <literal>_ccnew1</literal>,
+ <literal>_ccold2</literal>, etc.
</para>
<para>
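A sketch of the recovery steps described above, assuming a leftover invalid index named idx_ccnew belonging to an index idx (both names hypothetical):
<programlisting>
DROP INDEX idx_ccnew;
REINDEX INDEX CONCURRENTLY idx;
</programlisting>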
diff --git a/doc/src/sgml/ref/security_label.sgml b/doc/src/sgml/ref/security_label.sgml
index e5e5fb483e9..aa45c0af248 100644
--- a/doc/src/sgml/ref/security_label.sgml
+++ b/doc/src/sgml/ref/security_label.sgml
@@ -84,6 +84,10 @@ SECURITY LABEL [ FOR <replaceable class="parameter">provider</replaceable> ] ON
based on object labels, rather than traditional discretionary access control
(DAC) concepts such as users and groups.
</para>
+
+ <para>
+ You must own the database object to use <command>SECURITY LABEL</command>.
+ </para>
</refsect1>
<refsect1>
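For reference, a label assignment of the kind this page describes, which now explicitly requires ownership of the target object (the table name is hypothetical):
<programlisting>
SECURITY LABEL FOR selinux ON TABLE mytable
    IS 'system_u:object_r:sepgsql_table_t:s0';
</programlisting>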
diff --git a/doc/src/sgml/ref/update.sgml b/doc/src/sgml/ref/update.sgml
index 12ec5ba0709..40cca063946 100644
--- a/doc/src/sgml/ref/update.sgml
+++ b/doc/src/sgml/ref/update.sgml
@@ -57,7 +57,8 @@ UPDATE [ ONLY ] <replaceable class="parameter">table_name</replaceable> [ * ] [
to compute and return value(s) based on each row actually updated.
Any expression using the table's columns, and/or columns of other
tables mentioned in <literal>FROM</literal>, can be computed.
- The new (post-update) values of the table's columns are used.
+ By default, the new (post-update) values of the table's columns are used,
+ but it is also possible to request the old (pre-update) values.
The syntax of the <literal>RETURNING</literal> list is identical to that of the
output list of <command>SELECT</command>.
</para>
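A sketch of requesting both pre- and post-update values (table and column names hypothetical):
<programlisting>
UPDATE accounts
   SET balance = balance - 100.00
 WHERE acct_id = 1
RETURNING old.balance AS balance_before,
          new.balance AS balance_after;
</programlisting>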
diff --git a/doc/src/sgml/release-18.sgml b/doc/src/sgml/release-18.sgml
deleted file mode 100644
index cdf47ac6d2a..00000000000
--- a/doc/src/sgml/release-18.sgml
+++ /dev/null
@@ -1,3554 +0,0 @@
-<!-- doc/src/sgml/release-18.sgml -->
-<!-- See header comment in release.sgml about typical markup -->
-
- <sect1 id="release-18">
- <title>Release 18</title>
-
- <formalpara>
- <title>Release date:</title>
- <para>2025-??-??, CURRENT AS OF 2025-05-01</para>
- </formalpara>
-
- <sect2 id="release-18-highlights">
- <title>Overview</title>
-
- <para>
- <productname>PostgreSQL</productname> 18 contains many new features
- and enhancements, including:
- </para>
-
- <itemizedlist>
-
- <listitem>
- <para>
- (to be completed)
- </para>
- </listitem>
- </itemizedlist>
-
- <para>
- The above items and other new features of
- <productname>PostgreSQL</productname> 18 are explained in more detail
- in the sections below.
- </para>
-
- </sect2>
-
- <sect2 id="release-18-migration">
-
- <title>Migration to Version 18</title>
-
- <para>
- A dump/restore using <xref linkend="app-pg-dumpall"/> or use of
- <xref linkend="pgupgrade"/> or logical replication is required for
- those wishing to migrate data from any previous release. See <xref
- linkend="upgrading"/> for general information on migrating to new
- major releases.
- </para>
-
- <para>
- Version 18 contains a number of changes that may affect compatibility
- with previous releases. Observe the following incompatibilities:
- </para>
-
- <itemizedlist>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2025-01-16 [d7674c9fa] Seek zone abbreviations in the IANA data before timezone
--->
-
-<listitem>
-<para>
-Change time zone abbreviation handling (Tom Lane)
-<ulink url="&commit_baseurl;d7674c9fa">&sect;</ulink>
-</para>
-
-<para>
-The system will now favor the current session's time zone abbreviations before checking the server variable timezone_abbreviations. Previously timezone_abbreviations was
-checked first.
-</para>
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2024-12-02 [db6a4a985] Deprecate MD5 passwords.
--->
-
-<listitem>
-<para>
-Deprecate MD5 password authentication (Nathan Bossart)
-<ulink url="&commit_baseurl;db6a4a985">&sect;</ulink>
-</para>
-
-<para>
-Warnings generated by their use can be disabled by the server variable md5_password_warnings.
-</para>
-</listitem>
-
-<!--
-Author: David Rowley <drowley@postgresql.org>
-2024-09-24 [62ddf7ee9] Add ONLY support for VACUUM and ANALYZE
--->
-
-<listitem>
-<para>
-Change VACUUM and ANALYZE to process the inheritance children of a parent (Michael Harris)
-<ulink url="&commit_baseurl;62ddf7ee9">&sect;</ulink>
-</para>
-
-<para>
-The previous behavior can be obtained by using the new ONLY option.
-</para>
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2024-09-30 [770233748] Do not treat \. as an EOF marker in CSV mode for COPY IN
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2024-10-01 [da8a4c166] Reject a copy EOF marker that has data ahead of it on th
--->
-
-<listitem>
-<para>
-Prevent COPY FROM from treating \. as an end-of-file marker when reading CSV files (Daniel Vérité, Tom Lane)
-<ulink url="&commit_baseurl;770233748">&sect;</ulink>
-<ulink url="&commit_baseurl;da8a4c166">&sect;</ulink>
-</para>
-
-<para>
-psql will still treat \. as an end-of-file marker when reading CSV files from STDIN. Older psql clients connecting to Postgres 18 servers might experience \copy problems. This
-release also enforces that \. must appear alone on a line.
-</para>
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2024-10-03 [e2bab2d79] Remove support for unlogged on partitioned tables
--->
-
-<listitem>
-<para>
-Disallow unlogged partitioned tables (Michael Paquier)
-<ulink url="&commit_baseurl;e2bab2d79">&sect;</ulink>
-</para>
-
-<para>
-Previously ALTER TABLE SET [UN]LOGGED did nothing, and the creation of an unlogged partitioned table did not cause its children to be unlogged.
-</para>
-</listitem>
-
-<!--
-Author: Fujii Masao <fujii@postgresql.org>
-2024-09-12 [fefa76f70] Remove old RULE privilege completely.
--->
-
-<listitem>
-<para>
-Remove non-functional support for RULE privileges in GRANT/REVOKE (Fujii Masao)
-<ulink url="&commit_baseurl;fefa76f70">&sect;</ulink>
-</para>
-
-<para>
-These have been non-functional since Postgres 8.2.
-</para>
-</listitem>
-
-<!--
-Author: David Rowley <drowley@postgresql.org>
-2024-08-12 [f0d112759] Remove "parent" column from pg_backend_memory_contexts
--->
-
-<listitem>
-<para>
-Remove column pg_backend_memory_contexts.parent (Melih Mutlu)
-<ulink url="&commit_baseurl;f0d112759">&sect;</ulink>
-</para>
-
-<para>
-This is no longer needed since pg_backend_memory_contexts.path was added.
-</para>
-</listitem>
-
-<!--
-Author: David Rowley <drowley@postgresql.org>
-2024-07-25 [32d3ed816] Add path column to pg_backend_memory_contexts view
-Author: David Rowley <drowley@postgresql.org>
-2025-04-18 [d9e03864b] Make levels 1-based in
-pg_log_backend_memory_contexts()
-Author: Fujii Masao <fujii@postgresql.org>
-2025-04-21 [706cbed35] doc: Fix memory context level in pg_log_backend_memory_c
--->
-
-<listitem>
-<para>
-Change pg_backend_memory_contexts.level and pg_log_backend_memory_contexts() to be one-based (Melih Mutlu, Atsushi Torikoshi, David Rowley, Fujii Masao)
-<ulink url="&commit_baseurl;32d3ed816">&sect;</ulink>
-<ulink url="&commit_baseurl;d9e03864b">&sect;</ulink>
-<ulink url="&commit_baseurl;706cbed35">&sect;</ulink>
-</para>
-
-<para>
-These were previously zero-based.
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect2>
-
- <sect2 id="release-18-changes">
- <title>Changes</title>
-
- <para>
- Below you will find a detailed account of the changes between
- <productname>PostgreSQL</productname> 18 and the previous major
- release.
- </para>
-
- <sect3 id="release-18-server">
- <title>Server</title>
-
- <sect4 id="release-18-optimizer">
- <title>Optimizer</title>
-
- <itemizedlist>
-
-<!--
-Author: Alexander Korotkov <akorotkov@postgresql.org>
-2025-02-17 [fc069a3a6] Implement Self-Join Elimination
--->
-
-<listitem>
-<para>
-Remove some unnecessary table self-joins (Andrey Lepikhov, Alexander Kuzmenkov, Alexander Korotkov, Alena Rybakina)
-<ulink url="&commit_baseurl;fc069a3a6">&sect;</ulink>
-</para>
-
-<para>
-This optimization can be disabled using server variable enable_self_join_elimination.
-</para>
-</listitem>
-
-<!--
-Author: Alexander Korotkov <akorotkov@postgresql.org>
-2025-04-04 [c0962a113] Convert 'x IN (VALUES ...)' to 'x = ANY ...' then approp
--->
-
-<listitem>
-<para>
-Convert some 'IN (VALUES ...)' to 'x = ANY ...' for better optimizer statistics (Alena Rybakina, Andrei Lepikhov)
-<ulink url="&commit_baseurl;c0962a113">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Alexander Korotkov <akorotkov@postgresql.org>
-2024-11-24 [ae4569161] Teach bitmap path generation about transforming OR-claus
--->
-
-<listitem>
-<para>
-Allow transforming OR-clauses to arrays for faster index processing (Alexander Korotkov, Andrey Lepikhov)
-<ulink url="&commit_baseurl;ae4569161">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2024-09-10 [52c707483] Use a hash table to de-duplicate column names in ruleuti
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2024-12-19 [276279295] Convert SetOp to read its inputs as outerPlan and innerP
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2024-12-19 [8d96f57d5] Improve planner's handling of SetOp plans.
-Author: David Rowley <drowley@postgresql.org>
-2024-09-05 [908a96861] Optimize WindowAgg's use of tuplestores
--->
-
-<listitem>
-<para>
-Speed up the processing of INTERSECT, EXCEPT, window aggregates, and view column aliases (Tom Lane, David Rowley)
-<ulink url="&commit_baseurl;52c707483">&sect;</ulink>
-<ulink url="&commit_baseurl;276279295">&sect;</ulink>
-<ulink url="&commit_baseurl;8d96f57d5">&sect;</ulink>
-<ulink url="&commit_baseurl;908a96861">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Richard Guo <rguo@postgresql.org>
-2024-11-26 [a8ccf4e93] Reordering DISTINCT keys to match input path's pathkeys
--->
-
-<listitem>
-<para>
-Allow the keys of SELECT DISTINCT to be internally reordered to avoid sorting (Richard Guo)
-<ulink url="&commit_baseurl;a8ccf4e93">&sect;</ulink>
-</para>
-
-<para>
-This optimization can be disabled using enable_distinct_reordering.
-</para>
-</listitem>
-
-<!--
-Author: David Rowley <drowley@postgresql.org>
-2024-12-12 [bd10ec529] Detect redundant GROUP BY columns using UNIQUE indexes
--->
-
-<listitem>
-<para>
-Ignore GROUP BY columns that are functionally dependent on other columns (Zhang Mingli, Jian He, David Rowley)
-<ulink url="&commit_baseurl;bd10ec529">&sect;</ulink>
-</para>
-
-<para>
-If a GROUP BY clause includes all columns of a unique index, as well as other columns of the same table, those other columns are redundant and can be dropped
-from the grouping. This was already true for non-deferred primary keys.
-</para>
-</listitem>
-
-<!--
-Author: Richard Guo <rguo@postgresql.org>
-2024-10-09 [67a54b9e8] Allow pushdown of HAVING clauses with grouping sets
-Author: Richard Guo <rguo@postgresql.org>
-2024-09-10 [247dea89f] Introduce an RTE for the grouping step
-Author: Richard Guo <rguo@postgresql.org>
-2024-09-10 [f5050f795] Mark expressions nullable by grouping sets
-Author: Richard Guo <rguo@postgresql.org>
-2025-03-13 [cc5d98525] Fix incorrect handling of subquery pullup
--->
-
-<listitem>
-<para>
-Allow some HAVING clauses on GROUPING SETS to be pushed to WHERE clauses (Richard Guo)
-<ulink url="&commit_baseurl;67a54b9e8">&sect;</ulink>
-<ulink url="&commit_baseurl;247dea89f">&sect;</ulink>
-<ulink url="&commit_baseurl;f5050f795">&sect;</ulink>
-<ulink url="&commit_baseurl;cc5d98525">&sect;</ulink>
-</para>
-
-<para>
-This allows earlier row filtering. This release also fixes some GROUPING SETS queries that used to return incorrect results.
-</para>
-</listitem>
-
-<!--
-Author: David Rowley <drowley@postgresql.org>
-2024-07-09 [036bdcec9] Teach planner how to estimate rows for timestamp generat
-Author: Dean Rasheed <dean.a.rasheed@gmail.com>
-2024-12-02 [97173536e] Add a planner support function for numeric generate_seri
--->
-
-<listitem>
-<para>
-Improve row estimates for generate_series() using numeric and timestamp values (David Rowley, Song Jinzhou)
-<ulink url="&commit_baseurl;036bdcec9">&sect;</ulink>
-<ulink url="&commit_baseurl;97173536e">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Richard Guo <rguo@postgresql.org>
-2024-07-05 [aa86129e1] Support "Right Semi Join" plan shapes
--->
-
-<listitem>
-<para>
-Allow the optimizer to use "Right Semi Join" plans (Richard Guo)
-<ulink url="&commit_baseurl;aa86129e1">&sect;</ulink>
-</para>
-
-<para>
-Semi-joins are used when one only needs to determine whether at least one match exists.
-</para>
-</listitem>
-
-<!--
-Author: Richard Guo <rguo@postgresql.org>
-2024-10-09 [828e94c9d] Consider explicit incremental sort for mergejoins
--->
-
-<listitem>
-<para>
-Allow merge joins to use incremental sorts (Richard Guo)
-<ulink url="&commit_baseurl;828e94c9d">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Amit Langote <amitlan@postgresql.org>
-2025-04-04 [88f55bc97] Make derived clause lookup in EquivalenceClass more effi
-Author: David Rowley <drowley@postgresql.org>
-2025-04-08 [d69d45a5a] Speedup child EquivalenceMember lookup in planner
--->
-
-<listitem>
-<para>
-Improve the efficiency of planning queries accessing many partitions (Ashutosh Bapat, Yuya Watari, David Rowley)
-<ulink url="&commit_baseurl;88f55bc97">&sect;</ulink>
-<ulink url="&commit_baseurl;d69d45a5a">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Richard Guo <rguo@postgresql.org>
-2024-07-30 [9b282a935] Fix partitionwise join with partially-redundant join cla
-Author: Richard Guo <rguo@postgresql.org>
-2024-07-29 [513f4472a] Reduce memory used by partitionwise joins
--->
-
-<listitem>
-<para>
-Allow partitionwise joins in more cases, and reduce their memory usage (Richard Guo, Tom Lane, Ashutosh Bapat)
-<ulink url="&commit_baseurl;9b282a935">&sect;</ulink>
-<ulink url="&commit_baseurl;513f4472a">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Alexander Korotkov <akorotkov@postgresql.org>
-2025-03-10 [fae535da0] Teach Append to consider tuple_fraction when accumulatin
--->
-
-<listitem>
-<para>
-Improve cost estimates of partition queries (Nikita Malakhov, Andrei Lepikhov)
-<ulink url="&commit_baseurl;fae535da0">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2025-04-02 [0dca5d68d] Change SQL-language functions to use the plan cache.
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2025-04-17 [09b07c295] Minor performance improvement for SQL-language functions
--->
-
-<listitem>
-<para>
-Improve SQL-language function plan caching (Alexander Pyhalov, Tom Lane)
-<ulink url="&commit_baseurl;0dca5d68d">&sect;</ulink>
-<ulink url="&commit_baseurl;09b07c295">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Robert Haas <rhaas@postgresql.org>
-2024-08-21 [e22253467] Treat number of disabled nodes in a path as a separate c
--->
-
-<listitem>
-<para>
-Improve handling of disabled optimizer features (Robert Haas)
-<ulink url="&commit_baseurl;e22253467">&sect;</ulink>
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect4>
-
- <sect4 id="release-18-indexes">
- <title>Indexes</title>
-
- <itemizedlist>
-
-<!--
-Author: Peter Geoghegan <pg@bowt.ie>
-2025-04-04 [92fe23d93] Add nbtree skip scan optimization.
-Author: Peter Geoghegan <pg@bowt.ie>
-2025-04-04 [8a510275d] Further optimize nbtree search scan key comparisons.
--->
-
-<listitem>
-<para>
-Allow skip scans of btree indexes (Peter Geoghegan)
-<ulink url="&commit_baseurl;92fe23d93">&sect;</ulink>
-<ulink url="&commit_baseurl;8a510275d">&sect;</ulink>
-</para>
-
-<para>
-This is effective if the leading index columns not referenced by the query contain few unique values.
-</para>
-</listitem>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2025-03-18 [f278e1fe3] Allow non-btree unique indexes for partition keys
-Author: Peter Eisentraut <peter@eisentraut.org>
-2025-03-18 [9d6db8bec] Allow non-btree unique indexes for matviews
--->
-
-<listitem>
-<para>
-Allow non-btree unique indexes to be used as partition keys and in materialized views (Mark Dilger)
-<ulink url="&commit_baseurl;f278e1fe3">&sect;</ulink>
-<ulink url="&commit_baseurl;9d6db8bec">&sect;</ulink>
-</para>
-
-<para>
-The index type must still support equality.
-</para>
-</listitem>
-
-<!--
-Author: Tomas Vondra <tomas.vondra@postgresql.org>
-2025-03-03 [8492feb98] Allow parallel CREATE INDEX for GIN indexes
--->
-
-<listitem>
-<para>
-Allow GIN indexes to be created in parallel (Tomas Vondra, Matthias van de Meent)
-<ulink url="&commit_baseurl;8492feb98">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Heikki Linnakangas <heikki.linnakangas@iki.fi>
-2025-04-02 [e9e7b6604] Add GiST and btree sortsupport routines for range types
--->
-
-<listitem>
-<para>
-Allow values to be sorted to speed rangetype GiST and btree index builds (Bernd Helmle)
-<ulink url="&commit_baseurl;e9e7b6604">&sect;</ulink>
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect4>
-
- <sect4 id="release-18-performance">
- <title>General Performance</title>
-
- <itemizedlist>
-
-<!--
-Author: Andres Freund <andres@anarazel.de>
-2025-03-17 [02844012b] aio: Basic subsystem initialization
-Author: Andres Freund <andres@anarazel.de>
-2025-03-17 [da7226993] aio: Add core asynchronous I/O infrastructure
-Author: Andres Freund <andres@anarazel.de>
-2025-03-18 [55b454d0e] aio: Infrastructure for io_method=worker
-Author: Andres Freund <andres@anarazel.de>
-2025-03-18 [247ce06b8] aio: Add io_method=worker
-Author: Thomas Munro <tmunro@postgresql.org>
-2025-03-19 [10f664684] Introduce io_max_combine_limit.
-Author: Thomas Munro <tmunro@postgresql.org>
-2025-03-19 [06fb5612c] Increase io_combine_limit range to 1MB.
-Author: Andres Freund <andres@anarazel.de>
-2025-03-26 [c325a7633] aio: Add io_method=io_uring
-Author: Andres Freund <andres@anarazel.de>
-2025-03-29 [50cb7505b] aio: Implement support for reads in smgr/md/fd
-Author: Andres Freund <andres@anarazel.de>
-2025-03-30 [047cba7fa] bufmgr: Implement AIO read support
-Author: Andres Freund <andres@anarazel.de>
-2025-03-30 [12ce89fd0] bufmgr: Use AIO in StartReadBuffers()
-Author: Andres Freund <andres@anarazel.de>
-2025-03-30 [2a5e709e7] Enable IO concurrency on all systems
--->
-
-<listitem>
-<para>
-Add an asynchronous I/O subsystem (Andres Freund, Thomas Munro, Nazir Bilal Yavuz, Melanie Plageman)
-<ulink url="&commit_baseurl;02844012b">&sect;</ulink>
-<ulink url="&commit_baseurl;da7226993">&sect;</ulink>
-<ulink url="&commit_baseurl;55b454d0e">&sect;</ulink>
-<ulink url="&commit_baseurl;247ce06b8">&sect;</ulink>
-<ulink url="&commit_baseurl;10f664684">&sect;</ulink>
-<ulink url="&commit_baseurl;06fb5612c">&sect;</ulink>
-<ulink url="&commit_baseurl;c325a7633">&sect;</ulink>
-<ulink url="&commit_baseurl;50cb7505b">&sect;</ulink>
-<ulink url="&commit_baseurl;047cba7fa">&sect;</ulink>
-<ulink url="&commit_baseurl;12ce89fd0">&sect;</ulink>
-<ulink url="&commit_baseurl;2a5e709e7">&sect;</ulink>
-</para>
-
-<para>
-This is enabled by server variable io_method, with server variables io_combine_limit and io_max_combine_limit added to control it. This also enables
-effective_io_concurrency and maintenance_io_concurrency values greater than zero for systems without fadvise() support. The new system view pg_aios shows the file handles being used
-for asynchronous I/O.
-</para>
-</listitem>
-
-<!--
-Author: Tomas Vondra <tomas.vondra@postgresql.org>
-2024-09-21 [c4d5cb71d] Increase the number of fast-path lock slots
--->
-
-<listitem>
-<para>
-Improve the locking performance of queries that access many relations (Tomas Vondra)
-<ulink url="&commit_baseurl;c4d5cb71d">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Amit Langote <amitlan@postgresql.org>
-2025-01-30 [bb3ec16e1] Move PartitionPruneInfo out of plan nodes into PlannedSt
-Author: Amit Langote <amitlan@postgresql.org>
-2025-01-31 [d47cbf474] Perform runtime initial pruning outside ExecInitNode()
-Author: Amit Langote <amitlan@postgresql.org>
-2025-02-07 [cbc127917] Track unpruned relids to avoid processing pruned relatio
-Author: Amit Langote <amitlan@postgresql.org>
-2025-02-20 [525392d57] Don't lock partitions pruned by initial pruning
--->
-
-<listitem>
-<para>
-Avoid the locking of pruned partitions during execution (Amit Langote)
-<ulink url="&commit_baseurl;bb3ec16e1">&sect;</ulink>
-<ulink url="&commit_baseurl;d47cbf474">&sect;</ulink>
-<ulink url="&commit_baseurl;cbc127917">&sect;</ulink>
-<ulink url="&commit_baseurl;525392d57">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: David Rowley <drowley@postgresql.org>
-2024-08-20 [adf97c156] Speed up Hash Join by making ExprStates support hashing
-Author: David Rowley <drowley@postgresql.org>
-2024-12-11 [0f5738202] Use ExprStates for hashing in GROUP BY and SubPlans
-Author: Jeff Davis <jdavis@postgresql.org>
-2025-03-24 [4d143509c] Create accessor functions for TupleHashEntry.
-Author: Jeff Davis <jdavis@postgresql.org>
-2025-03-24 [a0942f441] Add ExecCopySlotMinimalTupleExtra().
-Author: Jeff Davis <jdavis@postgresql.org>
-2025-03-24 [626df47ad] Remove 'additional' pointer from TupleHashEntryData.
--->
-
-<listitem>
-<para>
-Improve the performance and reduce memory usage of hash joins and GROUP BY (David Rowley, Jeff Davis)
-<ulink url="&commit_baseurl;adf97c156">&sect;</ulink>
-<ulink url="&commit_baseurl;0f5738202">&sect;</ulink>
-<ulink url="&commit_baseurl;4d143509c">&sect;</ulink>
-<ulink url="&commit_baseurl;a0942f441">&sect;</ulink>
-<ulink url="&commit_baseurl;626df47ad">&sect;</ulink>
-</para>
-
-<para>
-This also improves hash set operations used by EXCEPT, and hash lookups of subplan values.
-</para>
-</listitem>
-
-<!--
-Author: Melanie Plageman <melanieplageman@gmail.com>
-2025-02-11 [052026c9b] Eagerly scan all-visible pages to amortize aggressive va
-Author: Melanie Plageman <melanieplageman@gmail.com>
-2025-03-03 [06eae9e62] Trigger more frequent autovacuums with relallfrozen
--->
-
-<listitem>
-<para>
-Allow normal vacuums to freeze some pages, even though they are all-visible (Melanie Plageman)
-<ulink url="&commit_baseurl;052026c9b">&sect;</ulink>
-<ulink url="&commit_baseurl;06eae9e62">&sect;</ulink>
-</para>
-
-<para>
-This reduces the overhead of later full-relation freezing. The aggressiveness of this can be controlled by server variable and per-table setting vacuum_max_eager_freeze_failure_rate.
-Previously vacuum never processed all-visible pages until freezing was required.
-</para>
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2025-03-20 [0164a0f9e] Add vacuum_truncate configuration parameter.
--->
-
-<listitem>
-<para>
-Add server variable vacuum_truncate to control file truncation during VACUUM (Nathan Bossart, Gurjeet Singh)
-<ulink url="&commit_baseurl;0164a0f9e">&sect;</ulink>
-</para>
-
-<para>
-A storage-level parameter with the same name and behavior already existed.
-</para>
-</listitem>
-
-<!--
-Author: Melanie Plageman <melanieplageman@gmail.com>
-2025-03-12 [ff79b5b2a] Increase default effective_io_concurrency to 16
-Author: Melanie Plageman <melanieplageman@gmail.com>
-2025-03-18 [cc6be07eb] Increase default maintenance_io_concurrency to 16
--->
-
-<listitem>
-<para>
-Increase the default values of server variables effective_io_concurrency and maintenance_io_concurrency to 16 (Melanie Plageman)
-<ulink url="&commit_baseurl;ff79b5b2a">&sect;</ulink>
-<ulink url="&commit_baseurl;cc6be07eb">&sect;</ulink>
-</para>
-
-<para>
-This more accurately reflects modern hardware.
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect4>
-
- <sect4 id="release-18-monitoring">
- <title>Monitoring</title>
-
- <itemizedlist>
-
-<!--
-Author: Melanie Plageman <melanieplageman@gmail.com>
-2025-03-12 [9219093ca] Modularize log_connections output
-Author: Melanie Plageman <melanieplageman@gmail.com>
-2025-03-12 [18cd15e70] Add connection establishment duration logging
--->
-
-<listitem>
-<para>
-Increase the logging granularity of server variable log_connections (Melanie Plageman)
-<ulink url="&commit_baseurl;9219093ca">&sect;</ulink>
-<ulink url="&commit_baseurl;18cd15e70">&sect;</ulink>
-</para>
-
-<para>
-This server variable was previously only boolean; those boolean values are still accepted.
-</para>
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2025-04-07 [3516ea768] Add local-address escape "%L" to log_line_prefix.
--->
-
-<listitem>
-<para>
-Add log_line_prefix escape "%L" to output the local (server) IP address (Greg Sabino Mullane)
-<ulink url="&commit_baseurl;3516ea768">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Fujii Masao <fujii@postgresql.org>
-2025-03-14 [6d376c3b0] Add GUC option to log lock acquisition failures.
--->
-
-<listitem>
-<para>
-Add server variable log_lock_failure to log lock acquisition failures (Yuki Seino)
-<ulink url="&commit_baseurl;6d376c3b0">&sect;</ulink>
-</para>
-
-<para>
-Specifically, it reports SELECT ... NOWAIT lock failures.
-</para>
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2025-01-28 [30a6ed0ce] Track per-relation cumulative time spent in [auto]vacuum
--->
-
-<listitem>
-<para>
-Modify pg_stat_all_tables and its variants to report the time spent in vacuum, analyze, and their automatic variants (Sami Imseih)
-<ulink url="&commit_baseurl;30a6ed0ce">&sect;</ulink>
-</para>
-
-<para>
-The new columns are total_vacuum_time, total_autovacuum_time, total_analyze_time, and total_autoanalyze_time.
-</para>
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2025-02-11 [bb8dff999] Add cost-based vacuum delay time to progress views.
-Author: Nathan Bossart <nathan@postgresql.org>
-2025-02-14 [7720082ae] Add delay time to VACUUM/ANALYZE (VERBOSE) and autovacuu
--->
-
-<listitem>
-<para>
-Add delay time reporting to VACUUM and ANALYZE (Bertrand Drouvot, Nathan Bossart)
-<ulink url="&commit_baseurl;bb8dff999">&sect;</ulink>
-<ulink url="&commit_baseurl;7720082ae">&sect;</ulink>
-</para>
-
-<para>
-This information appears in the autovacuum logs, the system views pg_stat_progress_vacuum and pg_stat_progress_analyze, and the output of VACUUM and ANALYZE when in VERBOSE
-mode; tracking must be enabled with the server variable track_cost_delay_timing.
-</para>
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2024-12-19 [9aea73fc6] Add backend-level statistics to pgstats
-Author: Michael Paquier <michael@paquier.xyz>
-2025-03-03 [3f1db99bf] Handle auxiliary processes in SQL functions of backend s
--->
-
-<listitem>
-<para>
-Add per-backend I/O statistics reporting (Bertrand Drouvot)
-<ulink url="&commit_baseurl;9aea73fc6">&sect;</ulink>
-<ulink url="&commit_baseurl;3f1db99bf">&sect;</ulink>
-</para>
-
-<para>
-The statistics are accessed via pg_stat_get_backend_io(). Per-backend I/O statistics can be cleared via pg_stat_reset_backend_stats().
-</para>
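-
-<para>
-A usage sketch, assuming both functions take the target backend's process
-ID (12345 is a hypothetical PID):
-<programlisting>
-SELECT * FROM pg_stat_get_backend_io(12345);  -- one backend's I/O counters
-SELECT pg_stat_reset_backend_stats(12345);    -- clear that backend's stats
-</programlisting>
-</para>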
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2025-01-14 [f92c854cf] Make pg_stat_io count IOs as bytes instead of blocks for
--->
-
-<listitem>
-<para>
-Add pg_stat_io columns to report I/O activity in bytes (Nazir Bilal Yavuz)
-<ulink url="&commit_baseurl;f92c854cf">&sect;</ulink>
-</para>
-
-<para>
-The new columns are read_bytes, write_bytes, and extend_bytes. The op_bytes column, which always equaled BLCKSZ, has been removed.
-</para>
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2025-02-04 [a051e71e2] Add data for WAL in pg_stat_io and backend statistics
-Author: Michael Paquier <michael@paquier.xyz>
-2025-02-20 [4538bd3f1] doc: Add details about object "wal" in pg_stat_io
-Author: Michael Paquier <michael@paquier.xyz>
-2025-03-06 [7f7f324eb] Add more monitoring data for WAL writes in the WAL recei
--->
-
-<listitem>
-<para>
-Add WAL I/O activity rows to pg_stat_io (Nazir Bilal Yavuz, Bertrand Drouvot, Michael Paquier)
-<ulink url="&commit_baseurl;a051e71e2">&sect;</ulink>
-<ulink url="&commit_baseurl;4538bd3f1">&sect;</ulink>
-<ulink url="&commit_baseurl;7f7f324eb">&sect;</ulink>
-</para>
-
-<para>
-This includes WAL receiver activity and a wait event for such writes.
-</para>
-
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2025-02-26 [6c349d83b] Re-add GUC track_wal_io_timing
--->
-
-<listitem>
-<para>
-Change server variable track_wal_io_timing to control tracking WAL timing in pg_stat_io instead of pg_stat_wal (Bertrand Drouvot)
-<ulink url="&commit_baseurl;6c349d83b">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2025-02-24 [2421e9a51] Remove read/sync fields from pg_stat_wal and GUC track_w
-Author: Michael Paquier <michael@paquier.xyz>
-2025-02-26 [6c349d83b] Re-add GUC track_wal_io_timing
--->
-
-<listitem>
-<para>
-Remove read/sync columns from pg_stat_wal (Bertrand Drouvot)
-<ulink url="&commit_baseurl;2421e9a51">&sect;</ulink>
-<ulink url="&commit_baseurl;6c349d83b">&sect;</ulink>
-</para>
-
-<para>
-This removes columns wal_write, wal_sync, wal_write_time, and wal_sync_time.
-</para>
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2025-03-11 [76def4cdd] Add WAL data to backend statistics
--->
-
-<listitem>
-<para>
-Add function pg_stat_get_backend_wal() to return per-backend WAL statistics (Bertrand Drouvot)
-<ulink url="&commit_baseurl;76def4cdd">&sect;</ulink>
-</para>
-
-<para>
-Per-backend WAL statistics can be cleared via pg_stat_reset_backend_stats().
-</para>
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2024-10-11 [4e1fad378] Add pg_ls_summariesdir().
--->
-
-<listitem>
-<para>
-Add function pg_ls_summariesdir() to specifically list the contents of PGDATA/pg_wal/summaries (Yushi Ogiwara)
-<ulink url="&commit_baseurl;4e1fad378">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Fujii Masao <fujii@postgresql.org>
-2024-09-30 [559efce1d] Add num_done counter to the pg_stat_checkpointer view.
--->
-
-<listitem>
-<para>
-Add column pg_stat_checkpointer.num_done to report the number of completed checkpoints (Anton A. Melnikov)
-<ulink url="&commit_baseurl;559efce1d">&sect;</ulink>
-</para>
-
-<para>
-Columns num_timed and num_requested count both completed and skipped checkpoints.
-</para>
-</listitem>
-
-<!--
-Author: Fujii Masao <fujii@postgresql.org>
-2024-10-02 [17cc5f666] Fix inconsistent reporting of checkpointer stats.
--->
-
-<listitem>
-<para>
-Add column pg_stat_checkpointer.slru_written to report SLRU buffers written (Nitin Jadhav)
-<ulink url="&commit_baseurl;17cc5f666">&sect;</ulink>
-</para>
-
-<para>
-Also, modify the checkpoint server log message to report separate shared buffer and SLRU buffer values.
-</para>
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2024-11-11 [e7a9496de] Add two attributes to pg_stat_database for parallel work
--->
-
-<listitem>
-<para>
-Add columns to pg_stat_database to report parallel worker activity (Benoit Lobréau)
-<ulink url="&commit_baseurl;e7a9496de">&sect;</ulink>
-</para>
-
-<para>
-The new columns are parallel_workers_to_launch and parallel_workers_launched.
-</para>
-</listitem>
-
-<!--
-Author: Álvaro Herrera <alvherre@alvh.no-ip.org>
-2025-03-18 [62d712ecf] Introduce squashing of constant lists in query jumbling
-Author: Álvaro Herrera <alvherre@alvh.no-ip.org>
-2025-03-27 [9fbd53dea] Remove the query_id_squash_values GUC
--->
-
-<listitem>
-<para>
-Have query jumbling of arrays consider only the first and last array elements (Dmitry Dolgov, Sami Imseih)
-<ulink url="&commit_baseurl;62d712ecf">&sect;</ulink>
-<ulink url="&commit_baseurl;9fbd53dea">&sect;</ulink>
-</para>
-
-<para>
-Jumbling is used by pg_stat_statements.
-</para>
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2025-03-26 [787514b30] Use relation name instead of OID in query jumbling for R
--->
-
-<listitem>
-<para>
-Adjust query jumbling to group together queries using the same relation name (Michael Paquier, Sami Imseih)
-<ulink url="&commit_baseurl;787514b30">&sect;</ulink>
-</para>
-
-<para>
-This happens even when the identically named tables are in different schemas and have different column names.
-</para>
-</listitem>
-
-<!--
-Author: Daniel Gustafsson <dgustafsson@postgresql.org>
-2025-04-08 [042a66291] Add function to get memory context stats for processes
-Author: Daniel Gustafsson <dgustafsson@postgresql.org>
-2025-04-08 [c57971034] Rename argument in pg_get_process_memory_contexts().
--->
-
-<listitem>
-<para>
-Add function pg_get_process_memory_contexts() to report process memory context statistics (Rahila Syed)
-<ulink url="&commit_baseurl;042a66291">&sect;</ulink>
-<ulink url="&commit_baseurl;c57971034">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: David Rowley <drowley@postgresql.org>
-2024-07-01 [12227a1d5] Add context type field to pg_backend_memory_contexts
--->
-
-<listitem>
-<para>
-Add column pg_backend_memory_contexts.type to report the type of memory context (David Rowley)
-<ulink url="&commit_baseurl;12227a1d5">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: David Rowley <drowley@postgresql.org>
-2024-07-25 [32d3ed816] Add path column to pg_backend_memory_contexts view
--->
-
-<listitem>
-<para>
-Add column pg_backend_memory_contexts.path to show memory context parents (Melih Mutlu)
-<ulink url="&commit_baseurl;32d3ed816">&sect;</ulink>
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect4>
-
- <sect4 id="release-18-privileges">
- <title>Privileges</title>
-
- <itemizedlist>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2024-07-04 [4564f1ceb] Add pg_get_acl() to get the ACL for a database object
-Author: Michael Paquier <michael@paquier.xyz>
-2024-07-10 [d898665bf] Extend pg_get_acl() to handle sub-object IDs
--->
-
-<listitem>
-<para>
-Add function pg_get_acl() to retrieve database access control details (Joel Jacobson)
-<ulink url="&commit_baseurl;4564f1ceb">&sect;</ulink>
-<ulink url="&commit_baseurl;d898665bf">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Fujii Masao <fujii@postgresql.org>
-2024-09-12 [4eada203a] Add has_largeobject_privilege function.
--->
-
-<listitem>
-<para>
-Add function has_largeobject_privilege() to check large object privileges (Yugo Nagata)
-<ulink url="&commit_baseurl;4eada203a">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Fujii Masao <fujii@postgresql.org>
-2025-04-04 [0d6c47766] Extend ALTER DEFAULT PRIVILEGES to define default privil
--->
-
-<listitem>
-<para>
-Allow ALTER DEFAULT PRIVILEGES to define large object default privileges (Takatsuka Haruka, Yugo Nagata, Laurenz Albe)
-<ulink url="&commit_baseurl;0d6c47766">&sect;</ulink>
-</para>
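-
-<para>
-A sketch of the new syntax ("reader" is a hypothetical role):
-<programlisting>
-ALTER DEFAULT PRIVILEGES GRANT SELECT ON LARGE OBJECTS TO reader;
-</programlisting>
-</para>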
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2024-07-09 [ccd38024b] Introduce pg_signal_autovacuum_worker.
--->
-
-<listitem>
-<para>
-Add predefined role pg_signal_autovacuum_worker (Kirill Reshke)
-<ulink url="&commit_baseurl;ccd38024b">&sect;</ulink>
-</para>
-
-<para>
-This allows sending signals to autovacuum workers.
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect4>
-
- <sect4 id="release-18-server-config">
- <title>Server Configuration</title>
-
- <itemizedlist>
-
-<!--
-Author: Daniel Gustafsson <dgustafsson@postgresql.org>
-2025-02-20 [b3f0be788] Add support for OAUTHBEARER SASL mechanism
--->
-
-<listitem>
-<para>
-Add support for the OAuth authentication method (Jacob Champion, Daniel Gustafsson, Thomas Munro)
-<ulink url="&commit_baseurl;b3f0be788">&sect;</ulink>
-</para>
-
-<para>
-This adds an "oauth" authentication method to pg_hba.conf, libpq OAuth options, a server variable oauth_validator_libraries to load token validation libraries, and
-a configure flag --with-libcurl to add the required compile-time libraries.
-</para>
-</listitem>
-
-<!--
-Author: Daniel Gustafsson <dgustafsson@postgresql.org>
-2024-10-24 [45188c2ea] Support configuring TLSv1.3 cipher suites
--->
-
-<listitem>
-<para>
-Add server variable ssl_tls13_ciphers to allow specification of multiple colon-separated TLSv1.3 cipher suites (Erica Zhang, Daniel Gustafsson)
-<ulink url="&commit_baseurl;45188c2ea">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Daniel Gustafsson <dgustafsson@postgresql.org>
-2025-03-18 [daa02c6bd] Add X25519 to the default set of curves
--->
-
-<listitem>
-<para>
-Change the default of server variable ssl_groups to include elliptic curve X25519 (Daniel Gustafsson, Jacob Champion)
-<ulink url="&commit_baseurl;daa02c6bd">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Daniel Gustafsson <dgustafsson@postgresql.org>
-2024-10-24 [3d1ef3a15] Support configuring multiple ECDH curves
--->
-
-<listitem>
-<para>
-Rename server variable ssl_ecdh_curve to ssl_groups and allow multiple colon-separated ECDH curves to be specified (Erica Zhang, Daniel Gustafsson)
-<ulink url="&commit_baseurl;3d1ef3a15">&sect;</ulink>
-</para>
-
-<para>
-The previous name still works.
-</para>
-</listitem>
-
-<!--
-Author: Daniel Gustafsson <dgustafsson@postgresql.org>
-2025-01-24 [924d89a35] pgcrypto: Add function to check FIPS mode
--->
-
-<listitem>
-<para>
-Add function pg_check_fipsmode() to report the server's FIPS mode (Daniel Gustafsson)
-<ulink url="&commit_baseurl;924d89a35">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Heikki Linnakangas <heikki.linnakangas@iki.fi>
-2025-04-02 [a460251f0] Make cancel request keys longer
-Author: Heikki Linnakangas <heikki.linnakangas@iki.fi>
-2024-07-29 [9d9b9d46f] Move cancel key generation to after forking the backend
--->
-
-<listitem>
-<para>
-Make cancel request keys 256 bits (Heikki Linnakangas, Jelte Fennema-Nio)
-<ulink url="&commit_baseurl;a460251f0">&sect;</ulink>
-<ulink url="&commit_baseurl;9d9b9d46f">&sect;</ulink>
-</para>
-
-<para>
-This is only possible when the server and client support wire protocol version 3.2, introduced in this release.
-</para>
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2025-01-06 [c758119e5] Allow changing autovacuum_max_workers without restarting
--->
-
-<listitem>
-<para>
-Add server variable autovacuum_worker_slots to specify the maximum number of autovacuum worker processes (Nathan Bossart)
-<ulink url="&commit_baseurl;c758119e5">&sect;</ulink>
-</para>
-
-<para>
-With this variable set, autovacuum_max_workers can be adjusted at runtime up to this maximum without a server restart.
-</para>
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2025-02-05 [306dc520b] Introduce autovacuum_vacuum_max_threshold.
--->
-
-<listitem>
-<para>
-Allow specifying a fixed number of dead tuples that will trigger an autovacuum (Nathan Bossart, Frédéric Yhuel)
-<ulink url="&commit_baseurl;306dc520b">&sect;</ulink>
-</para>
-
-<para>
-The server variable is autovacuum_vacuum_max_threshold; percentage-based thresholds are still used for triggering.
-</para>
-</listitem>
-
-<!--
-Author: Andres Freund <andres@anarazel.de>
-2025-03-24 [adb5f85fa] Redefine max_files_per_process to control additionally o
--->
-
-<listitem>
-<para>
-Change server variable max_files_per_process to limit only files opened by a backend (Andres Freund)
-<ulink url="&commit_baseurl;adb5f85fa">&sect;</ulink>
-</para>
-
-<para>
-Previously files opened by the postmaster were also counted toward this limit.
-</para>
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2024-07-26 [0dcaea569] Introduce num_os_semaphores GUC.
--->
-
-<listitem>
-<para>
-Add server variable num_os_semaphores to report the required number of semaphores (Nathan Bossart)
-<ulink url="&commit_baseurl;0dcaea569">&sect;</ulink>
-</para>
-
-<para>
-This is useful for operating system configuration.
-</para>
-</listitem>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2025-03-19 [4f7f7b037] extension_control_path
--->
-
-<listitem>
-<para>
-Add server variable extension_control_path to specify the location of extension control files (Peter Eisentraut, Matheus Alcantara)
-<ulink url="&commit_baseurl;4f7f7b037">&sect;</ulink>
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect4>
-
- <sect4 id="release-18-replication">
- <title>Streaming Replication and Recovery</title>
-
- <itemizedlist>
-
-<!--
-Author: Amit Kapila <akapila@postgresql.org>
-2025-02-19 [ac0e33136] Invalidate inactive replication slots.
--->
-
-<listitem>
-<para>
-Allow inactive replication slots to be automatically invalidated using the server variable idle_replication_slot_timeout (Nisha Moond, Bharath Rupireddy)
-<ulink url="&commit_baseurl;ac0e33136">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Masahiko Sawada <msawada@postgresql.org>
-2025-03-21 [04ff636cb] Add GUC option to control maximum active replication ori
--->
-
-<listitem>
-<para>
-Add server variable max_active_replication_origins to control the maximum number of active replication origins (Euler Taveira)
-<ulink url="&commit_baseurl;04ff636cb">&sect;</ulink>
-</para>
-
-<para>
-This was previously controlled by max_replication_slots, but this new setting allows a higher origin count in cases where fewer slots are required.
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect4>
-
- <sect4 id="release-18-logical">
- <title><link linkend="logical-replication">Logical Replication</link></title>
-
- <itemizedlist>
-
-<!--
-Author: Amit Kapila <akapila@postgresql.org>
-2024-10-30 [745217a05] Replicate generated columns when specified in the column
-Author: Amit Kapila <akapila@postgresql.org>
-2024-11-07 [7054186c4] Replicate generated columns when 'publish_generated_colu
-Author: Amit Kapila <akapila@postgresql.org>
-2024-12-04 [87ce27de6] Ensure stored generated columns must be published when r
-Author: Amit Kapila <akapila@postgresql.org>
-2025-01-30 [6252b1eaf] Doc: Generated column replication.
--->
-
-<listitem>
-<para>
-Allow the values of generated columns to be logically replicated (Shubham Khanna, Vignesh C, Zhijie Hou, Shlok Kyal, Peter Smith)
-<ulink url="&commit_baseurl;745217a05">&sect;</ulink>
-<ulink url="&commit_baseurl;7054186c4">&sect;</ulink>
-<ulink url="&commit_baseurl;87ce27de6">&sect;</ulink>
-<ulink url="&commit_baseurl;6252b1eaf">&sect;</ulink>
-</para>
-
-<para>
-If the publication specifies a column list, all specified columns, generated and non-generated, are published. Without a column list, the publication option
-publish_generated_columns controls whether generated columns are published. Previously generated columns were not replicated and the subscriber had to compute
-the values if possible; this is particularly useful for non-Postgres subscribers, which cannot do so.
-</para>
-</listitem>
-
-<!--
-Author: Amit Kapila <akapila@postgresql.org>
-2024-10-28 [1bf1140be] Change the default value of the streaming option to 'par
--->
-
-<listitem>
-<para>
-Change the default CREATE SUBSCRIPTION streaming option from "off" to "parallel" (Hayato Kuroda, Masahiko Sawada, Peter Smith, Amit Kapila)
-<ulink url="&commit_baseurl;1bf1140be">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Amit Kapila <akapila@postgresql.org>
-2024-07-24 [1462aad2e] Allow altering of two_phase option of a SUBSCRIPTION.
-Author: Amit Kapila <akapila@postgresql.org>
-2025-04-03 [4868c96bc] Fix slot synchronization for two_phase enabled slots.
--->
-
-<listitem>
-<para>
-Allow ALTER SUBSCRIPTION to change the replication slot's two-phase commit behavior (Hayato Kuroda, Ajin Cherian, Amit Kapila, Zhijie Hou)
-<ulink url="&commit_baseurl;1462aad2e">&sect;</ulink>
-<ulink url="&commit_baseurl;4868c96bc">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Amit Kapila <akapila@postgresql.org>
-2024-08-20 [9758174e2] Log the conflicts while applying changes in logical repl
-Author: Amit Kapila <akapila@postgresql.org>
-2024-08-22 [edcb71258] Doc: explain the log format of logical replication confl
-Author: Amit Kapila <akapila@postgresql.org>
-2024-08-29 [640178c92] Rename the conflict types for the origin differ cases.
-Author: Amit Kapila <akapila@postgresql.org>
-2024-09-04 [6c2b5edec] Collect statistics about conflicts in logical replicatio
-Author: Amit Kapila <akapila@postgresql.org>
-2025-03-24 [73eba5004] Detect and Log multiple_unique_conflicts type conflict.
--->
-
-<listitem>
-<para>
-Log conflicts while applying logical replication changes (Zhijie Hou, Nisha Moond)
-<ulink url="&commit_baseurl;9758174e2">&sect;</ulink>
-<ulink url="&commit_baseurl;edcb71258">&sect;</ulink>
-<ulink url="&commit_baseurl;640178c92">&sect;</ulink>
-<ulink url="&commit_baseurl;6c2b5edec">&sect;</ulink>
-<ulink url="&commit_baseurl;73eba5004">&sect;</ulink>
-</para>
-
-<para>
-Conflict counts are also reported in new columns of pg_stat_subscription_stats.
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect4>
-
- </sect3>
-
- <sect3 id="release-18-utility">
- <title>Utility Commands</title>
-
- <itemizedlist>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2025-02-07 [83ea6c540] Virtual generated columns
-Author: Peter Eisentraut <peter@eisentraut.org>
-2025-03-28 [cdc168ad4] Add support for not-null constraints on virtual generate
-Author: Richard Guo <rguo@postgresql.org>
-2025-02-25 [1e4351af3] Expand virtual generated columns in the planner
--->
-
-<listitem>
-<para>
-Allow generated columns to be virtual, and make them the default (Peter Eisentraut, Jian He, Richard Guo, Dean Rasheed)
-<ulink url="&commit_baseurl;83ea6c540">&sect;</ulink>
-<ulink url="&commit_baseurl;cdc168ad4">&sect;</ulink>
-<ulink url="&commit_baseurl;1e4351af3">&sect;</ulink>
-</para>
-
-<para>
-Virtual generated columns generate their values when the columns are read, not written. The write behavior can still be specified via the STORED option.
-</para>
-</listitem>
-
-<!--
-Author: Dean Rasheed <dean.a.rasheed@gmail.com>
-2025-01-16 [80feb727c] Add OLD/NEW support to RETURNING in DML queries.
--->
-
-<listitem>
-<para>
-Add OLD/NEW support to RETURNING in DML queries (Dean Rasheed)
-<ulink url="&commit_baseurl;80feb727c">&sect;</ulink>
-</para>
-
-<para>
-Previously RETURNING only returned new values for INSERT and UPDATE, and old values for DELETE; MERGE would return the appropriate value for the internal query executed. This new syntax
-allows the RETURNING list of INSERT/UPDATE/DELETE/MERGE to explicitly return old and new values by using the special aliases "old" and "new". These aliases can be renamed to
-avoid identifier conflicts.
-</para>
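-
-<para>
-A sketch against a hypothetical table "t":
-<programlisting>
-UPDATE t SET x = x + 1 RETURNING old.x AS before, new.x AS after;
--- rename the aliases to avoid identifier conflicts
-UPDATE t SET x = x + 1 RETURNING WITH (OLD AS o, NEW AS n) o.x, n.x;
-</programlisting>
-</para>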
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2025-02-19 [302cf1575] Add support for LIKE in CREATE FOREIGN TABLE
--->
-
-<listitem>
-<para>
-Allow foreign tables to be created like existing local tables (Zhang Mingli)
-<ulink url="&commit_baseurl;302cf1575">&sect;</ulink>
-</para>
-
-<para>
-The syntax is CREATE FOREIGN TABLE ... LIKE.
-</para>
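-
-<para>
-A sketch ("local_t" and foreign server "remote_srv" are hypothetical):
-<programlisting>
-CREATE FOREIGN TABLE ft (LIKE local_t) SERVER remote_srv;
-</programlisting>
-</para>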
-</listitem>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2024-11-27 [85b7efa1c] Support LIKE with nondeterministic collations
--->
-
-<listitem>
-<para>
-Allow LIKE with nondeterministic collations (Peter Eisentraut)
-<ulink url="&commit_baseurl;85b7efa1c">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2025-02-21 [329304c90] Support text position search functions with nondetermini
--->
-
-<listitem>
-<para>
-Allow text position search functions with nondeterministic collations (Peter Eisentraut)
-<ulink url="&commit_baseurl;329304c90">&sect;</ulink>
-</para>
-
-<para>
-These used to generate an error.
-</para>
-</listitem>
-
-<!--
-Author: Jeff Davis <jdavis@postgresql.org>
-2025-01-17 [d3d098316] Support PG_UNICODE_FAST locale in the builtin collation
--->
-
-<listitem>
-<para>
-Add builtin collation provider PG_UNICODE_FAST (Jeff Davis)
-<ulink url="&commit_baseurl;d3d098316">&sect;</ulink>
-</para>
-
-<para>
-This locale supports case mapping, but sorts in code point order, not natural language order.
-</para>
-</listitem>
-
-<!--
-Author: David Rowley <drowley@postgresql.org>
-2024-09-24 [62ddf7ee9] Add ONLY support for VACUUM and ANALYZE
--->
-
-<listitem>
-<para>
-Allow VACUUM and ANALYZE to process partitioned tables without processing their children (Michael Harris)
-<ulink url="&commit_baseurl;62ddf7ee9">&sect;</ulink>
-</para>
-
-<para>
-This is enabled with the new ONLY option. This is useful since autovacuum does not process partitioned tables, only their children.
-</para>
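-
-<para>
-A sketch against a hypothetical partitioned table:
-<programlisting>
-VACUUM (ANALYZE) ONLY parted_tbl;  -- skips the partitions
-ANALYZE ONLY parted_tbl;
-</programlisting>
-</para>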
-</listitem>
-
-<!--
-Author: Jeff Davis <jdavis@postgresql.org>
-2024-10-11 [e839c8ecc] Create functions pg_set_relation_stats, pg_clear_relatio
-Author: Jeff Davis <jdavis@postgresql.org>
-2024-10-24 [d32d14639] Add functions pg_restore_relation_stats(), pg_restore_at
-Author: Jeff Davis <jdavis@postgresql.org>
-2025-03-25 [650ab8aaf] Stats: use schemaname/relname instead of regclass.
--->
-
-<listitem>
-<para>
-Add functions to modify per-relation and per-column optimizer statistics (Corey Huinker)
-<ulink url="&commit_baseurl;e839c8ecc">&sect;</ulink>
-<ulink url="&commit_baseurl;d32d14639">&sect;</ulink>
-<ulink url="&commit_baseurl;650ab8aaf">&sect;</ulink>
-</para>
-
-<para>
-The functions are pg_restore_relation_stats(), pg_restore_attribute_stats(), pg_clear_relation_stats(), and pg_clear_attribute_stats().
-</para>
-</listitem>
-
-
-<!--
-Author: Thomas Munro <tmunro@postgresql.org>
-2025-04-08 [f78ca6f3e] Introduce file_copy_method setting.
--->
-
-<listitem>
-<para>
-Add server variable file_copy_method to control the file copying method (Nazir Bilal Yavuz)
-<ulink url="&commit_baseurl;f78ca6f3e">&sect;</ulink>
-</para>
-
-<para>
-This controls whether CREATE DATABASE ... STRATEGY=FILE_COPY and ALTER DATABASE ... SET TABLESPACE use file copy or clone.
-</para>
-</listitem>
-
- </itemizedlist>
-
- <sect4 id="release-18-constraints">
- <title><link linkend="ddl-constraints">Constraints</link></title>
-
- <itemizedlist>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2024-09-17 [fc0438b4e] Add temporal PRIMARY KEY and UNIQUE constraints
--->
-
-<listitem>
-<para>
-Allow the specification of non-overlapping PRIMARY KEY and UNIQUE constraints (Paul A. Jungwirth)
-<ulink url="&commit_baseurl;fc0438b4e">&sect;</ulink>
-</para>
-
-<para>
-This is specified by WITHOUT OVERLAPS on the last column.
-</para>
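-
-<para>
-A sketch of a temporal primary key (this assumes the btree_gist extension,
-which supplies equality support for the scalar key part):
-<programlisting>
-CREATE EXTENSION IF NOT EXISTS btree_gist;
-CREATE TABLE booking (
-    room   integer,
-    during tstzrange,
-    PRIMARY KEY (room, during WITHOUT OVERLAPS)
-);
-</programlisting>
-</para>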
-</listitem>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2025-01-11 [ca87c415e] Add support for NOT ENFORCED in CHECK constraints
-Author: Peter Eisentraut <peter@eisentraut.org>
-2025-04-02 [eec0040c4] Add support for NOT ENFORCED in foreign key constraints
--->
-
-<listitem>
-<para>
-Allow CHECK and foreign key constraints to be specified as NOT ENFORCED (Amul Sul)
-<ulink url="&commit_baseurl;ca87c415e">&sect;</ulink>
-<ulink url="&commit_baseurl;eec0040c4">&sect;</ulink>
-</para>
-
-<para>
-This also adds column pg_constraint.conenforced.
-</para>
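-
-<para>
-A sketch ("t" is a hypothetical table):
-<programlisting>
-ALTER TABLE t ADD CONSTRAINT x_positive CHECK (x &gt; 0) NOT ENFORCED;
-</programlisting>
-</para>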
-</listitem>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2024-11-15 [9321d2fdf] Fix collation handling for foreign keys
--->
-
-<listitem>
-<para>
-Require primary/foreign key relationships to use either deterministic collations or the same nondeterministic collations (Peter Eisentraut)
-<ulink url="&commit_baseurl;9321d2fdf">&sect;</ulink>
-</para>
-
-<para>
-The restore of a pg_dump, also used by pg_upgrade, will fail if these requirements are not met; schema changes must be made for these upgrade methods to succeed.
-</para>
-</listitem>
-
-<!--
-Author: Álvaro Herrera <alvherre@alvh.no-ip.org>
-2024-11-08 [14e87ffa5] Add pg_constraint rows for not-null constraints
--->
-
-<listitem>
-<para>
-Store column NOT NULL specifications in pg_constraint (Álvaro Herrera, Bernd Helmle)
-<ulink url="&commit_baseurl;14e87ffa5">&sect;</ulink>
-</para>
-
-<para>
-This allows names to be specified for NOT NULL constraints. This also adds NOT NULL constraints to foreign tables and NOT NULL inheritance control to local tables.
-</para>
-</listitem>
-
-<!--
-Author: Álvaro Herrera <alvherre@alvh.no-ip.org>
-2025-04-07 [a379061a2] Allow NOT NULL constraints to be added as NOT VALID
--->
-
-<listitem>
-<para>
-Allow ALTER TABLE to set the NOT VALID attribute of NOT NULL constraints (Rushabh Lathia, Jian He)
-<ulink url="&commit_baseurl;a379061a2">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Álvaro Herrera <alvherre@alvh.no-ip.org>
-2025-03-05 [f4e53e10b] Add ALTER TABLE ... ALTER CONSTRAINT ... SET [NO] INHERI
-Author: Álvaro Herrera <alvherre@alvh.no-ip.org>
-2025-03-27 [4a02af8b1] Simplify syntax for ALTER TABLE ALTER CONSTRAINT NO INHE
--->
-
-<listitem>
-<para>
-Allow modification of the inheritability of NOT NULL constraints (Suraj Kharage, Álvaro Herrera)
-<ulink url="&commit_baseurl;f4e53e10b">&sect;</ulink>
-<ulink url="&commit_baseurl;4a02af8b1">&sect;</ulink>
-</para>
-
-<para>
-The syntax is ALTER TABLE ... ALTER CONSTRAINT ... [NO] INHERIT.
-</para>
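-
-<para>
-A sketch (the table and constraint names are hypothetical):
-<programlisting>
-ALTER TABLE parent_t ALTER CONSTRAINT parent_t_x_not_null NO INHERIT;
-</programlisting>
-</para>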
-</listitem>
-
-<!--
-Author: Álvaro Herrera <alvherre@alvh.no-ip.org>
-2025-01-23 [b663b9436] Allow NOT VALID foreign key constraints on partitioned t
--->
-
-<listitem>
-<para>
-Allow NOT VALID foreign key constraints on partitioned tables (Amul Sul)
-<ulink url="&commit_baseurl;b663b9436">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Álvaro Herrera <alvherre@alvh.no-ip.org>
-2024-09-30 [4dea33ce7] Don't disallow DROP of constraints ONLY on partitioned t
--->
-
-<listitem>
-<para>
-Allow dropping of constraints ONLY on partitioned tables (Álvaro Herrera)
-<ulink url="&commit_baseurl;4dea33ce7">&sect;</ulink>
-</para>
-
-<para>
-This was previously erroneously prohibited.
-</para>
-</listitem>
-
- </itemizedlist>
- </sect4>
-
- <sect4 id="release-18-copy">
- <title><link linkend="sql-copy"><command>COPY</command></link></title>
-
- <itemizedlist>
-
-<!--
-Author: Fujii Masao <fujii@postgresql.org>
-2024-10-08 [4ac2a9bec] Add REJECT_LIMIT option to the COPY command.
--->
-
-<listitem>
-<para>
-Add REJECT_LIMIT to control the number of invalid rows COPY FROM can ignore (Atsushi Torikoshi)
-<ulink url="&commit_baseurl;4ac2a9bec">&sect;</ulink>
-</para>
-
-<para>
-This is available when ON_ERROR = 'ignore'.
-</para>
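-
-<para>
-A sketch (the file path is hypothetical):
-<programlisting>
-COPY t FROM '/tmp/data.csv'
-    WITH (FORMAT csv, ON_ERROR ignore, REJECT_LIMIT 10);
-</programlisting>
-</para>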
-</listitem>
-
-<!--
-Author: Fujii Masao <fujii@postgresql.org>
-2025-04-04 [534874fac] Allow "COPY table TO" command to copy rows from material
--->
-
-<listitem>
-<para>
-Allow COPY TO to copy rows from a populated materialized view (Jian He)
-<ulink url="&commit_baseurl;534874fac">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Fujii Masao <fujii@postgresql.org>
-2024-10-03 [e7834a1a2] Add LOG_VERBOSITY = 'silent' support to COPY command.
--->
-
-<listitem>
-<para>
-Add COPY LOG_VERBOSITY level "silent" to suppress log output of ignored rows (Atsushi Torikoshi)
-<ulink url="&commit_baseurl;e7834a1a2">&sect;</ulink>
-</para>
-
-<para>
-This new level suppresses output for discarded input rows when on_error = 'ignore'.
-</para>
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2025-02-06 [401a6956f] Disallow COPY FREEZE on foreign tables.
--->
-
-<listitem>
-<para>
-Disallow COPY FREEZE on foreign tables (Nathan Bossart)
-<ulink url="&commit_baseurl;401a6956f">&sect;</ulink>
-</para>
-
-<para>
-Previously the COPY worked but the FREEZE was ignored; such commands are now rejected.
-</para>
-</listitem>
-
- </itemizedlist>
- </sect4>
-
- <sect4 id="release-18-explain">
- <title><link linkend="sql-explain"><command>EXPLAIN</command></link></title>
-
- <itemizedlist>
-
-<!--
-Author: David Rowley <drowley@postgresql.org>
-2024-12-11 [c2a4078eb] Enable BUFFERS with EXPLAIN ANALYZE by default
--->
-
-<listitem>
-<para>
-Automatically include BUFFERS output in EXPLAIN ANALYZE (Guillaume Lelarge, David Rowley)
-<ulink url="&commit_baseurl;c2a4078eb">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Masahiko Sawada <msawada@postgresql.org>
-2024-08-13 [4c1b4cdb8] Add resource statistics reporting to ANALYZE VERBOSE.
-Author: Masahiko Sawada <msawada@postgresql.org>
-2024-09-09 [bb7775234] Add WAL usage reporting to ANALYZE VERBOSE output.
--->
-
-<listitem>
-<para>
-Add WAL, CPU, and average read statistics output to EXPLAIN ANALYZE VERBOSE (Anthonin Bonnefoy)
-<ulink url="&commit_baseurl;4c1b4cdb8">&sect;</ulink>
-<ulink url="&commit_baseurl;bb7775234">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2025-02-17 [320545bfc] Add information about WAL buffers being full to EXPLAIN
-Author: Michael Paquier <michael@paquier.xyz>
-2025-02-17 [6a8a7ce47] Add information about WAL buffers full to VACUUM/ANALYZE
--->
-
-<listitem>
-<para>
-Add full WAL buffer count to EXPLAIN (WAL), VACUUM/ANALYZE (VERBOSE), and autovacuum log output (Bertrand Drouvot)
-<ulink url="&commit_baseurl;320545bfc">&sect;</ulink>
-<ulink url="&commit_baseurl;6a8a7ce47">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Peter Geoghegan <pg@bowt.ie>
-2025-03-11 [0fbceae84] Show index search count in EXPLAIN ANALYZE, take 2.
--->
-
-<listitem>
-<para>
-In EXPLAIN ANALYZE, report the number of index lookups used per index scan node (Peter Geoghegan)
-<ulink url="&commit_baseurl;0fbceae84">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Robert Haas <rhaas@postgresql.org>
-2025-02-21 [ddb17e387] Allow EXPLAIN to indicate fractional rows.
-Author: Robert Haas <rhaas@postgresql.org>
-2025-02-27 [95dbd827f] EXPLAIN: Always use two fractional digits for row counts
--->
-
-<listitem>
-<para>
-Modify EXPLAIN to output fractional row counts (Ibrar Ahmed, Ilia Evdokimov, Robert Haas)
-<ulink url="&commit_baseurl;ddb17e387">&sect;</ulink>
-<ulink url="&commit_baseurl;95dbd827f">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: David Rowley <drowley@postgresql.org>
-2024-07-05 [1eff8279d] Add memory/disk usage for Material nodes in EXPLAIN
-Author: David Rowley <drowley@postgresql.org>
-2024-07-05 [53abb1e0e] Fix newly introduced issue in EXPLAIN for Materialize no
-Author: Tatsuo Ishii <ishii@postgresql.org>
-2024-09-17 [95d6e9af0] Add memory/disk usage for Window aggregate nodes in EXPL
-Author: Tatsuo Ishii <ishii@postgresql.org>
-2024-09-23 [40708acd6] Add memory/disk usage for more executor nodes.
--->
-
-<listitem>
-<para>
-Add memory and disk usage details to Material, Window Aggregate, and common table expression nodes in EXPLAIN (David Rowley, Tatsuo Ishii)
-<ulink url="&commit_baseurl;1eff8279d">&sect;</ulink>
-<ulink url="&commit_baseurl;53abb1e0e">&sect;</ulink>
-<ulink url="&commit_baseurl;95d6e9af0">&sect;</ulink>
-<ulink url="&commit_baseurl;40708acd6">&sect;</ulink>
-</para>
-</listitem>
-
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2025-03-11 [8b1b34254] Improve EXPLAIN's display of window functions.
--->
-
-<listitem>
-<para>
-Add details about window function arguments to EXPLAIN output (Tom Lane)
-<ulink url="&commit_baseurl;8b1b34254">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: David Rowley <drowley@postgresql.org>
-2024-07-09 [5a1e6df3b] Show Parallel Bitmap Heap Scan worker stats in EXPLAIN A
--->
-
-<listitem>
-<para>
-Add "Parallel Bitmap Heap Scan" worker cache statistics to EXPLAIN ANALYZE (David Geier, Heikki Linnakangas, Donghang Lin, Alena Rybakina, David Rowley)
-<ulink url="&commit_baseurl;5a1e6df3b">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Robert Haas <rhaas@postgresql.org>
-2024-08-21 [c01743aa4] Show number of disabled nodes in EXPLAIN ANALYZE output.
-Author: David Rowley <drowley@postgresql.org>
-2024-10-11 [161320b4b] Adjust EXPLAIN's output for disabled nodes
-Author: David Rowley <drowley@postgresql.org>
-2024-10-29 [84b8fccbe] Doc: add detail about EXPLAIN's "Disabled" property
--->
-
-<listitem>
-<para>
-Indicate disabled nodes in EXPLAIN ANALYZE output (Robert Haas, David Rowley, Laurenz Albe)
-<ulink url="&commit_baseurl;c01743aa4">&sect;</ulink>
-<ulink url="&commit_baseurl;161320b4b">&sect;</ulink>
-<ulink url="&commit_baseurl;84b8fccbe">&sect;</ulink>
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect4>
-
- </sect3>
-
- <sect3 id="release-18-datatypes">
- <title>Data Types</title>
-
- <itemizedlist>
-
-<!--
-Author: Jeff Davis <jdavis@postgresql.org>
-2025-01-23 [4e7f62bc3] Add support for Unicode case folding.
-Author: Jeff Davis <jdavis@postgresql.org>
-2025-01-17 [286a365b9] Support Unicode full case mapping and conversion.
--->
-
-<listitem>
-<para>
-Improve Unicode full case mapping and conversion (Jeff Davis)
-<ulink url="&commit_baseurl;4e7f62bc3">&sect;</ulink>
-<ulink url="&commit_baseurl;286a365b9">&sect;</ulink>
-</para>
-
-<para>
-This adds the ability to perform conditional and title case mapping, and to map single characters to multiple characters.
-</para>
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2025-01-24 [a5579a90a] Make jsonb casts to scalar types translate JSON null to
--->
-
-<listitem>
-<para>
-Allow jsonb "null" values to be cast to scalar types as NULL (Tom Lane)
-<ulink url="&commit_baseurl;a5579a90a">&sect;</ulink>
-</para>
-
-<para>
-Previously such casts generated an error.
-</para>
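-
-<para>
-For example:
-<programlisting>
-SELECT ('null'::jsonb)::integer;   -- now NULL; previously an error
-</programlisting>
-</para>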
-</listitem>
-
-<!--
-Author: Andrew Dunstan <andrew@dunslane.net>
-2025-03-05 [4603903d2] Allow json{b}_strip_nulls to remove null array elements
--->
-
-<listitem>
-<para>
-Add optional parameter to json{b}_strip_nulls to allow removal of null array elements (Florents Tselai)
-<ulink url="&commit_baseurl;4603903d2">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2025-04-01 [6c12ae09f] Introduce a SQL-callable function array_sort(anyarray).
--->
-
-<listitem>
-<para>
-Add function array_sort() which sorts an array's first dimension (Junwang Zhao, Jian He)
-<ulink url="&commit_baseurl;6c12ae09f">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2024-11-01 [49d6c7d8d] Add SQL function array_reverse()
--->
-
-<listitem>
-<para>
-Add function array_reverse() which reverses an array's first dimension (Aleksander Alekseev)
-<ulink url="&commit_baseurl;49d6c7d8d">&sect;</ulink>
-</para>
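-
-<para>
-A quick illustration of this function and array_sort() above:
-<programlisting>
-SELECT array_sort(ARRAY[3,1,2]);     -- {1,2,3}
-SELECT array_reverse(ARRAY[1,2,3]);  -- {3,2,1}
-</programlisting>
-</para>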
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2025-03-13 [0697b2390] Add reverse(bytea).
--->
-
-<listitem>
-<para>
-Add function reverse() to reverse bytea bytes (Aleksander Alekseev)
-<ulink url="&commit_baseurl;0697b2390">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Dean Rasheed <dean.a.rasheed@gmail.com>
-2025-03-07 [6da469bad] Allow casting between bytea and integer types.
--->
-
-<listitem>
-<para>
-Allow casting between integer types and bytea (Aleksander Alekseev)
-<ulink url="&commit_baseurl;6da469bad">&sect;</ulink>
-</para>
-
-<para>
-The integer values are stored as bytea two's complement values.
-</para>
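-
-<para>
-For example:
-<programlisting>
-SELECT 255::integer::bytea;           -- \x000000ff
-SELECT '\x000000ff'::bytea::integer;  -- 255
-</programlisting>
-</para>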
-</listitem>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2025-04-03 [82a46cca9] Update Unicode data to Unicode 16.0.0
--->
-
-<listitem>
-<para>
-Update Unicode data to Unicode 16.0.0 (Peter Eisentraut)
-<ulink url="&commit_baseurl;82a46cca9">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2025-02-18 [b464e51ab] Update to latest Snowball sources.
--->
-
-<listitem>
-<para>
-Add full text search stemming for Estonian (Tom Lane)
-<ulink url="&commit_baseurl;b464e51ab">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2024-09-24 [cd838e200] Neaten up our choices of SQLSTATEs for XML-related error
--->
-
-<listitem>
-<para>
-Improve the XML error codes to more closely match the SQL standard (Tom Lane)
-<ulink url="&commit_baseurl;cd838e200">&sect;</ulink>
-</para>
-
-<para>
-These errors are reported via SQLSTATE.
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect3>
-
- <sect3 id="release-18-functions">
- <title>Functions</title>
-
- <itemizedlist>
-
-<!--
-Author: Jeff Davis <jdavis@postgresql.org>
-2025-01-24 [bfc599206] Add SQL function CASEFOLD().
--->
-
-<listitem>
-<para>
-Add function CASEFOLD() to allow for more sophisticated case-insensitive matching (Jeff Davis)
-<ulink url="&commit_baseurl;bfc599206">&sect;</ulink>
-</para>
-
-<para>
-This allows more accurate comparisons in cases where a character has multiple upper- or lower-case equivalents, or where case conversion changes the number of characters.
-</para>
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2024-07-11 [a0f1fce80] Add min and max aggregates for composite types (records)
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2024-10-08 [2d24fd942] Add min and max aggregates for bytea type.
--->
-
-<listitem>
-<para>
-Allow MIN()/MAX() aggregates on arrays and composite types (Aleksander Alekseev, Marat Buharov)
-<ulink url="&commit_baseurl;a0f1fce80">&sect;</ulink>
-<ulink url="&commit_baseurl;2d24fd942">&sect;</ulink>
-</para>
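-
-<para>
-For example, using array comparison rules:
-<programlisting>
-SELECT max(a) FROM (VALUES (ARRAY[1,2]), (ARRAY[1,3])) AS t(a);  -- {1,3}
-</programlisting>
-</para>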
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2024-08-16 [6be39d77a] Fix extraction of week and quarter fields from intervals
--->
-
-<listitem>
-<para>
-Add a WEEK option to EXTRACT() (Tom Lane)
-<ulink url="&commit_baseurl;6be39d77a">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2024-08-16 [6be39d77a] Fix extraction of week and quarter fields from intervals
--->
-
-<listitem>
-<para>
-Improve the output of EXTRACT(QUARTER ...) for negative values (Tom Lane)
-<ulink url="&commit_baseurl;6be39d77a">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2025-01-22 [172e6b3ad] Support RN (roman-numeral format) in to_number().
--->
-
-<listitem>
-<para>
-Add roman numeral support to to_number() (Hunaid Sohail)
-<ulink url="&commit_baseurl;172e6b3ad">&sect;</ulink>
-</para>
-
-<para>
-This is accessed via the "RN" pattern.
-</para>
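-
-<para>
-For example:
-<programlisting>
-SELECT to_number('XIV', 'RN');  -- 14
-</programlisting>
-</para>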
-</listitem>
-
-<!--
-Author: Masahiko Sawada <msawada@postgresql.org>
-2024-12-11 [78c5e141e] Add UUID version 7 generation function.
--->
-
-<listitem>
-<para>
-Add UUID version 7 generation function uuidv7() (Andrey Borodin)
-<ulink url="&commit_baseurl;78c5e141e">&sect;</ulink>
-</para>
-
-<para>
-This UUID value is temporally sortable. Function alias uuidv4() has been added to explicitly generate version 4 UUIDs.
-</para>
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2024-08-12 [760162fed] Add user-callable CRC functions.
--->
-
-<listitem>
-<para>
-Add functions crc32() and crc32c() to compute CRC values (Aleksander Alekseev)
-<ulink url="&commit_baseurl;760162fed">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Dean Rasheed <dean.a.rasheed@gmail.com>
-2025-03-26 [a3b6dfd41] Add support for gamma() and lgamma() functions.
--->
-
-<listitem>
-<para>
-Add math functions gamma() and lgamma() (Dean Rasheed)
-<ulink url="&commit_baseurl;a3b6dfd41">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2025-03-03 [246dedc5d] Allow =&gt; syntax for named cursor arguments in plpgsql.
--->
-
-<listitem>
-<para>
-Allow "=&gt;" syntax for named cursor arguments in plpgsql (Pavel Stehule)
-<ulink url="&commit_baseurl;246dedc5d">&sect;</ulink>
-</para>
-
-<para>
-Previously only ":=" was accepted.
-</para>
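-
-<para>
-A sketch:
-<programlisting>
-DO $$
-DECLARE
-    c CURSOR (lim integer) FOR SELECT g FROM generate_series(1, lim) g;
-    v integer;
-BEGIN
-    OPEN c (lim =&gt; 3);   -- previously only lim := 3 was accepted
-    FETCH c INTO v;
-    CLOSE c;
-END
-$$;
-</programlisting>
-</para>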
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2024-07-25 [580f8727c] Add argument names to the regexp_XXX functions.
--->
-
-<listitem>
-<para>
-Allow regexp_match[es]/regexp_like/regexp_replace/regexp_count/regexp_instr/regexp_substr/regexp_split_to_table/regexp_split_to_array() to use named arguments (Jian He)
-<ulink url="&commit_baseurl;580f8727c">&sect;</ulink>
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect3>
-
- <sect3 id="release-18-libpq">
- <title><link linkend="libpq">libpq</link></title>
-
- <itemizedlist>
-
-<!--
-Author: Robert Haas <rhaas@postgresql.org>
-2024-09-09 [cdb6b0fdb] Add PQfullProtocolVersion() to surface the precise proto
--->
-
-<listitem>
-<para>
-Add function PQfullProtocolVersion() to report the full, including minor, protocol version number (Jacob Champion, Jelte Fennema-Nio)
-<ulink url="&commit_baseurl;cdb6b0fdb">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Heikki Linnakangas <heikki.linnakangas@iki.fi>
-2025-04-02 [285613c60] libpq: Add min/max_protocol_version connection options
-Author: Heikki Linnakangas <heikki.linnakangas@iki.fi>
-2025-04-02 [507034910] libpq: Handle NegotiateProtocolVersion message different
--->
-
-<listitem>
-<para>
-Add libpq connection parameters and environment variables to specify the minimum and maximum acceptable protocol version for connections (Jelte Fennema-Nio)
-<ulink url="&commit_baseurl;285613c60">&sect;</ulink>
-<ulink url="&commit_baseurl;507034910">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2024-12-18 [4b99fed75] libpq: Add service name to PGconn and PQservice()
--->
-
-<listitem>
-<para>
-Add libpq function PQservice() to return the connection service name (Michael Banck)
-<ulink url="&commit_baseurl;4b99fed75">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Tomas Vondra <tomas.vondra@postgresql.org>
-2024-08-19 [28a1121fd] Mark search_path as GUC_REPORT
-Author: Tomas Vondra <tomas.vondra@postgresql.org>
-2024-08-19 [0d06a7eac] Document that search_path is reported by the server
--->
-
-<listitem>
-<para>
-Report search_path changes to the client (Alexander Kukushkin, Jelte Fennema-Nio, Tomas Vondra)
-<ulink url="&commit_baseurl;28a1121fd">&sect;</ulink>
-<ulink url="&commit_baseurl;0d06a7eac">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Álvaro Herrera <alvherre@alvh.no-ip.org>
-2024-08-12 [ea92f3a0a] libpq: Trace frontend authentication challenges
-Author: Álvaro Herrera <alvherre@alvh.no-ip.org>
-2024-08-14 [a5c6b8f22] libpq: Trace responses to SSLRequest and GSSENCRequest
-Author: Álvaro Herrera <alvherre@alvh.no-ip.org>
-2024-08-16 [b8b3f861f] libpq: Trace all messages received from the server
-Author: Robert Haas <rhaas@postgresql.org>
-2025-02-24 [e87c14b19] libpq: Trace all NegotiateProtocolVersion fields
-Author: Álvaro Herrera <alvherre@alvh.no-ip.org>
-2024-08-09 [7adec2d5f] libpq: Trace StartupMessage/SSLRequest/GSSENCRequest cor
--->
-
-<listitem>
-<para>
-Add PQtrace() output for all message types, including authentication (Jelte Fennema-Nio)
-<ulink url="&commit_baseurl;ea92f3a0a">&sect;</ulink>
-<ulink url="&commit_baseurl;a5c6b8f22">&sect;</ulink>
-<ulink url="&commit_baseurl;b8b3f861f">&sect;</ulink>
-<ulink url="&commit_baseurl;e87c14b19">&sect;</ulink>
-<ulink url="&commit_baseurl;7adec2d5f">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Daniel Gustafsson <dgustafsson@postgresql.org>
-2025-04-03 [2da74d8d6] libpq: Add support for dumping SSL key material to file
--->
-
-<listitem>
-<para>
-Add libpq connection parameter sslkeylogfile which dumps out SSL key material (Abhishek Chanda, Daniel Gustafsson)
-<ulink url="&commit_baseurl;2da74d8d6">&sect;</ulink>
-</para>
-
-<para>
-This is useful for debugging.
-</para>
-</listitem>
-
-<!--
-Author: Thomas Munro <tmunro@postgresql.org>
-2025-03-25 [3c86223c9] libpq: Deprecate pg_int64.
--->
-
-<listitem>
-<para>
-Modify some libpq function signatures to use int64_t (Thomas Munro)
-<ulink url="&commit_baseurl;3c86223c9">&sect;</ulink>
-</para>
-
-<para>
-These previously used pg_int64, which is now deprecated.
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect3>
-
- <sect3 id="release-18-psql">
- <title><xref linkend="app-psql"/></title>
-
- <itemizedlist>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2024-08-22 [d55322b0d] psql: Add more meta-commands able to use the extended pr
--->
-
-<listitem>
-<para>
-Allow psql to parse, bind, and close named prepared statements (Anthonin Bonnefoy, Michael Paquier)
-<ulink url="&commit_baseurl;d55322b0d">&sect;</ulink>
-</para>
-
-<para>
-This is accomplished with new commands \parse, \bind_named, and \close.
-</para>
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2025-02-21 [41625ab8e] psql: Add support for pipelines
-Author: Michael Paquier <michael@paquier.xyz>
-2025-03-18 [17caf6644] psql: Add \sendpipeline to send query buffers while in a
-Author: Michael Paquier <michael@paquier.xyz>
-2025-03-19 [2cce0fe44] psql: Allow queries terminated by semicolons while in pi
--->
-
-<listitem>
-<para>
-Add psql backslash commands allowing the issuance of pipeline queries (Anthonin Bonnefoy)
-<ulink url="&commit_baseurl;41625ab8e">&sect;</ulink>
-<ulink url="&commit_baseurl;17caf6644">&sect;</ulink>
-<ulink url="&commit_baseurl;2cce0fe44">&sect;</ulink>
-</para>
-
-<para>
-The new commands are \startpipeline, \syncpipeline, \sendpipeline, \endpipeline, \flushrequest, \flush, and \getresults.
-</para>
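-
-<para>
-A minimal pipeline session sketch (queries are sent with the extended
-protocol via \bind):
-<programlisting>
-\startpipeline
-SELECT 1 \bind \sendpipeline
-SELECT 2 \bind \sendpipeline
-\flushrequest
-\getresults
-\endpipeline
-</programlisting>
-</para>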
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2025-02-25 [3ce357584] psql: Add pipeline status to prompt and some state varia
--->
-
-<listitem>
-<para>
-Allow adding pipeline status to the psql prompt and add related state variables (Anthonin Bonnefoy)
-<ulink url="&commit_baseurl;3ce357584">&sect;</ulink>
-</para>
-
-<para>
-The new prompt character is "%P" and the new psql variables are PIPELINE_SYNC_COUNT, PIPELINE_COMMAND_COUNT, and PIPELINE_RESULT_COUNT.
-</para>
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2024-12-18 [477728b5d] psql: Add more information about service name
--->
-
-<listitem>
-<para>
-Allow the connection service name to be added to the psql prompt or accessed via a psql variable (Michael Banck)
-<ulink url="&commit_baseurl;477728b5d">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Dean Rasheed <dean.a.rasheed@gmail.com>
-2025-01-14 [00f4c2959] psql: Add option to use expanded mode to all list comman
--->
-
-<listitem>
-<para>
-Add psql option to use expanded mode on all list commands (Dean Rasheed)
-<ulink url="&commit_baseurl;00f4c2959">&sect;</ulink>
-</para>
-
-<para>
-Appending 'x' to a list command enables this.
-</para>
-</listitem>
-
-<!--
-Author: Álvaro Herrera <alvherre@alvh.no-ip.org>
-2025-02-22 [bba2fbc62] Change \conninfo to use tabular format
--->
-
-<listitem>
-<para>
-Change psql's \conninfo to use tabular format and include more information (Álvaro Herrera, Maiquel Grassi, Hunaid Sohail)
-<ulink url="&commit_baseurl;bba2fbc62">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Dean Rasheed <dean.a.rasheed@gmail.com>
-2025-01-14 [2355e5111] psql: Add leakproof indicator to \df+, \do+, \dAo+, and
--->
-
-<listitem>
-<para>
-Add function's leakproof indicator to psql's \df+, \do+, \dAo+, and \dC+ outputs (Yugo Nagata)
-<ulink url="&commit_baseurl;2355e5111">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2024-07-02 [978f38c77] Add information about access method for partitioned rela
--->
-
-<listitem>
-<para>
-Add access method details for partitioned relations in \dP+ (Justin Pryzby)
-<ulink url="&commit_baseurl;978f38c77">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Magnus Hagander <magnus@hagander.net>
-2025-03-24 [d696406a9] psql: show default extension version in \dx output
--->
-
-<listitem>
-<para>
-Add "default_version" to the psql \dx extension output (Magnus Hagander)
-<ulink url="&commit_baseurl;d696406a9">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Daniel Gustafsson <dgustafsson@postgresql.org>
-2025-03-25 [1a759c832] psql: Make default \watch interval configurable
--->
-
-<listitem>
-<para>
-Add psql variable WATCH_INTERVAL to set the default \watch wait time (Daniel Gustafsson)
-<ulink url="&commit_baseurl;1a759c832">&sect;</ulink>
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect3>
-
- <sect3 id="release-18-server-apps">
- <title>Server Applications</title>
-
- <itemizedlist>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2024-10-01 [983a588e0] initdb: Add new option "- -no-data-checksums"
-Author: Peter Eisentraut <peter@eisentraut.org>
-2024-10-16 [04bec894a] initdb: Change default to using data checksums.
--->
-
-<listitem>
-<para>
-Change initdb to default to enabling checksums (Greg Sabino Mullane)
-<ulink url="&commit_baseurl;983a588e0">&sect;</ulink>
-<ulink url="&commit_baseurl;04bec894a">&sect;</ulink>
-</para>
-
-<para>
-The new initdb option --no-data-checksums disables checksums.
-</para>
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2025-03-25 [cf131fa94] initdb: Add - -no-sync-data-files.
--->
-
-<listitem>
-<para>
-Add initdb option --no-sync-data-files to avoid syncing heap/index files (Nathan Bossart)
-<ulink url="&commit_baseurl;cf131fa94">&sect;</ulink>
-</para>
-
-<para>
-initdb --no-sync is still available to avoid syncing any files.
-</para>
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2025-03-18 [edba754f0] vacuumdb: Add option for analyzing only relations missin
-Author: Nathan Bossart <nathan@postgresql.org>
-2025-04-30 [987910502] vacuumdb: Don't skip empty relations in - -missing-stats-
--->
-
-<listitem>
-<para>
-Add vacuumdb option --missing-stats-only to compute only missing optimizer statistics (Corey Huinker, Nathan Bossart)
-<ulink url="&commit_baseurl;edba754f0">&sect;</ulink>
-<ulink url="&commit_baseurl;987910502">&sect;</ulink>
-</para>
-
-<para>
-This option can only be used with --analyze-only and --analyze-in-stages.
-</para>
-</listitem>
-
-<!--
-Author: Robert Haas <rhaas@postgresql.org>
-2025-03-17 [99aeb8470] pg_combinebackup: Add -k, - -link option.
--->
-
-<listitem>
-<para>
-Add pg_combinebackup option -k/--link to enable hard linking (Israel Barth Rubio, Robert Haas)
-<ulink url="&commit_baseurl;99aeb8470">&sect;</ulink>
-</para>
-
-<para>
-Only some files can be hard linked. This should not be used if the backups will be used independently.
-</para>
-</listitem>
-
-<!--
-Author: Robert Haas <rhaas@postgresql.org>
-2024-09-27 [8dfd31290] pg_verifybackup: Verify tar-format backups.
--->
-
-<listitem>
-<para>
-Allow pg_verifybackup to verify tar-format backups (Amul Sul)
-<ulink url="&commit_baseurl;8dfd31290">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Masahiko Sawada <msawada@postgresql.org>
-2025-03-12 [4ecdd4110] pg_rewind: Add dbname to primary_conninfo when using - -w
--->
-
-<listitem>
-<para>
-If pg_rewind's --source-server specifies a database name, use it in --write-recovery-conf output (Masahiko Sawada)
-<ulink url="&commit_baseurl;4ecdd4110">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Masahiko Sawada <msawada@postgresql.org>
-2025-02-21 [30666d185] pg_resetwal: Add - -char-signedness option to change the
--->
-
-<listitem>
-<para>
-Add pg_resetwal option --char-signedness to change the default char signedness (Masahiko Sawada)
-<ulink url="&commit_baseurl;30666d185">&sect;</ulink>
-</para>
-</listitem>
-
- </itemizedlist>
-
-
- <sect4 id="release-18-pgdump">
- <title><link
- linkend="app-pgdump"><application>pg_dump</application></link>/<link
- linkend="app-pg-dumpall"><application>pg_dumpall</application></link>/<link
- linkend="app-pgrestore"><application>pg_restore</application></link></title>
-
- <itemizedlist>
-
-<!--
-Author: Andrew Dunstan <andrew@dunslane.net>
-2025-04-04 [1495eff7b] Non text modes for pg_dumpall, correspondingly change pg
--->
-
-<listitem>
-<para>
-Allow pg_dumpall to dump in the same output formats as pg_dump supports (Mahendra Singh Thalor, Andrew Dunstan)
-<ulink url="&commit_baseurl;1495eff7b">&sect;</ulink>
-</para>
-
-<para>
-Also modify pg_restore to handle such dumps. Previously pg_dumpall only supported text format.
-</para>
-</listitem>
-
-<!--
-Author: Jeff Davis <jdavis@postgresql.org>
-2025-03-25 [bde2fb797] Add pg_dump - -with-{schema|data|statistics} options.
--->
-
-<listitem>
-<para>
-Add pg_dump options --with-schema, --with-data, and --with-statistics (Jeff Davis)
-<ulink url="&commit_baseurl;bde2fb797">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2025-03-25 [9c49f0e8c] pg_dump: Add - -sequence-data.
--->
-
-<listitem>
-<para>
-Add pg_dump option --sequence-data to dump sequence data that would normally be excluded (Nathan Bossart)
-<ulink url="&commit_baseurl;9c49f0e8c">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Jeff Davis <jdavis@postgresql.org>
-2025-02-20 [1fd1bd871] Transfer statistics during pg_upgrade.
--->
-
-<listitem>
-<para>
-Add pg_dump, pg_dumpall, and pg_restore options --statistics-only, --no-statistics, --no-data, and --no-schema (Corey Huinker, Jeff Davis)
-<ulink url="&commit_baseurl;1fd1bd871">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2025-03-16 [cd3c45125] pg_dump, pg_dumpall, pg_restore: Add - -no-policies optio
--->
-
-<listitem>
-<para>
-Add option --no-policies to disable row level security policy processing in pg_dump, pg_dumpall, pg_restore (Nikolay Samokhvalov)
-<ulink url="&commit_baseurl;cd3c45125">&sect;</ulink>
-</para>
-
-<para>
-This is useful for migrating to systems with different policies.
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect4>
-
- <sect4 id="release-18-pgupgrade">
- <title><link linkend="pgupgrade"><application>pg_upgrade</application></link></title>
-
- <itemizedlist>
-
-<!--
-Author: Jeff Davis <jdavis@postgresql.org>
-2025-02-20 [1fd1bd871] Transfer statistics during pg_upgrade.
-Author: Nathan Bossart <nathan@postgresql.org>
-2025-03-18 [c9d502eb6] Update guidance for running vacuumdb after pg_upgrade.
-Author: Nathan Bossart <nathan@postgresql.org>
-2025-04-30 [d5f1b6a75] Further adjust guidance for running vacuumdb after pg_up
--->
-
-<listitem>
-<para>
-Allow pg_upgrade to preserve optimizer statistics (Corey Huinker, Jeff Davis, Nathan Bossart)
-<ulink url="&commit_baseurl;1fd1bd871">&sect;</ulink>
-<ulink url="&commit_baseurl;c9d502eb6">&sect;</ulink>
-<ulink url="&commit_baseurl;d5f1b6a75">&sect;</ulink>
-<ulink url="&commit_baseurl;1fd1bd871">&sect;</ulink>
-</para>
-
-<para>
-Extended statistics are not preserved. Also add pg_upgrade option --no-statistics to disable statistics preservation.
-</para>
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2024-09-16 [40e2e5e92] Introduce framework for parallelizing various pg_upgrade
-Author: Nathan Bossart <nathan@postgresql.org>
-2024-09-16 [6d3d2e8e5] pg_upgrade: Parallelize retrieving relation information.
-Author: Nathan Bossart <nathan@postgresql.org>
-2024-09-16 [7baa36de5] pg_upgrade: Parallelize subscription check.
-Author: Nathan Bossart <nathan@postgresql.org>
-2024-09-16 [46cad8b31] pg_upgrade: Parallelize retrieving loadable libraries.
-Author: Nathan Bossart <nathan@postgresql.org>
-2024-09-16 [6ab8f27bc] pg_upgrade: Parallelize retrieving extension updates.
-Author: Nathan Bossart <nathan@postgresql.org>
-2024-09-16 [bbf83cab9] pg_upgrade: Parallelize data type checks.
-Author: Nathan Bossart <nathan@postgresql.org>
-2024-09-16 [9db3018cf] pg_upgrade: Parallelize contrib/isn check.
-Author: Nathan Bossart <nathan@postgresql.org>
-2024-09-16 [c34eabfbb] pg_upgrade: Parallelize postfix operator check.
-Author: Nathan Bossart <nathan@postgresql.org>
-2024-09-16 [cf2f82a37] pg_upgrade: Parallelize incompatible polymorphics check.
-Author: Nathan Bossart <nathan@postgresql.org>
-2024-09-16 [f93f5f7b9] pg_upgrade: Parallelize WITH OIDS check.
-Author: Nathan Bossart <nathan@postgresql.org>
-2024-09-16 [c880cf258] pg_upgrade: Parallelize encoding conversion check.
--->
-
-<listitem>
-<para>
-Allow pg_upgrade to process database checks in parallel (Nathan Bossart)
-<ulink url="&commit_baseurl;40e2e5e92">&sect;</ulink>
-<ulink url="&commit_baseurl;6d3d2e8e5">&sect;</ulink>
-<ulink url="&commit_baseurl;7baa36de5">&sect;</ulink>
-<ulink url="&commit_baseurl;46cad8b31">&sect;</ulink>
-<ulink url="&commit_baseurl;6ab8f27bc">&sect;</ulink>
-<ulink url="&commit_baseurl;bbf83cab9">&sect;</ulink>
-<ulink url="&commit_baseurl;9db3018cf">&sect;</ulink>
-<ulink url="&commit_baseurl;c34eabfbb">&sect;</ulink>
-<ulink url="&commit_baseurl;cf2f82a37">&sect;</ulink>
-<ulink url="&commit_baseurl;f93f5f7b9">&sect;</ulink>
-<ulink url="&commit_baseurl;c880cf258">&sect;</ulink>
-</para>
-
-<para>
-This is controlled by the existing --jobs option.
-</para>
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2025-03-25 [626d7236b] pg_upgrade: Add - -swap for faster file transfer.
--->
-
-<listitem>
-<para>
-Add pg_upgrade option --swap to swap directories rather than copy, clone, or link files (Nathan Bossart)
-<ulink url="&commit_baseurl;626d7236b">&sect;</ulink>
-</para>
-
-<para>
-This mode is potentially the fastest.
-</para>
-</listitem>
-
-<!--
-Author: Masahiko Sawada <msawada@postgresql.org>
-2025-02-21 [a8238f87f] pg_upgrade: Preserve default char signedness value from
-Author: Masahiko Sawada <msawada@postgresql.org>
-2025-02-21 [1aab68059] pg_upgrade: Add - -set-char-signedness to set the default
--->
-
-<listitem>
-<para>
-Add pg_upgrade option --set-char-signedness to set the default char signedness of new cluster (Masahiko Sawada)
-<ulink url="&commit_baseurl;a8238f87f">&sect;</ulink>
-<ulink url="&commit_baseurl;1aab68059">&sect;</ulink>
-</para>
-
-<para>
-This is to handle cases where a pre-Postgres 18 cluster's default CPU signedness does not match that of the new cluster.
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect4>
-
- <sect4 id="release-18-logicalrep-app">
-  <title>Logical Replication Applications</title>
-
- <itemizedlist>
-
-<!--
-Author: Amit Kapila <akapila@postgresql.org>
-2025-03-28 [fb2ea12f4] pg_createsubscriber: Add '- -all' option.
--->
-
-<listitem>
-<para>
-Add pg_createsubscriber option --all to create logical replicas for all databases (Shubham Khanna)
-<ulink url="&commit_baseurl;fb2ea12f4">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Amit Kapila <akapila@postgresql.org>
-2025-03-20 [e5aeed4b8] pg_createsubscriber: Add -R publications option.
--->
-
-<listitem>
-<para>
-Add pg_createsubscriber option --remove to remove publications (Shubham Khanna)
-<ulink url="&commit_baseurl;e5aeed4b8">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Amit Kapila <akapila@postgresql.org>
-2025-02-26 [e117cfb2f] Add two-phase option in pg_createsubscriber.
--->
-
-<listitem>
-<para>
-Add pg_createsubscriber option --enable-two-phase to enable prepared transactions (Shubham Khanna)
-<ulink url="&commit_baseurl;e117cfb2f">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Masahiko Sawada <msawada@postgresql.org>
-2025-04-04 [cf2655a90] pg_recvlogical: Add - -failover option.
--->
-
-<listitem>
-<para>
-Add pg_recvlogical option --failover to specify failover slots (Hayato Kuroda)
-<ulink url="&commit_baseurl;cf2655a90">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Fujii Masao <fujii@postgresql.org>
-2025-03-25 [c68100aa4] Allow pg_recvlogical - -drop-slot to work without - -dbnam
--->
-
-<listitem>
-<para>
-Allow pg_recvlogical --drop-slot to work without --dbname (Hayato Kuroda)
-<ulink url="&commit_baseurl;c68100aa4">&sect;</ulink>
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect4>
-
- </sect3>
-
- <sect3 id="release-18-source-code">
- <title>Source Code</title>
-
- <itemizedlist>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2024-07-05 [4b211003e] Support loading of injection points
-Author: Michael Paquier <michael@paquier.xyz>
-2024-07-18 [a0a5869a8] Add INJECTION_POINT_CACHED() to run injection points dir
--->
-
-<listitem>
-<para>
-Separate the loading and running of injection points (Michael Paquier, Heikki Linnakangas)
-<ulink url="&commit_baseurl;4b211003e">&sect;</ulink>
-<ulink url="&commit_baseurl;a0a5869a8">&sect;</ulink>
-</para>
-
-<para>
-Injection points can now be loaded, without being run, via INJECTION_POINT_LOAD(), and such injection points can later be run via INJECTION_POINT_CACHED().
-</para>
-</listitem>
-
-<!--
-Author: Heikki Linnakangas <heikki.linnakangas@iki.fi>
-2024-07-26 [20e0e7da9] Add test for early backend startup errors
--->
-
-<listitem>
-<para>
-Allow inline injection point test code with IS_INJECTION_POINT_ATTACHED() (Heikki Linnakangas)
-<ulink url="&commit_baseurl;20e0e7da9">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: David Rowley <drowley@postgresql.org>
-2024-08-05 [ca6fde922] Optimize JSON escaping using SIMD
--->
-
-<listitem>
-<para>
-Improve the performance of processing long JSON strings using SIMD instructions (David Rowley)
-<ulink url="&commit_baseurl;ca6fde922">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: John Naylor <john.naylor@postgresql.org>
-2025-04-06 [3c6e8c123] Compute CRC32C using AVX-512 instructions where availabl
--->
-
-<listitem>
-<para>
-Speed up CRC32C calculations using x86 AVX-512 instructions (Raghuveer Devulapalli, Paul Amonson)
-<ulink url="&commit_baseurl;3c6e8c123">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2025-03-28 [6be53c276] Optimize popcount functions with ARM Neon intrinsics.
-Author: Nathan Bossart <nathan@postgresql.org>
-2025-03-28 [519338ace] Optimize popcount functions with ARM SVE intrinsics.
--->
-
-<listitem>
-<para>
-Add ARM Neon and SVE CPU intrinsics for popcount (integer bit counting) (Chiranmoy Bhattacharya, Devanga Susmitha, Rama Malladi)
-<ulink url="&commit_baseurl;6be53c276">&sect;</ulink>
-<ulink url="&commit_baseurl;519338ace">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Dean Rasheed <dean.a.rasheed@gmail.com>
-2024-08-15 [8dc28d7eb] Optimise numeric multiplication using base-NBASE^2 arith
--->
-
-<listitem>
-<para>
-Improve the speed of numeric multiplication (Joel Jacobson, Dean Rasheed)
-<ulink url="&commit_baseurl;8dc28d7eb">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Tomas Vondra <tomas.vondra@postgresql.org>
-2025-04-07 [65c298f61] Add support for basic NUMA awareness
-Author: Tomas Vondra <tomas.vondra@postgresql.org>
-2025-04-07 [8cc139bec] Introduce pg_shmem_allocations_numa view
-Author: Tomas Vondra <tomas.vondra@postgresql.org>
-2025-04-07 [ba2a3c230] Add pg_buffercache_numa view with NUMA node info
--->
-
-<listitem>
-<para>
-Add configure option --with-libnuma to enable NUMA awareness (Jakub Wartak, Bertrand Drouvot)
-<ulink url="&commit_baseurl;65c298f61">&sect;</ulink>
-<ulink url="&commit_baseurl;8cc139bec">&sect;</ulink>
-<ulink url="&commit_baseurl;ba2a3c230">&sect;</ulink>
-</para>
-
-<para>
-The function pg_numa_available() reports on NUMA awareness, and the system views pg_shmem_allocations_numa and pg_buffercache_numa report on shared memory distribution
-across NUMA nodes.
-</para>
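-
-<para>
-A minimal sketch of checking NUMA support on a server configured with --with-libnuma:
-<programlisting>
-SELECT pg_numa_available();
-SELECT * FROM pg_shmem_allocations_numa;
-</programlisting>
-</para>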
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2024-09-18 [b52c4fc3c] Add TOAST table to pg_index.
--->
-
-<listitem>
-<para>
-Add TOAST table to pg_index to allow for very large index expressions (Nathan Bossart)
-<ulink url="&commit_baseurl;b52c4fc3c">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: David Rowley <drowley@postgresql.org>
-2024-12-20 [02a8d0c45] Remove pg_attribute.attcacheoff column
--->
-
-<listitem>
-<para>
-Remove column pg_attribute.attcacheoff (David Rowley)
-<ulink url="&commit_baseurl;02a8d0c45">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Melanie Plageman <melanieplageman@gmail.com>
-2025-03-03 [99f8f3fbb] Add relallfrozen to pg_class
--->
-
-<listitem>
-<para>
-Add column pg_class.relallfrozen (Melanie Plageman)
-<ulink url="&commit_baseurl;99f8f3fbb">&sect;</ulink>
-</para>
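-
-<para>
-For example, to compare it with the existing relallvisible column (table name illustrative):
-<programlisting>
-SELECT relname, relallvisible, relallfrozen
-FROM pg_class WHERE relname = 'my_table';
-</programlisting>
-</para>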
-</listitem>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2024-09-10 [56fead44d] Add amgettreeheight index AM API routine
-Author: Peter Eisentraut <peter@eisentraut.org>
-2025-03-07 [af4002b38] Rename amcancrosscompare
--->
-
-<listitem>
-<para>
-Add amgettreeheight, amconsistentequality, and amconsistentordering to the index access method API (Mark Dilger)
-<ulink url="&commit_baseurl;56fead44d">&sect;</ulink>
-<ulink url="&commit_baseurl;af4002b38">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2024-09-17 [7406ab623] Add stratnum GiST support function
--->
-
-<listitem>
-<para>
-Add GiST support function stratnum (Paul A. Jungwirth)
-<ulink url="&commit_baseurl;7406ab623">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Masahiko Sawada <msawada@postgresql.org>
-2025-02-21 [44fe30fda] Add default_char_signedness field to ControlFileData.
--->
-
-<listitem>
-<para>
-Record the default CPU signedness of "char" in pg_controldata (Masahiko Sawada)
-<ulink url="&commit_baseurl;44fe30fda">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2025-03-12 [72a3d0462] Prepare for Python "Limited API" in PL/Python
-Author: Peter Eisentraut <peter@eisentraut.org>
-2025-03-14 [0793ab810] Activate Python "Limited API" in PL/Python
--->
-
-<listitem>
-<para>
-Add support for Python "Limited API" in PL/Python (Peter Eisentraut)
-<ulink url="&commit_baseurl;72a3d0462">&sect;</ulink>
-<ulink url="&commit_baseurl;0793ab810">&sect;</ulink>
-</para>
-
-<para>
-This helps prevent problems caused by Python 3.x version mismatches.
-</para>
-</listitem>
-
-<!--
-Author: Jacob Champion <jchampion@postgresql.org>
-2025-04-29 [45363fca6] Bump the minimum supported Python version to 3.6.8
--->
-
-<listitem>
-<para>
-Change the minimum supported Python version to 3.6.8 (Jacob Champion)
-<ulink url="&commit_baseurl;45363fca6">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Daniel Gustafsson <dgustafsson@postgresql.org>
-2024-09-02 [a70e01d43] Remove support for OpenSSL older than 1.1.0
-Author: Daniel Gustafsson <dgustafsson@postgresql.org>
-2024-10-24 [6c66b7443] Raise the minimum supported OpenSSL version to 1.1.1
--->
-
-<listitem>
-<para>
-Remove support for OpenSSL versions older than 1.1.1 (Daniel Gustafsson)
-<ulink url="&commit_baseurl;a70e01d43">&sect;</ulink>
-<ulink url="&commit_baseurl;6c66b7443">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2024-10-01 [972c2cd28] jit: Require at least LLVM 14, if enabled.
--->
-
-<listitem>
-<para>
-If LLVM is enabled, require version 14 or later (Thomas Munro)
-<ulink url="&commit_baseurl;972c2cd28">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2025-03-26 [9324c8c58] Introduce PG_MODULE_MAGIC_EXT macro.
--->
-
-<listitem>
-<para>
-Add macro PG_MODULE_MAGIC_EXT to allow extensions to report their name and version (Andrei Lepikhov)
-<ulink url="&commit_baseurl;9324c8c58">&sect;</ulink>
-</para>
-
-<para>
-This information can be accessed via the new function pg_get_loaded_modules().
-</para>
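-
-<para>
-For example:
-<programlisting>
-SELECT * FROM pg_get_loaded_modules();
-</programlisting>
-</para>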
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2024-09-09 [218527d01] Don't bother checking the result of SPI_connect[_ext] an
--->
-
-<listitem>
-<para>
-Document that SPI_connect() and SPI_connect_ext() always return success (SPI_OK_CONNECT) (Stepan Neretin)
-<ulink url="&commit_baseurl;218527d01">&sect;</ulink>
-</para>
-
-<para>
-Errors are always reported via ereport().
-</para>
-</listitem>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2025-01-27 [5afaba629] doc: Meson is not experimental on Windows
--->
-
-<listitem>
-<para>
-Remove the experimental designation of Meson builds on Windows (Aleksander Alekseev)
-<ulink url="&commit_baseurl;5afaba629">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2024-07-31 [e54a42ac9] Add API and ABI stability guidance to the C language doc
--->
-
-<listitem>
-<para>
-Add documentation section about API and ABI compatibility (David Wheeler, Peter Eisentraut)
-<ulink url="&commit_baseurl;e54a42ac9">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Thomas Munro <tmunro@postgresql.org>
-2024-07-30 [e25626677] Remove - -disable-spinlocks.
-Author: Thomas Munro <tmunro@postgresql.org>
-2024-07-30 [813852613] Remove - -disable-atomics, require 32 bit atomics.
--->
-
-<listitem>
-<para>
-Remove configure options --disable-spinlocks and --disable-atomics (Thomas Munro)
-<ulink url="&commit_baseurl;e25626677">&sect;</ulink>
-<ulink url="&commit_baseurl;813852613">&sect;</ulink>
-</para>
-
-<para>
-Thirty-two bit atomic operations are now required.
-</para>
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2024-07-01 [edadeb071] Remove support for HPPA (a/k/a PA-RISC) architecture.
--->
-
-<listitem>
-<para>
-Remove support for the HPPA/PA-RISC architecture (Tom Lane)
-<ulink url="&commit_baseurl;edadeb071">&sect;</ulink>
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect3>
-
- <sect3 id="release-18-modules">
- <title>Additional Modules</title>
-
- <itemizedlist>
-
-<!--
-Author: Masahiko Sawada <msawada@postgresql.org>
-2024-10-14 [7cdfeee32] Add contrib/pg_logicalinspect.
--->
-
-<listitem>
-<para>
-Add extension pg_logicalinspect to inspect logical snapshots (Bertrand Drouvot)
-<ulink url="&commit_baseurl;7cdfeee32">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Robert Haas <rhaas@postgresql.org>
-2025-03-26 [8d5ceb113] pg_overexplain: Additional EXPLAIN options for debugging
--->
-
-<listitem>
-<para>
-Add extension pg_overexplain, which adds debug details to EXPLAIN output (Robert Haas)
-<ulink url="&commit_baseurl;8d5ceb113">&sect;</ulink>
-</para>
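-
-<para>
-A usage sketch, assuming the module is loaded with LOAD and that DEBUG is one of the EXPLAIN options it provides:
-<programlisting>
-LOAD 'pg_overexplain';
-EXPLAIN (DEBUG) SELECT 1;
-</programlisting>
-</para>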
-</listitem>
-
-<!--
-Author: Fujii Masao <fujii@postgresql.org>
-2024-07-26 [c297a47c5] postgres_fdw: Add "used_in_xact" column to postgres_fdw_
-Author: Fujii Masao <fujii@postgresql.org>
-2024-07-26 [857df3cef] postgres_fdw: Add connection status check to postgres_fd
-Author: Fujii Masao <fujii@postgresql.org>
-2024-09-18 [4f08ab554] postgres_fdw: Extend postgres_fdw_get_connections to ret
-Author: Fujii Masao <fujii@postgresql.org>
-2025-03-03 [fe186bda7] postgres_fdw: Extend postgres_fdw_get_connections to ret
--->
-
-<listitem>
-<para>
-Add output columns to postgres_fdw_get_connections() (Hayato Kuroda, Sagar Dilip Shedge)
-<ulink url="&commit_baseurl;c297a47c5">&sect;</ulink>
-<ulink url="&commit_baseurl;857df3cef">&sect;</ulink>
-<ulink url="&commit_baseurl;4f08ab554">&sect;</ulink>
-<ulink url="&commit_baseurl;fe186bda7">&sect;</ulink>
-</para>
-
-<para>
-New output columns: "used_in_xact" indicates whether the connection is used in the current transaction, "closed" indicates whether it is closed, "user_name" reports the
-connection's local user name, and "remote_backend_pid" reports the process ID of the remote backend.
-</para>
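-
-<para>
-For example (abbreviated column list):
-<programlisting>
-SELECT server_name, used_in_xact, closed, user_name, remote_backend_pid
-FROM postgres_fdw_get_connections();
-</programlisting>
-</para>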
-</listitem>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2025-01-15 [761c79508] postgres_fdw: SCRAM authentication pass-through
--->
-
-<listitem>
-<para>
-Allow SCRAM authentication from the client to be passed to postgres_fdw servers (Matheus Alcantara, Peter Eisentraut)
-<ulink url="&commit_baseurl;761c79508">&sect;</ulink>
-</para>
-
-<para>
-This avoids storing postgres_fdw authentication information in the database, and is enabled with the postgres_fdw "use_scram_passthrough" connection option. libpq uses new connection
-parameters scram_client_key and scram_server_key.
-</para>
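-
-<para>
-A configuration sketch (server name and connection options illustrative):
-<programlisting>
-CREATE SERVER remote_pg FOREIGN DATA WRAPPER postgres_fdw
-    OPTIONS (host 'remote.example.com', dbname 'app',
-             use_scram_passthrough 'true');
-</programlisting>
-</para>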
-</listitem>
-
-<!--
-Author: Peter Eisentraut <peter@eisentraut.org>
-2025-03-26 [3642df265] dblink: SCRAM authentication pass-through
--->
-
-<listitem>
-<para>
-Allow SCRAM authentication from the client to be passed to dblink servers (Matheus Alcantara)
-<ulink url="&commit_baseurl;3642df265">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Fujii Masao <fujii@postgresql.org>
-2024-10-03 [a1c4c8a9e] file_fdw: Add on_error and log_verbosity options to file
--->
-
-<listitem>
-<para>
-Add on_error and log_verbosity options to file_fdw (Atsushi Torikoshi)
-<ulink url="&commit_baseurl;a1c4c8a9e">&sect;</ulink>
-</para>
-
-<para>
-These control how file_fdw handles and reports invalid file rows.
-</para>
-</listitem>
-
-<!--
-Author: Fujii Masao <fujii@postgresql.org>
-2024-11-20 [6c8f67032] file_fdw: Add REJECT_LIMIT option to file_fdw.
--->
-
-<listitem>
-<para>
-Add "reject_limit" to control the number of invalid rows file_fdw can ignore (Atsushi Torikoshi)
-<ulink url="&commit_baseurl;6c8f67032">&sect;</ulink>
-</para>
-
-<para>
-This is active when on_error = 'ignore'.
-</para>
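-
-<para>
-A sketch combining this option with those from the previous item (server and file names illustrative):
-<programlisting>
-CREATE FOREIGN TABLE import_ft (id int, val text)
-    SERVER file_server
-    OPTIONS (filename '/tmp/import.csv', format 'csv',
-             on_error 'ignore', log_verbosity 'verbose',
-             reject_limit '10');
-</programlisting>
-</para>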
-</listitem>
-
-<!--
-Author: Nathan Bossart <nathan@postgresql.org>
-2025-01-07 [f7e1b3828] Add passwordcheck.min_password_length.
--->
-
-<listitem>
-<para>
-Add configurable variable min_password_length to passwordcheck (Emanuele Musella, Maurizio Boriani)
-<ulink url="&commit_baseurl;f7e1b3828">&sect;</ulink>
-</para>
-
-<para>
-This controls the minimum password length.
-</para>
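-
-<para>
-For example, assuming passwordcheck is already loaded via shared_preload_libraries:
-<programlisting>
-ALTER SYSTEM SET passwordcheck.min_password_length = 12;
-SELECT pg_reload_conf();
-</programlisting>
-</para>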
-</listitem>
-
-<!--
-Author: Tatsuo Ishii <ishii@postgresql.org>
-2024-10-11 [cae0f3c40] pgbench: Improve result outputs related to failed transa
--->
-
-<listitem>
-<para>
-Have pgbench report the number of failed, retried, or skipped transactions in per-script reports (Yugo Nagata)
-<ulink url="&commit_baseurl;cae0f3c40">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Tom Lane <tgl@sss.pgh.pa.us>
-2025-03-16 [448904423] contrib/isn: Make weak mode a GUC setting, and fix relat
--->
-
-<listitem>
-<para>
-Add isn server variable "weak" to control invalid check digit acceptance (Viktor Holmberg)
-<ulink url="&commit_baseurl;448904423">&sect;</ulink>
-</para>
-
-<para>
-This was previously only controlled by function isn_weak().
-</para>
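-
-<para>
-A sketch, assuming the variable is qualified as isn.weak:
-<programlisting>
-SET isn.weak = true;
-</programlisting>
-</para>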
-</listitem>
-
-<!--
-Author: Heikki Linnakangas <heikki.linnakangas@iki.fi>
-2025-04-03 [e4309f73f] Add support for sorted gist index builds to btree_gist
--->
-
-<listitem>
-<para>
-Allow values to be sorted to speed btree_gist index builds (Bernd Helmle, Andrey Borodin)
-<ulink url="&commit_baseurl;e4309f73f">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Tomas Vondra <tomas.vondra@postgresql.org>
-2025-03-29 [14ffaece0] amcheck: Add gin_index_check() to verify GIN index
--->
-
-<listitem>
-<para>
-Add amcheck function gin_index_check() to verify GIN indexes (Grigory Kryachko, Heikki Linnakangas, Andrey Borodin)
-<ulink url="&commit_baseurl;14ffaece0">&sect;</ulink>
-</para>
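-
-<para>
-For example (index name illustrative):
-<programlisting>
-SELECT gin_index_check('my_gin_index');
-</programlisting>
-</para>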
-</listitem>
-
-<!--
-Author: Andres Freund <andres@anarazel.de>
-2025-04-08 [dcf7e1697] Add pg_buffercache_evict_{relation,all} functions
--->
-
-<listitem>
-<para>
-Add functions pg_buffercache_evict_relation() and pg_buffercache_evict_all() to evict unpinned shared buffers (Nazir Bilal Yavuz)
-<ulink url="&commit_baseurl;dcf7e1697">&sect;</ulink>
-</para>
-
-<para>
-The existing function pg_buffercache_evict() now returns the buffer flush status.
-</para>
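-
-<para>
-For example, to evict every unpinned shared buffer:
-<programlisting>
-SELECT * FROM pg_buffercache_evict_all();
-</programlisting>
-</para>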
-</listitem>
-
-<!--
-Author: Robert Haas <rhaas@postgresql.org>
-2025-03-18 [c65bc2e1d] Make it possible for loadable modules to add EXPLAIN opt
-Author: Robert Haas <rhaas@postgresql.org>
-2025-03-18 [4fd02bf7c] Add some new hooks so extensions can add details to EXPL
-Author: Robert Haas <rhaas@postgresql.org>
-2025-03-20 [50ba65e73] Add an additional hook for EXPLAIN option validation.
--->
-
-<listitem>
-<para>
-Allow extensions to install custom EXPLAIN options (Robert Haas, Sami Imseih)
-<ulink url="&commit_baseurl;c65bc2e1d">&sect;</ulink>
-<ulink url="&commit_baseurl;4fd02bf7c">&sect;</ulink>
-<ulink url="&commit_baseurl;50ba65e73">&sect;</ulink>
-</para>
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2024-08-04 [7949d9594] Introduce pluggable APIs for Cumulative Statistics
-Author: Michael Paquier <michael@paquier.xyz>
-2024-08-05 [2eff9e678] Add helper routines to retrieve data for custom fixed-nu
--->
-
-<listitem>
-<para>
-Allow extensions to use the server's cumulative statistics API (Michael Paquier)
-<ulink url="&commit_baseurl;7949d9594">&sect;</ulink>
-<ulink url="&commit_baseurl;2eff9e678">&sect;</ulink>
-</para>
-</listitem>
-
- </itemizedlist>
-
- <sect4 id="release-18-pgstatstatements">
- <title><link linkend="pgstatstatements"><application>pg_stat_statements</application></link></title>
-
- <itemizedlist>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2024-10-28 [6b652e6ce] Set query ID for inner queries of CREATE TABLE AS and DE
--->
-
-<listitem>
-<para>
-Allow the queries of CREATE TABLE AS and DECLARE to be tracked by pg_stat_statements (Anthonin Bonnefoy)
-<ulink url="&commit_baseurl;6b652e6ce">&sect;</ulink>
-</para>
-
-<para>
-They are also now assigned query IDs.
-</para>
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2024-09-30 [dc6851596] Show values of SET statements as constants in pg_stat_st
--->
-
-<listitem>
-<para>
-Allow the parameterization of SET values in pg_stat_statements (Greg Sabino Mullane, Michael Paquier)
-<ulink url="&commit_baseurl;dc6851596">&sect;</ulink>
-</para>
-
-<para>
-This reduces the bloat caused by SET statements with differing constants.
-</para>
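-
-<para>
-A sketch of the effect, assuming the value is normalized to a parameter symbol:
-<programlisting>
-SET work_mem = '10MB';
-SET work_mem = '20MB';
--- both are tracked as a single entry: SET work_mem = $1
-</programlisting>
-</para>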
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2024-10-09 [cf54a2c00] pg_stat_statements: Add columns to track parallel worker
--->
-
-<listitem>
-<para>
-Add pg_stat_statements columns to report parallel activity (Guillaume Lelarge)
-<ulink url="&commit_baseurl;cf54a2c00">&sect;</ulink>
-</para>
-
-<para>
-The new columns are parallel_workers_to_launch and parallel_workers_launched.
-</para>
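-
-<para>
-For example:
-<programlisting>
-SELECT query, parallel_workers_to_launch, parallel_workers_launched
-FROM pg_stat_statements
-ORDER BY parallel_workers_to_launch DESC;
-</programlisting>
-</para>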
-</listitem>
-
-<!--
-Author: Michael Paquier <michael@paquier.xyz>
-2025-02-17 [ce5bcc4a9] pg_stat_statements: Add wal_buffers_full
--->
-
-<listitem>
-<para>
-Add pg_stat_statements.wal_buffers_full to report full WAL buffers (Bertrand Drouvot)
-<ulink url="&commit_baseurl;ce5bcc4a9">&sect;</ulink>
-</para>
-</listitem>
-
- </itemizedlist>
-
- </sect4>
-
- <sect4 id="release-18-pgcrypto">
- <title><link linkend="pgcrypto"><application>pgcrypto</application></link></title>
-
- <itemizedlist>
-
-<!--
-Author: Álvaro Herrera <alvherre@alvh.no-ip.org>
-2025-04-05 [749a9e20c] Add modern SHA-2 based password hashes to pgcrypto.
--->
-
-<listitem>
-<para>
-Add pgcrypto functions sha256crypt() and sha512crypt() (Bernd Helmle)
-<ulink url="&commit_baseurl;749a9e20c">&sect;</ulink>
-</para>
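-
-<para>
-A usage sketch, assuming the new hashes are selected through the existing crypt()/gen_salt() interface:
-<programlisting>
-SELECT crypt('secret password', gen_salt('sha512crypt'));
-</programlisting>
-</para>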
-</listitem>
-
-<!--
-Author: Daniel Gustafsson <dgustafsson@postgresql.org>
-2025-02-14 [9ad1b3d01] pgcrypto: Add support for CFB mode in AES encryption
--->
-
-<listitem>
-<para>
-Add CFB mode to pgcrypto encryption and decryption (Umar Hayat)
-<ulink url="&commit_baseurl;9ad1b3d01">&sect;</ulink>
-</para>
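-
-<para>
-A sketch, assuming CFB is selected via the cipher type string:
-<programlisting>
-SELECT encrypt('secret data', 'encryption key', 'aes-cfb');
-</programlisting>
-</para>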
-</listitem>
-
-<!--
-Author: Daniel Gustafsson <dgustafsson@postgresql.org>
-2025-01-24 [035f99cbe] pgcrypto: Make it possible to disable built-in crypto
--->
-
-<listitem>
-<para>
-Add pgcrypto server variable builtin_crypto_enabled to allow disabling the built-in, non-FIPS cryptographic functions (Daniel Gustafsson, Joe Conway)
-<ulink url="&commit_baseurl;035f99cbe">&sect;</ulink>
-</para>
-
-<para>
-This is useful for guaranteeing FIPS mode behavior.
-</para>
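-
-<para>
-For example, assuming the variable is qualified as pgcrypto.builtin_crypto_enabled:
-<programlisting>
-ALTER SYSTEM SET pgcrypto.builtin_crypto_enabled = 'off';
-SELECT pg_reload_conf();
-</programlisting>
-</para>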
-</listitem>
-
- </itemizedlist>
- </sect4>
-
- </sect3>
-
- </sect2>
-
- <sect2 id="release-18-acknowledgements">
- <title>Acknowledgments</title>
-
- <para>
- The following individuals (in alphabetical order) have contributed
- to this release as patch authors, committers, reviewers, testers,
- or reporters of issues.
- </para>
-
- <simplelist>
- <member>(to be completed)</member>
- </simplelist>
- </sect2>
-
- </sect1>
diff --git a/doc/src/sgml/release-19.sgml b/doc/src/sgml/release-19.sgml
new file mode 100644
index 00000000000..8d242b5b281
--- /dev/null
+++ b/doc/src/sgml/release-19.sgml
@@ -0,0 +1,16 @@
+<!-- doc/src/sgml/release-19.sgml -->
+<!-- See header comment in release.sgml about typical markup -->
+
+<sect1 id="release-19">
+ <title>Release 19</title>
+
+ <formalpara>
+ <title>Release date:</title>
+ <para>2026-??-??</para>
+ </formalpara>
+
+ <para>
+ This is just a placeholder for now.
+ </para>
+
+</sect1>
diff --git a/doc/src/sgml/release.sgml b/doc/src/sgml/release.sgml
index cee577ff8d3..a659d382db9 100644
--- a/doc/src/sgml/release.sgml
+++ b/doc/src/sgml/release.sgml
@@ -70,7 +70,7 @@ For new features, add links to the documentation sections.
All the active branches have to be edited concurrently when doing that.
-->
-&release-18;
+&release-19;
<sect1 id="release-prior">
<title>Prior Releases</title>
diff --git a/doc/src/sgml/system-views.sgml b/doc/src/sgml/system-views.sgml
index b58c52ea50f..986ae1f543d 100644
--- a/doc/src/sgml/system-views.sgml
+++ b/doc/src/sgml/system-views.sgml
@@ -3932,7 +3932,7 @@ SELECT * FROM pg_locks pl LEFT JOIN pg_prepared_xacts ppx
<structfield>passwd</structfield> <type>text</type>
</para>
<para>
- Password (possibly encrypted); null if none. See
+ Encrypted password; null if none. See
<link linkend="catalog-pg-authid"><structname>pg_authid</structname></link>
for details of how encrypted passwords are stored.
</para></entry>
diff --git a/doc/src/sgml/textsearch.sgml b/doc/src/sgml/textsearch.sgml
index 908857a54af..89928ed1829 100644
--- a/doc/src/sgml/textsearch.sgml
+++ b/doc/src/sgml/textsearch.sgml
@@ -1355,7 +1355,7 @@ ts_headline(<optional> <replaceable class="parameter">config</replaceable> <type
</itemizedlist>
<warning>
- <title>Warning: Cross-site scripting (XSS) safety</title>
+ <title>Warning: Cross-site Scripting (XSS) Safety</title>
<para>
The output from <function>ts_headline</function> is not guaranteed to
be safe for direct inclusion in web pages. When
diff --git a/doc/src/sgml/trigger.sgml b/doc/src/sgml/trigger.sgml
index e9214dcf1b1..bb1b5faf34e 100644
--- a/doc/src/sgml/trigger.sgml
+++ b/doc/src/sgml/trigger.sgml
@@ -129,10 +129,9 @@
In all cases, a trigger is executed as part of the same transaction as
the statement that triggered it, so if either the statement or the
trigger causes an error, the effects of both will be rolled back.
- Also, the trigger will always run in the security context of the role
- that executed the statement that caused the trigger to fire, unless
- the trigger function is defined as <literal>SECURITY DEFINER</literal>,
- in which case it will run as the function owner.
+ Also, the trigger will always run as the role that queued the trigger
+ event, unless the trigger function is marked as <literal>SECURITY
+ DEFINER</literal>, in which case it will run as the function owner.
</para>
<para>
diff --git a/doc/src/sgml/xindex.sgml b/doc/src/sgml/xindex.sgml
index 7e23a7b6e43..3d315df2f98 100644
--- a/doc/src/sgml/xindex.sgml
+++ b/doc/src/sgml/xindex.sgml
@@ -598,7 +598,7 @@
<entry>11</entry>
</row>
<row>
- <entry><function>stratnum</function></entry>
+ <entry><function>translate_cmptype</function></entry>
<entry>translate compare types to strategy numbers
used by the operator class (optional)</entry>
<entry>12</entry>
diff --git a/doc/src/sgml/xoper.sgml b/doc/src/sgml/xoper.sgml
index 954a90d77d0..853b07a9f14 100644
--- a/doc/src/sgml/xoper.sgml
+++ b/doc/src/sgml/xoper.sgml
@@ -21,7 +21,7 @@
<para>
<productname>PostgreSQL</productname> supports prefix
- and infix operators. Operators can be
+ and binary (or infix) operators. Operators can be
overloaded;<indexterm><primary>overloading</primary><secondary>operators</secondary></indexterm>
that is, the same operator name can be used for different operators
that have different numbers and types of operands. When a query is
diff --git a/meson.build b/meson.build
index 12de5e80c31..36e168a1a2a 100644
--- a/meson.build
+++ b/meson.build
@@ -8,7 +8,7 @@
project('postgresql',
['c'],
- version: '18beta1',
+ version: '19devel',
license: 'PostgreSQL',
# We want < 0.56 for python 3.5 compatibility on old platforms. EPEL for
@@ -1205,7 +1205,7 @@ if not perlopt.disabled()
if cc.get_id() == 'msvc'
# prevent binary mismatch between MSVC built plperl and Strawberry or
# msys ucrt perl libraries
- perl_v = run_command(perl, '-V').stdout()
+ perl_v = run_command(perl, '-V', check: false).stdout()
if not perl_v.contains('USE_THREAD_SAFE_LOCALE')
perl_ccflags += ['-DNO_THREAD_SAFE_LOCALE']
endif
@@ -2465,6 +2465,7 @@ int main(void)
{
__m128i z;
+ x = _mm512_xor_si512(_mm512_zextsi128_si512(_mm_cvtsi32_si128(0)), x);
y = _mm512_clmulepi64_epi128(x, y, 0);
z = _mm_ternarylogic_epi64(
_mm512_castsi512_si128(y),
@@ -2654,6 +2655,7 @@ decl_checks += [
['preadv', 'sys/uio.h'],
['pwritev', 'sys/uio.h'],
['strchrnul', 'string.h'],
+ ['memset_s', 'string.h', '#define __STDC_WANT_LIB_EXT1__ 1'],
]
# Check presence of some optional LLVM functions.
@@ -2667,21 +2669,23 @@ endif
foreach c : decl_checks
func = c.get(0)
header = c.get(1)
- args = c.get(2, {})
+ prologue = c.get(2, '')
+ args = c.get(3, {})
varname = 'HAVE_DECL_' + func.underscorify().to_upper()
found = cc.compiles('''
-#include <@0@>
+@0@
+#include <@1@>
int main()
{
-#ifndef @1@
- (void) @1@;
+#ifndef @2@
+ (void) @2@;
#endif
return 0;
}
-'''.format(header, func),
+'''.format(prologue, header, func),
name: 'test whether @0@ is declared'.format(func),
# need to add cflags_warn to get at least
# -Werror=unguarded-availability-new if applicable
@@ -2880,7 +2884,6 @@ func_checks = [
['kqueue'],
['localeconv_l'],
['mbstowcs_l'],
- ['memset_s'],
['mkdtemp'],
['posix_fadvise'],
['posix_fallocate'],
diff --git a/src/Makefile.shlib b/src/Makefile.shlib
index fa81f6ffdd6..3825af5b228 100644
--- a/src/Makefile.shlib
+++ b/src/Makefile.shlib
@@ -112,7 +112,7 @@ ifeq ($(PORTNAME), darwin)
ifneq ($(SO_MAJOR_VERSION), 0)
version_link = -compatibility_version $(SO_MAJOR_VERSION) -current_version $(SO_MAJOR_VERSION).$(SO_MINOR_VERSION)
endif
- LINK.shared = $(COMPILER) -dynamiclib -install_name '$(libdir)/lib$(NAME).$(SO_MAJOR_VERSION)$(DLSUFFIX)' $(version_link) $(exported_symbols_list)
+ LINK.shared = $(COMPILER) -dynamiclib -install_name '$(libdir)/lib$(NAME).$(SO_MAJOR_VERSION)$(DLSUFFIX)' $(version_link)
shlib = lib$(NAME).$(SO_MAJOR_VERSION)$(DLSUFFIX)
shlib_major = lib$(NAME).$(SO_MAJOR_VERSION)$(DLSUFFIX)
else
@@ -122,7 +122,7 @@ ifeq ($(PORTNAME), darwin)
BUILD.exports = $(AWK) '/^[^\#]/ {printf "_%s\n",$$1}' $< >$@
exports_file = $(SHLIB_EXPORTS:%.txt=%.list)
ifneq (,$(exports_file))
- exported_symbols_list = -exported_symbols_list $(exports_file)
+ LINK.shared += -exported_symbols_list $(exports_file)
endif
endif
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index 01e1db7f856..4204088fa0d 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -68,7 +68,7 @@ typedef struct BrinShared
int scantuplesortstates;
/* Query ID, for report in worker processes */
- uint64 queryid;
+ int64 queryid;
/*
* workersdonecv is used to monitor the progress of workers. All parallel
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 46c1dce222d..50747c16396 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -1243,8 +1243,9 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
}
else
{
- text *t;
+ const char *name;
const char *value;
+ text *t;
Size len;
/*
@@ -1291,11 +1292,19 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
* have just "name", assume "name=true" is meant. Note: the
* namespace is not output.
*/
+ name = def->defname;
if (def->arg != NULL)
value = defGetString(def);
else
value = "true";
+ /* Insist that name not contain "=", else "a=b=c" is ambiguous */
+ if (strchr(name, '=') != NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("invalid option name \"%s\": must not contain \"=\"",
+ name)));
+
/*
* This is not a great place for this test, but there's no other
* convenient place to filter the option out. As WITH (oids =
@@ -1303,7 +1312,7 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
* amount of ugly.
*/
if (acceptOidsOff && def->defnamespace == NULL &&
- strcmp(def->defname, "oids") == 0)
+ strcmp(name, "oids") == 0)
{
if (defGetBoolean(def))
ereport(ERROR,
@@ -1313,11 +1322,11 @@ transformRelOptions(Datum oldOptions, List *defList, const char *namspace,
continue;
}
- len = VARHDRSZ + strlen(def->defname) + 1 + strlen(value);
+ len = VARHDRSZ + strlen(name) + 1 + strlen(value);
/* +1 leaves room for sprintf's trailing null */
t = (text *) palloc(len + 1);
SET_VARSIZE(t, len);
- sprintf(VARDATA(t), "%s=%s", def->defname, value);
+ sprintf(VARDATA(t), "%s=%s", name, value);
astate = accumArrayResult(astate, PointerGetDatum(t),
false, TEXTOID,
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index ffd0c78f905..020d00cd01c 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -142,11 +142,18 @@ void
verify_compact_attribute(TupleDesc tupdesc, int attnum)
{
#ifdef USE_ASSERT_CHECKING
- CompactAttribute *cattr = &tupdesc->compact_attrs[attnum];
+ CompactAttribute cattr;
Form_pg_attribute attr = TupleDescAttr(tupdesc, attnum);
CompactAttribute tmp;
/*
+ * Make a temp copy of the TupleDesc's CompactAttribute. This may be a
+ * shared TupleDesc and the attcacheoff might get changed by another
+ * backend.
+ */
+ memcpy(&cattr, &tupdesc->compact_attrs[attnum], sizeof(CompactAttribute));
+
+ /*
* Populate the temporary CompactAttribute from the corresponding
* Form_pg_attribute
*/
@@ -156,11 +163,11 @@ verify_compact_attribute(TupleDesc tupdesc, int attnum)
* Make the attcacheoff match since it's been reset to -1 by
* populate_compact_attribute_internal. Same with attnullability.
*/
- tmp.attcacheoff = cattr->attcacheoff;
- tmp.attnullability = cattr->attnullability;
+ tmp.attcacheoff = cattr.attcacheoff;
+ tmp.attnullability = cattr.attnullability;
/* Check the freshly populated CompactAttribute matches the TupleDesc's */
- Assert(memcmp(&tmp, cattr, sizeof(CompactAttribute)) == 0);
+ Assert(memcmp(&tmp, &cattr, sizeof(CompactAttribute)) == 0);
#endif
}
diff --git a/src/backend/access/gist/gistutil.c b/src/backend/access/gist/gistutil.c
index a6b701943d3..c0aa7d0222f 100644
--- a/src/backend/access/gist/gistutil.c
+++ b/src/backend/access/gist/gistutil.c
@@ -1058,11 +1058,11 @@ gistGetFakeLSN(Relation rel)
}
/*
- * This is a stratnum support function for GiST opclasses that use the
- * RT*StrategyNumber constants.
+ * This is a stratnum translation support function for GiST opclasses that use
+ * the RT*StrategyNumber constants.
*/
Datum
-gist_stratnum_common(PG_FUNCTION_ARGS)
+gist_translate_cmptype_common(PG_FUNCTION_ARGS)
{
CompareType cmptype = PG_GETARG_INT32(0);
@@ -1090,9 +1090,9 @@ gist_stratnum_common(PG_FUNCTION_ARGS)
/*
* Returns the opclass's private stratnum used for the given compare type.
*
- * Calls the opclass's GIST_STRATNUM_PROC support function, if any,
- * and returns the result.
- * Returns InvalidStrategy if the function is not defined.
+ * Calls the opclass's GIST_TRANSLATE_CMPTYPE_PROC support function, if any,
+ * and returns the result. Returns InvalidStrategy if the function is not
+ * defined.
*/
StrategyNumber
gisttranslatecmptype(CompareType cmptype, Oid opfamily)
@@ -1101,7 +1101,7 @@ gisttranslatecmptype(CompareType cmptype, Oid opfamily)
Datum result;
/* Check whether the function is provided. */
- funcid = get_opfamily_proc(opfamily, ANYOID, ANYOID, GIST_STRATNUM_PROC);
+ funcid = get_opfamily_proc(opfamily, ANYOID, ANYOID, GIST_TRANSLATE_CMPTYPE_PROC);
if (!OidIsValid(funcid))
return InvalidStrategy;
diff --git a/src/backend/access/gist/gistvalidate.c b/src/backend/access/gist/gistvalidate.c
index 2a49e6d20f0..2ed6f74fce9 100644
--- a/src/backend/access/gist/gistvalidate.c
+++ b/src/backend/access/gist/gistvalidate.c
@@ -138,7 +138,7 @@ gistvalidate(Oid opclassoid)
ok = check_amproc_signature(procform->amproc, VOIDOID, true,
1, 1, INTERNALOID);
break;
- case GIST_STRATNUM_PROC:
+ case GIST_TRANSLATE_CMPTYPE_PROC:
ok = check_amproc_signature(procform->amproc, INT2OID, true,
1, 1, INT4OID) &&
procform->amproclefttype == ANYOID &&
@@ -265,7 +265,7 @@ gistvalidate(Oid opclassoid)
if (i == GIST_DISTANCE_PROC || i == GIST_FETCH_PROC ||
i == GIST_COMPRESS_PROC || i == GIST_DECOMPRESS_PROC ||
i == GIST_OPTIONS_PROC || i == GIST_SORTSUPPORT_PROC ||
- i == GIST_STRATNUM_PROC)
+ i == GIST_TRANSLATE_CMPTYPE_PROC)
continue; /* optional methods */
ereport(INFO,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
@@ -336,7 +336,7 @@ gistadjustmembers(Oid opfamilyoid,
case GIST_FETCH_PROC:
case GIST_OPTIONS_PROC:
case GIST_SORTSUPPORT_PROC:
- case GIST_STRATNUM_PROC:
+ case GIST_TRANSLATE_CMPTYPE_PROC:
/* Optional, so force it to be a soft family dependency */
op->ref_is_hard = false;
op->ref_is_family = true;
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 9ec8cda1c68..0dcd6ee817e 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -213,6 +213,27 @@ static const int MultiXactStatusLock[MaxMultiXactStatus + 1] =
#define TUPLOCK_from_mxstatus(status) \
(MultiXactStatusLock[(status)])
+/*
+ * Check that we have a valid snapshot if we might need TOAST access.
+ */
+static inline void
+AssertHasSnapshotForToast(Relation rel)
+{
+#ifdef USE_ASSERT_CHECKING
+
+ /* bootstrap mode in particular breaks this rule */
+ if (!IsNormalProcessingMode())
+ return;
+
+ /* if the relation doesn't have a TOAST table, we are good */
+ if (!OidIsValid(rel->rd_rel->reltoastrelid))
+ return;
+
+ Assert(HaveRegisteredOrActiveSnapshot());
+
+#endif /* USE_ASSERT_CHECKING */
+}
+
/* ----------------------------------------------------------------
* heap support routines
* ----------------------------------------------------------------
@@ -2066,6 +2087,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
Assert(HeapTupleHeaderGetNatts(tup->t_data) <=
RelationGetNumberOfAttributes(relation));
+ AssertHasSnapshotForToast(relation);
+
/*
* Fill in tuple header fields and toast the tuple if necessary.
*
@@ -2343,6 +2366,8 @@ heap_multi_insert(Relation relation, TupleTableSlot **slots, int ntuples,
/* currently not needed (thus unsupported) for heap_multi_insert() */
Assert(!(options & HEAP_INSERT_NO_LOGICAL));
+ AssertHasSnapshotForToast(relation);
+
needwal = RelationNeedsWAL(relation);
saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
HEAP_DEFAULT_FILLFACTOR);
@@ -2765,6 +2790,8 @@ heap_delete(Relation relation, ItemPointer tid,
Assert(ItemPointerIsValid(tid));
+ AssertHasSnapshotForToast(relation);
+
/*
* Forbid this during a parallel operation, lest it allocate a combo CID.
* Other workers might need that combo CID for visibility checks, and we
@@ -3260,6 +3287,8 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
Assert(HeapTupleHeaderGetNatts(newtup->t_data) <=
RelationGetNumberOfAttributes(relation));
+ AssertHasSnapshotForToast(relation);
+
/*
* Forbid this during a parallel operation, lest it allocate a combo CID.
* Other workers might need that combo CID for visibility checks, and we
@@ -4953,7 +4982,7 @@ l3:
case LockWaitError:
if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
status, infomask, relation,
- NULL, log_lock_failure))
+ NULL, log_lock_failures))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
@@ -4991,7 +5020,7 @@ l3:
}
break;
case LockWaitError:
- if (!ConditionalXactLockTableWait(xwait, log_lock_failure))
+ if (!ConditionalXactLockTableWait(xwait, log_lock_failures))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
@@ -5256,7 +5285,7 @@ heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode,
break;
case LockWaitError:
- if (!ConditionalLockTupleTuplock(relation, tid, mode, log_lock_failure))
+ if (!ConditionalLockTupleTuplock(relation, tid, mode, log_lock_failures))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
diff --git a/src/backend/access/heap/heapam_handler.c b/src/backend/access/heap/heapam_handler.c
index ac082fefa77..cb4bc35c93e 100644
--- a/src/backend/access/heap/heapam_handler.c
+++ b/src/backend/access/heap/heapam_handler.c
@@ -464,7 +464,7 @@ tuple_lock_retry:
return TM_WouldBlock;
break;
case LockWaitError:
- if (!ConditionalXactLockTableWait(SnapshotDirty.xmax, log_lock_failure))
+ if (!ConditionalXactLockTableWait(SnapshotDirty.xmax, log_lock_failures))
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("could not obtain lock on row in relation \"%s\"",
diff --git a/src/backend/access/heap/heapam_xlog.c b/src/backend/access/heap/heapam_xlog.c
index 30f4c2d3c67..eb4bd3d6ae3 100644
--- a/src/backend/access/heap/heapam_xlog.c
+++ b/src/backend/access/heap/heapam_xlog.c
@@ -438,6 +438,9 @@ heap_xlog_insert(XLogReaderState *record)
ItemPointerSetBlockNumber(&target_tid, blkno);
ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
+ /* No freezing in the heap_insert() code path */
+ Assert(!(xlrec->flags & XLH_INSERT_ALL_FROZEN_SET));
+
/*
* The visibility map may need to be fixed even if the heap page is
* already up-to-date.
@@ -508,10 +511,6 @@ heap_xlog_insert(XLogReaderState *record)
if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
PageClearAllVisible(page);
- /* XLH_INSERT_ALL_FROZEN_SET implies that all tuples are visible */
- if (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)
- PageSetAllVisible(page);
-
MarkBufferDirty(buffer);
}
if (BufferIsValid(buffer))
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index f28326bad09..75979530897 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -423,7 +423,7 @@ typedef struct LVSavedErrInfo
/* non-export function prototypes */
static void lazy_scan_heap(LVRelState *vacrel);
static void heap_vacuum_eager_scan_setup(LVRelState *vacrel,
- VacuumParams *params);
+ const VacuumParams params);
static BlockNumber heap_vac_scan_next_block(ReadStream *stream,
void *callback_private_data,
void *per_buffer_data);
@@ -485,7 +485,7 @@ static void restore_vacuum_error_info(LVRelState *vacrel,
* vacuum options or for relfrozenxid/relminmxid advancement.
*/
static void
-heap_vacuum_eager_scan_setup(LVRelState *vacrel, VacuumParams *params)
+heap_vacuum_eager_scan_setup(LVRelState *vacrel, const VacuumParams params)
{
uint32 randseed;
BlockNumber allvisible;
@@ -504,7 +504,7 @@ heap_vacuum_eager_scan_setup(LVRelState *vacrel, VacuumParams *params)
vacrel->eager_scan_remaining_successes = 0;
/* If eager scanning is explicitly disabled, just return. */
- if (params->max_eager_freeze_failure_rate == 0)
+ if (params.max_eager_freeze_failure_rate == 0)
return;
/*
@@ -581,11 +581,11 @@ heap_vacuum_eager_scan_setup(LVRelState *vacrel, VacuumParams *params)
vacrel->next_eager_scan_region_start = randseed % EAGER_SCAN_REGION_SIZE;
- Assert(params->max_eager_freeze_failure_rate > 0 &&
- params->max_eager_freeze_failure_rate <= 1);
+ Assert(params.max_eager_freeze_failure_rate > 0 &&
+ params.max_eager_freeze_failure_rate <= 1);
vacrel->eager_scan_max_fails_per_region =
- params->max_eager_freeze_failure_rate *
+ params.max_eager_freeze_failure_rate *
EAGER_SCAN_REGION_SIZE;
/*
@@ -612,7 +612,7 @@ heap_vacuum_eager_scan_setup(LVRelState *vacrel, VacuumParams *params)
* and locked the relation.
*/
void
-heap_vacuum_rel(Relation rel, VacuumParams *params,
+heap_vacuum_rel(Relation rel, const VacuumParams params,
BufferAccessStrategy bstrategy)
{
LVRelState *vacrel;
@@ -634,9 +634,9 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
ErrorContextCallback errcallback;
char **indnames = NULL;
- verbose = (params->options & VACOPT_VERBOSE) != 0;
+ verbose = (params.options & VACOPT_VERBOSE) != 0;
instrument = (verbose || (AmAutoVacuumWorkerProcess() &&
- params->log_min_duration >= 0));
+ params.log_min_duration >= 0));
if (instrument)
{
pg_rusage_init(&ru0);
@@ -699,9 +699,9 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
* The truncate param allows user to avoid attempting relation truncation,
* though it can't force truncation to happen.
*/
- Assert(params->index_cleanup != VACOPTVALUE_UNSPECIFIED);
- Assert(params->truncate != VACOPTVALUE_UNSPECIFIED &&
- params->truncate != VACOPTVALUE_AUTO);
+ Assert(params.index_cleanup != VACOPTVALUE_UNSPECIFIED);
+ Assert(params.truncate != VACOPTVALUE_UNSPECIFIED &&
+ params.truncate != VACOPTVALUE_AUTO);
/*
* While VacuumFailSafeActive is reset to false before calling this, we
@@ -711,14 +711,14 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
vacrel->consider_bypass_optimization = true;
vacrel->do_index_vacuuming = true;
vacrel->do_index_cleanup = true;
- vacrel->do_rel_truncate = (params->truncate != VACOPTVALUE_DISABLED);
- if (params->index_cleanup == VACOPTVALUE_DISABLED)
+ vacrel->do_rel_truncate = (params.truncate != VACOPTVALUE_DISABLED);
+ if (params.index_cleanup == VACOPTVALUE_DISABLED)
{
/* Force disable index vacuuming up-front */
vacrel->do_index_vacuuming = false;
vacrel->do_index_cleanup = false;
}
- else if (params->index_cleanup == VACOPTVALUE_ENABLED)
+ else if (params.index_cleanup == VACOPTVALUE_ENABLED)
{
/* Force index vacuuming. Note that failsafe can still bypass. */
vacrel->consider_bypass_optimization = false;
@@ -726,7 +726,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
else
{
/* Default/auto, make all decisions dynamically */
- Assert(params->index_cleanup == VACOPTVALUE_AUTO);
+ Assert(params.index_cleanup == VACOPTVALUE_AUTO);
}
/* Initialize page counters explicitly (be tidy) */
@@ -757,7 +757,6 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
vacrel->vm_new_visible_pages = 0;
vacrel->vm_new_visible_frozen_pages = 0;
vacrel->vm_new_frozen_pages = 0;
- vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
/*
* Get cutoffs that determine which deleted tuples are considered DEAD,
@@ -776,7 +775,9 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
* to increase the number of dead tuples it can prune away.)
*/
vacrel->aggressive = vacuum_get_cutoffs(rel, params, &vacrel->cutoffs);
+ vacrel->rel_pages = orig_rel_pages = RelationGetNumberOfBlocks(rel);
vacrel->vistest = GlobalVisTestFor(rel);
+
/* Initialize state used to track oldest extant XID/MXID */
vacrel->NewRelfrozenXid = vacrel->cutoffs.OldestXmin;
vacrel->NewRelminMxid = vacrel->cutoffs.OldestMxact;
@@ -788,7 +789,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
*/
vacrel->skippedallvis = false;
skipwithvm = true;
- if (params->options & VACOPT_DISABLE_PAGE_SKIPPING)
+ if (params.options & VACOPT_DISABLE_PAGE_SKIPPING)
{
/*
* Force aggressive mode, and disable skipping blocks using the
@@ -829,7 +830,7 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
* is already dangerously old.)
*/
lazy_check_wraparound_failsafe(vacrel);
- dead_items_alloc(vacrel, params->nworkers);
+ dead_items_alloc(vacrel, params.nworkers);
/*
* Call lazy_scan_heap to perform all required heap pruning, index
@@ -946,9 +947,9 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
{
TimestampTz endtime = GetCurrentTimestamp();
- if (verbose || params->log_min_duration == 0 ||
+ if (verbose || params.log_min_duration == 0 ||
TimestampDifferenceExceeds(starttime, endtime,
- params->log_min_duration))
+ params.log_min_duration))
{
long secs_dur;
int usecs_dur;
@@ -983,10 +984,10 @@ heap_vacuum_rel(Relation rel, VacuumParams *params,
* Aggressiveness already reported earlier, in dedicated
* VACUUM VERBOSE ereport
*/
- Assert(!params->is_wraparound);
+ Assert(!params.is_wraparound);
msgfmt = _("finished vacuuming \"%s.%s.%s\": index scans: %d\n");
}
- else if (params->is_wraparound)
+ else if (params.is_wraparound)
{
/*
* While it's possible for a VACUUM to be both is_wraparound
@@ -1413,12 +1414,26 @@ lazy_scan_heap(LVRelState *vacrel)
if (vm_page_frozen)
{
- Assert(vacrel->eager_scan_remaining_successes > 0);
- vacrel->eager_scan_remaining_successes--;
+ if (vacrel->eager_scan_remaining_successes > 0)
+ vacrel->eager_scan_remaining_successes--;
if (vacrel->eager_scan_remaining_successes == 0)
{
/*
+ * Report only once that we disabled eager scanning. We
+ * may eagerly read ahead blocks in excess of the success
+ * or failure caps before attempting to freeze them, so we
+ * could reach here even after disabling additional eager
+ * scanning.
+ */
+ if (vacrel->eager_scan_max_fails_per_region > 0)
+ ereport(vacrel->verbose ? INFO : DEBUG2,
+ (errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of relation \"%s.%s.%s\"",
+ orig_eager_scan_success_limit,
+ vacrel->dbname, vacrel->relnamespace,
+ vacrel->relname)));
+
+ /*
* If we hit our success cap, permanently disable eager
* scanning by setting the other eager scan management
* fields to their disabled values.
@@ -1426,19 +1441,10 @@ lazy_scan_heap(LVRelState *vacrel)
vacrel->eager_scan_remaining_fails = 0;
vacrel->next_eager_scan_region_start = InvalidBlockNumber;
vacrel->eager_scan_max_fails_per_region = 0;
-
- ereport(vacrel->verbose ? INFO : DEBUG2,
- (errmsg("disabling eager scanning after freezing %u eagerly scanned blocks of \"%s.%s.%s\"",
- orig_eager_scan_success_limit,
- vacrel->dbname, vacrel->relnamespace,
- vacrel->relname)));
}
}
- else
- {
- Assert(vacrel->eager_scan_remaining_fails > 0);
+ else if (vacrel->eager_scan_remaining_fails > 0)
vacrel->eager_scan_remaining_fails--;
- }
}
/*
@@ -1866,8 +1872,6 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
*/
if (!PageIsAllVisible(page))
{
- uint8 old_vmbits;
-
START_CRIT_SECTION();
/* mark buffer dirty before writing a WAL record */
@@ -1887,24 +1891,16 @@ lazy_scan_new_or_empty(LVRelState *vacrel, Buffer buf, BlockNumber blkno,
log_newpage_buffer(buf, true);
PageSetAllVisible(page);
- old_vmbits = visibilitymap_set(vacrel->rel, blkno, buf,
- InvalidXLogRecPtr,
- vmbuffer, InvalidTransactionId,
- VISIBILITYMAP_ALL_VISIBLE |
- VISIBILITYMAP_ALL_FROZEN);
+ visibilitymap_set(vacrel->rel, blkno, buf,
+ InvalidXLogRecPtr,
+ vmbuffer, InvalidTransactionId,
+ VISIBILITYMAP_ALL_VISIBLE |
+ VISIBILITYMAP_ALL_FROZEN);
END_CRIT_SECTION();
- /*
- * If the page wasn't already set all-visible and/or all-frozen in
- * the VM, count it as newly set for logging.
- */
- if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
- {
- vacrel->vm_new_visible_pages++;
- vacrel->vm_new_visible_frozen_pages++;
- }
- else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0)
- vacrel->vm_new_frozen_pages++;
+ /* Count the newly all-frozen pages for logging */
+ vacrel->vm_new_visible_pages++;
+ vacrel->vm_new_visible_frozen_pages++;
}
freespace = PageGetHeapFreeSpace(page);
@@ -2909,7 +2905,6 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
if (heap_page_is_all_visible(vacrel, buffer, &visibility_cutoff_xid,
&all_frozen))
{
- uint8 old_vmbits;
uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
if (all_frozen)
@@ -2919,25 +2914,15 @@ lazy_vacuum_heap_page(LVRelState *vacrel, BlockNumber blkno, Buffer buffer,
}
PageSetAllVisible(page);
- old_vmbits = visibilitymap_set(vacrel->rel, blkno, buffer,
- InvalidXLogRecPtr,
- vmbuffer, visibility_cutoff_xid,
- flags);
-
- /*
- * If the page wasn't already set all-visible and/or all-frozen in the
- * VM, count it as newly set for logging.
- */
- if ((old_vmbits & VISIBILITYMAP_ALL_VISIBLE) == 0)
- {
- vacrel->vm_new_visible_pages++;
- if (all_frozen)
- vacrel->vm_new_visible_frozen_pages++;
- }
+ visibilitymap_set(vacrel->rel, blkno, buffer,
+ InvalidXLogRecPtr,
+ vmbuffer, visibility_cutoff_xid,
+ flags);
- else if ((old_vmbits & VISIBILITYMAP_ALL_FROZEN) == 0 &&
- all_frozen)
- vacrel->vm_new_frozen_pages++;
+ /* Count the newly set VM page for logging */
+ vacrel->vm_new_visible_pages++;
+ if (all_frozen)
+ vacrel->vm_new_visible_frozen_pages++;
}
/* Revert to the previous phase information for error traceback */
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 765659887af..fdff960c130 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -228,6 +228,8 @@ btgettuple(IndexScanDesc scan, ScanDirection dir)
BTScanOpaque so = (BTScanOpaque) scan->opaque;
bool res;
+ Assert(scan->heapRelation != NULL);
+
/* btree indexes are never lossy */
scan->xs_recheck = false;
@@ -289,6 +291,8 @@ btgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
int64 ntids = 0;
ItemPointer heapTid;
+ Assert(scan->heapRelation == NULL);
+
/* Each loop iteration performs another primitive index scan */
do
{
@@ -393,6 +397,34 @@ btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
BTScanPosInvalidate(so->currPos);
}
+ /*
+ * We prefer to eagerly drop leaf page pins before btgettuple returns.
+ * This avoids making VACUUM wait to acquire a cleanup lock on the page.
+ *
+ * We cannot safely drop leaf page pins during index-only scans due to a
+ * race condition involving VACUUM setting pages all-visible in the VM.
+ * It's also unsafe for plain index scans that use a non-MVCC snapshot.
+ *
+ * When we drop pins eagerly, the mechanism that marks so->killedItems[]
+ * index tuples LP_DEAD has to deal with concurrent TID recycling races.
+ * The scheme used to detect unsafe TID recycling won't work when scanning
+ * unlogged relations (since it involves saving an affected page's LSN).
+ * Opt out of eager pin dropping during unlogged relation scans for now
+ * (this is preferable to opting out of kill_prior_tuple LP_DEAD setting).
+ *
+ * Also opt out of dropping leaf page pins eagerly during bitmap scans.
+ * Pins cannot be held for more than an instant during bitmap scans either
+ * way, so we might as well avoid wasting cycles on acquiring page LSNs.
+ *
+ * See nbtree/README section on making concurrent TID recycling safe.
+ *
+ * Note: so->dropPin should never change across rescans.
+ */
+ so->dropPin = (!scan->xs_want_itup &&
+ IsMVCCSnapshot(scan->xs_snapshot) &&
+ RelationNeedsWAL(scan->indexRelation) &&
+ scan->heapRelation != NULL);
+
so->markItemIndex = -1;
so->needPrimScan = false;
so->scanBehind = false;
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index fe9a3886913..36544ecfd58 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -25,7 +25,7 @@
#include "utils/rel.h"
-static void _bt_drop_lock_and_maybe_pin(IndexScanDesc scan, BTScanPos sp);
+static inline void _bt_drop_lock_and_maybe_pin(Relation rel, BTScanOpaque so);
static Buffer _bt_moveright(Relation rel, Relation heaprel, BTScanInsert key,
Buffer buf, bool forupdate, BTStack stack,
int access);
@@ -57,24 +57,29 @@ static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
/*
* _bt_drop_lock_and_maybe_pin()
*
- * Unlock the buffer; and if it is safe to release the pin, do that, too.
- * This will prevent vacuum from stalling in a blocked state trying to read a
- * page when a cursor is sitting on it.
- *
- * See nbtree/README section on making concurrent TID recycling safe.
+ * Unlock so->currPos.buf. If scan is so->dropPin, drop the pin, too.
+ * Dropping the pin prevents VACUUM from blocking on acquiring a cleanup lock.
*/
-static void
-_bt_drop_lock_and_maybe_pin(IndexScanDesc scan, BTScanPos sp)
+static inline void
+_bt_drop_lock_and_maybe_pin(Relation rel, BTScanOpaque so)
{
- _bt_unlockbuf(scan->indexRelation, sp->buf);
-
- if (IsMVCCSnapshot(scan->xs_snapshot) &&
- RelationNeedsWAL(scan->indexRelation) &&
- !scan->xs_want_itup)
+ if (!so->dropPin)
{
- ReleaseBuffer(sp->buf);
- sp->buf = InvalidBuffer;
+ /* Just drop the lock (not the pin) */
+ _bt_unlockbuf(rel, so->currPos.buf);
+ return;
}
+
+ /*
+ * Drop both the lock and the pin.
+ *
+ * Have to set so->currPos.lsn so that _bt_killitems has a way to detect
+ * when concurrent heap TID recycling by VACUUM might have taken place.
+ */
+ Assert(RelationNeedsWAL(rel));
+ so->currPos.lsn = BufferGetLSNAtomic(so->currPos.buf);
+ _bt_relbuf(rel, so->currPos.buf);
+ so->currPos.buf = InvalidBuffer;
}
/*
@@ -866,8 +871,8 @@ _bt_compare(Relation rel,
* if backwards scan, the last item) in the tree that satisfies the
* qualifications in the scan key. On success exit, data about the
* matching tuple(s) on the page has been loaded into so->currPos. We'll
- * drop all locks and hold onto a pin on page's buffer, except when
- * _bt_drop_lock_and_maybe_pin dropped the pin to avoid blocking VACUUM.
+ * drop all locks and hold onto a pin on page's buffer, except during
+ * so->dropPin scans, when we drop both the lock and the pin.
* _bt_returnitem sets the next item to return to scan on success exit.
*
* If there are no matching items in the index, we return false, with no
@@ -1610,7 +1615,13 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum,
so->currPos.currPage = BufferGetBlockNumber(so->currPos.buf);
so->currPos.prevPage = opaque->btpo_prev;
so->currPos.nextPage = opaque->btpo_next;
+ /* delay setting so->currPos.lsn until _bt_drop_lock_and_maybe_pin */
+ so->currPos.dir = dir;
+ so->currPos.nextTupleOffset = 0;
+ /* either moreRight or moreLeft should be set now (may be unset later) */
+ Assert(ScanDirectionIsForward(dir) ? so->currPos.moreRight :
+ so->currPos.moreLeft);
Assert(!P_IGNORE(opaque));
Assert(BTScanPosIsPinned(so->currPos));
Assert(!so->needPrimScan);
@@ -1626,14 +1637,6 @@ _bt_readpage(IndexScanDesc scan, ScanDirection dir, OffsetNumber offnum,
so->currPos.currPage);
}
- /* initialize remaining currPos fields related to current page */
- so->currPos.lsn = BufferGetLSNAtomic(so->currPos.buf);
- so->currPos.dir = dir;
- so->currPos.nextTupleOffset = 0;
- /* either moreLeft or moreRight should be set now (may be unset later) */
- Assert(ScanDirectionIsForward(dir) ? so->currPos.moreRight :
- so->currPos.moreLeft);
-
PredicateLockPage(rel, so->currPos.currPage, scan->xs_snapshot);
/* initialize local variables */
@@ -2107,10 +2110,9 @@ _bt_returnitem(IndexScanDesc scan, BTScanOpaque so)
*
* Wrapper on _bt_readnextpage that performs final steps for the current page.
*
- * On entry, if so->currPos.buf is valid the buffer is pinned but not locked.
- * If there's no pin held, it's because _bt_drop_lock_and_maybe_pin dropped
- * the pin eagerly earlier on. The scan must have so->currPos.currPage set to
- * a valid block, in any case.
+ * On entry, so->currPos must be valid. Its buffer will be pinned, though
+ * never locked. (Actually, when so->dropPin there won't even be a pin held,
+ * though so->currPos.currPage must still be set to a valid block number.)
*/
static bool
_bt_steppage(IndexScanDesc scan, ScanDirection dir)
@@ -2251,12 +2253,14 @@ _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, ScanDirection dir)
*/
if (_bt_readpage(scan, dir, offnum, true))
{
+ Relation rel = scan->indexRelation;
+
/*
* _bt_readpage succeeded. Drop the lock (and maybe the pin) on
* so->currPos.buf in preparation for btgettuple returning tuples.
*/
Assert(BTScanPosIsPinned(so->currPos));
- _bt_drop_lock_and_maybe_pin(scan, &so->currPos);
+ _bt_drop_lock_and_maybe_pin(rel, so);
return true;
}
@@ -2278,9 +2282,12 @@ _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, ScanDirection dir)
* previously-saved right link or left link. lastcurrblkno is the page that
* was current at the point where the blkno link was saved, which we use to
* reason about concurrent page splits/page deletions during backwards scans.
+ * In the common case where seized=false, blkno is either so->currPos.nextPage
+ * or so->currPos.prevPage, and lastcurrblkno is so->currPos.currPage.
*
- * On entry, caller shouldn't hold any locks or pins on any page (we work
- * directly off of blkno and lastcurrblkno instead). Parallel scan callers
+ * On entry, so->currPos shouldn't be locked by caller. so->currPos.buf must
+ * be InvalidBuffer/unpinned as needed by caller (note that lastcurrblkno
+ * won't need to be read again in almost all cases). Parallel scan callers
* that seized the scan before calling here should pass seized=true; such a
* caller's blkno and lastcurrblkno arguments come from the seized scan.
* seized=false callers just pass us the blkno/lastcurrblkno taken from their
@@ -2294,11 +2301,11 @@ _bt_readfirstpage(IndexScanDesc scan, OffsetNumber offnum, ScanDirection dir)
*
* On success exit, so->currPos is updated to contain data from the next
* interesting page, and we return true. We hold a pin on the buffer on
- * success exit, except when _bt_drop_lock_and_maybe_pin decided it was safe
- * to eagerly drop the pin (to avoid blocking VACUUM).
+ * success exit (except during so->dropPin index scans, when we drop the pin
+ * eagerly to avoid blocking VACUUM).
*
- * If there are no more matching records in the given direction, we drop all
- * locks and pins, invalidate so->currPos, and return false.
+ * If there are no more matching records in the given direction, we invalidate
+ * so->currPos (while ensuring it retains no locks or pins), and return false.
*
* We always release the scan for a parallel scan caller, regardless of
* success or failure; we'll call _bt_parallel_release as soon as possible.
@@ -2413,7 +2420,7 @@ _bt_readnextpage(IndexScanDesc scan, BlockNumber blkno,
*/
Assert(so->currPos.currPage == blkno);
Assert(BTScanPosIsPinned(so->currPos));
- _bt_drop_lock_and_maybe_pin(scan, &so->currPos);
+ _bt_drop_lock_and_maybe_pin(rel, so);
return true;
}
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 3794cc924ad..9d70e89c1f3 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -105,7 +105,7 @@ typedef struct BTShared
int scantuplesortstates;
/* Query ID, for report in worker processes */
- uint64 queryid;
+ int64 queryid;
/*
* workersdonecv is used to monitor the progress of workers. All parallel
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 1a15dfcb7d3..c71d1b6f2e1 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -3330,87 +3330,85 @@ _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
* current page and killed tuples thereon (generally, this should only be
* called if so->numKilled > 0).
*
- * The caller does not have a lock on the page and may or may not have the
- * page pinned in a buffer. Note that read-lock is sufficient for setting
- * LP_DEAD status (which is only a hint).
+ * Caller should not have a lock on the so->currPos page, but must hold a
+ * buffer pin when !so->dropPin.  When we return, the page still won't be
+ * locked, and the caller retains whatever pins it held before calling here.
*
- * We match items by heap TID before assuming they are the right ones to
- * delete. We cope with cases where items have moved right due to insertions.
- * If an item has moved off the current page due to a split, we'll fail to
- * find it and do nothing (this is not an error case --- we assume the item
- * will eventually get marked in a future indexscan).
+ * We match items by heap TID before assuming they are the right ones to set
+ * LP_DEAD. If the scan is one that holds a buffer pin on the target page
+ * continuously from initially reading the items until applying this function
+ * (if it is a !so->dropPin scan), VACUUM cannot have deleted any items on the
+ * page, so the page's TIDs can't have been recycled by now. There's no risk
+ * that we'll confuse a new index tuple that happens to use a recycled TID
+ * with a now-removed tuple with the same TID (that used to be on this same
+ * page). We can't rely on that during scans that drop buffer pins eagerly
+ * (so->dropPin scans), though, so we must condition setting LP_DEAD bits on
+ * the page LSN having not changed since back when _bt_readpage saw the page.
+ * We totally give up on setting LP_DEAD bits when the page LSN changed.
*
- * Note that if we hold a pin on the target page continuously from initially
- * reading the items until applying this function, VACUUM cannot have deleted
- * any items from the page, and so there is no need to search left from the
- * recorded offset. (This observation also guarantees that the item is still
- * the right one to delete, which might otherwise be questionable since heap
- * TIDs can get recycled.) This holds true even if the page has been modified
- * by inserts and page splits, so there is no need to consult the LSN.
- *
- * If the pin was released after reading the page, then we re-read it. If it
- * has been modified since we read it (as determined by the LSN), we dare not
- * flag any entries because it is possible that the old entry was vacuumed
- * away and the TID was re-used by a completely different heap tuple.
+ * We give up much less often during !so->dropPin scans, but it still happens.
+ * We cope with cases where items have moved right due to insertions. If an
+ * item has moved off the current page due to a split, we'll fail to find it
+ * and just give up on it.
*/
void
_bt_killitems(IndexScanDesc scan)
{
+ Relation rel = scan->indexRelation;
BTScanOpaque so = (BTScanOpaque) scan->opaque;
Page page;
BTPageOpaque opaque;
OffsetNumber minoff;
OffsetNumber maxoff;
- int i;
int numKilled = so->numKilled;
bool killedsomething = false;
- bool droppedpin PG_USED_FOR_ASSERTS_ONLY;
+ Buffer buf;
+ Assert(numKilled > 0);
Assert(BTScanPosIsValid(so->currPos));
+ Assert(scan->heapRelation != NULL); /* can't be a bitmap index scan */
- /*
- * Always reset the scan state, so we don't look for same items on other
- * pages.
- */
+ /* Always invalidate so->killedItems[] before leaving so->currPos */
so->numKilled = 0;
- if (BTScanPosIsPinned(so->currPos))
+ if (!so->dropPin)
{
/*
* We have held the pin on this page since we read the index tuples,
* so all we need to do is lock it. The pin will have prevented
- * re-use of any TID on the page, so there is no need to check the
- * LSN.
+ * concurrent VACUUMs from recycling any of the TIDs on the page.
*/
- droppedpin = false;
- _bt_lockbuf(scan->indexRelation, so->currPos.buf, BT_READ);
-
- page = BufferGetPage(so->currPos.buf);
+ Assert(BTScanPosIsPinned(so->currPos));
+ buf = so->currPos.buf;
+ _bt_lockbuf(rel, buf, BT_READ);
}
else
{
- Buffer buf;
+ XLogRecPtr latestlsn;
- droppedpin = true;
- /* Attempt to re-read the buffer, getting pin and lock. */
- buf = _bt_getbuf(scan->indexRelation, so->currPos.currPage, BT_READ);
+ Assert(!BTScanPosIsPinned(so->currPos));
+ Assert(RelationNeedsWAL(rel));
+ buf = _bt_getbuf(rel, so->currPos.currPage, BT_READ);
- page = BufferGetPage(buf);
- if (BufferGetLSNAtomic(buf) == so->currPos.lsn)
- so->currPos.buf = buf;
- else
+ latestlsn = BufferGetLSNAtomic(buf);
+ Assert(!XLogRecPtrIsInvalid(so->currPos.lsn));
+ Assert(so->currPos.lsn <= latestlsn);
+ if (so->currPos.lsn != latestlsn)
{
- /* Modified while not pinned means hinting is not safe. */
- _bt_relbuf(scan->indexRelation, buf);
+ /* Modified, give up on hinting */
+ _bt_relbuf(rel, buf);
return;
}
+
+ /* Unmodified, hinting is safe */
}
+ page = BufferGetPage(buf);
opaque = BTPageGetOpaque(page);
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
- for (i = 0; i < numKilled; i++)
+ for (int i = 0; i < numKilled; i++)
{
int itemIndex = so->killedItems[i];
BTScanPosItem *kitem = &so->currPos.items[itemIndex];
@@ -3442,7 +3440,7 @@ _bt_killitems(IndexScanDesc scan)
* correctness.
*
* Note that the page may have been modified in almost any way
- * since we first read it (in the !droppedpin case), so it's
+ * since we first read it (in the !so->dropPin case), so it's
* possible that this posting list tuple wasn't a posting list
* tuple when we first encountered its heap TIDs.
*/
@@ -3458,7 +3456,7 @@ _bt_killitems(IndexScanDesc scan)
* though only in the common case where the page can't
* have been concurrently modified
*/
- Assert(kitem->indexOffset == offnum || !droppedpin);
+ Assert(kitem->indexOffset == offnum || !so->dropPin);
/*
* Read-ahead to later kitems here.
@@ -3522,10 +3520,13 @@ _bt_killitems(IndexScanDesc scan)
if (killedsomething)
{
opaque->btpo_flags |= BTP_HAS_GARBAGE;
- MarkBufferDirtyHint(so->currPos.buf, true);
+ MarkBufferDirtyHint(buf, true);
}
- _bt_unlockbuf(scan->indexRelation, so->currPos.buf);
+ if (!so->dropPin)
+ _bt_unlockbuf(rel, buf);
+ else
+ _bt_relbuf(rel, buf);
}
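/*
 * [Editor's sketch, not part of the patch] The dropPin safety protocol in
 * miniature.  _bt_readpage leaves so->currPos.lsn unset; the LSN is captured
 * in _bt_drop_lock_and_maybe_pin just before the pin is released, and
 * _bt_killitems may set LP_DEAD hints later only if the page LSN is still
 * the same.  Any WAL-logged change bumps the page LSN, so "unchanged" proves
 * that VACUUM cannot have recycled any of the page's TIDs in the meantime.
 * The type below is a stand-in for XLogRecPtr.
 */
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sketch_lsn;    /* stand-in for XLogRecPtr */

static bool
hinting_is_safe(sketch_lsn lsn_at_read, sketch_lsn lsn_now)
{
    /* a changed LSN means the page was modified while unpinned */
    return lsn_at_read == lsn_now;
}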
diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c
index 715cc1f7bad..305598e2865 100644
--- a/src/backend/access/rmgrdesc/xactdesc.c
+++ b/src/backend/access/rmgrdesc/xactdesc.c
@@ -252,6 +252,8 @@ ParsePrepareRecord(uint8 info, xl_xact_prepare *xlrec, xl_xact_parsed_prepare *p
parsed->nsubxacts = xlrec->nsubxacts;
parsed->nrels = xlrec->ncommitrels;
parsed->nabortrels = xlrec->nabortrels;
+ parsed->nstats = xlrec->ncommitstats;
+ parsed->nabortstats = xlrec->nabortstats;
parsed->nmsgs = xlrec->ninvalmsgs;
strncpy(parsed->twophase_gid, bufptr, xlrec->gidlen);
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 1914859b2ee..47ffc0a2307 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7498,6 +7498,10 @@ CreateCheckPoint(int flags)
if (PriorRedoPtr != InvalidXLogRecPtr)
UpdateCheckPointDistanceEstimate(RedoRecPtr - PriorRedoPtr);
+#ifdef USE_INJECTION_POINTS
+ INJECTION_POINT("checkpoint-before-old-wal-removal", NULL);
+#endif
+
/*
* Delete old log files, those no longer needed for last checkpoint to
* prevent the disk holding the xlog from growing full.
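/*
 * [Editor's sketch, not part of the patch] The spirit of INJECTION_POINT: a
 * named hook that is a no-op unless a test has attached a callback to that
 * name, letting tests stall or fail a backend at a precise point (here,
 * after the redo pointer advances but before old WAL segments are removed).
 * The real mechanism lives in utils/injection_point.h and is compiled out
 * unless USE_INJECTION_POINTS is defined; everything below is a stand-in.
 */
#include <stddef.h>
#include <string.h>

typedef void (*ip_callback) (const char *name, void *arg);

static const char *attached_name = NULL;
static ip_callback attached_func = NULL;

static void
injection_point_attach_sketch(const char *name, ip_callback func)
{
    attached_name = name;
    attached_func = func;
}

static void
injection_point_run_sketch(const char *name, void *arg)
{
    /* fast path: nothing attached, or attached under a different name */
    if (attached_func && strcmp(attached_name, name) == 0)
        attached_func(name, arg);
}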
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index fbaed5359ad..fd6537567ea 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -665,6 +665,15 @@ CheckAttributeType(const char *attname,
}
/*
+ * For consistency with check_virtual_generated_security().
+ */
+ if ((flags & CHKATYPE_IS_VIRTUAL) && atttypid >= FirstUnpinnedObjectId)
+ ereport(ERROR,
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("virtual generated column \"%s\" cannot have a user-defined type", attname),
+ errdetail("Virtual generated columns that make use of user-defined types are not yet supported."));
+
+ /*
* This might not be strictly invalid per SQL standard, but it is pretty
* useless, and it cannot be dumped, so we must disallow it.
*/
@@ -1100,6 +1109,7 @@ AddNewRelationType(const char *typeName,
* if false, relacl is always set NULL
* allow_system_table_mods: true to allow creation in system namespaces
* is_internal: is this a system-generated catalog?
+ * relrewrite: link to original relation during a table rewrite
*
* Output parameters:
* typaddress: if not null, gets the object address of the new pg_type entry
@@ -2996,7 +3006,7 @@ AddRelationNotNullConstraints(Relation rel, List *constraints,
if (constr->is_no_inherit)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot define not-null constraint on column \"%s\" with NO INHERIT",
+ errmsg("cannot define not-null constraint with NO INHERIT on column \"%s\"",
strVal(linitial(constr->keys))),
errdetail("The column has an inherited not-null constraint.")));
@@ -3215,6 +3225,86 @@ check_nested_generated(ParseState *pstate, Node *node)
}
/*
+ * Check security of virtual generated column expression.
+ *
+ * Just like selecting from a view is exploitable (CVE-2024-7348), selecting
+ * from a table with virtual generated columns is exploitable. Users who are
+ * concerned about this can avoid selecting from views, but telling them to
+ * avoid selecting from tables is less practical.
+ *
+ * To address this, generation expressions for virtual generated columns are
+ * restricted to using built-in functions and types.  We
+ * assume that built-in functions and types cannot be exploited for this
+ * purpose.  Note that overall security also requires that all functions in
+ * use are immutable.  (For example, there are some built-in non-immutable
+ * functions that can run arbitrary SQL.)  Immutability is checked
+ * elsewhere, since it is a property that needs to hold independently of
+ * security considerations.
+ *
+ * In the future, this could be expanded by some new mechanism to declare
+ * other functions and types as safe or trusted for this purpose, but that is
+ * to be designed.
+ */
+
+/*
+ * Callback for check_functions_in_node() that determines whether a function
+ * is user-defined.
+ */
+static bool
+contains_user_functions_checker(Oid func_id, void *context)
+{
+ return (func_id >= FirstUnpinnedObjectId);
+}
+
+/*
+ * Checks for all the things we don't want in the generation expressions of
+ * virtual generated columns for security reasons. Errors out if it finds
+ * one.
+ */
+static bool
+check_virtual_generated_security_walker(Node *node, void *context)
+{
+ ParseState *pstate = context;
+
+ if (node == NULL)
+ return false;
+
+ if (!IsA(node, List))
+ {
+ if (check_functions_in_node(node, contains_user_functions_checker, NULL))
+ ereport(ERROR,
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("generation expression uses user-defined function"),
+ errdetail("Virtual generated columns that make use of user-defined functions are not yet supported."),
+ parser_errposition(pstate, exprLocation(node)));
+
+ /*
+ * check_functions_in_node() doesn't check some node types (see
+ * comment there). We handle CoerceToDomain and MinMaxExpr by
+ * checking for built-in types. The other listed node types cannot
+ * call user-definable SQL-visible functions.
+ *
+ * We furthermore need this type check to handle built-in, immutable
+ * polymorphic functions such as array_eq().
+ */
+ if (exprType(node) >= FirstUnpinnedObjectId)
+ ereport(ERROR,
+ errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("generation expression uses user-defined type"),
+ errdetail("Virtual generated columns that make use of user-defined types are not yet supported."),
+ parser_errposition(pstate, exprLocation(node)));
+ }
+
+ return expression_tree_walker(node, check_virtual_generated_security_walker, context);
+}
+
+static void
+check_virtual_generated_security(ParseState *pstate, Node *node)
+{
+ check_virtual_generated_security_walker(node, pstate);
+}
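/*
 * [Editor's sketch, not part of the patch] The test used by both checks
 * above, isolated: OIDs below FirstUnpinnedObjectId belong to pinned,
 * catalog-defined objects, and the patch conservatively treats everything
 * at or above that threshold as user-defined.  The constant's value (12000
 * as of this writing) comes from access/transam.h; the names below are
 * stand-ins.
 */
typedef unsigned int sketch_oid;    /* stand-in for Oid */

#define SKETCH_FIRST_UNPINNED_OID 12000     /* FirstUnpinnedObjectId */

static int
oid_is_user_defined(sketch_oid oid)
{
    return oid >= SKETCH_FIRST_UNPINNED_OID;
}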
+
+/*
* Take a raw default and convert it to a cooked format ready for
* storage.
*
@@ -3253,6 +3343,10 @@ cookDefault(ParseState *pstate,
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("generation expression is not immutable")));
+
+ /* Check security of expressions for virtual generated column */
+ if (attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
+ check_virtual_generated_security(pstate, expr);
}
else
{
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 739a92bdcc1..aa216683b74 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -3020,7 +3020,7 @@ index_build(Relation heapRelation,
/*
* Determine worker process details for parallel CREATE INDEX. Currently,
- * only btree and BRIN have support for parallel builds.
+ * only btree, GIN, and BRIN have support for parallel builds.
*
* Note that planner considers parallel safety for us.
*/
diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql
index 15efb02badb..08f780a2e63 100644
--- a/src/backend/catalog/system_views.sql
+++ b/src/backend/catalog/system_views.sql
@@ -674,11 +674,6 @@ GRANT SELECT ON pg_backend_memory_contexts TO pg_read_all_stats;
REVOKE EXECUTE ON FUNCTION pg_get_backend_memory_contexts() FROM PUBLIC;
GRANT EXECUTE ON FUNCTION pg_get_backend_memory_contexts() TO pg_read_all_stats;
-REVOKE EXECUTE ON FUNCTION
- pg_get_process_memory_contexts(integer, boolean, float) FROM PUBLIC;
-GRANT EXECUTE ON FUNCTION
- pg_get_process_memory_contexts(integer, boolean, float) TO pg_read_all_stats;
-
-- Statistics views
CREATE VIEW pg_stat_all_tables AS
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 4fffb76e557..7111d5d5334 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -76,7 +76,7 @@ static BufferAccessStrategy vac_strategy;
static void do_analyze_rel(Relation onerel,
- VacuumParams *params, List *va_cols,
+ const VacuumParams params, List *va_cols,
AcquireSampleRowsFunc acquirefunc, BlockNumber relpages,
bool inh, bool in_outer_xact, int elevel);
static void compute_index_stats(Relation onerel, double totalrows,
@@ -107,7 +107,7 @@ static Datum ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
*/
void
analyze_rel(Oid relid, RangeVar *relation,
- VacuumParams *params, List *va_cols, bool in_outer_xact,
+ const VacuumParams params, List *va_cols, bool in_outer_xact,
BufferAccessStrategy bstrategy)
{
Relation onerel;
@@ -116,7 +116,7 @@ analyze_rel(Oid relid, RangeVar *relation,
BlockNumber relpages = 0;
/* Select logging level */
- if (params->options & VACOPT_VERBOSE)
+ if (params.options & VACOPT_VERBOSE)
elevel = INFO;
else
elevel = DEBUG2;
@@ -138,8 +138,8 @@ analyze_rel(Oid relid, RangeVar *relation,
*
* Make sure to generate only logs for ANALYZE in this case.
*/
- onerel = vacuum_open_relation(relid, relation, params->options & ~(VACOPT_VACUUM),
- params->log_min_duration >= 0,
+ onerel = vacuum_open_relation(relid, relation, params.options & ~(VACOPT_VACUUM),
+ params.log_min_duration >= 0,
ShareUpdateExclusiveLock);
/* leave if relation could not be opened or locked */
@@ -155,7 +155,7 @@ analyze_rel(Oid relid, RangeVar *relation,
*/
if (!vacuum_is_permitted_for_relation(RelationGetRelid(onerel),
onerel->rd_rel,
- params->options & ~VACOPT_VACUUM))
+ params.options & ~VACOPT_VACUUM))
{
relation_close(onerel, ShareUpdateExclusiveLock);
return;
@@ -227,7 +227,7 @@ analyze_rel(Oid relid, RangeVar *relation,
else
{
/* No need for a WARNING if we already complained during VACUUM */
- if (!(params->options & VACOPT_VACUUM))
+ if (!(params.options & VACOPT_VACUUM))
ereport(WARNING,
(errmsg("skipping \"%s\" --- cannot analyze non-tables or special system tables",
RelationGetRelationName(onerel))));
@@ -275,7 +275,7 @@ analyze_rel(Oid relid, RangeVar *relation,
* appropriate acquirefunc for each child table.
*/
static void
-do_analyze_rel(Relation onerel, VacuumParams *params,
+do_analyze_rel(Relation onerel, const VacuumParams params,
List *va_cols, AcquireSampleRowsFunc acquirefunc,
BlockNumber relpages, bool inh, bool in_outer_xact,
int elevel)
@@ -309,9 +309,9 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
PgStat_Counter startreadtime = 0;
PgStat_Counter startwritetime = 0;
- verbose = (params->options & VACOPT_VERBOSE) != 0;
+ verbose = (params.options & VACOPT_VERBOSE) != 0;
instrument = (verbose || (AmAutoVacuumWorkerProcess() &&
- params->log_min_duration >= 0));
+ params.log_min_duration >= 0));
if (inh)
ereport(elevel,
(errmsg("analyzing \"%s.%s\" inheritance tree",
@@ -706,7 +706,7 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
* amvacuumcleanup() when called in ANALYZE-only mode. The only exception
* among core index AMs is GIN/ginvacuumcleanup().
*/
- if (!(params->options & VACOPT_VACUUM))
+ if (!(params.options & VACOPT_VACUUM))
{
for (ind = 0; ind < nindexes; ind++)
{
@@ -736,9 +736,9 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
{
TimestampTz endtime = GetCurrentTimestamp();
- if (verbose || params->log_min_duration == 0 ||
+ if (verbose || params.log_min_duration == 0 ||
TimestampDifferenceExceeds(starttime, endtime,
- params->log_min_duration))
+ params.log_min_duration))
{
long delay_in_ms;
WalUsage walusage;
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 54a08e4102e..b55221d44cd 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -917,7 +917,7 @@ copy_table_data(Relation NewHeap, Relation OldHeap, Relation OldIndex, bool verb
* not to be aggressive about this.
*/
memset(&params, 0, sizeof(VacuumParams));
- vacuum_get_cutoffs(OldHeap, &params, &cutoffs);
+ vacuum_get_cutoffs(OldHeap, params, &cutoffs);
/*
* FreezeXid will become the table's new relfrozenxid, and that mustn't go
diff --git a/src/backend/commands/copyto.c b/src/backend/commands/copyto.c
index f87e405351d..ea6f18f2c80 100644
--- a/src/backend/commands/copyto.c
+++ b/src/backend/commands/copyto.c
@@ -835,7 +835,7 @@ BeginCopyTo(ParseState *pstate,
((DR_copy *) dest)->cstate = cstate;
/* Create a QueryDesc requesting no output */
- cstate->queryDesc = CreateQueryDesc(plan, NULL, pstate->p_sourcetext,
+ cstate->queryDesc = CreateQueryDesc(plan, pstate->p_sourcetext,
GetActiveSnapshot(),
InvalidSnapshot,
dest, NULL, NULL, 0);
@@ -845,8 +845,7 @@ BeginCopyTo(ParseState *pstate,
*
* ExecutorStart computes a result tupdesc for us
*/
- if (!ExecutorStart(cstate->queryDesc, 0))
- elog(ERROR, "ExecutorStart() failed unexpectedly");
+ ExecutorStart(cstate->queryDesc, 0);
tupDesc = cstate->queryDesc->tupDesc;
}
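/*
 * [Editor's sketch, not part of the patch] The calling-convention change
 * made at every site in this patch: ExecutorStart no longer returns a bool
 * for callers to test, it reports failure itself (via ereport in the real
 * code).  A stand-in that exits on failure shows why call sites collapse to
 * a single line.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool
start_impl(void)
{
    return true;                /* stand-in for the real startup work */
}

static void
executor_start_sketch(void)
{
    if (!start_impl())
    {
        /* stand-in for ereport(ERROR, ...), which does not return */
        fprintf(stderr, "executor startup failed\n");
        exit(EXIT_FAILURE);
    }
}

int
main(void)
{
    /* before: if (!ExecutorStart(qd, 0)) elog(ERROR, "...");  after: */
    executor_start_sketch();
    return 0;
}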
diff --git a/src/backend/commands/createas.c b/src/backend/commands/createas.c
index 0a4155773eb..dfd2ab8e862 100644
--- a/src/backend/commands/createas.c
+++ b/src/backend/commands/createas.c
@@ -334,13 +334,12 @@ ExecCreateTableAs(ParseState *pstate, CreateTableAsStmt *stmt,
UpdateActiveSnapshotCommandId();
/* Create a QueryDesc, redirecting output to our tuple receiver */
- queryDesc = CreateQueryDesc(plan, NULL, pstate->p_sourcetext,
+ queryDesc = CreateQueryDesc(plan, pstate->p_sourcetext,
GetActiveSnapshot(), InvalidSnapshot,
dest, params, queryEnv, 0);
/* call ExecutorStart to prepare the plan for execution */
- if (!ExecutorStart(queryDesc, GetIntoRelEFlags(into)))
- elog(ERROR, "ExecutorStart() failed unexpectedly");
+ ExecutorStart(queryDesc, GetIntoRelEFlags(into));
/* run the plan to completion */
ExecutorRun(queryDesc, ForwardScanDirection, 0);
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 5fbbcdaabb1..c95eb945016 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -1065,16 +1065,41 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
/* Check that the chosen locales are valid, and get canonical spellings */
if (!check_locale(LC_COLLATE, dbcollate, &canonname))
- ereport(ERROR,
- (errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("invalid LC_COLLATE locale name: \"%s\"", dbcollate),
- errhint("If the locale name is specific to ICU, use ICU_LOCALE.")));
+ {
+ if (dblocprovider == COLLPROVIDER_BUILTIN)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_COLLATE locale name: \"%s\"", dbcollate),
+ errhint("If the locale name is specific to the builtin provider, use BUILTIN_LOCALE.")));
+ else if (dblocprovider == COLLPROVIDER_ICU)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_COLLATE locale name: \"%s\"", dbcollate),
+ errhint("If the locale name is specific to the ICU provider, use ICU_LOCALE.")));
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_COLLATE locale name: \"%s\"", dbcollate)));
+ }
dbcollate = canonname;
if (!check_locale(LC_CTYPE, dbctype, &canonname))
- ereport(ERROR,
- (errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("invalid LC_CTYPE locale name: \"%s\"", dbctype),
- errhint("If the locale name is specific to ICU, use ICU_LOCALE.")));
+ {
+ if (dblocprovider == COLLPROVIDER_BUILTIN)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_CTYPE locale name: \"%s\"", dbctype),
+ errhint("If the locale name is specific to the builtin provider, use BUILTIN_LOCALE.")));
+ else if (dblocprovider == COLLPROVIDER_ICU)
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_CTYPE locale name: \"%s\"", dbctype),
+ errhint("If the locale name is specific to the ICU provider, use ICU_LOCALE.")));
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("invalid LC_CTYPE locale name: \"%s\"", dbctype)));
+ }
+
dbctype = canonname;
check_encoding_locale_matches(encoding, dbcollate, dbctype);
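/*
 * [Editor's sketch, not part of the patch] The hint selection added above,
 * factored as a pure function: only the builtin and ICU providers get a
 * "use the other option" hint, and libc (or anything else) gets none.
 * Provider letters follow pg_collation.h ('b', 'i', 'c'); the function name
 * is hypothetical.
 */
#include <stddef.h>

static const char *
locale_hint_sketch(char provider)
{
    switch (provider)
    {
        case 'b':               /* COLLPROVIDER_BUILTIN */
            return "If the locale name is specific to the builtin provider, use BUILTIN_LOCALE.";
        case 'i':               /* COLLPROVIDER_ICU */
            return "If the locale name is specific to the ICU provider, use ICU_LOCALE.";
        default:                /* COLLPROVIDER_LIBC and anything else */
            return NULL;
    }
}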
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 786ee865f14..7e2792ead71 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -369,8 +369,7 @@ standard_ExplainOneQuery(Query *query, int cursorOptions,
}
/* run it (if needed) and produce output */
- ExplainOnePlan(plan, NULL, NULL, -1, into, es, queryString, params,
- queryEnv,
+ ExplainOnePlan(plan, into, es, queryString, params, queryEnv,
&planduration, (es->buffers ? &bufusage : NULL),
es->memory ? &mem_counters : NULL);
}
@@ -492,9 +491,7 @@ ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es,
* to call it.
*/
void
-ExplainOnePlan(PlannedStmt *plannedstmt, CachedPlan *cplan,
- CachedPlanSource *plansource, int query_index,
- IntoClause *into, ExplainState *es,
+ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
const char *queryString, ParamListInfo params,
QueryEnvironment *queryEnv, const instr_time *planduration,
const BufferUsage *bufusage,
@@ -550,7 +547,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, CachedPlan *cplan,
dest = None_Receiver;
/* Create a QueryDesc for the query */
- queryDesc = CreateQueryDesc(plannedstmt, cplan, queryString,
+ queryDesc = CreateQueryDesc(plannedstmt, queryString,
GetActiveSnapshot(), InvalidSnapshot,
dest, params, queryEnv, instrument_option);
@@ -564,17 +561,8 @@ ExplainOnePlan(PlannedStmt *plannedstmt, CachedPlan *cplan,
if (into)
eflags |= GetIntoRelEFlags(into);
- /* Prepare the plan for execution. */
- if (queryDesc->cplan)
- {
- ExecutorStartCachedPlan(queryDesc, eflags, plansource, query_index);
- Assert(queryDesc->planstate);
- }
- else
- {
- if (!ExecutorStart(queryDesc, eflags))
- elog(ERROR, "ExecutorStart() failed unexpectedly");
- }
+ /* call ExecutorStart to prepare the plan for execution */
+ ExecutorStart(queryDesc, eflags);
/* Execute the plan for statistics if asked for */
if (es->analyze)
@@ -823,14 +811,10 @@ ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc)
* the queryid in any of the EXPLAIN plans to keep stable the results
* generated by regression test suites.
*/
- if (es->verbose && queryDesc->plannedstmt->queryId != UINT64CONST(0) &&
+ if (es->verbose && queryDesc->plannedstmt->queryId != INT64CONST(0) &&
compute_query_id != COMPUTE_QUERY_ID_REGRESS)
{
- /*
- * Output the queryid as an int64 rather than a uint64 so we match
- * what would be seen in the BIGINT pg_stat_statements.queryid column.
- */
- ExplainPropertyInteger("Query Identifier", NULL, (int64)
+ ExplainPropertyInteger("Query Identifier", NULL,
queryDesc->plannedstmt->queryId, es);
}
}
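/*
 * [Editor's sketch, not part of the patch] Why the switch from uint64 to
 * int64 matters for display: printed as a signed 64-bit value (two's-
 * complement reinterpretation), a query ID with its top bit set matches the
 * BIGINT pg_stat_statements.queryid column instead of appearing as a huge
 * unsigned number.
 */
#include <inttypes.h>
#include <stdio.h>

int
main(void)
{
    uint64_t    raw = 0xF000000000000001ULL;    /* example hash value */

    printf("as uint64: %" PRIu64 "\n", raw);    /* 17293822569102704641 */
    printf("as int64:  %" PRId64 "\n", (int64_t) raw);  /* -1152921504606846975 */
    return 0;
}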
@@ -1232,6 +1216,10 @@ ExplainPreScanNode(PlanState *planstate, Bitmapset **rels_used)
if (((ModifyTable *) plan)->exclRelRTI)
*rels_used = bms_add_member(*rels_used,
((ModifyTable *) plan)->exclRelRTI);
+ /* Ensure Vars used in RETURNING will have refnames */
+ if (plan->targetlist)
+ *rels_used = bms_add_member(*rels_used,
+ linitial_int(((ModifyTable *) plan)->resultRelations));
break;
case T_Append:
*rels_used = bms_add_members(*rels_used,
diff --git a/src/backend/commands/extension.c b/src/backend/commands/extension.c
index 73c52e970f6..e6f9ab6dfd6 100644
--- a/src/backend/commands/extension.c
+++ b/src/backend/commands/extension.c
@@ -993,13 +993,11 @@ execute_sql_string(const char *sql, const char *filename)
QueryDesc *qdesc;
qdesc = CreateQueryDesc(stmt,
- NULL,
sql,
GetActiveSnapshot(), NULL,
dest, NULL, NULL, 0);
- if (!ExecutorStart(qdesc, 0))
- elog(ERROR, "ExecutorStart() failed unexpectedly");
+ ExecutorStart(qdesc, 0);
ExecutorRun(qdesc, ForwardScanDirection, 0);
ExecutorFinish(qdesc);
ExecutorEnd(qdesc);
diff --git a/src/backend/commands/foreigncmds.c b/src/backend/commands/foreigncmds.c
index c14e038d54f..8d2d7431544 100644
--- a/src/backend/commands/foreigncmds.c
+++ b/src/backend/commands/foreigncmds.c
@@ -71,15 +71,26 @@ optionListToArray(List *options)
foreach(cell, options)
{
DefElem *def = lfirst(cell);
+ const char *name;
const char *value;
Size len;
text *t;
+ name = def->defname;
value = defGetString(def);
- len = VARHDRSZ + strlen(def->defname) + 1 + strlen(value);
+
+ /* Insist that name not contain "=", else "a=b=c" is ambiguous */
+ if (strchr(name, '=') != NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("invalid option name \"%s\": must not contain \"=\"",
+ name)));
+
+ len = VARHDRSZ + strlen(name) + 1 + strlen(value);
+ /* +1 leaves room for sprintf's trailing null */
t = palloc(len + 1);
SET_VARSIZE(t, len);
- sprintf(VARDATA(t), "%s=%s", def->defname, value);
+ sprintf(VARDATA(t), "%s=%s", name, value);
astate = accumArrayResult(astate, PointerGetDatum(t),
false, TEXTOID,
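/*
 * [Editor's sketch, not part of the patch] The ambiguity the new check
 * closes: once name and value are flattened into one "name=value" string,
 * a name containing "=" cannot be told apart from a value containing "=".
 */
#include <stdio.h>

int
main(void)
{
    char        buf1[16];
    char        buf2[16];

    sprintf(buf1, "%s=%s", "a=b", "c"); /* name "a=b", value "c"   */
    sprintf(buf2, "%s=%s", "a", "b=c"); /* name "a",   value "b=c" */

    /* both print "a=b=c": the original pair is unrecoverable */
    printf("%s\n%s\n", buf1, buf2);
    return 0;
}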
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index d962fe392cd..6f753ab6d7a 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -2469,8 +2469,8 @@ GetOperatorFromCompareType(Oid opclass, Oid rhstype, CompareType cmptype,
cmptype == COMPARE_EQ ? errmsg("could not identify an equality operator for type %s", format_type_be(opcintype)) :
cmptype == COMPARE_OVERLAP ? errmsg("could not identify an overlaps operator for type %s", format_type_be(opcintype)) :
cmptype == COMPARE_CONTAINED_BY ? errmsg("could not identify a contained-by operator for type %s", format_type_be(opcintype)) : 0,
- errdetail("Could not translate compare type %d for operator family \"%s\", input type %s, access method \"%s\".",
- cmptype, get_opfamily_name(opfamily, false), format_type_be(opcintype), get_am_name(amid)));
+ errdetail("Could not translate compare type %d for operator family \"%s\" of access method \"%s\".",
+ cmptype, get_opfamily_name(opfamily, false), get_am_name(amid)));
/*
* We parameterize rhstype so foreign keys can ask for a <@ operator
@@ -2592,7 +2592,9 @@ makeObjectName(const char *name1, const char *name2, const char *label)
* constraint names.)
*
* Note: it is theoretically possible to get a collision anyway, if someone
- * else chooses the same name concurrently. This is fairly unlikely to be
+ * else chooses the same name concurrently. We shorten the race condition
+ * window by checking for conflicting relations using SnapshotDirty, but
+ * that doesn't close the window entirely. This is fairly unlikely to be
* a problem in practice, especially if one is holding an exclusive lock on
* the relation identified by name1. However, if choosing multiple names
* within a single command, you'd better create the new object and do
@@ -2608,15 +2610,45 @@ ChooseRelationName(const char *name1, const char *name2,
int pass = 0;
char *relname = NULL;
char modlabel[NAMEDATALEN];
+ SnapshotData SnapshotDirty;
+ Relation pgclassrel;
+
+ /* prepare to search pg_class with a dirty snapshot */
+ InitDirtySnapshot(SnapshotDirty);
+ pgclassrel = table_open(RelationRelationId, AccessShareLock);
/* try the unmodified label first */
strlcpy(modlabel, label, sizeof(modlabel));
for (;;)
{
+ ScanKeyData key[2];
+ SysScanDesc scan;
+ bool collides;
+
relname = makeObjectName(name1, name2, modlabel);
- if (!OidIsValid(get_relname_relid(relname, namespaceid)))
+ /* is there any conflicting relation name? */
+ ScanKeyInit(&key[0],
+ Anum_pg_class_relname,
+ BTEqualStrategyNumber, F_NAMEEQ,
+ CStringGetDatum(relname));
+ ScanKeyInit(&key[1],
+ Anum_pg_class_relnamespace,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(namespaceid));
+
+ scan = systable_beginscan(pgclassrel, ClassNameNspIndexId,
+ true /* indexOK */ ,
+ &SnapshotDirty,
+ 2, key);
+
+ collides = HeapTupleIsValid(systable_getnext(scan));
+
+ systable_endscan(scan);
+
+ /* break out of loop if no conflict */
+ if (!collides)
{
if (!isconstraint ||
!ConstraintNameExists(relname, namespaceid))
@@ -2628,6 +2660,8 @@ ChooseRelationName(const char *name1, const char *name2,
snprintf(modlabel, sizeof(modlabel), "%s%d", label, ++pass);
}
+ table_close(pgclassrel, AccessShareLock);
+
return relname;
}
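/*
 * [Editor's sketch, not part of the patch] The name-choosing loop in
 * outline: try the plain label first, then label1, label2, ... until a
 * probe (a stand-in callback here) reports no conflict.  The patch's only
 * change is what the probe looks at: a dirty-snapshot scan of pg_class, so
 * that names chosen by uncommitted concurrent transactions also count as
 * conflicts.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef bool (*name_in_use_fn) (const char *name);

static void
choose_name_sketch(const char *label, name_in_use_fn in_use,
                   char *out, size_t outlen)
{
    int         pass = 0;

    snprintf(out, outlen, "%s", label);
    while (in_use(out))
        snprintf(out, outlen, "%s%d", label, ++pass);
}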
@@ -4226,7 +4260,7 @@ ReindexRelationConcurrently(const ReindexStmt *stmt, Oid relationOid, const Rein
false);
/*
- * Updating pg_index might involve TOAST table access, so ensure we
+ * Swapping the indexes might involve TOAST table access, so ensure we
* have a valid snapshot.
*/
PushActiveSnapshot(GetTransactionSnapshot());
diff --git a/src/backend/commands/matview.c b/src/backend/commands/matview.c
index e7854add178..188e26f0e6e 100644
--- a/src/backend/commands/matview.c
+++ b/src/backend/commands/matview.c
@@ -438,13 +438,12 @@ refresh_matview_datafill(DestReceiver *dest, Query *query,
UpdateActiveSnapshotCommandId();
/* Create a QueryDesc, redirecting output to our tuple receiver */
- queryDesc = CreateQueryDesc(plan, NULL, queryString,
+ queryDesc = CreateQueryDesc(plan, queryString,
GetActiveSnapshot(), InvalidSnapshot,
dest, NULL, NULL, 0);
/* call ExecutorStart to prepare the plan for execution */
- if (!ExecutorStart(queryDesc, 0))
- elog(ERROR, "ExecutorStart() failed unexpectedly");
+ ExecutorStart(queryDesc, 0);
/* run the plan */
ExecutorRun(queryDesc, ForwardScanDirection, 0);
@@ -836,7 +835,8 @@ refresh_by_match_merge(Oid matviewOid, Oid tempOid, Oid relowner,
if (!foundUniqueIndex)
ereport(ERROR,
errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("could not find suitable unique index on materialized view"));
+ errmsg("could not find suitable unique index on materialized view \"%s\"",
+ RelationGetRelationName(matviewRel)));
appendStringInfoString(&querybuf,
" AND newdata.* OPERATOR(pg_catalog.*=) mv.*) "
diff --git a/src/backend/commands/portalcmds.c b/src/backend/commands/portalcmds.c
index 4c2ac045224..e7c8171c102 100644
--- a/src/backend/commands/portalcmds.c
+++ b/src/backend/commands/portalcmds.c
@@ -117,7 +117,6 @@ PerformCursorOpen(ParseState *pstate, DeclareCursorStmt *cstmt, ParamListInfo pa
queryString,
CMDTAG_SELECT, /* cursor's query is always a SELECT */
list_make1(plan),
- NULL,
NULL);
/*----------
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index bf7d2b2309f..34b6410d6a2 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -205,8 +205,7 @@ ExecuteQuery(ParseState *pstate,
query_string,
entry->plansource->commandTag,
plan_list,
- cplan,
- entry->plansource);
+ cplan);
/*
* For CREATE TABLE ... AS EXECUTE, we must verify that the prepared
@@ -586,7 +585,6 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
MemoryContextCounters mem_counters;
MemoryContext planner_ctx = NULL;
MemoryContext saved_ctx = NULL;
- int query_index = 0;
if (es->memory)
{
@@ -659,8 +657,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
PlannedStmt *pstmt = lfirst_node(PlannedStmt, p);
if (pstmt->commandType != CMD_UTILITY)
- ExplainOnePlan(pstmt, cplan, entry->plansource, query_index,
- into, es, query_string, paramLI, pstate->p_queryEnv,
+ ExplainOnePlan(pstmt, into, es, query_string, paramLI, pstate->p_queryEnv,
&planduration, (es->buffers ? &bufusage : NULL),
es->memory ? &mem_counters : NULL);
else
@@ -671,8 +668,6 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
/* Separate plans with an appropriate separator */
if (lnext(plan_list, p) != NULL)
ExplainSeparatePlans(es);
-
- query_index++;
}
if (estate)
diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c
index 0b23d94c38e..1bf7eaae5b3 100644
--- a/src/backend/commands/publicationcmds.c
+++ b/src/backend/commands/publicationcmds.c
@@ -2130,8 +2130,8 @@ defGetGeneratedColsOption(DefElem *def)
ereport(ERROR,
errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("%s requires a \"none\" or \"stored\" value",
- def->defname));
+ errmsg("invalid value for publication parameter \"%s\": \"%s\"", def->defname, sval),
+ errdetail("Valid values are \"%s\" and \"%s\".", "none", "stored"));
return PUBLISH_GENCOLS_NONE; /* keep compiler quiet */
}
diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c
index 4aec73bcc6b..4ff246cd943 100644
--- a/src/backend/commands/subscriptioncmds.c
+++ b/src/backend/commands/subscriptioncmds.c
@@ -1267,7 +1267,7 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
IsSet(opts.specified_opts, SUBOPT_SLOT_NAME))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("slot_name and two_phase cannot be altered at the same time")));
+ errmsg("\"slot_name\" and \"two_phase\" cannot be altered at the same time")));
/*
* Note that workers may still survive even if the
@@ -1283,7 +1283,7 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
if (logicalrep_workers_find(subid, true, true))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot alter two_phase when logical replication worker is still running"),
+ errmsg("cannot alter \"two_phase\" when logical replication worker is still running"),
errhint("Try again after some time.")));
/*
@@ -1297,7 +1297,7 @@ AlterSubscription(ParseState *pstate, AlterSubscriptionStmt *stmt,
LookupGXactBySubid(subid))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot disable two_phase when prepared transactions are present"),
+ errmsg("cannot disable \"two_phase\" when prepared transactions exist"),
errhint("Resolve these transactions and try again.")));
/* Change system catalog accordingly */
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 54ad38247aa..b8837f26cb4 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -430,8 +430,8 @@ static void AlterConstrUpdateConstraintEntry(ATAlterConstraint *cmdcon, Relation
static ObjectAddress ATExecValidateConstraint(List **wqueue,
Relation rel, char *constrName,
bool recurse, bool recursing, LOCKMODE lockmode);
-static void QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
- HeapTuple contuple, LOCKMODE lockmode);
+static void QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation fkrel,
+ Oid pkrelid, HeapTuple contuple, LOCKMODE lockmode);
static void QueueCheckConstraintValidation(List **wqueue, Relation conrel, Relation rel,
char *constrName, HeapTuple contuple,
bool recurse, bool recursing, LOCKMODE lockmode);
@@ -7374,7 +7374,7 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel,
/* make sure datatype is legal for a column */
CheckAttributeType(NameStr(attribute->attname), attribute->atttypid, attribute->attcollation,
list_make1_oid(rel->rd_rel->reltype),
- 0);
+ (attribute->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL ? CHKATYPE_IS_VIRTUAL : 0));
InsertPgAttributeTuples(attrdesc, tupdesc, myrelid, NULL, NULL);
@@ -8609,7 +8609,7 @@ ATExecSetExpression(AlteredTableInfo *tab, Relation rel, const char *colName,
rel->rd_att->constr && rel->rd_att->constr->num_check > 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables with check constraints"),
+ errmsg("ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables with check constraints"),
errdetail("Column \"%s\" of relation \"%s\" is a virtual generated column.",
colName, RelationGetRelationName(rel))));
@@ -8627,7 +8627,7 @@ ATExecSetExpression(AlteredTableInfo *tab, Relation rel, const char *colName,
GetRelationPublications(RelationGetRelid(rel)) != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables that are part of a publication"),
+ errmsg("ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables that are part of a publication"),
errdetail("Column \"%s\" of relation \"%s\" is a virtual generated column.",
colName, RelationGetRelationName(rel))));
@@ -10189,7 +10189,7 @@ ATAddForeignKeyConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
if (pk_has_without_overlaps && !with_period)
ereport(ERROR,
errcode(ERRCODE_INVALID_FOREIGN_KEY),
- errmsg("foreign key must use PERIOD when referencing a primary using WITHOUT OVERLAPS"));
+ errmsg("foreign key must use PERIOD when referencing a primary key using WITHOUT OVERLAPS"));
/*
* Now we can check permissions.
@@ -10330,8 +10330,8 @@ ATAddForeignKeyConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel,
for_overlaps
? errmsg("could not identify an overlaps operator for foreign key")
: errmsg("could not identify an equality operator for foreign key"),
- errdetail("Could not translate compare type %d for operator family \"%s\", input type %s, access method \"%s\".",
- cmptype, get_opfamily_name(opfamily, false), format_type_be(opcintype), get_am_name(amid)));
+ errdetail("Could not translate compare type %d for operator family \"%s\" of access method \"%s\".",
+ cmptype, get_opfamily_name(opfamily, false), get_am_name(amid)));
/*
* There had better be a primary equality operator for the index.
@@ -11858,6 +11858,7 @@ AttachPartitionForeignKey(List **wqueue,
if (queueValidation)
{
Relation conrel;
+ Oid confrelid;
conrel = table_open(ConstraintRelationId, RowExclusiveLock);
@@ -11865,9 +11866,11 @@ AttachPartitionForeignKey(List **wqueue,
if (!HeapTupleIsValid(partcontup))
elog(ERROR, "cache lookup failed for constraint %u", partConstrOid);
+ confrelid = ((Form_pg_constraint) GETSTRUCT(partcontup))->confrelid;
+
/* Use the same lock as for AT_ValidateConstraint */
- QueueFKConstraintValidation(wqueue, conrel, partition, partcontup,
- ShareUpdateExclusiveLock);
+ QueueFKConstraintValidation(wqueue, conrel, partition, confrelid,
+ partcontup, ShareUpdateExclusiveLock);
ReleaseSysCache(partcontup);
table_close(conrel, RowExclusiveLock);
}
@@ -12463,9 +12466,12 @@ ATExecAlterConstrEnforceability(List **wqueue, ATAlterConstraint *cmdcon,
/*
* Tell Phase 3 to check that the constraint is satisfied by existing
- * rows.
+ * rows. Only applies to leaf partitions, and (for constraints that
+ * reference a partitioned table) only if this is not one of the
+ * pg_constraint rows that exist solely to support action triggers.
*/
- if (rel->rd_rel->relkind == RELKIND_RELATION)
+ if (rel->rd_rel->relkind == RELKIND_RELATION &&
+ currcon->confrelid == pkrelid)
{
AlteredTableInfo *tab;
NewConstraint *newcon;
@@ -12907,8 +12913,9 @@ ATExecValidateConstraint(List **wqueue, Relation rel, char *constrName,
con->contype != CONSTRAINT_NOTNULL)
ereport(ERROR,
errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("constraint \"%s\" of relation \"%s\" is not a foreign key, check, or not-null constraint",
- constrName, RelationGetRelationName(rel)));
+ errmsg("cannot validate constraint \"%s\" of relation \"%s\"",
+ constrName, RelationGetRelationName(rel)),
+ errdetail("This operation is not supported for this type of constraint."));
if (!con->conenforced)
ereport(ERROR,
@@ -12919,7 +12926,8 @@ ATExecValidateConstraint(List **wqueue, Relation rel, char *constrName,
{
if (con->contype == CONSTRAINT_FOREIGN)
{
- QueueFKConstraintValidation(wqueue, conrel, rel, tuple, lockmode);
+ QueueFKConstraintValidation(wqueue, conrel, rel, con->confrelid,
+ tuple, lockmode);
}
else if (con->contype == CONSTRAINT_CHECK)
{
@@ -12952,8 +12960,8 @@ ATExecValidateConstraint(List **wqueue, Relation rel, char *constrName,
* for the specified relation and all its children.
*/
static void
-QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
- HeapTuple contuple, LOCKMODE lockmode)
+QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation fkrel,
+ Oid pkrelid, HeapTuple contuple, LOCKMODE lockmode)
{
Form_pg_constraint con;
AlteredTableInfo *tab;
@@ -12964,7 +12972,17 @@ QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
Assert(con->contype == CONSTRAINT_FOREIGN);
Assert(!con->convalidated);
- if (rel->rd_rel->relkind == RELKIND_RELATION)
+ /*
+ * Add the validation to phase 3's queue; not needed for partitioned
+ * tables themselves, only for their partitions.
+ *
+ * When the referenced table (pkrelid) is partitioned, the referencing
+ * table (fkrel) has one pg_constraint row pointing to each partition
+ * thereof.  Those rows exist only to support action triggers and need no
+ * table scan, so skip them as well.
+ */
+ if (fkrel->rd_rel->relkind == RELKIND_RELATION &&
+ con->confrelid == pkrelid)
{
NewConstraint *newcon;
Constraint *fkconstraint;
@@ -12983,15 +13001,16 @@ QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
newcon->qual = (Node *) fkconstraint;
/* Find or create work queue entry for this table */
- tab = ATGetQueueEntry(wqueue, rel);
+ tab = ATGetQueueEntry(wqueue, fkrel);
tab->constraints = lappend(tab->constraints, newcon);
}
/*
* If the table at either end of the constraint is partitioned, we need to
- * recurse and handle every constraint that is a child of this constraint.
+ * recurse and handle every unvalidated constraint that is a child of this
+ * constraint.
*/
- if (rel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ||
+ if (fkrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE ||
get_rel_relkind(con->confrelid) == RELKIND_PARTITIONED_TABLE)
{
ScanKeyData pkey;
@@ -13023,8 +13042,12 @@ QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
childrel = table_open(childcon->conrelid, lockmode);
- QueueFKConstraintValidation(wqueue, conrel, childrel, childtup,
- lockmode);
+ /*
+ * NB: pkrelid must be passed down unchanged during recursion, as it
+ * identifies the root referenced table.
+ */
+ QueueFKConstraintValidation(wqueue, conrel, childrel, pkrelid,
+ childtup, lockmode);
table_close(childrel, NoLock);
}
@@ -13032,7 +13055,11 @@ QueueFKConstraintValidation(List **wqueue, Relation conrel, Relation rel,
}
/*
- * Now update the catalog, while we have the door open.
+ * Now mark the pg_constraint row as validated (even if we didn't check,
+ * notably the ones for partitions on the referenced side).
+ *
+ * We rely on transaction abort to roll back this change if phase 3
+ * ultimately finds violating rows. This is a bit ugly.
*/
copyTuple = heap_copytuple(contuple);
copy_con = (Form_pg_constraint) GETSTRUCT(copyTuple);
@@ -14400,7 +14427,7 @@ ATPrepAlterColumnType(List **wqueue,
/* make sure datatype is legal for a column */
CheckAttributeType(colName, targettype, targetcollid,
list_make1_oid(rel->rd_rel->reltype),
- 0);
+ (attTup->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL ? CHKATYPE_IS_VIRTUAL : 0));
if (attTup->attgenerated == ATTRIBUTE_GENERATED_VIRTUAL)
{
@@ -14458,6 +14485,9 @@ ATPrepAlterColumnType(List **wqueue,
/* Fix collations after all else */
assign_expr_collations(pstate, transform);
+ /* Expand virtual generated columns in the expr. */
+ transform = expand_generated_columns_in_expr(transform, rel, 1);
+
/* Plan the expr now so we can accurately assess the need to rewrite. */
transform = (Node *) expression_planner((Expr *) transform);
@@ -15385,9 +15415,12 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode)
/*
* Re-parse the index and constraint definitions, and attach them to the
* appropriate work queue entries. We do this before dropping because in
- * the case of a FOREIGN KEY constraint, we might not yet have exclusive
- * lock on the table the constraint is attached to, and we need to get
- * that before reparsing/dropping.
+ * the case of a constraint on another table, we might not yet have
+ * exclusive lock on the table the constraint is attached to, and we need
+ * to get that before reparsing/dropping. (That's possible at least for
+ * FOREIGN KEY, CHECK, and EXCLUSION constraints; in non-FK cases it
+ * requires a dependency on the target table's composite type in the other
+ * table's constraint expressions.)
*
* We can't rely on the output of deparsing to tell us which relation to
* operate on, because concurrent activity might have made the name
@@ -15403,7 +15436,6 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode)
Form_pg_constraint con;
Oid relid;
Oid confrelid;
- char contype;
bool conislocal;
tup = SearchSysCache1(CONSTROID, ObjectIdGetDatum(oldId));
@@ -15420,7 +15452,6 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode)
elog(ERROR, "could not identify relation associated with constraint %u", oldId);
}
confrelid = con->confrelid;
- contype = con->contype;
conislocal = con->conislocal;
ReleaseSysCache(tup);
@@ -15438,12 +15469,12 @@ ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, LOCKMODE lockmode)
continue;
/*
- * When rebuilding an FK constraint that references the table we're
- * modifying, we might not yet have any lock on the FK's table, so get
- * one now. We'll need AccessExclusiveLock for the DROP CONSTRAINT
- * step, so there's no value in asking for anything weaker.
+ * When rebuilding another table's constraint that references the
+ * table we're modifying, we might not yet have any lock on the other
+ * table, so get one now. We'll need AccessExclusiveLock for the DROP
+ * CONSTRAINT step, so there's no value in asking for anything weaker.
*/
- if (relid != tab->relid && contype == CONSTRAINT_FOREIGN)
+ if (relid != tab->relid)
LockRelationOid(relid, AccessExclusiveLock);
ATPostAlterTypeParse(oldId, relid, confrelid,
@@ -20964,9 +20995,17 @@ ATExecDetachPartition(List **wqueue, AlteredTableInfo *tab, Relation rel,
tab->rel = rel;
}
+ /*
+ * Detaching the partition might involve TOAST table access, so ensure we
+ * have a valid snapshot.
+ */
+ PushActiveSnapshot(GetTransactionSnapshot());
+
/* Do the final part of detaching */
DetachPartitionFinalize(rel, partRel, concurrent, defaultPartOid);
+ PopActiveSnapshot();
+
ObjectAddressSet(address, RelationRelationId, RelationGetRelid(partRel));
/* keep our lock until commit */
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index c9f61130c69..67f8e70f9c1 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -5058,21 +5058,6 @@ AfterTriggerBeginQuery(void)
/* ----------
- * AfterTriggerAbortQuery()
- *
- * Called by standard_ExecutorEnd() if the query execution was aborted due to
- * the plan becoming invalid during initialization.
- * ----------
- */
-void
-AfterTriggerAbortQuery(void)
-{
- /* Revert the actions of AfterTriggerBeginQuery(). */
- afterTriggers.query_depth--;
-}
-
-
-/* ----------
* AfterTriggerEndQuery()
*
* Called after one query has been completely processed. At this time
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 33a33bf6b1c..733ef40ae7c 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -56,6 +56,7 @@
#include "utils/fmgroids.h"
#include "utils/guc.h"
#include "utils/guc_hooks.h"
+#include "utils/injection_point.h"
#include "utils/memutils.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
@@ -123,7 +124,7 @@ static void vac_truncate_clog(TransactionId frozenXID,
MultiXactId minMulti,
TransactionId lastSaneFrozenXid,
MultiXactId lastSaneMinMulti);
-static bool vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
+static bool vacuum_rel(Oid relid, RangeVar *relation, VacuumParams params,
BufferAccessStrategy bstrategy);
static double compute_parallel_delay(void);
static VacOptValue get_vacoptval_from_boolean(DefElem *def);
@@ -464,7 +465,7 @@ ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel)
}
/* Now go through the common routine */
- vacuum(vacstmt->rels, &params, bstrategy, vac_context, isTopLevel);
+ vacuum(vacstmt->rels, params, bstrategy, vac_context, isTopLevel);
/* Finally, clean up the vacuum memory context */
MemoryContextDelete(vac_context);
@@ -493,7 +494,7 @@ ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel)
* memory context that will not disappear at transaction commit.
*/
void
-vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
+vacuum(List *relations, const VacuumParams params, BufferAccessStrategy bstrategy,
MemoryContext vac_context, bool isTopLevel)
{
static bool in_vacuum = false;
@@ -502,9 +503,7 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
volatile bool in_outer_xact,
use_own_xacts;
- Assert(params != NULL);
-
- stmttype = (params->options & VACOPT_VACUUM) ? "VACUUM" : "ANALYZE";
+ stmttype = (params.options & VACOPT_VACUUM) ? "VACUUM" : "ANALYZE";
/*
* We cannot run VACUUM inside a user transaction block; if we were inside
@@ -514,7 +513,7 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
*
* ANALYZE (without VACUUM) can run either way.
*/
- if (params->options & VACOPT_VACUUM)
+ if (params.options & VACOPT_VACUUM)
{
PreventInTransactionBlock(isTopLevel, stmttype);
in_outer_xact = false;
@@ -537,7 +536,7 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
* Build list of relation(s) to process, putting any new data in
* vac_context for safekeeping.
*/
- if (params->options & VACOPT_ONLY_DATABASE_STATS)
+ if (params.options & VACOPT_ONLY_DATABASE_STATS)
{
/* We don't process any tables in this case */
Assert(relations == NIL);
@@ -553,7 +552,7 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
List *sublist;
MemoryContext old_context;
- sublist = expand_vacuum_rel(vrel, vac_context, params->options);
+ sublist = expand_vacuum_rel(vrel, vac_context, params.options);
old_context = MemoryContextSwitchTo(vac_context);
newrels = list_concat(newrels, sublist);
MemoryContextSwitchTo(old_context);
@@ -561,7 +560,7 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
relations = newrels;
}
else
- relations = get_all_vacuum_rels(vac_context, params->options);
+ relations = get_all_vacuum_rels(vac_context, params.options);
/*
* Decide whether we need to start/commit our own transactions.
@@ -577,11 +576,11 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
* transaction block, and also in an autovacuum worker, use own
* transactions so we can release locks sooner.
*/
- if (params->options & VACOPT_VACUUM)
+ if (params.options & VACOPT_VACUUM)
use_own_xacts = true;
else
{
- Assert(params->options & VACOPT_ANALYZE);
+ Assert(params.options & VACOPT_ANALYZE);
if (AmAutoVacuumWorkerProcess())
use_own_xacts = true;
else if (in_outer_xact)
@@ -632,13 +631,13 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
{
VacuumRelation *vrel = lfirst_node(VacuumRelation, cur);
- if (params->options & VACOPT_VACUUM)
+ if (params.options & VACOPT_VACUUM)
{
if (!vacuum_rel(vrel->oid, vrel->relation, params, bstrategy))
continue;
}
- if (params->options & VACOPT_ANALYZE)
+ if (params.options & VACOPT_ANALYZE)
{
/*
* If using separate xacts, start one for analyze. Otherwise,
@@ -702,8 +701,8 @@ vacuum(List *relations, VacuumParams *params, BufferAccessStrategy bstrategy,
StartTransactionCommand();
}
- if ((params->options & VACOPT_VACUUM) &&
- !(params->options & VACOPT_SKIP_DATABASE_STATS))
+ if ((params.options & VACOPT_VACUUM) &&
+ !(params.options & VACOPT_SKIP_DATABASE_STATS))
{
/*
* Update pg_database.datfrozenxid, and truncate pg_xact if possible.
@@ -1101,7 +1100,7 @@ get_all_vacuum_rels(MemoryContext vac_context, int options)
* minimum).
*/
bool
-vacuum_get_cutoffs(Relation rel, const VacuumParams *params,
+vacuum_get_cutoffs(Relation rel, const VacuumParams params,
struct VacuumCutoffs *cutoffs)
{
int freeze_min_age,
@@ -1117,10 +1116,10 @@ vacuum_get_cutoffs(Relation rel, const VacuumParams *params,
aggressiveMXIDCutoff;
/* Use mutable copies of freeze age parameters */
- freeze_min_age = params->freeze_min_age;
- multixact_freeze_min_age = params->multixact_freeze_min_age;
- freeze_table_age = params->freeze_table_age;
- multixact_freeze_table_age = params->multixact_freeze_table_age;
+ freeze_min_age = params.freeze_min_age;
+ multixact_freeze_min_age = params.multixact_freeze_min_age;
+ freeze_table_age = params.freeze_table_age;
+ multixact_freeze_table_age = params.multixact_freeze_table_age;
/* Set pg_class fields in cutoffs */
cutoffs->relfrozenxid = rel->rd_rel->relfrozenxid;
@@ -1997,7 +1996,7 @@ vac_truncate_clog(TransactionId frozenXID,
* At entry and exit, we are not inside a transaction.
*/
static bool
-vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
+vacuum_rel(Oid relid, RangeVar *relation, VacuumParams params,
BufferAccessStrategy bstrategy)
{
LOCKMODE lmode;
@@ -2008,13 +2007,18 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
Oid save_userid;
int save_sec_context;
int save_nestlevel;
+ VacuumParams toast_vacuum_params;
- Assert(params != NULL);
+ /*
+ * This function scribbles on the parameters, so make a copy early to
+ * avoid affecting the TOAST table (if we do end up recursing to it).
+ */
+ memcpy(&toast_vacuum_params, &params, sizeof(VacuumParams));
/* Begin a transaction for vacuuming this relation */
StartTransactionCommand();
- if (!(params->options & VACOPT_FULL))
+ if (!(params.options & VACOPT_FULL))
{
/*
* In lazy vacuum, we can set the PROC_IN_VACUUM flag, which lets
@@ -2040,7 +2044,7 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
*/
LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
MyProc->statusFlags |= PROC_IN_VACUUM;
- if (params->is_wraparound)
+ if (params.is_wraparound)
MyProc->statusFlags |= PROC_VACUUM_FOR_WRAPAROUND;
ProcGlobal->statusFlags[MyProc->pgxactoff] = MyProc->statusFlags;
LWLockRelease(ProcArrayLock);
@@ -2064,12 +2068,12 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
* vacuum, but just ShareUpdateExclusiveLock for concurrent vacuum. Either
* way, we can be sure that no other backend is vacuuming the same table.
*/
- lmode = (params->options & VACOPT_FULL) ?
+ lmode = (params.options & VACOPT_FULL) ?
AccessExclusiveLock : ShareUpdateExclusiveLock;
/* open the relation and get the appropriate lock on it */
- rel = vacuum_open_relation(relid, relation, params->options,
- params->log_min_duration >= 0, lmode);
+ rel = vacuum_open_relation(relid, relation, params.options,
+ params.log_min_duration >= 0, lmode);
/* leave if relation could not be opened or locked */
if (!rel)
@@ -2084,8 +2088,8 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
* This is only safe to do because we hold a session lock on the main
* relation that prevents concurrent deletion.
*/
- if (OidIsValid(params->toast_parent))
- priv_relid = params->toast_parent;
+ if (OidIsValid(params.toast_parent))
+ priv_relid = params.toast_parent;
else
priv_relid = RelationGetRelid(rel);
@@ -2098,7 +2102,7 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
*/
if (!vacuum_is_permitted_for_relation(priv_relid,
rel->rd_rel,
- params->options & ~VACOPT_ANALYZE))
+ params.options & ~VACOPT_ANALYZE))
{
relation_close(rel, lmode);
PopActiveSnapshot();
@@ -2169,7 +2173,7 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
* Set index_cleanup option based on index_cleanup reloption if it wasn't
* specified in VACUUM command, or when running in an autovacuum worker
*/
- if (params->index_cleanup == VACOPTVALUE_UNSPECIFIED)
+ if (params.index_cleanup == VACOPTVALUE_UNSPECIFIED)
{
StdRdOptIndexCleanup vacuum_index_cleanup;
@@ -2180,56 +2184,74 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
((StdRdOptions *) rel->rd_options)->vacuum_index_cleanup;
if (vacuum_index_cleanup == STDRD_OPTION_VACUUM_INDEX_CLEANUP_AUTO)
- params->index_cleanup = VACOPTVALUE_AUTO;
+ params.index_cleanup = VACOPTVALUE_AUTO;
else if (vacuum_index_cleanup == STDRD_OPTION_VACUUM_INDEX_CLEANUP_ON)
- params->index_cleanup = VACOPTVALUE_ENABLED;
+ params.index_cleanup = VACOPTVALUE_ENABLED;
else
{
Assert(vacuum_index_cleanup ==
STDRD_OPTION_VACUUM_INDEX_CLEANUP_OFF);
- params->index_cleanup = VACOPTVALUE_DISABLED;
+ params.index_cleanup = VACOPTVALUE_DISABLED;
}
}
+#ifdef USE_INJECTION_POINTS
+ if (params.index_cleanup == VACOPTVALUE_AUTO)
+ INJECTION_POINT("vacuum-index-cleanup-auto", NULL);
+ else if (params.index_cleanup == VACOPTVALUE_DISABLED)
+ INJECTION_POINT("vacuum-index-cleanup-disabled", NULL);
+ else if (params.index_cleanup == VACOPTVALUE_ENABLED)
+ INJECTION_POINT("vacuum-index-cleanup-enabled", NULL);
+#endif
+
/*
* Check if the vacuum_max_eager_freeze_failure_rate table storage
* parameter was specified. This overrides the GUC value.
*/
if (rel->rd_options != NULL &&
((StdRdOptions *) rel->rd_options)->vacuum_max_eager_freeze_failure_rate >= 0)
- params->max_eager_freeze_failure_rate =
+ params.max_eager_freeze_failure_rate =
((StdRdOptions *) rel->rd_options)->vacuum_max_eager_freeze_failure_rate;
/*
* Set truncate option based on truncate reloption or GUC if it wasn't
* specified in VACUUM command, or when running in an autovacuum worker
*/
- if (params->truncate == VACOPTVALUE_UNSPECIFIED)
+ if (params.truncate == VACOPTVALUE_UNSPECIFIED)
{
StdRdOptions *opts = (StdRdOptions *) rel->rd_options;
if (opts && opts->vacuum_truncate_set)
{
if (opts->vacuum_truncate)
- params->truncate = VACOPTVALUE_ENABLED;
+ params.truncate = VACOPTVALUE_ENABLED;
else
- params->truncate = VACOPTVALUE_DISABLED;
+ params.truncate = VACOPTVALUE_DISABLED;
}
else if (vacuum_truncate)
- params->truncate = VACOPTVALUE_ENABLED;
+ params.truncate = VACOPTVALUE_ENABLED;
else
- params->truncate = VACOPTVALUE_DISABLED;
+ params.truncate = VACOPTVALUE_DISABLED;
}
+#ifdef USE_INJECTION_POINTS
+ if (params.truncate == VACOPTVALUE_AUTO)
+ INJECTION_POINT("vacuum-truncate-auto", NULL);
+ else if (params.truncate == VACOPTVALUE_DISABLED)
+ INJECTION_POINT("vacuum-truncate-disabled", NULL);
+ else if (params.truncate == VACOPTVALUE_ENABLED)
+ INJECTION_POINT("vacuum-truncate-enabled", NULL);
+#endif
+
/*
* Remember the relation's TOAST relation for later, if the caller asked
* us to process it. In VACUUM FULL, though, the toast table is
* automatically rebuilt by cluster_rel so we shouldn't recurse to it,
* unless PROCESS_MAIN is disabled.
*/
- if ((params->options & VACOPT_PROCESS_TOAST) != 0 &&
- ((params->options & VACOPT_FULL) == 0 ||
- (params->options & VACOPT_PROCESS_MAIN) == 0))
+ if ((params.options & VACOPT_PROCESS_TOAST) != 0 &&
+ ((params.options & VACOPT_FULL) == 0 ||
+ (params.options & VACOPT_PROCESS_MAIN) == 0))
toast_relid = rel->rd_rel->reltoastrelid;
else
toast_relid = InvalidOid;
@@ -2252,16 +2274,16 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
* table is required (e.g., PROCESS_TOAST is set), we force PROCESS_MAIN
* to be set when we recurse to the TOAST table.
*/
- if (params->options & VACOPT_PROCESS_MAIN)
+ if (params.options & VACOPT_PROCESS_MAIN)
{
/*
* Do the actual work --- either FULL or "lazy" vacuum
*/
- if (params->options & VACOPT_FULL)
+ if (params.options & VACOPT_FULL)
{
ClusterParams cluster_params = {0};
- if ((params->options & VACOPT_VERBOSE) != 0)
+ if ((params.options & VACOPT_VERBOSE) != 0)
cluster_params.options |= CLUOPT_VERBOSE;
/* VACUUM FULL is now a variant of CLUSTER; see cluster.c */
@@ -2299,19 +2321,16 @@ vacuum_rel(Oid relid, RangeVar *relation, VacuumParams *params,
*/
if (toast_relid != InvalidOid)
{
- VacuumParams toast_vacuum_params;
-
/*
* Force VACOPT_PROCESS_MAIN so vacuum_rel() processes it. Likewise,
* set toast_parent so that the privilege checks are done on the main
* relation. NB: This is only safe to do because we hold a session
* lock on the main relation that prevents concurrent deletion.
*/
- memcpy(&toast_vacuum_params, params, sizeof(VacuumParams));
toast_vacuum_params.options |= VACOPT_PROCESS_MAIN;
toast_vacuum_params.toast_parent = relid;
- vacuum_rel(toast_relid, NULL, &toast_vacuum_params, bstrategy);
+ vacuum_rel(toast_relid, NULL, toast_vacuum_params, bstrategy);
}
/*
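The vacuum.c hunks above change vacuum_rel() and its helpers to take VacuumParams by value, so each invocation may scribble on its own copy; the TOAST copy is made before any scribbling so the recursive call sees the caller's original settings. A minimal sketch of that convention, assuming a hypothetical Params struct in place of the real VacuumParams:

    #include <string.h>

    typedef struct Params          /* hypothetical stand-in for VacuumParams */
    {
        int     options;
        int     index_cleanup;
    } Params;

    static void
    vacuum_one_rel(Params params)  /* by value: the callee owns this copy */
    {
        Params  toast_params;

        /* Copy before scribbling, so the recursion sees original values. */
        memcpy(&toast_params, &params, sizeof(Params));

        params.index_cleanup = 1;  /* local-only change; caller unaffected */

        /* ... later: vacuum_one_rel(toast_params) for the TOAST table ... */
    }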
diff --git a/src/backend/commands/vacuumparallel.c b/src/backend/commands/vacuumparallel.c
index 2b9d548cdeb..0feea1d30ec 100644
--- a/src/backend/commands/vacuumparallel.c
+++ b/src/backend/commands/vacuumparallel.c
@@ -63,7 +63,7 @@ typedef struct PVShared
*/
Oid relid;
int elevel;
- uint64 queryid;
+ int64 queryid;
/*
* Fields for both index vacuum and cleanup.
diff --git a/src/backend/executor/README b/src/backend/executor/README
index 02745c23ed9..54f4782f31b 100644
--- a/src/backend/executor/README
+++ b/src/backend/executor/README
@@ -285,28 +285,6 @@ are typically reset to empty once per tuple. Per-tuple contexts are usually
associated with ExprContexts, and commonly each PlanState node has its own
ExprContext to evaluate its qual and targetlist expressions in.
-Relation Locking
-----------------
-
-When the executor initializes a plan tree for execution, it doesn't lock
-non-index relations if the plan tree is freshly generated and not derived
-from a CachedPlan. This is because such locks have already been established
-during the query's parsing, rewriting, and planning phases. However, with a
-cached plan tree, some relations may remain unlocked. The function
-AcquireExecutorLocks() only locks unprunable relations in the plan, deferring
-the locking of prunable ones to executor initialization. This avoids
-unnecessary locking of relations that will be pruned during "initial" runtime
-pruning in ExecDoInitialPruning().
-
-This approach creates a window where a cached plan tree with child tables
-could become outdated if another backend modifies these tables before
-ExecDoInitialPruning() locks them. As a result, the executor has the added duty
-to verify the plan tree's validity whenever it locks a child table after
-doing initial pruning. This validation is done by checking the CachedPlan.is_valid
-flag. If the plan tree is outdated (is_valid = false), the executor stops
-further initialization, cleans up anything in EState that would have been
-allocated up to that point, and retries execution after recreating the
-invalid plan in the CachedPlan. See ExecutorStartCachedPlan().
Query Processing Control Flow
-----------------------------
@@ -315,13 +293,11 @@ This is a sketch of control flow for full query processing:
CreateQueryDesc
- ExecutorStart or ExecutorStartCachedPlan
+ ExecutorStart
CreateExecutorState
creates per-query context
- switch to per-query context to run ExecDoInitialPruning and ExecInitNode
+ switch to per-query context to run ExecInitNode
AfterTriggerBeginQuery
- ExecDoInitialPruning
- does initial pruning and locks surviving partitions if needed
ExecInitNode --- recursively scans plan tree
ExecInitNode
recurse into subsidiary nodes
@@ -345,12 +321,7 @@ This is a sketch of control flow for full query processing:
FreeQueryDesc
-As mentioned in the "Relation Locking" section, if the plan tree is found to
-be stale after locking partitions in ExecDoInitialPruning(), the control is
-immediately returned to ExecutorStartCachedPlan(), which will create a new plan
-tree and perform the steps starting from CreateExecutorState() again.
-
-Per above comments, it's not really critical for ExecEndPlan to free any
+Per above comments, it's not really critical for ExecEndNode to free any
memory; it'll all go away in FreeExecutorState anyway. However, we do need to
be careful to close relations, drop buffer pins, etc, so we do need to scan
the plan state tree to find these sorts of resources.
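With ExecutorStartCachedPlan() gone, the control flow sketched above reduces to a plain call sequence. As a rough C rendition (snapshot setup and error handling omitted; stmt, query_string, dest, params, and queryEnv are placeholders assumed to be in scope):

    QueryDesc  *qd = CreateQueryDesc(stmt, query_string,
                                     GetActiveSnapshot(), InvalidSnapshot,
                                     dest, params, queryEnv, 0);

    ExecutorStart(qd, 0);
    ExecutorRun(qd, ForwardScanDirection, 0);
    ExecutorFinish(qd);
    ExecutorEnd(qd);
    FreeQueryDesc(qd);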
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index 255bd795361..b5400749353 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -144,7 +144,7 @@ execTuplesHashPrepare(int numCols,
* hashfunctions: FmgrInfos of datatype-specific hashing functions to use
* collations: collations to use in comparisons
* nbuckets: initial estimate of hashtable size
- * additionalsize: size of data stored in ->additional
+ * additionalsize: size of data that may be stored along with the hash entry
* metacxt: memory context for long-lived allocation, but not per-entry data
* tablecxt: memory context in which to store table entries
* tempcxt: short-lived context for evaluation hash and comparison functions
@@ -288,7 +288,7 @@ ResetTupleHashTable(TupleHashTable hashtable)
*
* If isnew isn't NULL, then a new entry is created if no existing entry
* matches. On return, *isnew is true if the entry is newly created,
- * false if it existed already. ->additional_data in the new entry has
+ * false if it existed already. The additional data in the new entry has
* been zeroed.
*/
TupleHashEntry
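The reworded comments describe caller-owned bytes allocated alongside each hash entry rather than behind a separate pointer. A generic sketch of that layout, not the backend's actual TupleHashEntry definition:

    #include <stdlib.h>

    typedef struct Entry
    {
        unsigned    hash;          /* illustrative key/hash fields */
    } Entry;

    /*
     * One allocation carries the entry plus 'additionalsize' caller bytes;
     * calloc() zeroes them, matching the "has been zeroed" contract above.
     */
    static Entry *
    make_entry(size_t additionalsize)
    {
        Entry  *e = calloc(1, sizeof(Entry) + additionalsize);

        /* caller data, if any, lives at (char *) e + sizeof(Entry) */
        return e;
    }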
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 7230f968101..0391798dd2c 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -55,13 +55,11 @@
#include "parser/parse_relation.h"
#include "pgstat.h"
#include "rewrite/rewriteHandler.h"
-#include "storage/lmgr.h"
#include "tcop/utility.h"
#include "utils/acl.h"
#include "utils/backend_status.h"
#include "utils/lsyscache.h"
#include "utils/partcache.h"
-#include "utils/plancache.h"
#include "utils/rls.h"
#include "utils/snapmgr.h"
@@ -119,16 +117,11 @@ static void ReportNotNullViolationError(ResultRelInfo *resultRelInfo,
* get control when ExecutorStart is called. Such a plugin would
* normally call standard_ExecutorStart().
*
- * Return value indicates if the plan has been initialized successfully so
- * that queryDesc->planstate contains a valid PlanState tree. It may not
- * if the plan got invalidated during InitPlan().
* ----------------------------------------------------------------
*/
-bool
+void
ExecutorStart(QueryDesc *queryDesc, int eflags)
{
- bool plan_valid;
-
/*
* In some cases (e.g. an EXECUTE statement or an execute message with the
* extended query protocol) the query_id won't be reported, so do it now.
@@ -140,14 +133,12 @@ ExecutorStart(QueryDesc *queryDesc, int eflags)
pgstat_report_query_id(queryDesc->plannedstmt->queryId, false);
if (ExecutorStart_hook)
- plan_valid = (*ExecutorStart_hook) (queryDesc, eflags);
+ (*ExecutorStart_hook) (queryDesc, eflags);
else
- plan_valid = standard_ExecutorStart(queryDesc, eflags);
-
- return plan_valid;
+ standard_ExecutorStart(queryDesc, eflags);
}
-bool
+void
standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
EState *estate;
@@ -271,64 +262,6 @@ standard_ExecutorStart(QueryDesc *queryDesc, int eflags)
InitPlan(queryDesc, eflags);
MemoryContextSwitchTo(oldcontext);
-
- return ExecPlanStillValid(queryDesc->estate);
-}
-
-/*
- * ExecutorStartCachedPlan
- * Start execution for a given query in the CachedPlanSource, replanning
- * if the plan is invalidated due to deferred locks taken during the
- * plan's initialization
- *
- * This function handles cases where the CachedPlan given in queryDesc->cplan
- * might become invalid during the initialization of the plan given in
- * queryDesc->plannedstmt, particularly when prunable relations in it are
- * locked after performing initial pruning. If the locks invalidate the plan,
- * the function calls UpdateCachedPlan() to replan all queries in the
- * CachedPlan, and then retries initialization.
- *
- * The function repeats the process until ExecutorStart() successfully
- * initializes the plan, that is without the CachedPlan becoming invalid.
- */
-void
-ExecutorStartCachedPlan(QueryDesc *queryDesc, int eflags,
- CachedPlanSource *plansource,
- int query_index)
-{
- if (unlikely(queryDesc->cplan == NULL))
- elog(ERROR, "ExecutorStartCachedPlan(): missing CachedPlan");
- if (unlikely(plansource == NULL))
- elog(ERROR, "ExecutorStartCachedPlan(): missing CachedPlanSource");
-
- /*
- * Loop and retry with an updated plan until no further invalidation
- * occurs.
- */
- while (1)
- {
- if (!ExecutorStart(queryDesc, eflags))
- {
- /*
- * Clean up the current execution state before creating the new
- * plan to retry ExecutorStart(). Mark execution as aborted to
- * ensure that AFTER trigger state is properly reset.
- */
- queryDesc->estate->es_aborted = true;
- ExecutorEnd(queryDesc);
-
- /* Retry ExecutorStart() with an updated plan tree. */
- queryDesc->plannedstmt = UpdateCachedPlan(plansource, query_index,
- queryDesc->queryEnv);
- }
- else
-
- /*
- * Exit the loop if the plan is initialized successfully and no
- * sinval messages were received that invalidated the CachedPlan.
- */
- break;
- }
}
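With the bool return gone, ExecutorStart_hook implementations revert to simple pass-throughs. A minimal extension-side sketch against the new signature (module boilerplate such as PG_MODULE_MAGIC omitted):

    #include "postgres.h"
    #include "executor/executor.h"

    static ExecutorStart_hook_type prev_ExecutorStart = NULL;

    static void
    my_ExecutorStart(QueryDesc *queryDesc, int eflags)
    {
        /* per-query setup would go here */

        if (prev_ExecutorStart)
            prev_ExecutorStart(queryDesc, eflags);
        else
            standard_ExecutorStart(queryDesc, eflags);
    }

    void
    _PG_init(void)
    {
        prev_ExecutorStart = ExecutorStart_hook;
        ExecutorStart_hook = my_ExecutorStart;
    }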
/* ----------------------------------------------------------------
@@ -387,7 +320,6 @@ standard_ExecutorRun(QueryDesc *queryDesc,
estate = queryDesc->estate;
Assert(estate != NULL);
- Assert(!estate->es_aborted);
Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
/* caller must ensure the query's snapshot is active */
@@ -494,11 +426,8 @@ standard_ExecutorFinish(QueryDesc *queryDesc)
Assert(estate != NULL);
Assert(!(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
- /*
- * This should be run once and only once per Executor instance and never
- * if the execution was aborted.
- */
- Assert(!estate->es_finished && !estate->es_aborted);
+ /* This should be run once and only once per Executor instance */
+ Assert(!estate->es_finished);
/* Switch into per-query memory context */
oldcontext = MemoryContextSwitchTo(estate->es_query_cxt);
@@ -561,10 +490,11 @@ standard_ExecutorEnd(QueryDesc *queryDesc)
(PgStat_Counter) estate->es_parallel_workers_launched);
/*
- * Check that ExecutorFinish was called, unless in EXPLAIN-only mode or if
- * execution was aborted.
+ * Check that ExecutorFinish was called, unless in EXPLAIN-only mode. This
+ * Assert is needed because ExecutorFinish is new as of 9.1, and callers
+ * might forget to call it.
*/
- Assert(estate->es_finished || estate->es_aborted ||
+ Assert(estate->es_finished ||
(estate->es_top_eflags & EXEC_FLAG_EXPLAIN_ONLY));
/*
@@ -579,14 +509,6 @@ standard_ExecutorEnd(QueryDesc *queryDesc)
UnregisterSnapshot(estate->es_crosscheck_snapshot);
/*
- * Reset AFTER trigger module if the query execution was aborted.
- */
- if (estate->es_aborted &&
- !(estate->es_top_eflags &
- (EXEC_FLAG_SKIP_TRIGGERS | EXEC_FLAG_EXPLAIN_ONLY)))
- AfterTriggerAbortQuery();
-
- /*
* Must switch out of context before destroying it
*/
MemoryContextSwitchTo(oldcontext);
@@ -684,21 +606,6 @@ ExecCheckPermissions(List *rangeTable, List *rteperminfos,
(rte->rtekind == RTE_SUBQUERY &&
rte->relkind == RELKIND_VIEW));
- /*
- * Ensure that we have at least an AccessShareLock on relations
- * whose permissions need to be checked.
- *
- * Skip this check in a parallel worker because locks won't be
- * taken until ExecInitNode() performs plan initialization.
- *
- * XXX: ExecCheckPermissions() in a parallel worker may be
- * redundant with the checks done in the leader process, so this
- * should be reviewed to ensure it’s necessary.
- */
- Assert(IsParallelWorker() ||
- CheckRelationOidLockedByMe(rte->relid, AccessShareLock,
- true));
-
(void) getRTEPermissionInfo(rteperminfos, rte);
/* Many-to-one mapping not allowed */
Assert(!bms_is_member(rte->perminfoindex, indexset));
@@ -924,12 +831,6 @@ ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
*
* Initializes the query plan: open files, allocate storage
* and start up the rule manager
- *
- * If the plan originates from a CachedPlan (given in queryDesc->cplan),
- * it can become invalid during runtime "initial" pruning when the
- * remaining set of locks is taken. The function returns early in that
- * case without initializing the plan, and the caller is expected to
- * retry with a new valid plan.
* ----------------------------------------------------------------
*/
static void
@@ -937,7 +838,6 @@ InitPlan(QueryDesc *queryDesc, int eflags)
{
CmdType operation = queryDesc->operation;
PlannedStmt *plannedstmt = queryDesc->plannedstmt;
- CachedPlan *cachedplan = queryDesc->cplan;
Plan *plan = plannedstmt->planTree;
List *rangeTable = plannedstmt->rtable;
EState *estate = queryDesc->estate;
@@ -958,7 +858,6 @@ InitPlan(QueryDesc *queryDesc, int eflags)
bms_copy(plannedstmt->unprunableRelids));
estate->es_plannedstmt = plannedstmt;
- estate->es_cachedplan = cachedplan;
estate->es_part_prune_infos = plannedstmt->partPruneInfos;
/*
@@ -972,9 +871,6 @@ InitPlan(QueryDesc *queryDesc, int eflags)
*/
ExecDoInitialPruning(estate);
- if (!ExecPlanStillValid(estate))
- return;
-
/*
* Next, build the ExecRowMark array from the PlanRowMark(s), if any.
*/
@@ -3092,9 +2988,6 @@ EvalPlanQualStart(EPQState *epqstate, Plan *planTree)
* the snapshot, rangetable, and external Param info. They need their own
* copies of local state, including a tuple table, es_param_exec_vals,
* result-rel info, etc.
- *
- * es_cachedplan is not copied because EPQ plan execution does not acquire
- * any new locks that could invalidate the CachedPlan.
*/
rcestate->es_direction = ForwardScanDirection;
rcestate->es_snapshot = parentestate->es_snapshot;
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index 39c990ae638..f3e77bda279 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -1278,15 +1278,8 @@ ExecParallelGetQueryDesc(shm_toc *toc, DestReceiver *receiver,
paramspace = shm_toc_lookup(toc, PARALLEL_KEY_PARAMLISTINFO, false);
paramLI = RestoreParamList(&paramspace);
- /*
- * Create a QueryDesc for the query. We pass NULL for cachedplan, because
- * we don't have a pointer to the CachedPlan in the leader's process. It's
- * fine because the only reason the executor needs to see it is to decide
- * if it should take locks on certain relations, but parallel workers
- * always take locks anyway.
- */
+ /* Create a QueryDesc for the query. */
return CreateQueryDesc(pstmt,
- NULL,
queryString,
GetActiveSnapshot(), InvalidSnapshot,
receiver, paramLI, NULL, instrument_options);
@@ -1471,8 +1464,7 @@ ParallelQueryMain(dsm_segment *seg, shm_toc *toc)
/* Start up the executor */
queryDesc->plannedstmt->jitFlags = fpes->jit_flags;
- if (!ExecutorStart(queryDesc, fpes->eflags))
- elog(ERROR, "ExecutorStart() failed unexpectedly");
+ ExecutorStart(queryDesc, fpes->eflags);
/* Special executor initialization steps for parallel workers */
queryDesc->planstate->state->es_query_dsa = area;
diff --git a/src/backend/executor/execPartition.c b/src/backend/executor/execPartition.c
index 3f8a4cb5244..514eae1037d 100644
--- a/src/backend/executor/execPartition.c
+++ b/src/backend/executor/execPartition.c
@@ -26,7 +26,6 @@
#include "partitioning/partdesc.h"
#include "partitioning/partprune.h"
#include "rewrite/rewriteManip.h"
-#include "storage/lmgr.h"
#include "utils/acl.h"
#include "utils/lsyscache.h"
#include "utils/partcache.h"
@@ -1771,8 +1770,7 @@ adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap)
* ExecDoInitialPruning:
* Perform runtime "initial" pruning, if necessary, to determine the set
* of child subnodes that need to be initialized during ExecInitNode() for
- * all plan nodes that contain a PartitionPruneInfo. This also locks the
- * leaf partitions whose subnodes will be initialized if needed.
+ * all plan nodes that contain a PartitionPruneInfo.
*
* ExecInitPartitionExecPruning:
* Updates the PartitionPruneState found at given part_prune_index in
@@ -1798,8 +1796,7 @@ adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap)
* ExecDoInitialPruning
* Perform runtime "initial" pruning, if necessary, to determine the set
* of child subnodes that need to be initialized during ExecInitNode() for
- * plan nodes that support partition pruning. This also locks the leaf
- * partitions whose subnodes will be initialized if needed.
+ * plan nodes that support partition pruning.
*
* This function iterates over each PartitionPruneInfo entry in
* estate->es_part_prune_infos. For each entry, it creates a PartitionPruneState
@@ -1821,9 +1818,7 @@ adjust_partition_colnos_using_map(List *colnos, AttrMap *attrMap)
void
ExecDoInitialPruning(EState *estate)
{
- PlannedStmt *stmt = estate->es_plannedstmt;
ListCell *lc;
- List *locked_relids = NIL;
foreach(lc, estate->es_part_prune_infos)
{
@@ -1849,68 +1844,11 @@ ExecDoInitialPruning(EState *estate)
else
validsubplan_rtis = all_leafpart_rtis;
- if (ExecShouldLockRelations(estate))
- {
- int rtindex = -1;
-
- while ((rtindex = bms_next_member(validsubplan_rtis,
- rtindex)) >= 0)
- {
- RangeTblEntry *rte = exec_rt_fetch(rtindex, estate);
-
- Assert(rte->rtekind == RTE_RELATION &&
- rte->rellockmode != NoLock);
- LockRelationOid(rte->relid, rte->rellockmode);
- locked_relids = lappend_int(locked_relids, rtindex);
- }
- }
estate->es_unpruned_relids = bms_add_members(estate->es_unpruned_relids,
validsubplan_rtis);
estate->es_part_prune_results = lappend(estate->es_part_prune_results,
validsubplans);
}
-
- /*
- * Lock the first result relation of each ModifyTable node, even if it was
- * pruned. This is required for ExecInitModifyTable(), which keeps its
- * first result relation if all other result relations have been pruned,
- * because some executor paths (e.g., in nodeModifyTable.c and
- * execPartition.c) rely on there being at least one result relation.
- *
- * There's room for improvement here --- we actually only need to do this
- * if all other result relations of the ModifyTable node were pruned, but
- * we don't have an easy way to tell that here.
- */
- if (stmt->resultRelations && ExecShouldLockRelations(estate))
- {
- foreach(lc, stmt->firstResultRels)
- {
- Index firstResultRel = lfirst_int(lc);
-
- if (!bms_is_member(firstResultRel, estate->es_unpruned_relids))
- {
- RangeTblEntry *rte = exec_rt_fetch(firstResultRel, estate);
-
- Assert(rte->rtekind == RTE_RELATION && rte->rellockmode != NoLock);
- LockRelationOid(rte->relid, rte->rellockmode);
- locked_relids = lappend_int(locked_relids, firstResultRel);
- }
- }
- }
-
- /*
- * Release the useless locks if the plan won't be executed. This is the
- * same as what CheckCachedPlan() in plancache.c does.
- */
- if (!ExecPlanStillValid(estate))
- {
- foreach(lc, locked_relids)
- {
- RangeTblEntry *rte = exec_rt_fetch(lfirst_int(lc), estate);
-
- UnlockRelationOid(rte->relid, rte->rellockmode);
- }
- }
}
/*
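For reference, the deleted block relied on the standard Bitmapset iteration idiom, which is unchanged elsewhere; its minimal form:

    #include "postgres.h"
    #include "nodes/bitmapset.h"

    static void
    walk_members(const Bitmapset *set)
    {
        int     x = -1;

        while ((x = bms_next_member(set, x)) >= 0)
        {
            /* process member x */
        }
    }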
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 772c86e70e9..fdc65c2b42b 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -147,7 +147,6 @@ CreateExecutorState(void)
estate->es_top_eflags = 0;
estate->es_instrument = 0;
estate->es_finished = false;
- estate->es_aborted = false;
estate->es_exprcontexts = NIL;
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index 8d4d062d579..359aafea681 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -34,6 +34,7 @@
#include "utils/funccache.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
+#include "utils/plancache.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
@@ -1338,7 +1339,6 @@ postquel_start(execution_state *es, SQLFunctionCachePtr fcache)
dest = None_Receiver;
es->qd = CreateQueryDesc(es->stmt,
- NULL,
fcache->func->src,
GetActiveSnapshot(),
InvalidSnapshot,
@@ -1363,8 +1363,7 @@ postquel_start(execution_state *es, SQLFunctionCachePtr fcache)
eflags = EXEC_FLAG_SKIP_TRIGGERS;
else
eflags = 0; /* default run-to-completion flags */
- if (!ExecutorStart(es->qd, eflags))
- elog(ERROR, "ExecutorStart() failed unexpectedly");
+ ExecutorStart(es->qd, eflags);
}
es->status = F_EXEC_RUN;
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 46d533b7288..54da8e7995b 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -64,6 +64,7 @@
#include "nodes/nodeFuncs.h"
#include "optimizer/optimizer.h"
#include "rewrite/rewriteHandler.h"
+#include "rewrite/rewriteManip.h"
#include "storage/lmgr.h"
#include "utils/builtins.h"
#include "utils/datum.h"
@@ -3735,6 +3736,7 @@ ExecInitMerge(ModifyTableState *mtstate, EState *estate)
switch (action->commandType)
{
case CMD_INSERT:
+ /* INSERT actions always use rootRelInfo */
ExecCheckPlanOutput(rootRelInfo->ri_RelationDesc,
action->targetList);
@@ -3774,9 +3776,23 @@ ExecInitMerge(ModifyTableState *mtstate, EState *estate)
}
else
{
- /* not partitioned? use the stock relation and slot */
- tgtslot = resultRelInfo->ri_newTupleSlot;
- tgtdesc = RelationGetDescr(resultRelInfo->ri_RelationDesc);
+ /*
+ * If the MERGE targets an inherited table, we insert
+ * into the root table, so we must initialize its
+ * "new" tuple slot, if not already done, and use its
+ * relation descriptor for the projection.
+ *
+ * For non-inherited tables, rootRelInfo and
+ * resultRelInfo are the same, and the "new" tuple
+ * slot will already have been initialized.
+ */
+ if (rootRelInfo->ri_newTupleSlot == NULL)
+ rootRelInfo->ri_newTupleSlot =
+ table_slot_create(rootRelInfo->ri_RelationDesc,
+ &estate->es_tupleTable);
+
+ tgtslot = rootRelInfo->ri_newTupleSlot;
+ tgtdesc = RelationGetDescr(rootRelInfo->ri_RelationDesc);
}
action_state->mas_proj =
@@ -3809,6 +3825,114 @@ ExecInitMerge(ModifyTableState *mtstate, EState *estate)
}
}
}
+
+ /*
+ * If the MERGE targets an inherited table, any INSERT actions will use
+ * rootRelInfo, and rootRelInfo will not be in the resultRelInfo array.
+ * Therefore we must initialize its WITH CHECK OPTION constraints and
+ * RETURNING projection, as ExecInitModifyTable did for the resultRelInfo
+ * entries.
+ *
+ * Note that the planner does not build a withCheckOptionList or
+ * returningList for the root relation, but as in ExecInitPartitionInfo,
+ * we can use the first resultRelInfo entry as a reference to calculate
+ * the attno's for the root table.
+ */
+ if (rootRelInfo != mtstate->resultRelInfo &&
+ rootRelInfo->ri_RelationDesc->rd_rel->relkind != RELKIND_PARTITIONED_TABLE &&
+ (mtstate->mt_merge_subcommands & MERGE_INSERT) != 0)
+ {
+ ModifyTable *node = (ModifyTable *) mtstate->ps.plan;
+ Relation rootRelation = rootRelInfo->ri_RelationDesc;
+ Relation firstResultRel = mtstate->resultRelInfo[0].ri_RelationDesc;
+ int firstVarno = mtstate->resultRelInfo[0].ri_RangeTableIndex;
+ AttrMap *part_attmap = NULL;
+ bool found_whole_row;
+
+ if (node->withCheckOptionLists != NIL)
+ {
+ List *wcoList;
+ List *wcoExprs = NIL;
+
+ /* There should be as many WCO lists as result rels */
+ Assert(list_length(node->withCheckOptionLists) ==
+ list_length(node->resultRelations));
+
+ /*
+ * Use the first WCO list as a reference. In the most common case,
+ * this will be for the same relation as rootRelInfo, and so there
+ * will be no need to adjust its attno's.
+ */
+ wcoList = linitial(node->withCheckOptionLists);
+ if (rootRelation != firstResultRel)
+ {
+ /* Convert any Vars in it to contain the root's attno's */
+ part_attmap =
+ build_attrmap_by_name(RelationGetDescr(rootRelation),
+ RelationGetDescr(firstResultRel),
+ false);
+
+ wcoList = (List *)
+ map_variable_attnos((Node *) wcoList,
+ firstVarno, 0,
+ part_attmap,
+ RelationGetForm(rootRelation)->reltype,
+ &found_whole_row);
+ }
+
+ foreach(lc, wcoList)
+ {
+ WithCheckOption *wco = lfirst_node(WithCheckOption, lc);
+ ExprState *wcoExpr = ExecInitQual(castNode(List, wco->qual),
+ &mtstate->ps);
+
+ wcoExprs = lappend(wcoExprs, wcoExpr);
+ }
+
+ rootRelInfo->ri_WithCheckOptions = wcoList;
+ rootRelInfo->ri_WithCheckOptionExprs = wcoExprs;
+ }
+
+ if (node->returningLists != NIL)
+ {
+ List *returningList;
+
+ /* There should be as many returning lists as result rels */
+ Assert(list_length(node->returningLists) ==
+ list_length(node->resultRelations));
+
+ /*
+ * Use the first returning list as a reference. In the most common
+ * case, this will be for the same relation as rootRelInfo, and so
+ * there will be no need to adjust its attno's.
+ */
+ returningList = linitial(node->returningLists);
+ if (rootRelation != firstResultRel)
+ {
+ /* Convert any Vars in it to contain the root's attno's */
+ if (part_attmap == NULL)
+ part_attmap =
+ build_attrmap_by_name(RelationGetDescr(rootRelation),
+ RelationGetDescr(firstResultRel),
+ false);
+
+ returningList = (List *)
+ map_variable_attnos((Node *) returningList,
+ firstVarno, 0,
+ part_attmap,
+ RelationGetForm(rootRelation)->reltype,
+ &found_whole_row);
+ }
+ rootRelInfo->ri_returningList = returningList;
+
+ /* Initialize the RETURNING projection */
+ rootRelInfo->ri_projectReturning =
+ ExecBuildProjectionInfo(returningList, econtext,
+ mtstate->ps.ps_ResultTupleSlot,
+ &mtstate->ps,
+ RelationGetDescr(rootRelation));
+ }
+ }
}
/*
@@ -4830,12 +4954,11 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
ExprContext *econtext;
/*
- * Initialize result tuple slot and assign its rowtype using the first
- * RETURNING list. We assume the rest will look the same.
+ * Initialize result tuple slot and assign its rowtype using the plan
+ * node's declared targetlist, which the planner set up to be the same
+ * as the first (before runtime pruning) RETURNING list. We assume
+ * all the result rels will produce compatible output.
*/
- mtstate->ps.plan->targetlist = (List *) linitial(returningLists);
-
- /* Set up a slot for the output of the RETURNING projection(s) */
ExecInitResultTupleSlotTL(&mtstate->ps, &TTSOpsVirtual);
slot = mtstate->ps.ps_ResultTupleSlot;
@@ -4865,7 +4988,6 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
* We still must construct a dummy result tuple type, because InitPlan
* expects one (maybe should change that?).
*/
- mtstate->ps.plan->targetlist = NIL;
ExecInitResultTypeTL(&mtstate->ps);
mtstate->ps.ps_ExprContext = NULL;
diff --git a/src/backend/executor/nodeTidrangescan.c b/src/backend/executor/nodeTidrangescan.c
index ab2eab9596e..26f7420b64b 100644
--- a/src/backend/executor/nodeTidrangescan.c
+++ b/src/backend/executor/nodeTidrangescan.c
@@ -128,9 +128,11 @@ TidExprListCreate(TidRangeScanState *tidrangestate)
* TidRangeEval
*
* Compute and set node's block and offset range to scan by evaluating
- * the trss_tidexprs. Returns false if we detect the range cannot
+ * node->trss_tidexprs. Returns false if we detect the range cannot
* contain any tuples. Returns true if it's possible for the range to
- * contain tuples.
+ * contain tuples. We don't bother validating that trss_mintid is less
+ * than or equal to trss_maxtid, as the scan_set_tidrange() table AM
+ * function will handle that.
* ----------------------------------------------------------------
*/
static bool
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 3288396def3..ecb2e4ccaa1 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -70,8 +70,7 @@ static int _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options,
static ParamListInfo _SPI_convert_params(int nargs, Oid *argtypes,
Datum *Values, const char *Nulls);
-static int _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount,
- CachedPlanSource *plansource, int query_index);
+static int _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount);
static void _SPI_error_callback(void *arg);
@@ -1686,8 +1685,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
query_string,
plansource->commandTag,
stmt_list,
- cplan,
- plansource);
+ cplan);
/*
* Set up options for portal. Default SCROLL type is chosen the same way
@@ -2502,7 +2500,6 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options,
CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc1);
List *stmt_list;
ListCell *lc2;
- int query_index = 0;
spicallbackarg.query = plansource->query_string;
@@ -2693,16 +2690,14 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options,
snap = InvalidSnapshot;
qdesc = CreateQueryDesc(stmt,
- cplan,
plansource->query_string,
snap, crosscheck_snapshot,
dest,
options->params,
_SPI_current->queryEnv,
0);
-
- res = _SPI_pquery(qdesc, fire_triggers, canSetTag ? options->tcount : 0,
- plansource, query_index);
+ res = _SPI_pquery(qdesc, fire_triggers,
+ canSetTag ? options->tcount : 0);
FreeQueryDesc(qdesc);
}
else
@@ -2799,8 +2794,6 @@ _SPI_execute_plan(SPIPlanPtr plan, const SPIExecuteOptions *options,
my_res = res;
goto fail;
}
-
- query_index++;
}
/* Done with this plan, so release refcount */
@@ -2878,8 +2871,7 @@ _SPI_convert_params(int nargs, Oid *argtypes,
}
static int
-_SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount,
- CachedPlanSource *plansource, int query_index)
+_SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount)
{
int operation = queryDesc->operation;
int eflags;
@@ -2935,16 +2927,7 @@ _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount,
else
eflags = EXEC_FLAG_SKIP_TRIGGERS;
- if (queryDesc->cplan)
- {
- ExecutorStartCachedPlan(queryDesc, eflags, plansource, query_index);
- Assert(queryDesc->planstate);
- }
- else
- {
- if (!ExecutorStart(queryDesc, eflags))
- elog(ERROR, "ExecutorStart() failed unexpectedly");
- }
+ ExecutorStart(queryDesc, eflags);
ExecutorRun(queryDesc, ForwardScanDirection, tcount);
diff --git a/src/backend/jit/README b/src/backend/jit/README
index 5427bdf2153..a40950dfb03 100644
--- a/src/backend/jit/README
+++ b/src/backend/jit/README
@@ -205,7 +205,7 @@ The ability to do so allows us to get the LLVM IR for all operators
bitcode files get installed into the server's
$pkglibdir/bitcode/postgres/
Using existing LLVM functionality (for parallel LTO compilation),
-additionally an index is over these is stored to
+additionally an index over these is stored to
$pkglibdir/bitcode/postgres.index.bc
Similarly extensions can install code into
diff --git a/src/backend/lib/README b/src/backend/lib/README
index f2fb591237d..c28cbe356f0 100644
--- a/src/backend/lib/README
+++ b/src/backend/lib/README
@@ -1,8 +1,6 @@
This directory contains general purpose data structures, for use anywhere
in the backend:
-binaryheap.c - a binary heap
-
bipartite_match.c - Hopcroft-Karp maximum cardinality algorithm for bipartite graphs
bloomfilter.c - probabilistic, space-efficient set membership testing
@@ -21,8 +19,6 @@ pairingheap.c - a pairing heap
rbtree.c - a red-black tree
-stringinfo.c - an extensible string type
-
Aside from the inherent characteristics of the data structures, there are a
few practical differences between the binary heap and the pairing heap. The
diff --git a/src/backend/libpq/be-secure-gssapi.c b/src/backend/libpq/be-secure-gssapi.c
index 717ba9824f9..5d98c58ffa8 100644
--- a/src/backend/libpq/be-secure-gssapi.c
+++ b/src/backend/libpq/be-secure-gssapi.c
@@ -46,11 +46,18 @@
* don't want the other side to send arbitrarily huge packets as we
* would have to allocate memory for them to then pass them to GSSAPI.
*
- * Therefore, these two #define's are effectively part of the protocol
+ * Therefore, this #define is effectively part of the protocol
* spec and can't ever be changed.
*/
-#define PQ_GSS_SEND_BUFFER_SIZE 16384
-#define PQ_GSS_RECV_BUFFER_SIZE 16384
+#define PQ_GSS_MAX_PACKET_SIZE 16384 /* includes uint32 header word */
+
+/*
+ * However, during the authentication exchange we must cope with whatever
+ * message size the GSSAPI library wants to send (because our protocol
+ * doesn't support splitting those messages). Depending on configuration
+ * those messages might be as much as 64kB.
+ */
+#define PQ_GSS_AUTH_BUFFER_SIZE 65536 /* includes uint32 header word */
/*
* Since we manage at most one GSS-encrypted connection per backend,
@@ -114,9 +121,9 @@ be_gssapi_write(Port *port, const void *ptr, size_t len)
* again, so if it offers a len less than that, something is wrong.
*
* Note: it may seem attractive to report partial write completion once
- * we've successfully sent any encrypted packets. However, that can cause
- * problems for callers; notably, pqPutMsgEnd's heuristic to send only
- * full 8K blocks interacts badly with such a hack. We won't save much,
+ * we've successfully sent any encrypted packets. However, doing that
+ * expands the state space of this processing and has been responsible for
+ * bugs in the past (cf. commit d053a879b). We won't save much,
* typically, by letting callers discard data early, so don't risk it.
*/
if (len < PqGSSSendConsumed)
@@ -210,12 +217,12 @@ be_gssapi_write(Port *port, const void *ptr, size_t len)
errno = ECONNRESET;
return -1;
}
- if (output.length > PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32))
+ if (output.length > PQ_GSS_MAX_PACKET_SIZE - sizeof(uint32))
{
ereport(COMMERROR,
(errmsg("server tried to send oversize GSSAPI packet (%zu > %zu)",
(size_t) output.length,
- PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32))));
+ PQ_GSS_MAX_PACKET_SIZE - sizeof(uint32))));
errno = ECONNRESET;
return -1;
}
@@ -346,12 +353,12 @@ be_gssapi_read(Port *port, void *ptr, size_t len)
/* Decode the packet length and check for overlength packet */
input.length = pg_ntoh32(*(uint32 *) PqGSSRecvBuffer);
- if (input.length > PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32))
+ if (input.length > PQ_GSS_MAX_PACKET_SIZE - sizeof(uint32))
{
ereport(COMMERROR,
(errmsg("oversize GSSAPI packet sent by the client (%zu > %zu)",
(size_t) input.length,
- PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32))));
+ PQ_GSS_MAX_PACKET_SIZE - sizeof(uint32))));
errno = ECONNRESET;
return -1;
}
@@ -517,10 +524,13 @@ secure_open_gssapi(Port *port)
* that will never use them, and we ensure that the buffers are
* sufficiently aligned for the length-word accesses that we do in some
* places in this file.
+ *
+ * We'll use PQ_GSS_AUTH_BUFFER_SIZE-sized buffers until transport
+ * negotiation is complete, then switch to PQ_GSS_MAX_PACKET_SIZE.
*/
- PqGSSSendBuffer = malloc(PQ_GSS_SEND_BUFFER_SIZE);
- PqGSSRecvBuffer = malloc(PQ_GSS_RECV_BUFFER_SIZE);
- PqGSSResultBuffer = malloc(PQ_GSS_RECV_BUFFER_SIZE);
+ PqGSSSendBuffer = malloc(PQ_GSS_AUTH_BUFFER_SIZE);
+ PqGSSRecvBuffer = malloc(PQ_GSS_AUTH_BUFFER_SIZE);
+ PqGSSResultBuffer = malloc(PQ_GSS_AUTH_BUFFER_SIZE);
if (!PqGSSSendBuffer || !PqGSSRecvBuffer || !PqGSSResultBuffer)
ereport(FATAL,
(errcode(ERRCODE_OUT_OF_MEMORY),
@@ -568,16 +578,16 @@ secure_open_gssapi(Port *port)
/*
* During initialization, packets are always fully consumed and
- * shouldn't ever be over PQ_GSS_RECV_BUFFER_SIZE in length.
+ * shouldn't ever be over PQ_GSS_AUTH_BUFFER_SIZE in total length.
*
* Verify on our side that the client doesn't do something funny.
*/
- if (input.length > PQ_GSS_RECV_BUFFER_SIZE)
+ if (input.length > PQ_GSS_AUTH_BUFFER_SIZE - sizeof(uint32))
{
ereport(COMMERROR,
- (errmsg("oversize GSSAPI packet sent by the client (%zu > %d)",
+ (errmsg("oversize GSSAPI packet sent by the client (%zu > %zu)",
(size_t) input.length,
- PQ_GSS_RECV_BUFFER_SIZE)));
+ PQ_GSS_AUTH_BUFFER_SIZE - sizeof(uint32))));
return -1;
}
@@ -631,12 +641,12 @@ secure_open_gssapi(Port *port)
{
uint32 netlen = pg_hton32(output.length);
- if (output.length > PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32))
+ if (output.length > PQ_GSS_AUTH_BUFFER_SIZE - sizeof(uint32))
{
ereport(COMMERROR,
(errmsg("server tried to send oversize GSSAPI packet (%zu > %zu)",
(size_t) output.length,
- PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32))));
+ PQ_GSS_AUTH_BUFFER_SIZE - sizeof(uint32))));
gss_release_buffer(&minor, &output);
return -1;
}
@@ -692,11 +702,28 @@ secure_open_gssapi(Port *port)
}
/*
+ * Release the large authentication buffers and allocate the ones we want
+ * for normal operation.
+ */
+ free(PqGSSSendBuffer);
+ free(PqGSSRecvBuffer);
+ free(PqGSSResultBuffer);
+ PqGSSSendBuffer = malloc(PQ_GSS_MAX_PACKET_SIZE);
+ PqGSSRecvBuffer = malloc(PQ_GSS_MAX_PACKET_SIZE);
+ PqGSSResultBuffer = malloc(PQ_GSS_MAX_PACKET_SIZE);
+ if (!PqGSSSendBuffer || !PqGSSRecvBuffer || !PqGSSResultBuffer)
+ ereport(FATAL,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
+ PqGSSSendLength = PqGSSSendNext = PqGSSSendConsumed = 0;
+ PqGSSRecvLength = PqGSSResultLength = PqGSSResultNext = 0;
+
+ /*
* Determine the max packet size which will fit in our buffer, after
* accounting for the length. be_gssapi_write will need this.
*/
major = gss_wrap_size_limit(&minor, port->gss->ctx, 1, GSS_C_QOP_DEFAULT,
- PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32),
+ PQ_GSS_MAX_PACKET_SIZE - sizeof(uint32),
&PqGSSMaxPktSize);
if (GSS_ERROR(major))
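These sizes guard a simple framing: every encrypted packet is a network-order uint32 length word followed by the payload, and the payload may never exceed the 16kB limit once transport is established (the 64kB buffers exist only for the unsplittable authentication messages). A self-contained sketch of the sender-side check, with htonl standing in for the backend's pg_hton32:

    #include <stdint.h>
    #include <string.h>
    #include <arpa/inet.h>

    #define PQ_GSS_MAX_PACKET_SIZE 16384    /* includes uint32 header word */

    static int
    frame_packet(uint8_t *out, const uint8_t *payload, uint32_t len)
    {
        uint32_t    netlen;

        if (len > PQ_GSS_MAX_PACKET_SIZE - sizeof(uint32_t))
            return -1;              /* oversize: reject, as the server does */

        netlen = htonl(len);
        memcpy(out, &netlen, sizeof(netlen));
        memcpy(out + sizeof(netlen), payload, len);
        return (int) (sizeof(netlen) + len);
    }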
diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c
index 64ff3ce3d6a..c8b63ef8249 100644
--- a/src/backend/libpq/be-secure-openssl.c
+++ b/src/backend/libpq/be-secure-openssl.c
@@ -1436,10 +1436,10 @@ initialize_ecdh(SSL_CTX *context, bool isServerStart)
*/
ereport(isServerStart ? FATAL : LOG,
errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("failed to set group names specified in ssl_groups: %s",
+ errmsg("could not set group names specified in ssl_groups: %s",
SSLerrmessageExt(ERR_get_error(),
_("No valid groups found"))),
- errhint("Ensure that each group name is spelled correctly and supported by the installed version of OpenSSL"));
+ errhint("Ensure that each group name is spelled correctly and supported by the installed version of OpenSSL."));
return false;
}
#endif
diff --git a/src/backend/nodes/gen_node_support.pl b/src/backend/nodes/gen_node_support.pl
index 77659b0f760..9ecddb14231 100644
--- a/src/backend/nodes/gen_node_support.pl
+++ b/src/backend/nodes/gen_node_support.pl
@@ -1039,6 +1039,11 @@ _read${n}(void)
print $off "\tWRITE_UINT_FIELD($f);\n";
print $rff "\tREAD_UINT_FIELD($f);\n" unless $no_read;
}
+ elsif ($t eq 'int64')
+ {
+ print $off "\tWRITE_INT64_FIELD($f);\n";
+ print $rff "\tREAD_INT64_FIELD($f);\n" unless $no_read;
+ }
elsif ($t eq 'uint64'
|| $t eq 'AclMode')
{
@@ -1324,7 +1329,7 @@ _jumble${n}(JumbleState *jstate, Node *node)
# Node type. Squash constants if requested.
if ($query_jumble_squash)
{
- print $jff "\tJUMBLE_ELEMENTS($f);\n"
+ print $jff "\tJUMBLE_ELEMENTS($f, node);\n"
unless $query_jumble_ignore;
}
else
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index ceac3fd8620..eaf391fc2ab 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -51,6 +51,12 @@ static void outDouble(StringInfo str, double d);
#define WRITE_UINT_FIELD(fldname) \
appendStringInfo(str, " :" CppAsString(fldname) " %u", node->fldname)
+/* Write a signed integer field (anything written with INT64_FORMAT) */
+#define WRITE_INT64_FIELD(fldname) \
+ appendStringInfo(str, \
+ " :" CppAsString(fldname) " " INT64_FORMAT, \
+ node->fldname)
+
/* Write an unsigned integer field (anything written with UINT64_FORMAT) */
#define WRITE_UINT64_FIELD(fldname) \
appendStringInfo(str, " :" CppAsString(fldname) " " UINT64_FORMAT, \
@@ -647,6 +653,8 @@ _outA_Expr(StringInfo str, const A_Expr *node)
WRITE_NODE_FIELD(lexpr);
WRITE_NODE_FIELD(rexpr);
+ WRITE_LOCATION_FIELD(rexpr_list_start);
+ WRITE_LOCATION_FIELD(rexpr_list_end);
WRITE_LOCATION_FIELD(location);
}
diff --git a/src/backend/nodes/queryjumblefuncs.c b/src/backend/nodes/queryjumblefuncs.c
index d1e82a63f09..31f97151977 100644
--- a/src/backend/nodes/queryjumblefuncs.c
+++ b/src/backend/nodes/queryjumblefuncs.c
@@ -21,6 +21,11 @@
* tree(s) generated from the query. The executor can then use this value
* to blame query costs on the proper queryId.
*
+ * Arrays of two or more constants and PARAM_EXTERN parameters are "squashed"
+ * and contribute only once to the jumble. This has the effect that queries
+ * that differ only in the length of such lists have the same queryId.
+ *
+ *
* Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
@@ -56,16 +61,18 @@ int compute_query_id = COMPUTE_QUERY_ID_AUTO;
bool query_id_enabled = false;
static JumbleState *InitJumble(void);
-static uint64 DoJumble(JumbleState *jstate, Node *node);
+static int64 DoJumble(JumbleState *jstate, Node *node);
static void AppendJumble(JumbleState *jstate,
const unsigned char *value, Size size);
static void FlushPendingNulls(JumbleState *jstate);
static void RecordConstLocation(JumbleState *jstate,
- int location, bool squashed);
+ bool extern_param,
+ int location, int len);
static void _jumbleNode(JumbleState *jstate, Node *node);
-static void _jumbleElements(JumbleState *jstate, List *elements);
-static void _jumbleA_Const(JumbleState *jstate, Node *node);
static void _jumbleList(JumbleState *jstate, Node *node);
+static void _jumbleElements(JumbleState *jstate, List *elements, Node *node);
+static void _jumbleParam(JumbleState *jstate, Node *node);
+static void _jumbleA_Const(JumbleState *jstate, Node *node);
static void _jumbleVariableSetStmt(JumbleState *jstate, Node *node);
static void _jumbleRangeTblEntry_eref(JumbleState *jstate,
RangeTblEntry *rte,
@@ -141,12 +148,12 @@ JumbleQuery(Query *query)
* If we are unlucky enough to get a hash of zero, use 1 instead for
* normal statements and 2 for utility queries.
*/
- if (query->queryId == UINT64CONST(0))
+ if (query->queryId == INT64CONST(0))
{
if (query->utilityStmt)
- query->queryId = UINT64CONST(2);
+ query->queryId = INT64CONST(2);
else
- query->queryId = UINT64CONST(1);
+ query->queryId = INT64CONST(1);
}
return jstate;
@@ -185,6 +192,7 @@ InitJumble(void)
jstate->clocations_count = 0;
jstate->highest_extern_param_id = 0;
jstate->pending_nulls = 0;
+ jstate->has_squashed_lists = false;
#ifdef USE_ASSERT_CHECKING
jstate->total_jumble_len = 0;
#endif
@@ -197,7 +205,7 @@ InitJumble(void)
* Jumble the given Node using the given JumbleState and return the resulting
* jumble hash.
*/
-static uint64
+static int64
DoJumble(JumbleState *jstate, Node *node)
{
/* Jumble the given node */
@@ -207,10 +215,14 @@ DoJumble(JumbleState *jstate, Node *node)
if (jstate->pending_nulls > 0)
FlushPendingNulls(jstate);
+ /* Squashed list found, reset highest_extern_param_id */
+ if (jstate->has_squashed_lists)
+ jstate->highest_extern_param_id = 0;
+
/* Process the jumble buffer and produce the hash value */
- return DatumGetUInt64(hash_any_extended(jstate->jumble,
- jstate->jumble_len,
- 0));
+ return DatumGetInt64(hash_any_extended(jstate->jumble,
+ jstate->jumble_len,
+ 0));
}
/*
@@ -256,10 +268,10 @@ AppendJumbleInternal(JumbleState *jstate, const unsigned char *item,
if (unlikely(jumble_len >= JUMBLE_SIZE))
{
- uint64 start_hash;
+ int64 start_hash;
- start_hash = DatumGetUInt64(hash_any_extended(jumble,
- JUMBLE_SIZE, 0));
+ start_hash = DatumGetInt64(hash_any_extended(jumble,
+ JUMBLE_SIZE, 0));
memcpy(jumble, &start_hash, sizeof(start_hash));
jumble_len = sizeof(start_hash);
}
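The overflow handling above folds the jumble buffer in place: once it fills, its contents are replaced by their own hash and accumulation continues from there. A self-contained sketch, using FNV-1a as a stand-in for hash_any_extended():

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    #define JUMBLE_SIZE 1024

    static int64_t
    hash_bytes64(const uint8_t *buf, size_t len)    /* stand-in hash */
    {
        uint64_t    h = 1469598103934665603ULL;     /* FNV-1a basis */

        for (size_t i = 0; i < len; i++)
            h = (h ^ buf[i]) * 1099511628211ULL;
        return (int64_t) h;
    }

    static size_t
    fold_if_full(uint8_t *jumble, size_t jumble_len)
    {
        if (jumble_len >= JUMBLE_SIZE)
        {
            int64_t     start_hash = hash_bytes64(jumble, JUMBLE_SIZE);

            /* restart the buffer with the running hash as its seed */
            memcpy(jumble, &start_hash, sizeof(start_hash));
            jumble_len = sizeof(start_hash);
        }
        return jumble_len;
    }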
@@ -373,15 +385,17 @@ FlushPendingNulls(JumbleState *jstate)
/*
- * Record location of constant within query string of query tree that is
- * currently being walked.
+ * Record the location of some kind of constant within a query string.
+ * These are not only bare constants but also expressions that ultimately
+ * constitute a constant, such as those inside casts and simple function
+ * calls; if extern_param, then it corresponds to a PARAM_EXTERN Param.
*
- * 'squashed' signals that the constant represents the first or the last
- * element in a series of merged constants, and everything but the first/last
- * element contributes nothing to the jumble hash.
+ * If length is -1, it indicates a single such constant element. If
+ * it's a positive integer, it indicates the length of a squashable
+ * list of them.
*/
static void
-RecordConstLocation(JumbleState *jstate, int location, bool squashed)
+RecordConstLocation(JumbleState *jstate, bool extern_param, int location, int len)
{
/* -1 indicates unknown or undefined location */
if (location >= 0)
@@ -396,9 +410,15 @@ RecordConstLocation(JumbleState *jstate, int location, bool squashed)
sizeof(LocationLen));
}
jstate->clocations[jstate->clocations_count].location = location;
- /* initialize lengths to -1 to simplify third-party module usage */
- jstate->clocations[jstate->clocations_count].squashed = squashed;
- jstate->clocations[jstate->clocations_count].length = -1;
+
+ /*
+ * Lengths are either positive integers (indicating a squashable
+ * list), or -1.
+ */
+ Assert(len > 0 || len == -1);
+ jstate->clocations[jstate->clocations_count].length = len;
+ jstate->clocations[jstate->clocations_count].squashed = (len > -1);
+ jstate->clocations[jstate->clocations_count].extern_param = extern_param;
jstate->clocations_count++;
}
}
@@ -407,47 +427,74 @@ RecordConstLocation(JumbleState *jstate, int location, bool squashed)
* Subroutine for _jumbleElements: Verify a few simple cases where we can
* deduce that the expression is a constant:
*
- * - Ignore a possible wrapping RelabelType and CoerceViaIO.
- * - If it's a FuncExpr, check that the function is an implicit
+ * - See through any wrapping RelabelType and CoerceViaIO layers.
+ * - If it's a FuncExpr, check that the function is a builtin
* cast and its arguments are Const.
- * - Otherwise test if the expression is a simple Const.
+ * - Otherwise test if the expression is a simple Const or a
+ * PARAM_EXTERN param.
*/
static bool
-IsSquashableConst(Node *element)
+IsSquashableConstant(Node *element)
{
- if (IsA(element, RelabelType))
- element = (Node *) ((RelabelType *) element)->arg;
-
- if (IsA(element, CoerceViaIO))
- element = (Node *) ((CoerceViaIO *) element)->arg;
-
- if (IsA(element, FuncExpr))
+restart:
+ switch (nodeTag(element))
{
- FuncExpr *func = (FuncExpr *) element;
- ListCell *temp;
+ case T_RelabelType:
+ /* Unwrap RelabelType */
+ element = (Node *) ((RelabelType *) element)->arg;
+ goto restart;
- if (func->funcformat != COERCE_IMPLICIT_CAST &&
- func->funcformat != COERCE_EXPLICIT_CAST)
- return false;
+ case T_CoerceViaIO:
+ /* Unwrap CoerceViaIO */
+ element = (Node *) ((CoerceViaIO *) element)->arg;
+ goto restart;
- if (func->funcid > FirstGenbkiObjectId)
- return false;
+ case T_Const:
+ return true;
- foreach(temp, func->args)
- {
- Node *arg = lfirst(temp);
+ case T_Param:
+ return castNode(Param, element)->paramkind == PARAM_EXTERN;
- if (!IsA(arg, Const)) /* XXX we could recurse here instead */
- return false;
- }
+ case T_FuncExpr:
+ {
+ FuncExpr *func = (FuncExpr *) element;
+ ListCell *temp;
- return true;
- }
+ if (func->funcformat != COERCE_IMPLICIT_CAST &&
+ func->funcformat != COERCE_EXPLICIT_CAST)
+ return false;
- if (!IsA(element, Const))
- return false;
+ if (func->funcid > FirstGenbkiObjectId)
+ return false;
- return true;
+ /*
+ * We can check function arguments recursively, being careful
+ * about recursing too deep. At each recursion level it's
+ * enough to test the stack on the first element. (Note that
+ * I wasn't able to hit this without bloating the stack
+ * artificially in this function: the parser errors out before
+ * stack size becomes a problem here.)
+ */
+ foreach(temp, func->args)
+ {
+ Node *arg = lfirst(temp);
+
+ if (!IsA(arg, Const))
+ {
+ if (foreach_current_index(temp) == 0 &&
+ stack_is_too_deep())
+ return false;
+ else if (!IsSquashableConstant(arg))
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ default:
+ return false;
+ }
}
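The detector above peels coercion wrappers and then classifies the core node; constants and PARAM_EXTERN params qualify, and builtin-cast FuncExprs recurse with a stack-depth guard. A self-contained miniature of the unwrap-then-classify shape, with hypothetical node kinds in place of the real tags:

    typedef enum Kind
    {
        N_CONST, N_EXTERN_PARAM, N_RELABEL, N_COERCE, N_OTHER
    } Kind;

    typedef struct MiniNode
    {
        Kind            kind;
        struct MiniNode *arg;      /* wrapped expression, if any */
    } MiniNode;

    static int
    is_squashable(const MiniNode *e)
    {
        /* unwrap RelabelType / CoerceViaIO analogues */
        while (e->kind == N_RELABEL || e->kind == N_COERCE)
            e = e->arg;

        return e->kind == N_CONST || e->kind == N_EXTERN_PARAM;
    }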
/*
@@ -457,39 +504,33 @@ IsSquashableConst(Node *element)
* Return value indicates if squashing is possible.
*
* Note that this function searches only for explicit Const nodes with
- * possibly very simple decorations on top, and does not try to simplify
- * expressions.
+ * possibly very simple decorations on top and PARAM_EXTERN parameters,
+ * and does not try to simplify expressions.
*/
static bool
-IsSquashableConstList(List *elements, Node **firstExpr, Node **lastExpr)
+IsSquashableConstantList(List *elements)
{
ListCell *temp;
- /*
- * If squashing is disabled, or the list is too short, we don't try to
- * squash it.
- */
+ /* If the list is too short, we don't try to squash it. */
if (list_length(elements) < 2)
return false;
foreach(temp, elements)
{
- if (!IsSquashableConst(lfirst(temp)))
+ if (!IsSquashableConstant(lfirst(temp)))
return false;
}
- *firstExpr = linitial(elements);
- *lastExpr = llast(elements);
-
return true;
}
#define JUMBLE_NODE(item) \
_jumbleNode(jstate, (Node *) expr->item)
-#define JUMBLE_ELEMENTS(list) \
- _jumbleElements(jstate, (List *) expr->list)
+#define JUMBLE_ELEMENTS(list, node) \
+ _jumbleElements(jstate, (List *) expr->list, node)
#define JUMBLE_LOCATION(location) \
- RecordConstLocation(jstate, expr->location, false)
+ RecordConstLocation(jstate, false, expr->location, -1)
#define JUMBLE_FIELD(item) \
do { \
if (sizeof(expr->item) == 8) \
@@ -516,42 +557,6 @@ do { \
#include "queryjumblefuncs.funcs.c"
-/*
- * We jumble lists of constant elements as one individual item regardless
- * of how many elements are in the list. This means different queries
- * jumble to the same query_id, if the only difference is the number of
- * elements in the list.
- */
-static void
-_jumbleElements(JumbleState *jstate, List *elements)
-{
- Node *first,
- *last;
-
- if (IsSquashableConstList(elements, &first, &last))
- {
- /*
- * If this list of elements is squashable, keep track of the location
- * of its first and last elements. When reading back the locations
- * array, we'll see two consecutive locations with ->squashed set to
- * true, indicating the location of initial and final elements of this
- * list.
- *
- * For the limited set of cases we support now (implicit coerce via
- * FuncExpr, Const) it's fine to use exprLocation of the 'last'
- * expression, but if more complex composite expressions are to be
- * supported (e.g., OpExpr or FuncExpr as an explicit call), more
- * sophisticated tracking will be needed.
- */
- RecordConstLocation(jstate, exprLocation(first), true);
- RecordConstLocation(jstate, exprLocation(last), true);
- }
- else
- {
- _jumbleNode(jstate, (Node *) elements);
- }
-}
-
static void
_jumbleNode(JumbleState *jstate, Node *node)
{
@@ -593,26 +598,6 @@ _jumbleNode(JumbleState *jstate, Node *node)
break;
}
- /* Special cases to handle outside the automated code */
- switch (nodeTag(expr))
- {
- case T_Param:
- {
- Param *p = (Param *) node;
-
- /*
- * Update the highest Param id seen, in order to start
- * normalization correctly.
- */
- if (p->paramkind == PARAM_EXTERN &&
- p->paramid > jstate->highest_extern_param_id)
- jstate->highest_extern_param_id = p->paramid;
- }
- break;
- default:
- break;
- }
-
/* Ensure we added something to the jumble buffer */
Assert(jstate->total_jumble_len > prev_jumble_len);
}
@@ -648,6 +633,79 @@ _jumbleList(JumbleState *jstate, Node *node)
}
}
+/*
+ * We try to jumble lists of expressions as one individual item regardless
+ * of how many elements are in the list. This is know as squashing, which
+ * results in different queries jumbling to the same query_id, if the only
+ * difference is the number of elements in the list.
+ *
+ * We allow constants and PARAM_EXTERN parameters to be squashed. To normalize
+ * such queries, we use the start and end locations of the element list.
+ */
+static void
+_jumbleElements(JumbleState *jstate, List *elements, Node *node)
+{
+ bool normalize_list = false;
+
+ if (IsSquashableConstantList(elements))
+ {
+ if (IsA(node, ArrayExpr))
+ {
+ ArrayExpr *aexpr = (ArrayExpr *) node;
+
+ if (aexpr->list_start > 0 && aexpr->list_end > 0)
+ {
+ RecordConstLocation(jstate,
+ false,
+ aexpr->list_start + 1,
+ (aexpr->list_end - aexpr->list_start) - 1);
+ normalize_list = true;
+ jstate->has_squashed_lists = true;
+ }
+ }
+ }
+
+ if (!normalize_list)
+ {
+ _jumbleNode(jstate, (Node *) elements);
+ }
+}
+
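
The list_start/list_end bookkeeping added here records the span strictly inside the delimiters: location list_start + 1, length (list_end - list_start) - 1. A standalone illustration with a hypothetical query string:

#include <stdio.h>
#include <string.h>

int main(void)
{
    /* Hypothetical text; list_start/list_end are the offsets the parser
     * would record for '(' and ')' of the IN list. */
    const char *query = "SELECT * FROM t WHERE x IN (1, 2, 3)";
    int list_start = (int) (strchr(query, '(') - query);
    int list_end   = (int) (strchr(query, ')') - query);

    /* Same arithmetic as above: the span strictly inside the parens */
    int loc = list_start + 1;
    int len = (list_end - list_start) - 1;

    printf("squashed span: \"%.*s\"\n", len, query + loc);  /* 1, 2, 3 */
    return 0;
}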
+/*
+ * We store the highest param ID of extern params. This can later be used
+ * to start the numbering of placeholders for squashed lists.
+ */
+static void
+_jumbleParam(JumbleState *jstate, Node *node)
+{
+ Param *expr = (Param *) node;
+
+ JUMBLE_FIELD(paramkind);
+ JUMBLE_FIELD(paramid);
+ JUMBLE_FIELD(paramtype);
+ /* paramtypmod and paramcollid are ignored */
+
+ if (expr->paramkind == PARAM_EXTERN)
+ {
+ /*
+ * At this point, only external parameter locations outside of
+ * squashable lists will be recorded.
+ */
+ RecordConstLocation(jstate, true, expr->location, -1);
+
+ /*
+ * Update the highest Param id seen, in order to start normalization
+ * correctly.
+ *
+ * Note: This value is reset at the end of jumbling if there exists a
+ * squashable list. See the comment in the definition of JumbleState.
+ */
+ if (expr->paramid > jstate->highest_extern_param_id)
+ jstate->highest_extern_param_id = expr->paramid;
+ }
+}
+
static void
_jumbleA_Const(JumbleState *jstate, Node *node)
{
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index 64d3a09f765..48b5d13b9b6 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -68,6 +68,12 @@
token = pg_strtok(&length); /* get field value */ \
local_node->fldname = atoui(token)
+/* Read a signed integer field (anything written using INT64_FORMAT) */
+#define READ_INT64_FIELD(fldname) \
+ token = pg_strtok(&length); /* skip :fldname */ \
+ token = pg_strtok(&length); /* get field value */ \
+ local_node->fldname = strtoi64(token, NULL, 10)
+
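
strtoi64 is PostgreSQL's portability wrapper; C99's strtoll performs the same conversion. A standalone sketch of the parse the macro does, not the backend code itself:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* strtoll is the portable C99 equivalent of the backend's strtoi64 */
    const char *token = "-9223372036854775808";
    int64_t     v = strtoll(token, NULL, 10);

    printf("%" PRId64 "\n", v);
    return 0;
}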
/* Read an unsigned integer field (anything written using UINT64_FORMAT) */
#define READ_UINT64_FIELD(fldname) \
token = pg_strtok(&length); /* skip :fldname */ \
@@ -520,6 +526,8 @@ _readA_Expr(void)
READ_NODE_FIELD(lexpr);
READ_NODE_FIELD(rexpr);
+ READ_LOCATION_FIELD(rexpr_list_start);
+ READ_LOCATION_FIELD(rexpr_list_end);
READ_LOCATION_FIELD(location);
READ_DONE();
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 905250b3325..6cc6966b060 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -1891,7 +1891,17 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
*/
if (root->tuple_fraction > 0)
{
- double path_fraction = (1.0 / root->tuple_fraction);
+ double path_fraction = root->tuple_fraction;
+
+ /*
+ * Merge Append considers only live children relations. Dummy
+ * relations must be filtered out before.
+ */
+ Assert(childrel->rows > 0);
+
+ /* Convert absolute limit to a path fraction */
+ if (path_fraction >= 1.0)
+ path_fraction /= childrel->rows;
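
This works because tuple_fraction encodes two things: a value below 1.0 is already a fraction of the rows, while 1.0 or more is an absolute row count (e.g. from LIMIT). A standalone sketch with assumed numbers:

#include <stdio.h>

int main(void)
{
    double tuple_fraction = 100.0;  /* e.g. a LIMIT 100 request (assumed) */
    double child_rows = 10000.0;    /* assumed row estimate for the child */
    double path_fraction = tuple_fraction;

    /* Values >= 1.0 are absolute row counts; convert to a fraction */
    if (path_fraction >= 1.0)
        path_fraction /= child_rows;

    printf("path_fraction = %g\n", path_fraction);  /* 0.01 */
    return 0;
}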
cheapest_fractional =
get_cheapest_fractional_path_for_pathkeys(childrel->pathlist,
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index 26f0336f1e4..7aa8f5d799c 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -876,16 +876,13 @@ try_nestloop_path(PlannerInfo *root,
/*
* Check to see if proposed path is still parameterized, and reject if the
* parameterization wouldn't be sensible --- unless allow_star_schema_join
- * says to allow it anyway. Also, we must reject if have_dangerous_phv
- * doesn't like the look of it, which could only happen if the nestloop is
- * still parameterized.
+ * says to allow it anyway.
*/
required_outer = calc_nestloop_required_outer(outerrelids, outer_paramrels,
innerrelids, inner_paramrels);
if (required_outer &&
- ((!bms_overlap(required_outer, extra->param_source_rels) &&
- !allow_star_schema_join(root, outerrelids, inner_paramrels)) ||
- have_dangerous_phv(root, outerrelids, inner_paramrels)))
+ !bms_overlap(required_outer, extra->param_source_rels) &&
+ !allow_star_schema_join(root, outerrelids, inner_paramrels))
{
/* Waste no memory when we reject a path here */
bms_free(required_outer);
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index 60d65762b5d..aad41b94009 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -565,9 +565,6 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
* Also, if the lateral reference is only indirect, we should reject
* the join; whatever rel(s) the reference chain goes through must be
* joined to first.
- *
- * Another case that might keep us from building a valid plan is the
- * implementation restriction described by have_dangerous_phv().
*/
lateral_fwd = bms_overlap(rel1->relids, rel2->lateral_relids);
lateral_rev = bms_overlap(rel2->relids, rel1->lateral_relids);
@@ -584,9 +581,6 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
/* check there is a direct reference from rel2 to rel1 */
if (!bms_overlap(rel1->relids, rel2->direct_lateral_relids))
return false; /* only indirect refs, so reject */
- /* check we won't have a dangerous PHV */
- if (have_dangerous_phv(root, rel1->relids, rel2->lateral_relids))
- return false; /* might be unable to handle required PHV */
}
else if (lateral_rev)
{
@@ -599,9 +593,6 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
/* check there is a direct reference from rel1 to rel2 */
if (!bms_overlap(rel2->relids, rel1->direct_lateral_relids))
return false; /* only indirect refs, so reject */
- /* check we won't have a dangerous PHV */
- if (have_dangerous_phv(root, rel2->relids, rel1->lateral_relids))
- return false; /* might be unable to handle required PHV */
}
/*
@@ -1279,57 +1270,6 @@ has_legal_joinclause(PlannerInfo *root, RelOptInfo *rel)
/*
- * There's a pitfall for creating parameterized nestloops: suppose the inner
- * rel (call it A) has a parameter that is a PlaceHolderVar, and that PHV's
- * minimum eval_at set includes the outer rel (B) and some third rel (C).
- * We might think we could create a B/A nestloop join that's parameterized by
- * C. But we would end up with a plan in which the PHV's expression has to be
- * evaluated as a nestloop parameter at the B/A join; and the executor is only
- * set up to handle simple Vars as NestLoopParams. Rather than add complexity
- * and overhead to the executor for such corner cases, it seems better to
- * forbid the join. (Note that we can still make use of A's parameterized
- * path with pre-joined B+C as the outer rel. have_join_order_restriction()
- * ensures that we will consider making such a join even if there are not
- * other reasons to do so.)
- *
- * So we check whether any PHVs used in the query could pose such a hazard.
- * We don't have any simple way of checking whether a risky PHV would actually
- * be used in the inner plan, and the case is so unusual that it doesn't seem
- * worth working very hard on it.
- *
- * This needs to be checked in two places. If the inner rel's minimum
- * parameterization would trigger the restriction, then join_is_legal() should
- * reject the join altogether, because there will be no workable paths for it.
- * But joinpath.c has to check again for every proposed nestloop path, because
- * the inner path might have more than the minimum parameterization, causing
- * some PHV to be dangerous for it that otherwise wouldn't be.
- */
-bool
-have_dangerous_phv(PlannerInfo *root,
- Relids outer_relids, Relids inner_params)
-{
- ListCell *lc;
-
- foreach(lc, root->placeholder_list)
- {
- PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(lc);
-
- if (!bms_is_subset(phinfo->ph_eval_at, inner_params))
- continue; /* ignore, could not be a nestloop param */
- if (!bms_overlap(phinfo->ph_eval_at, outer_relids))
- continue; /* ignore, not relevant to this join */
- if (bms_is_subset(phinfo->ph_eval_at, outer_relids))
- continue; /* safe, it can be eval'd within outerrel */
- /* Otherwise, it's potentially unsafe, so reject the join */
- return true;
- }
-
- /* OK to perform the join */
- return false;
-}
-
-
-/*
* is_dummy_rel --- has relation been proven empty?
*/
bool
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 4ad30b7627e..0b61aef962c 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -4344,13 +4344,16 @@ create_nestloop_plan(PlannerInfo *root,
NestLoop *join_plan;
Plan *outer_plan;
Plan *inner_plan;
+ Relids outerrelids;
List *tlist = build_path_tlist(root, &best_path->jpath.path);
List *joinrestrictclauses = best_path->jpath.joinrestrictinfo;
List *joinclauses;
List *otherclauses;
- Relids outerrelids;
List *nestParams;
+ List *outer_tlist;
+ bool outer_parallel_safe;
Relids saveOuterRels = root->curOuterRels;
+ ListCell *lc;
/*
* If the inner path is parameterized by the topmost parent of the outer
@@ -4372,8 +4375,8 @@ create_nestloop_plan(PlannerInfo *root,
outer_plan = create_plan_recurse(root, best_path->jpath.outerjoinpath, 0);
/* For a nestloop, include outer relids in curOuterRels for inner side */
- root->curOuterRels = bms_union(root->curOuterRels,
- best_path->jpath.outerjoinpath->parent->relids);
+ outerrelids = best_path->jpath.outerjoinpath->parent->relids;
+ root->curOuterRels = bms_union(root->curOuterRels, outerrelids);
inner_plan = create_plan_recurse(root, best_path->jpath.innerjoinpath, 0);
@@ -4412,9 +4415,66 @@ create_nestloop_plan(PlannerInfo *root,
* Identify any nestloop parameters that should be supplied by this join
* node, and remove them from root->curOuterParams.
*/
- outerrelids = best_path->jpath.outerjoinpath->parent->relids;
- nestParams = identify_current_nestloop_params(root, outerrelids);
+ nestParams = identify_current_nestloop_params(root,
+ outerrelids,
+ PATH_REQ_OUTER((Path *) best_path));
+
+ /*
+ * While nestloop parameters that are Vars had better be available from
+ * the outer_plan already, there are edge cases where nestloop parameters
+ * that are PHVs won't be. In such cases we must add them to the
+ * outer_plan's tlist, since the executor's NestLoopParam machinery
+ * requires the params to be simple outer-Var references to that tlist.
+ * (This is cheating a little bit, because the outer path's required-outer
+ * relids might not be enough to allow evaluating such a PHV. But in
+ * practice, if we could have evaluated the PHV at the nestloop node, we
+ * can do so in the outer plan too.)
+ */
+ outer_tlist = outer_plan->targetlist;
+ outer_parallel_safe = outer_plan->parallel_safe;
+ foreach(lc, nestParams)
+ {
+ NestLoopParam *nlp = (NestLoopParam *) lfirst(lc);
+ PlaceHolderVar *phv;
+ TargetEntry *tle;
+
+ if (IsA(nlp->paramval, Var))
+ continue; /* nothing to do for simple Vars */
+ /* Otherwise it must be a PHV */
+ phv = castNode(PlaceHolderVar, nlp->paramval);
+
+ if (tlist_member((Expr *) phv, outer_tlist))
+ continue; /* already available */
+
+ /*
+ * It's possible that nestloop parameter PHVs selected to evaluate
+ * here contain references to surviving root->curOuterParams items
+ * (that is, they reference values that will be supplied by some
+ * higher-level nestloop). Those need to be converted to Params now.
+ * Note: it's safe to do this after the tlist_member() check, because
+ * equal() won't pay attention to phv->phexpr.
+ */
+ phv->phexpr = (Expr *) replace_nestloop_params(root,
+ (Node *) phv->phexpr);
+
+ /* Make a shallow copy of outer_tlist, if we didn't already */
+ if (outer_tlist == outer_plan->targetlist)
+ outer_tlist = list_copy(outer_tlist);
+ /* ... and add the needed expression */
+ tle = makeTargetEntry((Expr *) copyObject(phv),
+ list_length(outer_tlist) + 1,
+ NULL,
+ true);
+ outer_tlist = lappend(outer_tlist, tle);
+ /* ... and track whether tlist is (still) parallel-safe */
+ if (outer_parallel_safe)
+ outer_parallel_safe = is_parallel_safe(root, (Node *) phv);
+ }
+ if (outer_tlist != outer_plan->targetlist)
+ outer_plan = change_plan_targetlist(outer_plan, outer_tlist,
+ outer_parallel_safe);
+ /* And finally, we can build the join plan node */
join_plan = make_nestloop(tlist,
joinclauses,
otherclauses,
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 49ad6e83578..549aedcfa99 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -331,7 +331,6 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
glob->finalrteperminfos = NIL;
glob->finalrowmarks = NIL;
glob->resultRelations = NIL;
- glob->firstResultRels = NIL;
glob->appendRelations = NIL;
glob->partPruneInfos = NIL;
glob->relationOids = NIL;
@@ -571,7 +570,6 @@ standard_planner(Query *parse, const char *query_string, int cursorOptions,
glob->prunableRelids);
result->permInfos = glob->finalrteperminfos;
result->resultRelations = glob->resultRelations;
- result->firstResultRels = glob->firstResultRels;
result->appendRelations = glob->appendRelations;
result->subplans = glob->subplans;
result->rewindPlanIDs = glob->rewindPlanIDs;
@@ -6881,7 +6879,7 @@ plan_cluster_use_sort(Oid tableOid, Oid indexOid)
*
* tableOid is the table on which the index is to be built. indexOid is the
* OID of an index to be created or reindexed (which must be an index with
- * support for parallel builds - currently btree or BRIN).
+ * support for parallel builds - currently btree, GIN, or BRIN).
*
* Return value is the number of parallel worker processes to request. It
* may be unsafe to proceed if this is 0. Note that this does not include the
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 150e9f060ee..846e44186c3 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -1097,9 +1097,10 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
/*
* Set up the visible plan targetlist as being the same as
- * the first RETURNING list. This is for the use of
- * EXPLAIN; the executor won't pay any attention to the
- * targetlist. We postpone this step until here so that
+ * the first RETURNING list. This is mostly for the use
+ * of EXPLAIN; the executor won't execute that targetlist,
+ * although it does use it to prepare the node's result
+ * tuple slot. We postpone this step until here so that
* we don't have to do set_returning_clause_references()
* twice on identical targetlists.
*/
@@ -1248,9 +1249,6 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset)
lappend_int(root->glob->resultRelations,
splan->rootRelation);
}
- root->glob->firstResultRels =
- lappend_int(root->glob->firstResultRels,
- linitial_int(splan->resultRelations));
}
break;
case T_Append:
diff --git a/src/backend/optimizer/util/paramassign.c b/src/backend/optimizer/util/paramassign.c
index 3bd3ce37c8f..4c13c5931b4 100644
--- a/src/backend/optimizer/util/paramassign.c
+++ b/src/backend/optimizer/util/paramassign.c
@@ -599,38 +599,46 @@ process_subquery_nestloop_params(PlannerInfo *root, List *subplan_params)
}
/*
- * Identify any NestLoopParams that should be supplied by a NestLoop plan
- * node with the specified lefthand rels. Remove them from the active
- * root->curOuterParams list and return them as the result list.
+ * Identify any NestLoopParams that should be supplied by a NestLoop
+ * plan node with the specified lefthand rels and required-outer rels.
+ * Remove them from the active root->curOuterParams list and return
+ * them as the result list.
*
- * XXX Here we also hack up the returned Vars and PHVs so that they do not
- * contain nullingrel sets exceeding what is available from the outer side.
- * This is needed if we have applied outer join identity 3,
- * (A leftjoin B on (Pab)) leftjoin C on (Pb*c)
- * = A leftjoin (B leftjoin C on (Pbc)) on (Pab)
- * and C contains lateral references to B. It's still safe to apply the
- * identity, but the parser will have created those references in the form
- * "b*" (i.e., with varnullingrels listing the A/B join), while what we will
- * have available from the nestloop's outer side is just "b". We deal with
- * that here by stripping the nullingrels down to what is available from the
- * outer side according to leftrelids.
- *
- * That fixes matters for the case of forward application of identity 3.
- * If the identity was applied in the reverse direction, we will have
- * parameter Vars containing too few nullingrel bits rather than too many.
- * Currently, that causes no problems because setrefs.c applies only a
- * subset check to nullingrels in NestLoopParams, but we'd have to work
- * harder if we ever want to tighten that check. This is all pretty annoying
- * because it greatly weakens setrefs.c's cross-check, but the alternative
+ * Vars and PHVs appearing in the result list must have nullingrel sets
+ * that could validly appear in the lefthand rel's output. Ordinarily that
+ * would be true already, but if we have applied outer join identity 3,
+ * there could be more or fewer nullingrel bits in the nodes appearing in
+ * curOuterParams than are in the nominal leftrelids. We deal with that by
+ * forcing their nullingrel sets to include exactly the outer-join relids
+ * that appear in leftrelids and can null the respective Var or PHV.
+ * This fix is a bit ad-hoc and intellectually unsatisfactory, because it's
+ * essentially jumping to the conclusion that we've placed evaluation of
+ * the nestloop parameters correctly, and thus it defeats the intent of the
+ * subsequent nullingrel cross-checks in setrefs.c. But the alternative
* seems to be to generate multiple versions of each laterally-parameterized
* subquery, which'd be unduly expensive.
*/
List *
-identify_current_nestloop_params(PlannerInfo *root, Relids leftrelids)
+identify_current_nestloop_params(PlannerInfo *root,
+ Relids leftrelids,
+ Relids outerrelids)
{
List *result;
+ Relids allleftrelids;
ListCell *cell;
+ /*
+ * We'll be able to evaluate a PHV in the lefthand path if it uses the
+ * lefthand rels plus any available required-outer rels. But don't do so
+ * if it uses *only* required-outer rels; in that case it should be
+ * evaluated higher in the tree. For Vars, no such hair-splitting is
+ * necessary since they depend on only one relid.
+ */
+ if (outerrelids)
+ allleftrelids = bms_union(leftrelids, outerrelids);
+ else
+ allleftrelids = leftrelids;
+
result = NIL;
foreach(cell, root->curOuterParams)
{
@@ -646,25 +654,60 @@ identify_current_nestloop_params(PlannerInfo *root, Relids leftrelids)
bms_is_member(nlp->paramval->varno, leftrelids))
{
Var *var = (Var *) nlp->paramval;
+ RelOptInfo *rel = root->simple_rel_array[var->varno];
root->curOuterParams = foreach_delete_current(root->curOuterParams,
cell);
- var->varnullingrels = bms_intersect(var->varnullingrels,
+ var->varnullingrels = bms_intersect(rel->nulling_relids,
leftrelids);
result = lappend(result, nlp);
}
- else if (IsA(nlp->paramval, PlaceHolderVar) &&
- bms_is_subset(find_placeholder_info(root,
- (PlaceHolderVar *) nlp->paramval)->ph_eval_at,
- leftrelids))
+ else if (IsA(nlp->paramval, PlaceHolderVar))
{
PlaceHolderVar *phv = (PlaceHolderVar *) nlp->paramval;
+ PlaceHolderInfo *phinfo = find_placeholder_info(root, phv);
+ Relids eval_at = phinfo->ph_eval_at;
- root->curOuterParams = foreach_delete_current(root->curOuterParams,
- cell);
- phv->phnullingrels = bms_intersect(phv->phnullingrels,
- leftrelids);
- result = lappend(result, nlp);
+ if (bms_is_subset(eval_at, allleftrelids) &&
+ bms_overlap(eval_at, leftrelids))
+ {
+ root->curOuterParams = foreach_delete_current(root->curOuterParams,
+ cell);
+
+ /*
+ * Deal with an edge case: if the PHV was pulled up out of a
+ * subquery and it contains a subquery that was originally
+ * pushed down from this query level, then that will still be
+ * represented as a SubLink, because SS_process_sublinks won't
+ * recurse into outer PHVs, so it didn't get transformed
+ * during expression preprocessing in the subquery. We need a
+ * version of the PHV that has a SubPlan, which we can get
+ * from the current query level's placeholder_list. This is
+ * quite grotty of course, but dealing with it earlier in the
+ * handling of subplan params would be just as grotty, and it
+ * might end up being a waste of cycles if we don't decide to
+ * treat the PHV as a NestLoopParam. (Perhaps that whole
+ * mechanism should be redesigned someday, but today is not
+ * that day.)
+ */
+ if (root->parse->hasSubLinks)
+ {
+ phv = copyObject(phinfo->ph_var);
+
+ /*
+ * The ph_var will have empty nullingrels, but that
+ * doesn't matter since we're about to overwrite
+ * phv->phnullingrels. Other fields should be OK already.
+ */
+ nlp->paramval = (Var *) phv;
+ }
+
+ phv->phnullingrels =
+ bms_intersect(get_placeholder_nulling_relids(root, phinfo),
+ leftrelids);
+
+ result = lappend(result, nlp);
+ }
}
}
return result;
diff --git a/src/backend/optimizer/util/placeholder.c b/src/backend/optimizer/util/placeholder.c
index 41a4c81e94a..e1cd00a72fb 100644
--- a/src/backend/optimizer/util/placeholder.c
+++ b/src/backend/optimizer/util/placeholder.c
@@ -545,3 +545,43 @@ contain_placeholder_references_walker(Node *node,
return expression_tree_walker(node, contain_placeholder_references_walker,
context);
}
+
+/*
+ * Compute the set of outer-join relids that can null a placeholder.
+ *
+ * This is analogous to RelOptInfo.nulling_relids for Vars, but we compute it
+ * on-the-fly rather than saving it somewhere. Currently the value is needed
+ * at most once per query, so there's little value in doing otherwise. If it
+ * ever gains more widespread use, perhaps we should cache the result in
+ * PlaceHolderInfo.
+ */
+Relids
+get_placeholder_nulling_relids(PlannerInfo *root, PlaceHolderInfo *phinfo)
+{
+ Relids result = NULL;
+ int relid = -1;
+
+ /*
+ * Form the union of all potential nulling OJs for each baserel included
+ * in ph_eval_at.
+ */
+ while ((relid = bms_next_member(phinfo->ph_eval_at, relid)) > 0)
+ {
+ RelOptInfo *rel = root->simple_rel_array[relid];
+
+ /* ignore the RTE_GROUP RTE */
+ if (relid == root->group_rtindex)
+ continue;
+
+ if (rel == NULL) /* must be an outer join */
+ {
+ Assert(bms_is_member(relid, root->outer_join_rels));
+ continue;
+ }
+ result = bms_add_members(result, rel->nulling_relids);
+ }
+
+ /* Now remove any OJs already included in ph_eval_at, and we're done. */
+ result = bms_del_members(result, phinfo->ph_eval_at);
+ return result;
+}
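
As a toy model of the computation, with relids as bit positions in a plain word (purely illustrative, not Bitmapset code):

#include <stdio.h>

int main(void)
{
    /* rels {2,5} plus outer join 7 in eval_at; nulling[] per base rel */
    unsigned eval_at = (1u << 2) | (1u << 5) | (1u << 7);
    unsigned nulling[8] = {0};
    unsigned result = 0;

    nulling[2] = (1u << 6) | (1u << 7);  /* rel 2 nullable by OJs 6 and 7 */
    nulling[5] = (1u << 6);              /* rel 5 nullable by OJ 6 */
    /* nulling[7] stays 0: relid 7 is an outer join, not a base rel */

    for (int relid = 0; relid < 8; relid++)
        if (eval_at & (1u << relid))
            result |= nulling[relid];

    result &= ~eval_at;                  /* drop OJs already in eval_at */
    printf("0x%x\n", result);            /* 0x40: only OJ 6 can null the PHV */
    return 0;
}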
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 1f4d6adda52..34f7c17f576 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -239,107 +239,23 @@ parse_sub_analyze(Node *parseTree, ParseState *parentParseState,
}
/*
- * setQueryLocationAndLength
- * Set query's location and length from statement and ParseState
- *
- * Some statements, like PreparableStmt, can be located within parentheses.
- * For example "(SELECT 1)" or "COPY (UPDATE ...) to x;". For those, we
- * cannot use the whole string from the statement's location or the SQL
- * string would yield incorrectly. The parser will set stmt_len, reflecting
- * the size of the statement within the parentheses. Thus, when stmt_len is
- * available, we need to use it for the Query's stmt_len.
- *
- * For other cases, the parser can't provide the length of individual
- * statements. However, we have the statement's location plus the length
- * (p_stmt_len) and location (p_stmt_location) of the top level RawStmt,
- * stored in pstate. Thus, the statement's length is the RawStmt's length
- * minus how much we've advanced in the RawStmt's string.
- */
-static void
-setQueryLocationAndLength(ParseState *pstate, Query *qry, Node *parseTree)
-{
- ParseLoc stmt_len = 0;
-
- /*
- * If there is no information about the top RawStmt's length, leave it at
- * 0 to use the whole string.
- */
- if (pstate->p_stmt_len == 0)
- return;
-
- switch (nodeTag(parseTree))
- {
- case T_InsertStmt:
- qry->stmt_location = ((InsertStmt *) parseTree)->stmt_location;
- stmt_len = ((InsertStmt *) parseTree)->stmt_len;
- break;
-
- case T_DeleteStmt:
- qry->stmt_location = ((DeleteStmt *) parseTree)->stmt_location;
- stmt_len = ((DeleteStmt *) parseTree)->stmt_len;
- break;
-
- case T_UpdateStmt:
- qry->stmt_location = ((UpdateStmt *) parseTree)->stmt_location;
- stmt_len = ((UpdateStmt *) parseTree)->stmt_len;
- break;
-
- case T_MergeStmt:
- qry->stmt_location = ((MergeStmt *) parseTree)->stmt_location;
- stmt_len = ((MergeStmt *) parseTree)->stmt_len;
- break;
-
- case T_SelectStmt:
- qry->stmt_location = ((SelectStmt *) parseTree)->stmt_location;
- stmt_len = ((SelectStmt *) parseTree)->stmt_len;
- break;
-
- case T_PLAssignStmt:
- qry->stmt_location = ((PLAssignStmt *) parseTree)->location;
- break;
-
- default:
- qry->stmt_location = pstate->p_stmt_location;
- break;
- }
-
- if (stmt_len > 0)
- {
- /* Statement's length is known, use it */
- qry->stmt_len = stmt_len;
- }
- else
- {
- /*
- * Compute the statement's length from the statement's location and
- * the RawStmt's length and location.
- */
- qry->stmt_len = pstate->p_stmt_len - (qry->stmt_location - pstate->p_stmt_location);
- }
-
- /* The calculated statement length should be calculated as positive. */
- Assert(qry->stmt_len >= 0);
-}
-
-/*
* transformTopLevelStmt -
* transform a Parse tree into a Query tree.
*
- * This function is just responsible for storing location data
- * from the RawStmt into the ParseState.
+ * This function is just responsible for transferring statement location data
+ * from the RawStmt into the finished Query.
*/
Query *
transformTopLevelStmt(ParseState *pstate, RawStmt *parseTree)
{
Query *result;
- /* Store RawStmt's length and location in pstate */
- pstate->p_stmt_len = parseTree->stmt_len;
- pstate->p_stmt_location = parseTree->stmt_location;
-
/* We're at top level, so allow SELECT INTO */
result = transformOptionalSelectInto(pstate, parseTree->stmt);
+ result->stmt_location = parseTree->stmt_location;
+ result->stmt_len = parseTree->stmt_len;
+
return result;
}
@@ -508,7 +424,6 @@ transformStmt(ParseState *pstate, Node *parseTree)
/* Mark as original query until we learn differently */
result->querySource = QSRC_ORIGINAL;
result->canSetTag = true;
- setQueryLocationAndLength(pstate, result, parseTree);
return result;
}
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 0b5652071d1..50f53159d58 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -154,7 +154,6 @@ static void base_yyerror(YYLTYPE *yylloc, core_yyscan_t yyscanner,
const char *msg);
static RawStmt *makeRawStmt(Node *stmt, int stmt_location);
static void updateRawStmtEnd(RawStmt *rs, int end_location);
-static void updatePreparableStmtEnd(Node *n, int end_location);
static Node *makeColumnRef(char *colname, List *indirection,
int location, core_yyscan_t yyscanner);
static Node *makeTypeCast(Node *arg, TypeName *typename, int location);
@@ -178,13 +177,13 @@ static void insertSelectOptions(SelectStmt *stmt,
SelectLimit *limitClause,
WithClause *withClause,
core_yyscan_t yyscanner);
-static Node *makeSetOp(SetOperation op, bool all, Node *larg, Node *rarg, int location);
+static Node *makeSetOp(SetOperation op, bool all, Node *larg, Node *rarg);
static Node *doNegate(Node *n, int location);
static void doNegateFloat(Float *v);
static Node *makeAndExpr(Node *lexpr, Node *rexpr, int location);
static Node *makeOrExpr(Node *lexpr, Node *rexpr, int location);
static Node *makeNotExpr(Node *expr, int location);
-static Node *makeAArrayExpr(List *elements, int location);
+static Node *makeAArrayExpr(List *elements, int location, int end_location);
static Node *makeSQLValueFunction(SQLValueFunctionOp op, int32 typmod,
int location);
static Node *makeXmlExpr(XmlExprOp op, char *name, List *named_args,
@@ -523,7 +522,7 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query);
%type <defelt> def_elem reloption_elem old_aggr_elem operator_def_elem
%type <node> def_arg columnElem where_clause where_or_current_clause
a_expr b_expr c_expr AexprConst indirection_el opt_slice_bound
- columnref in_expr having_clause func_table xmltable array_expr
+ columnref having_clause func_table xmltable array_expr
OptWhereClause operator_def_arg
%type <list> opt_column_and_period_list
%type <list> rowsfrom_item rowsfrom_list opt_col_def_list
@@ -3417,7 +3416,6 @@ CopyStmt: COPY opt_binary qualified_name opt_column_list
{
CopyStmt *n = makeNode(CopyStmt);
- updatePreparableStmtEnd($3, @4);
n->relation = NULL;
n->query = $3;
n->attlist = NIL;
@@ -12240,7 +12238,6 @@ InsertStmt:
$5->onConflictClause = $6;
$5->returningClause = $7;
$5->withClause = $1;
- $5->stmt_location = @$;
$$ = (Node *) $5;
}
;
@@ -12431,7 +12428,6 @@ DeleteStmt: opt_with_clause DELETE_P FROM relation_expr_opt_alias
n->whereClause = $6;
n->returningClause = $7;
n->withClause = $1;
- n->stmt_location = @$;
$$ = (Node *) n;
}
;
@@ -12506,7 +12502,6 @@ UpdateStmt: opt_with_clause UPDATE relation_expr_opt_alias
n->whereClause = $7;
n->returningClause = $8;
n->withClause = $1;
- n->stmt_location = @$;
$$ = (Node *) n;
}
;
@@ -12584,7 +12579,6 @@ MergeStmt:
m->joinCondition = $8;
m->mergeWhenClauses = $9;
m->returningClause = $10;
- m->stmt_location = @$;
$$ = (Node *) m;
}
@@ -12825,20 +12819,7 @@ SelectStmt: select_no_parens %prec UMINUS
;
select_with_parens:
- '(' select_no_parens ')'
- {
- SelectStmt *n = (SelectStmt *) $2;
-
- /*
- * As SelectStmt's location starts at the SELECT keyword,
- * we need to track the length of the SelectStmt within
- * parentheses to be able to extract the relevant part
- * of the query. Without this, the RawStmt's length would
- * be used and would include the closing parenthesis.
- */
- n->stmt_len = @3 - @2;
- $$ = $2;
- }
+ '(' select_no_parens ')' { $$ = $2; }
| '(' select_with_parens ')' { $$ = $2; }
;
@@ -12960,7 +12941,6 @@ simple_select:
n->groupDistinct = ($7)->distinct;
n->havingClause = $8;
n->windowClause = $9;
- n->stmt_location = @1;
$$ = (Node *) n;
}
| SELECT distinct_clause target_list
@@ -12978,7 +12958,6 @@ simple_select:
n->groupDistinct = ($7)->distinct;
n->havingClause = $8;
n->windowClause = $9;
- n->stmt_location = @1;
$$ = (Node *) n;
}
| values_clause { $$ = $1; }
@@ -12999,20 +12978,19 @@ simple_select:
n->targetList = list_make1(rt);
n->fromClause = list_make1($2);
- n->stmt_location = @1;
$$ = (Node *) n;
}
| select_clause UNION set_quantifier select_clause
{
- $$ = makeSetOp(SETOP_UNION, $3 == SET_QUANTIFIER_ALL, $1, $4, @1);
+ $$ = makeSetOp(SETOP_UNION, $3 == SET_QUANTIFIER_ALL, $1, $4);
}
| select_clause INTERSECT set_quantifier select_clause
{
- $$ = makeSetOp(SETOP_INTERSECT, $3 == SET_QUANTIFIER_ALL, $1, $4, @1);
+ $$ = makeSetOp(SETOP_INTERSECT, $3 == SET_QUANTIFIER_ALL, $1, $4);
}
| select_clause EXCEPT set_quantifier select_clause
{
- $$ = makeSetOp(SETOP_EXCEPT, $3 == SET_QUANTIFIER_ALL, $1, $4, @1);
+ $$ = makeSetOp(SETOP_EXCEPT, $3 == SET_QUANTIFIER_ALL, $1, $4);
}
;
@@ -13590,7 +13568,6 @@ values_clause:
{
SelectStmt *n = makeNode(SelectStmt);
- n->stmt_location = @1;
n->valuesLists = list_make1($3);
$$ = (Node *) n;
}
@@ -15287,49 +15264,50 @@ a_expr: c_expr { $$ = $1; }
(Node *) list_make2($5, $7),
@2);
}
- | a_expr IN_P in_expr
+ | a_expr IN_P select_with_parens
{
- /* in_expr returns a SubLink or a list of a_exprs */
- if (IsA($3, SubLink))
- {
- /* generate foo = ANY (subquery) */
- SubLink *n = (SubLink *) $3;
+ /* generate foo = ANY (subquery) */
+ SubLink *n = makeNode(SubLink);
- n->subLinkType = ANY_SUBLINK;
- n->subLinkId = 0;
- n->testexpr = $1;
- n->operName = NIL; /* show it's IN not = ANY */
- n->location = @2;
- $$ = (Node *) n;
- }
- else
- {
- /* generate scalar IN expression */
- $$ = (Node *) makeSimpleA_Expr(AEXPR_IN, "=", $1, $3, @2);
- }
+ n->subselect = $3;
+ n->subLinkType = ANY_SUBLINK;
+ n->subLinkId = 0;
+ n->testexpr = $1;
+ n->operName = NIL; /* show it's IN not = ANY */
+ n->location = @2;
+ $$ = (Node *) n;
}
- | a_expr NOT_LA IN_P in_expr %prec NOT_LA
+ | a_expr IN_P '(' expr_list ')'
{
- /* in_expr returns a SubLink or a list of a_exprs */
- if (IsA($4, SubLink))
- {
- /* generate NOT (foo = ANY (subquery)) */
- /* Make an = ANY node */
- SubLink *n = (SubLink *) $4;
-
- n->subLinkType = ANY_SUBLINK;
- n->subLinkId = 0;
- n->testexpr = $1;
- n->operName = NIL; /* show it's IN not = ANY */
- n->location = @2;
- /* Stick a NOT on top; must have same parse location */
- $$ = makeNotExpr((Node *) n, @2);
- }
- else
- {
- /* generate scalar NOT IN expression */
- $$ = (Node *) makeSimpleA_Expr(AEXPR_IN, "<>", $1, $4, @2);
- }
+ /* generate scalar IN expression */
+ A_Expr *n = makeSimpleA_Expr(AEXPR_IN, "=", $1, (Node *) $4, @2);
+
+ n->rexpr_list_start = @3;
+ n->rexpr_list_end = @5;
+ $$ = (Node *) n;
+ }
+ | a_expr NOT_LA IN_P select_with_parens %prec NOT_LA
+ {
+ /* generate NOT (foo = ANY (subquery)) */
+ SubLink *n = makeNode(SubLink);
+
+ n->subselect = $4;
+ n->subLinkType = ANY_SUBLINK;
+ n->subLinkId = 0;
+ n->testexpr = $1;
+ n->operName = NIL; /* show it's IN not = ANY */
+ n->location = @2;
+ /* Stick a NOT on top; must have same parse location */
+ $$ = makeNotExpr((Node *) n, @2);
+ }
+ | a_expr NOT_LA IN_P '(' expr_list ')'
+ {
+ /* generate scalar NOT IN expression */
+ A_Expr *n = makeSimpleA_Expr(AEXPR_IN, "<>", $1, (Node *) $5, @2);
+
+ n->rexpr_list_start = @4;
+ n->rexpr_list_end = @6;
+ $$ = (Node *) n;
}
| a_expr subquery_Op sub_type select_with_parens %prec Op
{
@@ -16764,15 +16742,15 @@ type_list: Typename { $$ = list_make1($1); }
array_expr: '[' expr_list ']'
{
- $$ = makeAArrayExpr($2, @1);
+ $$ = makeAArrayExpr($2, @1, @3);
}
| '[' array_expr_list ']'
{
- $$ = makeAArrayExpr($2, @1);
+ $$ = makeAArrayExpr($2, @1, @3);
}
| '[' ']'
{
- $$ = makeAArrayExpr(NIL, @1);
+ $$ = makeAArrayExpr(NIL, @1, @2);
}
;
@@ -16894,17 +16872,6 @@ trim_list: a_expr FROM expr_list { $$ = lappend($3, $1); }
| expr_list { $$ = $1; }
;
-in_expr: select_with_parens
- {
- SubLink *n = makeNode(SubLink);
-
- n->subselect = $1;
- /* other fields will be filled later */
- $$ = (Node *) n;
- }
- | '(' expr_list ')' { $$ = (Node *) $2; }
- ;
-
/*
* Define SQL-style CASE clause.
* - Full specification
@@ -18748,47 +18715,6 @@ updateRawStmtEnd(RawStmt *rs, int end_location)
rs->stmt_len = end_location - rs->stmt_location;
}
-/*
- * Adjust a PreparableStmt to reflect that it doesn't run to the end of the
- * string.
- */
-static void
-updatePreparableStmtEnd(Node *n, int end_location)
-{
- if (IsA(n, SelectStmt))
- {
- SelectStmt *stmt = (SelectStmt *) n;
-
- stmt->stmt_len = end_location - stmt->stmt_location;
- }
- else if (IsA(n, InsertStmt))
- {
- InsertStmt *stmt = (InsertStmt *) n;
-
- stmt->stmt_len = end_location - stmt->stmt_location;
- }
- else if (IsA(n, UpdateStmt))
- {
- UpdateStmt *stmt = (UpdateStmt *) n;
-
- stmt->stmt_len = end_location - stmt->stmt_location;
- }
- else if (IsA(n, DeleteStmt))
- {
- DeleteStmt *stmt = (DeleteStmt *) n;
-
- stmt->stmt_len = end_location - stmt->stmt_location;
- }
- else if (IsA(n, MergeStmt))
- {
- MergeStmt *stmt = (MergeStmt *) n;
-
- stmt->stmt_len = end_location - stmt->stmt_location;
- }
- else
- elog(ERROR, "unexpected node type %d", (int) n->type);
-}
-
static Node *
makeColumnRef(char *colname, List *indirection,
int location, core_yyscan_t yyscanner)
@@ -19167,14 +19093,11 @@ insertSelectOptions(SelectStmt *stmt,
errmsg("multiple WITH clauses not allowed"),
parser_errposition(exprLocation((Node *) withClause))));
stmt->withClause = withClause;
-
- /* Update SelectStmt's location to the start of the WITH clause */
- stmt->stmt_location = withClause->location;
}
}
static Node *
-makeSetOp(SetOperation op, bool all, Node *larg, Node *rarg, int location)
+makeSetOp(SetOperation op, bool all, Node *larg, Node *rarg)
{
SelectStmt *n = makeNode(SelectStmt);
@@ -19182,7 +19105,6 @@ makeSetOp(SetOperation op, bool all, Node *larg, Node *rarg, int location)
n->all = all;
n->larg = (SelectStmt *) larg;
n->rarg = (SelectStmt *) rarg;
- n->stmt_location = location;
return (Node *) n;
}
@@ -19300,12 +19222,14 @@ makeNotExpr(Node *expr, int location)
}
static Node *
-makeAArrayExpr(List *elements, int location)
+makeAArrayExpr(List *elements, int location, int location_end)
{
A_ArrayExpr *n = makeNode(A_ArrayExpr);
n->elements = elements;
n->location = location;
+ n->list_start = location;
+ n->list_end = location_end;
return (Node *) n;
}
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index 1f8e2d54673..d66276801c6 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -1223,6 +1223,8 @@ transformAExprIn(ParseState *pstate, A_Expr *a)
newa->element_typeid = scalar_type;
newa->elements = aexprs;
newa->multidims = false;
+ newa->list_start = a->rexpr_list_start;
+ newa->list_end = a->rexpr_list_end;
newa->location = -1;
result = (Node *) make_scalar_array_op(pstate,
@@ -2165,6 +2167,8 @@ transformArrayExpr(ParseState *pstate, A_ArrayExpr *a,
/* array_collid will be set by parse_collate.c */
newa->element_typeid = element_type;
newa->elements = newcoercedelems;
+ newa->list_start = a->list_start;
+ newa->list_end = a->list_end;
newa->location = a->location;
return (Node *) newa;
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 62015431fdf..afcf54169c3 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -1279,6 +1279,28 @@ transformTableLikeClause(CreateStmtContext *cxt, TableLikeClause *table_like_cla
lst = RelationGetNotNullConstraints(RelationGetRelid(relation), false,
true);
cxt->nnconstraints = list_concat(cxt->nnconstraints, lst);
+
+ /* Copy comments on not-null constraints */
+ if (table_like_clause->options & CREATE_TABLE_LIKE_COMMENTS)
+ {
+ foreach_node(Constraint, nnconstr, lst)
+ {
+ if ((comment = GetComment(get_relation_constraint_oid(RelationGetRelid(relation),
+ nnconstr->conname, false),
+ ConstraintRelationId,
+ 0)) != NULL)
+ {
+ CommentStmt *stmt = makeNode(CommentStmt);
+
+ stmt->objtype = OBJECT_TABCONSTRAINT;
+ stmt->object = (Node *) list_make3(makeString(cxt->relation->schemaname),
+ makeString(cxt->relation->relname),
+ makeString(nnconstr->conname));
+ stmt->comment = comment;
+ cxt->alist = lappend(cxt->alist, stmt);
+ }
+ }
+ }
}
/*
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 4d4a1a3197e..9474095f271 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -781,10 +781,6 @@ ProcessAutoVacLauncherInterrupts(void)
if (LogMemoryContextPending)
ProcessLogMemoryContextInterrupt();
- /* Publish memory contexts of this process */
- if (PublishMemoryContextPending)
- ProcessGetMemoryContextInterrupt();
-
/* Process sinval catchup interrupts that happened while sleeping */
ProcessCatchupInterrupt();
}
@@ -2077,6 +2073,12 @@ do_autovacuum(void)
}
}
}
+
+ /* Release stuff to avoid per-relation leakage */
+ if (relopts)
+ pfree(relopts);
+ if (tabentry)
+ pfree(tabentry);
}
table_endscan(relScan);
@@ -2093,7 +2095,8 @@ do_autovacuum(void)
Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple);
PgStat_StatTabEntry *tabentry;
Oid relid;
- AutoVacOpts *relopts = NULL;
+ AutoVacOpts *relopts;
+ bool free_relopts = false;
bool dovacuum;
bool doanalyze;
bool wraparound;
@@ -2111,7 +2114,9 @@ do_autovacuum(void)
* main rel
*/
relopts = extract_autovac_opts(tuple, pg_class_desc);
- if (relopts == NULL)
+ if (relopts)
+ free_relopts = true;
+ else
{
av_relation *hentry;
bool found;
@@ -2132,6 +2137,12 @@ do_autovacuum(void)
/* ignore analyze for toast tables */
if (dovacuum)
table_oids = lappend_oid(table_oids, relid);
+
+ /* Release stuff to avoid leakage */
+ if (free_relopts)
+ pfree(relopts);
+ if (tabentry)
+ pfree(tabentry);
}
table_endscan(relScan);
@@ -2223,6 +2234,12 @@ do_autovacuum(void)
get_namespace_name(classForm->relnamespace),
NameStr(classForm->relname))));
+ /*
+ * Deletion might involve TOAST table access, so ensure we have a
+ * valid snapshot.
+ */
+ PushActiveSnapshot(GetTransactionSnapshot());
+
object.classId = RelationRelationId;
object.objectId = relid;
object.objectSubId = 0;
@@ -2235,6 +2252,7 @@ do_autovacuum(void)
* To commit the deletion, end current transaction and start a new
* one. Note this also releases the locks we took.
*/
+ PopActiveSnapshot();
CommitTransactionCommand();
StartTransactionCommand();
@@ -2503,6 +2521,8 @@ deleted:
pg_atomic_test_set_flag(&MyWorkerInfo->wi_dobalance);
}
+ list_free(table_oids);
+
/*
* Perform additional work items, as requested by backends.
*/
@@ -2684,8 +2704,8 @@ deleted2:
/*
* extract_autovac_opts
*
- * Given a relation's pg_class tuple, return the AutoVacOpts portion of
- * reloptions, if set; otherwise, return NULL.
+ * Given a relation's pg_class tuple, return a palloc'd copy of the
+ * AutoVacOpts portion of reloptions, if set; otherwise, return NULL.
*
* Note: callers do not have a relation lock on the table at this point,
* so the table could have been dropped, and its catalog rows gone, after
@@ -2734,6 +2754,7 @@ table_recheck_autovac(Oid relid, HTAB *table_toast_map,
autovac_table *tab = NULL;
bool wraparound;
AutoVacOpts *avopts;
+ bool free_avopts = false;
/* fetch the relation's relcache entry */
classTup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
@@ -2746,8 +2767,10 @@ table_recheck_autovac(Oid relid, HTAB *table_toast_map,
* main table reloptions if the toast table itself doesn't have.
*/
avopts = extract_autovac_opts(classTup, pg_class_desc);
- if (classForm->relkind == RELKIND_TOASTVALUE &&
- avopts == NULL && table_toast_map != NULL)
+ if (avopts)
+ free_avopts = true;
+ else if (classForm->relkind == RELKIND_TOASTVALUE &&
+ table_toast_map != NULL)
{
av_relation *hentry;
bool found;
@@ -2856,6 +2879,8 @@ table_recheck_autovac(Oid relid, HTAB *table_toast_map,
avopts->vacuum_cost_delay >= 0));
}
+ if (free_avopts)
+ pfree(avopts);
heap_freetuple(classTup);
return tab;
}
@@ -2887,6 +2912,10 @@ recheck_relation_needs_vacanalyze(Oid relid,
effective_multixact_freeze_max_age,
dovacuum, doanalyze, wraparound);
+ /* Release tabentry to avoid leakage */
+ if (tabentry)
+ pfree(tabentry);
+
/* ignore ANALYZE for toast tables */
if (classForm->relkind == RELKIND_TOASTVALUE)
*doanalyze = false;
@@ -3144,20 +3173,24 @@ autovacuum_do_vac_analyze(autovac_table *tab, BufferAccessStrategy bstrategy)
VacuumRelation *rel;
List *rel_list;
MemoryContext vac_context;
+ MemoryContext old_context;
/* Let pgstat know what we're doing */
autovac_report_activity(tab);
+ /* Create a context that vacuum() can use as cross-transaction storage */
+ vac_context = AllocSetContextCreate(CurrentMemoryContext,
+ "Vacuum",
+ ALLOCSET_DEFAULT_SIZES);
+
/* Set up one VacuumRelation target, identified by OID, for vacuum() */
+ old_context = MemoryContextSwitchTo(vac_context);
rangevar = makeRangeVar(tab->at_nspname, tab->at_relname, -1);
rel = makeVacuumRelation(rangevar, tab->at_relid, NIL);
rel_list = list_make1(rel);
+ MemoryContextSwitchTo(old_context);
- vac_context = AllocSetContextCreate(CurrentMemoryContext,
- "Vacuum",
- ALLOCSET_DEFAULT_SIZES);
-
- vacuum(rel_list, &tab->at_params, bstrategy, vac_context, true);
+ vacuum(rel_list, tab->at_params, bstrategy, vac_context, true);
MemoryContextDelete(vac_context);
}
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index d3cb3f1891c..fda91ffd1ce 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -663,10 +663,6 @@ ProcessCheckpointerInterrupts(void)
/* Perform logging of memory contexts of this process */
if (LogMemoryContextPending)
ProcessLogMemoryContextInterrupt();
-
- /* Publish memory contexts of this process */
- if (PublishMemoryContextPending)
- ProcessGetMemoryContextInterrupt();
}
/*
diff --git a/src/backend/postmaster/interrupt.c b/src/backend/postmaster/interrupt.c
index f24f574e748..0ae9bf906ec 100644
--- a/src/backend/postmaster/interrupt.c
+++ b/src/backend/postmaster/interrupt.c
@@ -48,10 +48,6 @@ ProcessMainLoopInterrupts(void)
/* Perform logging of memory contexts of this process */
if (LogMemoryContextPending)
ProcessLogMemoryContextInterrupt();
-
- /* Publish memory contexts of this process */
- if (PublishMemoryContextPending)
- ProcessGetMemoryContextInterrupt();
}
/*
diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c
index cb7408acf4c..7e622ae4bd2 100644
--- a/src/backend/postmaster/pgarch.c
+++ b/src/backend/postmaster/pgarch.c
@@ -867,10 +867,6 @@ ProcessPgArchInterrupts(void)
if (LogMemoryContextPending)
ProcessLogMemoryContextInterrupt();
- /* Publish memory contexts of this process */
- if (PublishMemoryContextPending)
- ProcessGetMemoryContextInterrupt();
-
if (ConfigReloadPending)
{
char *archiveLib = pstrdup(XLogArchiveLibrary);
diff --git a/src/backend/postmaster/startup.c b/src/backend/postmaster/startup.c
index 7149a67fcbc..27e86cf393f 100644
--- a/src/backend/postmaster/startup.c
+++ b/src/backend/postmaster/startup.c
@@ -192,10 +192,6 @@ ProcessStartupProcInterrupts(void)
/* Perform logging of memory contexts of this process */
if (LogMemoryContextPending)
ProcessLogMemoryContextInterrupt();
-
- /* Publish memory contexts of this process */
- if (PublishMemoryContextPending)
- ProcessGetMemoryContextInterrupt();
}
diff --git a/src/backend/postmaster/walsummarizer.c b/src/backend/postmaster/walsummarizer.c
index c7a76711cc5..0fec4f1f871 100644
--- a/src/backend/postmaster/walsummarizer.c
+++ b/src/backend/postmaster/walsummarizer.c
@@ -879,10 +879,6 @@ ProcessWalSummarizerInterrupts(void)
/* Perform logging of memory contexts of this process */
if (LogMemoryContextPending)
ProcessLogMemoryContextInterrupt();
-
- /* Publish memory contexts of this process */
- if (PublishMemoryContextPending)
- ProcessGetMemoryContextInterrupt();
}
/*
diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c
index 10677da56b2..4aed0dfcebb 100644
--- a/src/backend/replication/logical/launcher.c
+++ b/src/backend/replication/logical/launcher.c
@@ -175,12 +175,14 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
uint16 generation,
BackgroundWorkerHandle *handle)
{
- BgwHandleStatus status;
- int rc;
+ bool result = false;
+ bool dropped_latch = false;
for (;;)
{
+ BgwHandleStatus status;
pid_t pid;
+ int rc;
CHECK_FOR_INTERRUPTS();
@@ -189,8 +191,9 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
/* Worker either died or has started. Return false if died. */
if (!worker->in_use || worker->proc)
{
+ result = worker->in_use;
LWLockRelease(LogicalRepWorkerLock);
- return worker->in_use;
+ break;
}
LWLockRelease(LogicalRepWorkerLock);
@@ -205,7 +208,7 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
if (generation == worker->generation)
logicalrep_worker_cleanup(worker);
LWLockRelease(LogicalRepWorkerLock);
- return false;
+ break; /* result is already false */
}
/*
@@ -220,8 +223,18 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker,
{
ResetLatch(MyLatch);
CHECK_FOR_INTERRUPTS();
+ dropped_latch = true;
}
}
+
+ /*
+ * If we had to clear a latch event in order to wait, be sure to restore
+ * it before exiting. Otherwise caller may miss events.
+ */
+ if (dropped_latch)
+ SetLatch(MyLatch);
+
+ return result;
}
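
The dropped_latch flag implements a general latch rule: if you consume a latch event that was not meant for you, re-set the latch before returning so the caller's own wait still fires. A flag-based toy of the pattern (real latches are kernel-backed and multiplexed, so this is illustrative only):

#include <stdbool.h>
#include <stdio.h>

static bool latch_set = true;     /* pretend someone signalled us earlier */

static void wait_for_attach(void)
{
    bool dropped_latch = false;

    for (int i = 0; i < 3; i++)   /* pretend to poll for the worker */
    {
        if (latch_set)
        {
            latch_set = false;    /* consumed an event meant for our caller */
            dropped_latch = true;
        }
    }

    if (dropped_latch)
        latch_set = true;         /* restore it so the caller still wakes up */
}

int main(void)
{
    wait_for_attach();
    printf("latch still set: %d\n", latch_set);   /* prints 1 */
    return 0;
}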
/*
@@ -328,7 +341,7 @@ logicalrep_worker_launch(LogicalRepWorkerType wtype,
if (max_active_replication_origins == 0)
ereport(ERROR,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
- errmsg("cannot start logical replication workers when \"max_active_replication_origins\"=0")));
+ errmsg("cannot start logical replication workers when \"max_active_replication_origins\" is 0")));
/*
* We need to do the modification of the shared memory under lock so that
@@ -1016,7 +1029,7 @@ logicalrep_launcher_attach_dshmem(void)
last_start_times_dsa = dsa_attach(LogicalRepCtx->last_start_dsa);
dsa_pin_mapping(last_start_times_dsa);
last_start_times = dshash_attach(last_start_times_dsa, &dsh_params,
- LogicalRepCtx->last_start_dsh, 0);
+ LogicalRepCtx->last_start_dsh, NULL);
}
MemoryContextSwitchTo(oldcontext);
@@ -1194,10 +1207,21 @@ ApplyLauncherMain(Datum main_arg)
(elapsed = TimestampDifferenceMilliseconds(last_start, now)) >= wal_retrieve_retry_interval)
{
ApplyLauncherSetWorkerStartTime(sub->oid, now);
- logicalrep_worker_launch(WORKERTYPE_APPLY,
- sub->dbid, sub->oid, sub->name,
- sub->owner, InvalidOid,
- DSM_HANDLE_INVALID);
+ if (!logicalrep_worker_launch(WORKERTYPE_APPLY,
+ sub->dbid, sub->oid, sub->name,
+ sub->owner, InvalidOid,
+ DSM_HANDLE_INVALID))
+ {
+ /*
+ * We get here either if we failed to launch a worker
+ * (perhaps for resource-exhaustion reasons) or if we
+ * launched one but it immediately quit. Either way, it
+ * seems appropriate to try again after
+ * wal_retrieve_retry_interval.
+ */
+ wait_time = Min(wait_time,
+ wal_retrieve_retry_interval);
+ }
}
else
{
diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c
index a8d2e024d34..f1eb798f3e9 100644
--- a/src/backend/replication/logical/logical.c
+++ b/src/backend/replication/logical/logical.c
@@ -29,6 +29,7 @@
#include "postgres.h"
#include "access/xact.h"
+#include "access/xlog_internal.h"
#include "access/xlogutils.h"
#include "fmgr.h"
#include "miscadmin.h"
@@ -41,6 +42,7 @@
#include "storage/proc.h"
#include "storage/procarray.h"
#include "utils/builtins.h"
+#include "utils/injection_point.h"
#include "utils/inval.h"
#include "utils/memutils.h"
@@ -1825,10 +1827,26 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
{
bool updated_xmin = false;
bool updated_restart = false;
+ XLogRecPtr restart_lsn pg_attribute_unused();
SpinLockAcquire(&MyReplicationSlot->mutex);
- MyReplicationSlot->data.confirmed_flush = lsn;
+ /* remember the old restart lsn */
+ restart_lsn = MyReplicationSlot->data.restart_lsn;
+
+ /*
+ * Prevent moving the confirmed_flush backwards, as this could lead to
+ * data duplication issues caused by replicating already replicated
+ * changes.
+ *
+ * This can happen when a client acknowledges an LSN it doesn't have
+ * to do anything for, and thus didn't store persistently. After a
+ * restart, the client can send the prior LSN that it stored
+ * persistently as an acknowledgement, but we need to ignore such an
+ * LSN. See similar case handling in CreateDecodingContext.
+ */
+ if (lsn > MyReplicationSlot->data.confirmed_flush)
+ MyReplicationSlot->data.confirmed_flush = lsn;
/* if we're past the location required for bumping xmin, do so */
if (MyReplicationSlot->candidate_xmin_lsn != InvalidXLogRecPtr &&
@@ -1869,6 +1887,18 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
/* first write new xmin to disk, so we know what's up after a crash */
if (updated_xmin || updated_restart)
{
+#ifdef USE_INJECTION_POINTS
+ XLogSegNo seg1,
+ seg2;
+
+ XLByteToSeg(restart_lsn, seg1, wal_segment_size);
+ XLByteToSeg(MyReplicationSlot->data.restart_lsn, seg2, wal_segment_size);
+
+ /* trigger injection point, but only if segment changes */
+ if (seg1 != seg2)
+ INJECTION_POINT("logical-replication-slot-advance-segment", NULL);
+#endif
+
ReplicationSlotMarkDirty();
ReplicationSlotSave();
elog(DEBUG1, "updated xmin: %u restart: %u", updated_xmin, updated_restart);
@@ -1893,7 +1923,14 @@ LogicalConfirmReceivedLocation(XLogRecPtr lsn)
else
{
SpinLockAcquire(&MyReplicationSlot->mutex);
- MyReplicationSlot->data.confirmed_flush = lsn;
+
+ /*
+ * Prevent moving the confirmed_flush backwards. See comments above
+ * for the details.
+ */
+ if (lsn > MyReplicationSlot->data.confirmed_flush)
+ MyReplicationSlot->data.confirmed_flush = lsn;
+
SpinLockRelease(&MyReplicationSlot->mutex);
}
}
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
index 67655111875..c4299c76fb1 100644
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -109,10 +109,22 @@
#include "storage/procarray.h"
#include "storage/sinval.h"
#include "utils/builtins.h"
+#include "utils/inval.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/relfilenumbermap.h"
+/*
+ * Each transaction has an 8MB limit for invalidation messages distributed from
+ * other transactions. This limit is set considering scenarios with many
+ * concurrent logical decoding operations. When the number of distributed
+ * invalidation messages reaches this threshold, the transaction is marked
+ * RBTXN_DISTR_INVAL_OVERFLOWED and the complete cache is invalidated, since
+ * some inval messages have been lost and we no longer know exactly what
+ * needs to be invalidated.
+ */
+#define MAX_DISTR_INVAL_MSG_PER_TXN \
+ ((8 * 1024 * 1024) / sizeof(SharedInvalidationMessage))
+
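
For scale: assuming the common 16-byte SharedInvalidationMessage, the 8MB budget caps a transaction at 524,288 queued distributed messages. A back-of-envelope check (the struct size is platform-dependent, so the constant here is an assumption):

#include <stdio.h>

int main(void)
{
    unsigned long msg_size = 16;  /* assumed sizeof(SharedInvalidationMessage) */

    printf("%lu\n", (8UL * 1024 * 1024) / msg_size);  /* 524288 */
    return 0;
}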
/* entry for a hash table we use to map from xid to our transaction state */
typedef struct ReorderBufferTXNByIdEnt
{
@@ -472,6 +484,12 @@ ReorderBufferFreeTXN(ReorderBuffer *rb, ReorderBufferTXN *txn)
txn->invalidations = NULL;
}
+ if (txn->invalidations_distributed)
+ {
+ pfree(txn->invalidations_distributed);
+ txn->invalidations_distributed = NULL;
+ }
+
/* Reset the toast hash */
ReorderBufferToastReset(rb, txn);
@@ -2661,7 +2679,17 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn,
AbortCurrentTransaction();
/* make sure there's no cache pollution */
- ReorderBufferExecuteInvalidations(txn->ninvalidations, txn->invalidations);
+ if (rbtxn_distr_inval_overflowed(txn))
+ {
+ Assert(txn->ninvalidations_distributed == 0);
+ InvalidateSystemCaches();
+ }
+ else
+ {
+ ReorderBufferExecuteInvalidations(txn->ninvalidations, txn->invalidations);
+ ReorderBufferExecuteInvalidations(txn->ninvalidations_distributed,
+ txn->invalidations_distributed);
+ }
if (using_subtxn)
RollbackAndReleaseCurrentSubTransaction();
@@ -2710,8 +2738,17 @@ ReorderBufferProcessTXN(ReorderBuffer *rb, ReorderBufferTXN *txn,
AbortCurrentTransaction();
/* make sure there's no cache pollution */
- ReorderBufferExecuteInvalidations(txn->ninvalidations,
- txn->invalidations);
+ if (rbtxn_distr_inval_overflowed(txn))
+ {
+ Assert(txn->ninvalidations_distributed == 0);
+ InvalidateSystemCaches();
+ }
+ else
+ {
+ ReorderBufferExecuteInvalidations(txn->ninvalidations, txn->invalidations);
+ ReorderBufferExecuteInvalidations(txn->ninvalidations_distributed,
+ txn->invalidations_distributed);
+ }
if (using_subtxn)
RollbackAndReleaseCurrentSubTransaction();
@@ -3060,7 +3097,8 @@ ReorderBufferAbort(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn,
* We might have decoded changes for this transaction that could load
* the cache as per the current transaction's view (consider DDL's
* happened in this transaction). We don't want the decoding of future
- * transactions to use those cache entries so execute invalidations.
+ * transactions to use those cache entries, so execute only the inval
+ * messages in this transaction.
*/
if (txn->ninvalidations > 0)
ReorderBufferImmediateInvalidation(rb, txn->ninvalidations,
@@ -3147,9 +3185,10 @@ ReorderBufferForget(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn)
txn->final_lsn = lsn;
/*
- * Process cache invalidation messages if there are any. Even if we're not
- * interested in the transaction's contents, it could have manipulated the
- * catalog and we need to update the caches according to that.
+ * Process only the cache invalidation messages in this transaction, if
+ * any. Even if we're not interested in the transaction's contents, it
+ * could have manipulated the catalog and we need to update the caches
+ * according to that.
*/
if (txn->base_snapshot != NULL && txn->ninvalidations > 0)
ReorderBufferImmediateInvalidation(rb, txn->ninvalidations,
@@ -3422,6 +3461,57 @@ ReorderBufferAddNewTupleCids(ReorderBuffer *rb, TransactionId xid,
}
/*
+ * Add new invalidation messages to the reorder buffer queue.
+ */
+static void
+ReorderBufferQueueInvalidations(ReorderBuffer *rb, TransactionId xid,
+ XLogRecPtr lsn, Size nmsgs,
+ SharedInvalidationMessage *msgs)
+{
+ ReorderBufferChange *change;
+
+ change = ReorderBufferAllocChange(rb);
+ change->action = REORDER_BUFFER_CHANGE_INVALIDATION;
+ change->data.inval.ninvalidations = nmsgs;
+ change->data.inval.invalidations = (SharedInvalidationMessage *)
+ palloc(sizeof(SharedInvalidationMessage) * nmsgs);
+ memcpy(change->data.inval.invalidations, msgs,
+ sizeof(SharedInvalidationMessage) * nmsgs);
+
+ ReorderBufferQueueChange(rb, xid, lsn, change, false);
+}
+
+/*
+ * A helper function for ReorderBufferAddInvalidations() and
+ * ReorderBufferAddDistributedInvalidations() to accumulate the invalidation
+ * messages into the array pointed to by *invals_out.
+ */
+static void
+ReorderBufferAccumulateInvalidations(SharedInvalidationMessage **invals_out,
+ uint32 *ninvals_out,
+ SharedInvalidationMessage *msgs_new,
+ Size nmsgs_new)
+{
+ if (*ninvals_out == 0)
+ {
+ *ninvals_out = nmsgs_new;
+ *invals_out = (SharedInvalidationMessage *)
+ palloc(sizeof(SharedInvalidationMessage) * nmsgs_new);
+ memcpy(*invals_out, msgs_new, sizeof(SharedInvalidationMessage) * nmsgs_new);
+ }
+ else
+ {
+ /* Enlarge the array of inval messages */
+ *invals_out = (SharedInvalidationMessage *)
+ repalloc(*invals_out, sizeof(SharedInvalidationMessage) *
+ (*ninvals_out + nmsgs_new));
+ memcpy(*invals_out + *ninvals_out, msgs_new,
+ nmsgs_new * sizeof(SharedInvalidationMessage));
+ *ninvals_out += nmsgs_new;
+ }
+}
+
+/*
* Accumulate the invalidations for executing them later.
*
* This needs to be called for each XLOG_XACT_INVALIDATIONS message and
@@ -3441,7 +3531,6 @@ ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid,
{
ReorderBufferTXN *txn;
MemoryContext oldcontext;
- ReorderBufferChange *change;
txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
@@ -3456,35 +3545,76 @@ ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid,
Assert(nmsgs > 0);
- /* Accumulate invalidations. */
- if (txn->ninvalidations == 0)
- {
- txn->ninvalidations = nmsgs;
- txn->invalidations = (SharedInvalidationMessage *)
- palloc(sizeof(SharedInvalidationMessage) * nmsgs);
- memcpy(txn->invalidations, msgs,
- sizeof(SharedInvalidationMessage) * nmsgs);
- }
- else
+ ReorderBufferAccumulateInvalidations(&txn->invalidations,
+ &txn->ninvalidations,
+ msgs, nmsgs);
+
+ ReorderBufferQueueInvalidations(rb, xid, lsn, nmsgs, msgs);
+
+ MemoryContextSwitchTo(oldcontext);
+}
+
+/*
+ * Accumulate the invalidations distributed by other committed transactions
+ * for executing them later.
+ *
+ * This function is similar to ReorderBufferAddInvalidations() but stores
+ * the given inval messages in txn->invalidations_distributed, subject to
+ * an overflow check.
+ *
+ * This is called while decoding a committed transaction to distribute its
+ * inval messages to concurrent in-progress transactions.
+ */
+void
+ReorderBufferAddDistributedInvalidations(ReorderBuffer *rb, TransactionId xid,
+ XLogRecPtr lsn, Size nmsgs,
+ SharedInvalidationMessage *msgs)
+{
+ ReorderBufferTXN *txn;
+ MemoryContext oldcontext;
+
+ txn = ReorderBufferTXNByXid(rb, xid, true, NULL, lsn, true);
+
+ oldcontext = MemoryContextSwitchTo(rb->context);
+
+ /*
+ * Collect all the invalidations under the top transaction, if available,
+ * so that we can execute them all together. See comments in
+ * ReorderBufferAddInvalidations().
+ */
+ txn = rbtxn_get_toptxn(txn);
+
+ Assert(nmsgs > 0);
+
+ if (!rbtxn_distr_inval_overflowed(txn))
{
- txn->invalidations = (SharedInvalidationMessage *)
- repalloc(txn->invalidations, sizeof(SharedInvalidationMessage) *
- (txn->ninvalidations + nmsgs));
+ /*
+ * Check whether the transaction has enough space to store the
+ * distributed invalidation messages.
+ */
+ if (txn->ninvalidations_distributed + nmsgs >= MAX_DISTR_INVAL_MSG_PER_TXN)
+ {
+ /*
+ * Mark the transaction's distributed invalidations as overflowed and
+ * free the messages accumulated so far.
+ */
+ txn->txn_flags |= RBTXN_DISTR_INVAL_OVERFLOWED;
- memcpy(txn->invalidations + txn->ninvalidations, msgs,
- nmsgs * sizeof(SharedInvalidationMessage));
- txn->ninvalidations += nmsgs;
+ if (txn->invalidations_distributed)
+ {
+ pfree(txn->invalidations_distributed);
+ txn->invalidations_distributed = NULL;
+ txn->ninvalidations_distributed = 0;
+ }
+ }
+ else
+ ReorderBufferAccumulateInvalidations(&txn->invalidations_distributed,
+ &txn->ninvalidations_distributed,
+ msgs, nmsgs);
}
- change = ReorderBufferAllocChange(rb);
- change->action = REORDER_BUFFER_CHANGE_INVALIDATION;
- change->data.inval.ninvalidations = nmsgs;
- change->data.inval.invalidations = (SharedInvalidationMessage *)
- palloc(sizeof(SharedInvalidationMessage) * nmsgs);
- memcpy(change->data.inval.invalidations, msgs,
- sizeof(SharedInvalidationMessage) * nmsgs);
-
- ReorderBufferQueueChange(rb, xid, lsn, change, false);
+ /* Queue the invalidation messages into the transaction */
+ ReorderBufferQueueInvalidations(rb, xid, lsn, nmsgs, msgs);
MemoryContextSwitchTo(oldcontext);
}
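
The distributed-invalidation path above is a capped accumulation: append until a threshold is reached, then discard the array and fall back to a blanket cache reset. A standalone sketch of the same pattern, with a tiny cap and a stand-in message type in place of SharedInvalidationMessage and the 8MB-derived limit:

    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct { int id; } Msg;     /* stand-in message type */

    #define MAX_MSGS_PER_TXN 8          /* tiny cap for illustration */

    typedef struct
    {
        Msg    *msgs;
        size_t  nmsgs;
        bool    overflowed;
    } TxnInvals;

    static void
    add_distributed(TxnInvals *txn, const Msg *newmsgs, size_t n)
    {
        if (txn->overflowed)
            return;                 /* already gave up; caller resets all caches */

        if (txn->nmsgs + n >= MAX_MSGS_PER_TXN)
        {
            /* Cap reached: drop what we have and remember the overflow */
            free(txn->msgs);
            txn->msgs = NULL;
            txn->nmsgs = 0;
            txn->overflowed = true;
            return;
        }

        /* Enlarge the array and append; realloc(NULL, ...) acts as malloc */
        txn->msgs = realloc(txn->msgs, (txn->nmsgs + n) * sizeof(Msg));
        memcpy(txn->msgs + txn->nmsgs, newmsgs, n * sizeof(Msg));
        txn->nmsgs += n;
    }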
diff --git a/src/backend/replication/logical/slotsync.c b/src/backend/replication/logical/slotsync.c
index 656e66e0ae0..3ec3abfa3da 100644
--- a/src/backend/replication/logical/slotsync.c
+++ b/src/backend/replication/logical/slotsync.c
@@ -211,9 +211,9 @@ update_local_synced_slot(RemoteSlot *remote_slot, Oid remote_dbid,
* impact the users, so we used DEBUG1 level to log the message.
*/
ereport(slot->data.persistency == RS_TEMPORARY ? LOG : DEBUG1,
- errmsg("could not synchronize replication slot \"%s\" because remote slot precedes local slot",
+ errmsg("could not synchronize replication slot \"%s\"",
remote_slot->name),
- errdetail("The remote slot has LSN %X/%X and catalog xmin %u, but the local slot has LSN %X/%X and catalog xmin %u.",
+ errdetail("Synchronization could lead to data loss, because the remote slot needs WAL at LSN %X/%X and catalog xmin %u, but the standby has LSN %X/%X and catalog xmin %u.",
LSN_FORMAT_ARGS(remote_slot->restart_lsn),
remote_slot->catalog_xmin,
LSN_FORMAT_ARGS(slot->data.restart_lsn),
@@ -593,7 +593,7 @@ update_and_persist_local_synced_slot(RemoteSlot *remote_slot, Oid remote_dbid)
{
ereport(LOG,
errmsg("could not synchronize replication slot \"%s\"", remote_slot->name),
- errdetail("Logical decoding could not find consistent point from local slot's LSN %X/%X.",
+ errdetail("Synchronization could lead to data loss, because the standby could not build a consistent snapshot to decode WALs at LSN %X/%X.",
LSN_FORMAT_ARGS(slot->data.restart_lsn)));
return false;
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
index 0d7bddbe4ed..adf18c397db 100644
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -794,6 +794,13 @@ SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, Transact
* contents built by the current transaction even after its decoding,
* which should have been invalidated due to concurrent catalog
* changing transaction.
+ *
+ * Distribute only the invalidation messages generated by the current
+ * committed transaction. Invalidation messages received from other
+ * transactions would have already been propagated to the relevant
+ * in-progress transactions. This transaction would have processed
+ * those invalidations, ensuring that subsequent transactions observe
+ * a consistent cache state.
*/
if (txn->xid != xid)
{
@@ -807,8 +814,9 @@ SnapBuildDistributeSnapshotAndInval(SnapBuild *builder, XLogRecPtr lsn, Transact
{
Assert(msgs != NULL);
- ReorderBufferAddInvalidations(builder->reorder, txn->xid, lsn,
- ninvalidations, msgs);
+ ReorderBufferAddDistributedInvalidations(builder->reorder,
+ txn->xid, lsn,
+ ninvalidations, msgs);
}
}
}
diff --git a/src/backend/replication/logical/tablesync.c b/src/backend/replication/logical/tablesync.c
index 8e1e8762f62..c90f23ee5b0 100644
--- a/src/backend/replication/logical/tablesync.c
+++ b/src/backend/replication/logical/tablesync.c
@@ -603,14 +603,19 @@ process_syncing_tables_for_apply(XLogRecPtr current_lsn)
TimestampDifferenceExceeds(hentry->last_start_time, now,
wal_retrieve_retry_interval))
{
- logicalrep_worker_launch(WORKERTYPE_TABLESYNC,
- MyLogicalRepWorker->dbid,
- MySubscription->oid,
- MySubscription->name,
- MyLogicalRepWorker->userid,
- rstate->relid,
- DSM_HANDLE_INVALID);
+ /*
+ * Set the last_start_time even if we fail to start
+ * the worker, so that we won't retry until
+ * wal_retrieve_retry_interval has elapsed.
+ */
hentry->last_start_time = now;
+ (void) logicalrep_worker_launch(WORKERTYPE_TABLESYNC,
+ MyLogicalRepWorker->dbid,
+ MySubscription->oid,
+ MySubscription->name,
+ MyLogicalRepWorker->userid,
+ rstate->relid,
+ DSM_HANDLE_INVALID);
}
}
}
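
The reordering in the tablesync hunk encodes a retry-throttling rule: record the attempt time before trying to launch, so a failed launch also counts against the retry interval. A self-contained sketch of that ordering, with a stubbed launcher standing in for logicalrep_worker_launch() and a fixed interval standing in for wal_retrieve_retry_interval:

    #include <stdbool.h>
    #include <time.h>

    #define RETRY_INTERVAL_SECS 5       /* stand-in for wal_retrieve_retry_interval */

    typedef struct
    {
        time_t  last_start_time;
    } WorkerEntry;

    static bool
    try_launch_worker(void)
    {
        return false;                   /* stub: pretend the launch failed */
    }

    static void
    maybe_launch(WorkerEntry *entry)
    {
        time_t  now = time(NULL);

        if (now - entry->last_start_time < RETRY_INTERVAL_SECS)
            return;                     /* throttled */

        /* Record the attempt first: a failed launch is also throttled */
        entry->last_start_time = now;
        (void) try_launch_worker();
    }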
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index 4151a4b2a96..fd11805a44c 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -109,13 +109,6 @@
* If ever a user needs to be aware of the tri-state value, they can fetch it
* from the pg_subscription catalog (see column subtwophasestate).
*
- * We don't allow to toggle two_phase option of a subscription because it can
- * lead to an inconsistent replica. Consider, initially, it was on and we have
- * received some prepare then we turn it off, now at commit time the server
- * will send the entire transaction data along with the commit. With some more
- * analysis, we can allow changing this option from off to on but not sure if
- * that alone would be useful.
- *
* Finally, to avoid problems mentioned in previous paragraphs from any
* subsequent (not READY) tablesyncs (need to toggle two_phase option from 'on'
* to 'off' and then again back to 'on') there is a restriction for
@@ -4626,8 +4619,16 @@ run_apply_worker()
walrcv_startstreaming(LogRepWorkerWalRcvConn, &options);
StartTransactionCommand();
+
+ /*
+ * Updating pg_subscription might involve TOAST table access, so
+ * ensure we have a valid snapshot.
+ */
+ PushActiveSnapshot(GetTransactionSnapshot());
+
UpdateTwoPhaseState(MySubscription->oid, LOGICALREP_TWOPHASE_STATE_ENABLED);
MySubscription->twophasestate = LOGICALREP_TWOPHASE_STATE_ENABLED;
+ PopActiveSnapshot();
CommitTransactionCommand();
}
else
@@ -4843,7 +4844,15 @@ DisableSubscriptionAndExit(void)
/* Disable the subscription */
StartTransactionCommand();
+
+ /*
+ * Updating pg_subscription might involve TOAST table access, so ensure we
+ * have a valid snapshot.
+ */
+ PushActiveSnapshot(GetTransactionSnapshot());
+
DisableSubscription(MySubscription->oid);
+ PopActiveSnapshot();
CommitTransactionCommand();
/* Ensure we remove no-longer-useful entry for worker's start time */
@@ -4948,6 +4957,12 @@ clear_subscription_skip_lsn(XLogRecPtr finish_lsn)
}
/*
+ * Updating pg_subscription might involve TOAST table access, so ensure we
+ * have a valid snapshot.
+ */
+ PushActiveSnapshot(GetTransactionSnapshot());
+
+ /*
* Protect subskiplsn of pg_subscription from being concurrently updated
* while clearing it.
*/
@@ -5005,6 +5020,8 @@ clear_subscription_skip_lsn(XLogRecPtr finish_lsn)
heap_freetuple(tup);
table_close(rel, NoLock);
+ PopActiveSnapshot();
+
if (started_tx)
CommitTransactionCommand();
}
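
All three worker.c hunks apply one discipline: wrap a pg_subscription update that might detoast catalog data in a push/pop of an active snapshot. A minimal sketch of the shape of that discipline; the functions here are stubs, not the real snapshot API:

    typedef struct Snapshot Snapshot;   /* opaque stand-in */

    static Snapshot *get_transaction_snapshot(void) { return (Snapshot *) 0; } /* stub */
    static void push_active_snapshot(Snapshot *snap) { (void) snap; }          /* stub */
    static void pop_active_snapshot(void) {}                                   /* stub */
    static void update_subscription_row(void) {}   /* stub: may detoast catalog data */

    static void
    update_subscription_with_snapshot(void)
    {
        /* Detoasting during the update requires an active snapshot */
        push_active_snapshot(get_transaction_snapshot());
        update_subscription_row();
        pop_active_snapshot();
    }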
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
index 693a766e6d7..082b4d9d327 100644
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -1789,7 +1789,7 @@ LoadPublications(List *pubnames)
else
ereport(WARNING,
errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("skipped loading publication: %s", pubname),
+ errmsg("skipped loading publication \"%s\"", pubname),
errdetail("The publication does not exist at this point in the WAL."),
errhint("Create the publication if it does not exist."));
}
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
index 600b87fa9cb..f9fec50ae88 100644
--- a/src/backend/replication/slot.c
+++ b/src/backend/replication/slot.c
@@ -424,6 +424,7 @@ ReplicationSlotCreate(const char *name, bool db_specific,
slot->candidate_restart_valid = InvalidXLogRecPtr;
slot->candidate_restart_lsn = InvalidXLogRecPtr;
slot->last_saved_confirmed_flush = InvalidXLogRecPtr;
+ slot->last_saved_restart_lsn = InvalidXLogRecPtr;
slot->inactive_since = 0;
/*
@@ -1165,20 +1166,41 @@ ReplicationSlotsComputeRequiredLSN(void)
{
ReplicationSlot *s = &ReplicationSlotCtl->replication_slots[i];
XLogRecPtr restart_lsn;
+ XLogRecPtr last_saved_restart_lsn;
bool invalidated;
+ ReplicationSlotPersistency persistency;
if (!s->in_use)
continue;
SpinLockAcquire(&s->mutex);
+ persistency = s->data.persistency;
restart_lsn = s->data.restart_lsn;
invalidated = s->data.invalidated != RS_INVAL_NONE;
+ last_saved_restart_lsn = s->last_saved_restart_lsn;
SpinLockRelease(&s->mutex);
/* invalidated slots need not apply */
if (invalidated)
continue;
+ /*
+ * For a persistent slot, use last_saved_restart_lsn to compute the
+ * oldest LSN for removal of WAL segments. The segments between
+ * last_saved_restart_lsn and restart_lsn might still be needed by a
+ * persistent slot after a database crash. Non-persistent slots can't
+ * survive a crash, so we don't care about last_saved_restart_lsn for
+ * them.
+ */
+ if (persistency == RS_PERSISTENT)
+ {
+ if (last_saved_restart_lsn != InvalidXLogRecPtr &&
+ restart_lsn > last_saved_restart_lsn)
+ {
+ restart_lsn = last_saved_restart_lsn;
+ }
+ }
+
if (restart_lsn != InvalidXLogRecPtr &&
(min_required == InvalidXLogRecPtr ||
restart_lsn < min_required))
@@ -1216,7 +1238,9 @@ ReplicationSlotsComputeLogicalRestartLSN(void)
{
ReplicationSlot *s;
XLogRecPtr restart_lsn;
+ XLogRecPtr last_saved_restart_lsn;
bool invalidated;
+ ReplicationSlotPersistency persistency;
s = &ReplicationSlotCtl->replication_slots[i];
@@ -1230,14 +1254,33 @@ ReplicationSlotsComputeLogicalRestartLSN(void)
/* read once, it's ok if it increases while we're checking */
SpinLockAcquire(&s->mutex);
+ persistency = s->data.persistency;
restart_lsn = s->data.restart_lsn;
invalidated = s->data.invalidated != RS_INVAL_NONE;
+ last_saved_restart_lsn = s->last_saved_restart_lsn;
SpinLockRelease(&s->mutex);
/* invalidated slots need not apply */
if (invalidated)
continue;
+ /*
+ * For a persistent slot, use last_saved_restart_lsn to compute the
+ * oldest LSN for removal of WAL segments. The segments between
+ * last_saved_restart_lsn and restart_lsn might still be needed by a
+ * persistent slot after a database crash. Non-persistent slots can't
+ * survive a crash, so we don't care about last_saved_restart_lsn for
+ * them.
+ */
+ if (persistency == RS_PERSISTENT)
+ {
+ if (last_saved_restart_lsn != InvalidXLogRecPtr &&
+ restart_lsn > last_saved_restart_lsn)
+ {
+ restart_lsn = last_saved_restart_lsn;
+ }
+ }
+
if (restart_lsn == InvalidXLogRecPtr)
continue;
@@ -1455,6 +1498,7 @@ ReplicationSlotReserveWal(void)
Assert(slot != NULL);
Assert(slot->data.restart_lsn == InvalidXLogRecPtr);
+ Assert(slot->last_saved_restart_lsn == InvalidXLogRecPtr);
/*
* The replication slot mechanism is used to prevent removal of required
@@ -1835,7 +1879,10 @@ InvalidatePossiblyObsoleteSlot(uint32 possible_causes,
* just rely on .invalidated.
*/
if (invalidation_cause == RS_INVAL_WAL_REMOVED)
+ {
s->data.restart_lsn = InvalidXLogRecPtr;
+ s->last_saved_restart_lsn = InvalidXLogRecPtr;
+ }
/* Let caller know */
*invalidated = true;
@@ -2032,6 +2079,7 @@ void
CheckPointReplicationSlots(bool is_shutdown)
{
int i;
+ bool last_saved_restart_lsn_updated = false;
elog(DEBUG1, "performing replication slot checkpoint");
@@ -2076,9 +2124,23 @@ CheckPointReplicationSlots(bool is_shutdown)
SpinLockRelease(&s->mutex);
}
+ /*
+ * Track whether we're going to update the slot's last_saved_restart_lsn;
+ * we need this to know whether to recompute the required LSN.
+ */
+ if (s->last_saved_restart_lsn != s->data.restart_lsn)
+ last_saved_restart_lsn_updated = true;
+
SaveSlotToPath(s, path, LOG);
}
LWLockRelease(ReplicationSlotAllocationLock);
+
+ /*
+ * Recompute the required LSN if SaveSlotToPath() updated
+ * last_saved_restart_lsn for any slot.
+ */
+ if (last_saved_restart_lsn_updated)
+ ReplicationSlotsComputeRequiredLSN();
}
/*
@@ -2354,6 +2416,7 @@ SaveSlotToPath(ReplicationSlot *slot, const char *dir, int elevel)
if (!slot->just_dirtied)
slot->dirty = false;
slot->last_saved_confirmed_flush = cp.slotdata.confirmed_flush;
+ slot->last_saved_restart_lsn = cp.slotdata.restart_lsn;
SpinLockRelease(&slot->mutex);
LWLockRelease(&slot->io_in_progress_lock);
@@ -2569,6 +2632,7 @@ RestoreSlotFromDisk(const char *name)
slot->effective_xmin = cp.slotdata.xmin;
slot->effective_catalog_xmin = cp.slotdata.catalog_xmin;
slot->last_saved_confirmed_flush = cp.slotdata.confirmed_flush;
+ slot->last_saved_restart_lsn = cp.slotdata.restart_lsn;
slot->candidate_catalog_xmin = InvalidTransactionId;
slot->candidate_xmin_lsn = InvalidXLogRecPtr;
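
The two slot.c computations above share the same rule: for a persistent slot, the effective restart LSN is the older of the in-memory value and the last value saved to disk, since WAL between the two may still be needed after a crash. A standalone sketch over an array of simplified slots:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t XLogRecPtr;
    #define InvalidXLogRecPtr 0

    typedef struct
    {
        bool        in_use;
        bool        persistent;
        bool        invalidated;
        XLogRecPtr  restart_lsn;
        XLogRecPtr  last_saved_restart_lsn;
    } Slot;

    static XLogRecPtr
    compute_required_lsn(const Slot *slots, int n)
    {
        XLogRecPtr  min_required = InvalidXLogRecPtr;

        for (int i = 0; i < n; i++)
        {
            XLogRecPtr  restart_lsn = slots[i].restart_lsn;

            if (!slots[i].in_use || slots[i].invalidated)
                continue;

            /* Persistent slots must also retain WAL back to the saved LSN */
            if (slots[i].persistent &&
                slots[i].last_saved_restart_lsn != InvalidXLogRecPtr &&
                restart_lsn > slots[i].last_saved_restart_lsn)
                restart_lsn = slots[i].last_saved_restart_lsn;

            if (restart_lsn != InvalidXLogRecPtr &&
                (min_required == InvalidXLogRecPtr || restart_lsn < min_required))
                min_required = restart_lsn;
        }
        return min_required;
    }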
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index 9fa8beb6103..f2c33250e8b 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -3449,8 +3449,16 @@ XLogSendLogical(void)
if (flushPtr == InvalidXLogRecPtr ||
logical_decoding_ctx->reader->EndRecPtr >= flushPtr)
{
+ /*
+ * For cascading logical WAL senders, we use the replay LSN instead of
+ * the flush LSN, since logical decoding on a standby only processes
+ * WAL that has been replayed. This distinction becomes particularly
+ * important during shutdown, as new WAL is no longer replayed and the
+ * last replayed LSN marks the furthest point up to which decoding can
+ * proceed.
+ */
if (am_cascading_walsender)
- flushPtr = GetStandbyFlushRecPtr(NULL);
+ flushPtr = GetXLogReplayRecPtr(NULL);
else
flushPtr = GetFlushRecPtr(NULL);
}
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index f0bce5f9ed9..2ef0e7fbf3a 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -4544,7 +4544,7 @@ build_generation_expression(Relation rel, int attrno)
List *
QueryRewrite(Query *parsetree)
{
- uint64 input_query_id = parsetree->queryId;
+ int64 input_query_id = parsetree->queryId;
List *querylist;
List *results;
ListCell *l;
diff --git a/src/backend/storage/aio/aio.c b/src/backend/storage/aio/aio.c
index ebb5a771bfd..3643f27ad6e 100644
--- a/src/backend/storage/aio/aio.c
+++ b/src/backend/storage/aio/aio.c
@@ -184,6 +184,8 @@ pgaio_io_acquire(struct ResourceOwnerData *resowner, PgAioReturn *ret)
PgAioHandle *
pgaio_io_acquire_nb(struct ResourceOwnerData *resowner, PgAioReturn *ret)
{
+ PgAioHandle *ioh = NULL;
+
if (pgaio_my_backend->num_staged_ios >= PGAIO_SUBMIT_BATCH_SIZE)
{
Assert(pgaio_my_backend->num_staged_ios == PGAIO_SUBMIT_BATCH_SIZE);
@@ -193,10 +195,17 @@ pgaio_io_acquire_nb(struct ResourceOwnerData *resowner, PgAioReturn *ret)
if (pgaio_my_backend->handed_out_io)
elog(ERROR, "API violation: Only one IO can be handed out");
+ /*
+ * Probably not needed today, as interrupts should not process this IO,
+ * but...
+ */
+ HOLD_INTERRUPTS();
+
if (!dclist_is_empty(&pgaio_my_backend->idle_ios))
{
dlist_node *ion = dclist_pop_head_node(&pgaio_my_backend->idle_ios);
- PgAioHandle *ioh = dclist_container(PgAioHandle, node, ion);
+
+ ioh = dclist_container(PgAioHandle, node, ion);
Assert(ioh->state == PGAIO_HS_IDLE);
Assert(ioh->owner_procno == MyProcNumber);
@@ -212,11 +221,11 @@ pgaio_io_acquire_nb(struct ResourceOwnerData *resowner, PgAioReturn *ret)
ioh->report_return = ret;
ret->result.status = PGAIO_RS_UNKNOWN;
}
-
- return ioh;
}
- return NULL;
+ RESUME_INTERRUPTS();
+
+ return ioh;
}
/*
@@ -233,6 +242,12 @@ pgaio_io_release(PgAioHandle *ioh)
Assert(ioh->resowner);
pgaio_my_backend->handed_out_io = NULL;
+
+ /*
+ * Note that no interrupts are processed between the handed_out_io
+ * check and the call to reclaim - that's important as otherwise an
+ * interrupt could have already reclaimed the handle.
+ */
pgaio_io_reclaim(ioh);
}
else
@@ -251,6 +266,12 @@ pgaio_io_release_resowner(dlist_node *ioh_node, bool on_error)
Assert(ioh->resowner);
+ /*
+ * Otherwise an interrupt, in the middle of releasing the IO, could end up
+ * trying to wait for the IO, leading to state confusion.
+ */
+ HOLD_INTERRUPTS();
+
ResourceOwnerForgetAioHandle(ioh->resowner, &ioh->resowner_node);
ioh->resowner = NULL;
@@ -291,6 +312,8 @@ pgaio_io_release_resowner(dlist_node *ioh_node, bool on_error)
*/
if (ioh->report_return)
ioh->report_return = NULL;
+
+ RESUME_INTERRUPTS();
}
/*
@@ -359,6 +382,13 @@ pgaio_io_get_wref(PgAioHandle *ioh, PgAioWaitRef *iow)
static inline void
pgaio_io_update_state(PgAioHandle *ioh, PgAioHandleState new_state)
{
+ /*
+ * All callers need to have held interrupts in some form; otherwise,
+ * interrupt processing could wait for the IO to complete while it is in
+ * an intermediate state.
+ */
+ Assert(!INTERRUPTS_CAN_BE_PROCESSED());
+
pgaio_debug_io(DEBUG5, ioh,
"updating state to %s",
pgaio_io_state_get_name(new_state));
@@ -396,6 +426,13 @@ pgaio_io_stage(PgAioHandle *ioh, PgAioOp op)
Assert(pgaio_my_backend->handed_out_io == ioh);
Assert(pgaio_io_has_target(ioh));
+ /*
+ * Otherwise an interrupt, in the middle of staging and possibly executing
+ * the IO, could end up trying to wait for the IO, leading to state
+ * confusion.
+ */
+ HOLD_INTERRUPTS();
+
ioh->op = op;
ioh->result = 0;
@@ -435,6 +472,8 @@ pgaio_io_stage(PgAioHandle *ioh, PgAioOp op)
pgaio_io_prepare_submit(ioh);
pgaio_io_perform_synchronously(ioh);
}
+
+ RESUME_INTERRUPTS();
}
bool
@@ -517,6 +556,13 @@ bool
pgaio_io_was_recycled(PgAioHandle *ioh, uint64 ref_generation, PgAioHandleState *state)
{
*state = ioh->state;
+
+ /*
+ * Ensure that we don't see an earlier state of the handle than ioh->state
+ * due to compiler or CPU reordering. This protects both ->generation as
+ * directly used here, and other fields in the handle accessed in the
+ * caller if the handle was not reused.
+ */
pg_read_barrier();
return ioh->generation != ref_generation;
@@ -544,8 +590,8 @@ pgaio_io_wait(PgAioHandle *ioh, uint64 ref_generation)
&& state != PGAIO_HS_COMPLETED_SHARED
&& state != PGAIO_HS_COMPLETED_LOCAL)
{
- elog(PANIC, "waiting for own IO in wrong state: %d",
- state);
+ elog(PANIC, "waiting for own IO %d in wrong state: %s",
+ pgaio_io_get_id(ioh), pgaio_io_get_state_name(ioh));
}
}
@@ -599,7 +645,13 @@ pgaio_io_wait(PgAioHandle *ioh, uint64 ref_generation)
case PGAIO_HS_COMPLETED_SHARED:
case PGAIO_HS_COMPLETED_LOCAL:
- /* see above */
+
+ /*
+ * Note that no interrupts are processed between
+ * pgaio_io_was_recycled() and this check - that's important
+ * as otherwise an interrupt could have already reclaimed the
+ * handle.
+ */
if (am_owner)
pgaio_io_reclaim(ioh);
return;
@@ -610,6 +662,11 @@ pgaio_io_wait(PgAioHandle *ioh, uint64 ref_generation)
/*
* Make IO handle ready to be reused after IO has completed or after the
* handle has been released without being used.
+ *
+ * Note that callers need to be careful about only calling this in the right
+ * state and that no interrupts can be processed between the state check and
+ * the call to pgaio_io_reclaim(). Otherwise interrupt processing could
+ * already have reclaimed the handle.
*/
static void
pgaio_io_reclaim(PgAioHandle *ioh)
@@ -618,6 +675,9 @@ pgaio_io_reclaim(PgAioHandle *ioh)
Assert(ioh->owner_procno == MyProcNumber);
Assert(ioh->state != PGAIO_HS_IDLE);
+ /* see comment in function header */
+ HOLD_INTERRUPTS();
+
/*
* It's a bit ugly, but right now the easiest place to put the execution
* of local completion callbacks is this function, as we need to execute
@@ -685,6 +745,8 @@ pgaio_io_reclaim(PgAioHandle *ioh)
* efficient in cases where only a few IOs are used.
*/
dclist_push_head(&pgaio_my_backend->idle_ios, &ioh->node);
+
+ RESUME_INTERRUPTS();
}
/*
@@ -697,10 +759,10 @@ pgaio_io_wait_for_free(void)
{
int reclaimed = 0;
- pgaio_debug(DEBUG2, "waiting for free IO with %d pending, %d in-flight, %d idle IOs",
+ pgaio_debug(DEBUG2, "waiting for free IO with %d pending, %u in-flight, %u idle IOs",
pgaio_my_backend->num_staged_ios,
dclist_count(&pgaio_my_backend->in_flight_ios),
- dclist_is_empty(&pgaio_my_backend->idle_ios));
+ dclist_count(&pgaio_my_backend->idle_ios));
/*
* First check if any of our IOs actually have completed - when using
@@ -714,6 +776,16 @@ pgaio_io_wait_for_free(void)
if (ioh->state == PGAIO_HS_COMPLETED_SHARED)
{
+ /*
+ * Note that no interrupts are processed between the state check
+ * and the call to reclaim - that's important as otherwise an
+ * interrupt could have already reclaimed the handle.
+ *
+ * We need to ensure that there's no reordering; in the more common
+ * paths, where we wait for IO, that's done by
+ * pgaio_io_was_recycled().
+ */
+ pg_read_barrier();
pgaio_io_reclaim(ioh);
reclaimed++;
}
@@ -730,13 +802,17 @@ pgaio_io_wait_for_free(void)
if (pgaio_my_backend->num_staged_ios > 0)
pgaio_submit_staged();
+ /* possibly some IOs finished during submission */
+ if (!dclist_is_empty(&pgaio_my_backend->idle_ios))
+ return;
+
if (dclist_count(&pgaio_my_backend->in_flight_ios) == 0)
ereport(ERROR,
errmsg_internal("no free IOs despite no in-flight IOs"),
- errdetail_internal("%d pending, %d in-flight, %d idle IOs",
+ errdetail_internal("%d pending, %u in-flight, %u idle IOs",
pgaio_my_backend->num_staged_ios,
dclist_count(&pgaio_my_backend->in_flight_ios),
- dclist_is_empty(&pgaio_my_backend->idle_ios)));
+ dclist_count(&pgaio_my_backend->idle_ios)));
/*
* Wait for the oldest in-flight IO to complete.
@@ -747,6 +823,7 @@ pgaio_io_wait_for_free(void)
{
PgAioHandle *ioh = dclist_head_element(PgAioHandle, node,
&pgaio_my_backend->in_flight_ios);
+ uint64 generation = ioh->generation;
switch (ioh->state)
{
@@ -763,20 +840,36 @@ pgaio_io_wait_for_free(void)
case PGAIO_HS_COMPLETED_IO:
case PGAIO_HS_SUBMITTED:
pgaio_debug_io(DEBUG2, ioh,
- "waiting for free io with %d in flight",
+ "waiting for free io with %u in flight",
dclist_count(&pgaio_my_backend->in_flight_ios));
/*
* In a more general case this would be racy, because the
* generation could increase after we read ioh->state above.
* But we are only looking at IOs by the current backend and
- * the IO can only be recycled by this backend.
+ * the IO can only be recycled by this backend. Even this is
+ * only OK because we get the handle's generation before
+ * potentially processing interrupts, e.g. as part of
+ * pgaio_debug_io().
*/
- pgaio_io_wait(ioh, ioh->generation);
+ pgaio_io_wait(ioh, generation);
break;
case PGAIO_HS_COMPLETED_SHARED:
- /* it's possible that another backend just finished this IO */
+
+ /*
+ * It's possible that another backend just finished this IO.
+ *
+ * Note that no interrupts are processed between the state
+ * check and the call to reclaim - that's important as
+ * otherwise an interrupt could have already reclaimed the
+ * handle.
+ *
+ * We need to ensure that there's no reordering; in the more
+ * common paths, where we wait for IO, that's done by
+ * pgaio_io_was_recycled().
+ */
+ pg_read_barrier();
pgaio_io_reclaim(ioh);
break;
}
@@ -926,6 +1019,11 @@ pgaio_wref_check_done(PgAioWaitRef *iow)
if (state == PGAIO_HS_COMPLETED_SHARED ||
state == PGAIO_HS_COMPLETED_LOCAL)
{
+ /*
+ * Note that no interrupts are processed between
+ * pgaio_io_was_recycled() and this check - that's important as
+ * otherwise an interrupt could have already reclaimed the handle.
+ */
if (am_owner)
pgaio_io_reclaim(ioh);
return true;
@@ -1153,11 +1251,14 @@ pgaio_closing_fd(int fd)
{
dlist_iter iter;
PgAioHandle *ioh = NULL;
+ uint64 generation;
dclist_foreach(iter, &pgaio_my_backend->in_flight_ios)
{
ioh = dclist_container(PgAioHandle, node, iter.cur);
+ generation = ioh->generation;
+
if (pgaio_io_uses_fd(ioh, fd))
break;
else
@@ -1168,11 +1269,11 @@ pgaio_closing_fd(int fd)
break;
pgaio_debug_io(DEBUG2, ioh,
- "waiting for IO before FD %d gets closed, %d in-flight IOs",
+ "waiting for IO before FD %d gets closed, %u in-flight IOs",
fd, dclist_count(&pgaio_my_backend->in_flight_ios));
/* see comment in pgaio_io_wait_for_free() about raciness */
- pgaio_io_wait(ioh, ioh->generation);
+ pgaio_io_wait(ioh, generation);
}
}
}
@@ -1201,13 +1302,14 @@ pgaio_shutdown(int code, Datum arg)
while (!dclist_is_empty(&pgaio_my_backend->in_flight_ios))
{
PgAioHandle *ioh = dclist_head_element(PgAioHandle, node, &pgaio_my_backend->in_flight_ios);
+ uint64 generation = ioh->generation;
pgaio_debug_io(DEBUG2, ioh,
- "waiting for IO to complete during shutdown, %d in-flight IOs",
+ "waiting for IO to complete during shutdown, %u in-flight IOs",
dclist_count(&pgaio_my_backend->in_flight_ios));
/* see comment in pgaio_io_wait_for_free() about raciness */
- pgaio_io_wait(ioh, ioh->generation);
+ pgaio_io_wait(ioh, generation);
}
pgaio_my_backend = NULL;
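
Several aio.c hunks share one idiom: snapshot the handle's generation before any call that might process interrupts, then pass that snapshot to the wait routine, which detects recycling by comparing generations. A standalone sketch of the idiom, with stand-in types and stubs rather than the real AIO API:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct
    {
        uint64_t    generation;         /* bumped each time the handle is recycled */
    } AioHandle;

    static void
    debug_log(AioHandle *ioh)           /* stand-in; may process interrupts */
    {
        (void) ioh;
    }

    static void
    wait_one(AioHandle *ioh, uint64_t ref_generation)
    {
        /* A recycled handle no longer matches the caller's snapshot */
        if (ioh->generation != ref_generation)
        {
            printf("handle was recycled; nothing to wait for\n");
            return;
        }
        /* ... otherwise wait for the IO to finish ... */
    }

    static void
    wait_for_io(AioHandle *ioh)
    {
        uint64_t    generation = ioh->generation;   /* snapshot first */

        debug_log(ioh);                 /* handle may be recycled in here */
        wait_one(ioh, generation);      /* stale snapshot is detected, not raced */
    }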
diff --git a/src/backend/storage/aio/aio_callback.c b/src/backend/storage/aio/aio_callback.c
index 0ad9795bb7e..03c9bba0802 100644
--- a/src/backend/storage/aio/aio_callback.c
+++ b/src/backend/storage/aio/aio_callback.c
@@ -256,6 +256,9 @@ pgaio_io_call_complete_shared(PgAioHandle *ioh)
pgaio_result_status_string(result.status),
result.id, result.error_data, result.result);
result = ce->cb->complete_shared(ioh, result, cb_data);
+
+ /* the callback should never transition to unknown */
+ Assert(result.status != PGAIO_RS_UNKNOWN);
}
ioh->distilled_result = result;
@@ -290,6 +293,7 @@ pgaio_io_call_complete_local(PgAioHandle *ioh)
/* start with distilled result from shared callback */
result = ioh->distilled_result;
+ Assert(result.status != PGAIO_RS_UNKNOWN);
for (int i = ioh->num_callbacks; i > 0; i--)
{
@@ -306,6 +310,9 @@ pgaio_io_call_complete_local(PgAioHandle *ioh)
pgaio_result_status_string(result.status),
result.id, result.error_data, result.result);
result = ce->cb->complete_local(ioh, result, cb_data);
+
+ /* the callback should never transition to unknown */
+ Assert(result.status != PGAIO_RS_UNKNOWN);
}
/*
diff --git a/src/backend/storage/aio/aio_io.c b/src/backend/storage/aio/aio_io.c
index 00e176135a6..520b5077df2 100644
--- a/src/backend/storage/aio/aio_io.c
+++ b/src/backend/storage/aio/aio_io.c
@@ -181,9 +181,9 @@ pgaio_io_get_op_name(PgAioHandle *ioh)
case PGAIO_OP_INVALID:
return "invalid";
case PGAIO_OP_READV:
- return "read";
+ return "readv";
case PGAIO_OP_WRITEV:
- return "write";
+ return "writev";
}
return NULL; /* silence compiler */
diff --git a/src/backend/storage/aio/method_io_uring.c b/src/backend/storage/aio/method_io_uring.c
index c719ba2727a..b78048328e1 100644
--- a/src/backend/storage/aio/method_io_uring.c
+++ b/src/backend/storage/aio/method_io_uring.c
@@ -126,7 +126,7 @@ pgaio_uring_shmem_size(void)
static void
pgaio_uring_shmem_init(bool first_time)
{
- int TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS - MAX_IO_WORKERS;
+ int TotalProcs = pgaio_uring_procs();
bool found;
pgaio_uring_contexts = (PgAioUringContext *)
@@ -400,9 +400,9 @@ pgaio_uring_wait_one(PgAioHandle *ioh, uint64 ref_generation)
while (true)
{
pgaio_debug_io(DEBUG3, ioh,
- "wait_one io_gen: %llu, ref_gen: %llu, cycle %d",
- (long long unsigned) ioh->generation,
- (long long unsigned) ref_generation,
+ "wait_one io_gen: %" PRIu64 ", ref_gen: %" PRIu64 ", cycle %d",
+ ioh->generation,
+ ref_generation,
waited);
if (pgaio_io_was_recycled(ioh, ref_generation, &state) ||
diff --git a/src/backend/storage/aio/method_worker.c b/src/backend/storage/aio/method_worker.c
index 743cccc2acd..36be179678d 100644
--- a/src/backend/storage/aio/method_worker.c
+++ b/src/backend/storage/aio/method_worker.c
@@ -461,7 +461,12 @@ IoWorkerMain(const void *startup_data, size_t startup_data_len)
int nwakeups = 0;
int worker;
- /* Try to get a job to do. */
+ /*
+ * Try to get a job to do.
+ *
+ * The lwlock acquisition also provides the necessary memory barrier
+ * to ensure that we don't see outdated data in the handle.
+ */
LWLockAcquire(AioWorkerSubmissionQueueLock, LW_EXCLUSIVE);
if ((io_index = pgaio_worker_submission_queue_consume()) == UINT32_MAX)
{
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index f93131a645e..667aa0c0c78 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -7320,7 +7320,7 @@ buffer_readv_report(PgAioResult result, const PgAioTargetData *td,
affected_count > 1 ?
errdetail("Block %u held first zeroed page.",
first + first_off) : 0,
- errhint("See server log for details about the other %u invalid block(s).",
+ errhint("See server log for details about the other %d invalid block(s).",
affected_count + checkfail_count - 1));
return;
}
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 63101d56a07..ba26627f7b0 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -629,7 +629,7 @@ InvalidateLocalBuffer(BufferDesc *bufHdr, bool check_unreferenced)
*/
if (check_unreferenced &&
(LocalRefCount[bufid] != 0 || BUF_STATE_GET_REFCOUNT(buf_state) != 0))
- elog(ERROR, "block %u of %s is still referenced (local %u)",
+ elog(ERROR, "block %u of %s is still referenced (local %d)",
bufHdr->tag.blockNum,
relpathbackend(BufTagGetRelFileLocator(&bufHdr->tag),
MyProcNumber,
diff --git a/src/backend/storage/ipc/ipci.c b/src/backend/storage/ipc/ipci.c
index 00c76d05356..2fa045e6b0f 100644
--- a/src/backend/storage/ipc/ipci.c
+++ b/src/backend/storage/ipc/ipci.c
@@ -51,7 +51,6 @@
#include "storage/sinvaladt.h"
#include "utils/guc.h"
#include "utils/injection_point.h"
-#include "utils/memutils.h"
/* GUCs */
int shared_memory_type = DEFAULT_SHARED_MEMORY_TYPE;
@@ -151,7 +150,6 @@ CalculateShmemSize(int *num_semaphores)
size = add_size(size, InjectionPointShmemSize());
size = add_size(size, SlotSyncShmemSize());
size = add_size(size, AioShmemSize());
- size = add_size(size, MemoryContextReportingShmemSize());
/* include additional requested shmem from preload libraries */
size = add_size(size, total_addin_request);
@@ -345,7 +343,6 @@ CreateOrAttachShmemStructs(void)
WaitEventCustomShmemInit();
InjectionPointShmemInit();
AioShmemInit();
- MemoryContextReportingShmemInit();
}
/*
diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c
index ce69e26d720..a9bb540b55a 100644
--- a/src/backend/storage/ipc/procsignal.c
+++ b/src/backend/storage/ipc/procsignal.c
@@ -691,9 +691,6 @@ procsignal_sigusr1_handler(SIGNAL_ARGS)
if (CheckProcSignal(PROCSIG_LOG_MEMORY_CONTEXT))
HandleLogMemoryContextInterrupt();
- if (CheckProcSignal(PROCSIG_GET_MEMORY_CONTEXT))
- HandleGetMemoryContextInterrupt();
-
if (CheckProcSignal(PROCSIG_PARALLEL_APPLY_MESSAGE))
HandleParallelApplyMessageInterrupt();
diff --git a/src/backend/storage/lmgr/lmgr.c b/src/backend/storage/lmgr/lmgr.c
index f50962983c3..3f6bf70bd3c 100644
--- a/src/backend/storage/lmgr/lmgr.c
+++ b/src/backend/storage/lmgr/lmgr.c
@@ -717,7 +717,10 @@ XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid,
* through, to avoid slowing down the normal case.)
*/
if (!first)
+ {
+ CHECK_FOR_INTERRUPTS();
pg_usleep(1000L);
+ }
first = false;
xid = SubTransGetTopmostTransaction(xid);
}
@@ -757,7 +760,10 @@ ConditionalXactLockTableWait(TransactionId xid, bool logLockFailure)
/* See XactLockTableWait about this case */
if (!first)
+ {
+ CHECK_FOR_INTERRUPTS();
pg_usleep(1000L);
+ }
first = false;
xid = SubTransGetTopmostTransaction(xid);
}
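
Both lmgr.c loops now check for pending interrupts before each 1ms sleep, so a cancel request cannot be starved while chasing a chain of subtransactions. A self-contained sketch of the polling shape, with stand-ins for CHECK_FOR_INTERRUPTS() and the XID test:

    #include <stdbool.h>
    #include <unistd.h>

    static volatile bool interrupt_pending = false;

    static void
    check_for_interrupts(void)          /* stand-in for CHECK_FOR_INTERRUPTS() */
    {
        if (interrupt_pending)
            _exit(1);                   /* stand-in for ereport(ERROR) */
    }

    static void
    wait_for_topmost_xid(bool (*xid_done)(void))
    {
        bool    first = true;

        while (!xid_done())
        {
            if (!first)
            {
                /* Check before sleeping, so a cancel can't be starved */
                check_for_interrupts();
                usleep(1000);           /* 1ms, as in the loops above */
            }
            first = false;
        }
    }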
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 86b06b9223f..2776ceb295b 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -51,7 +51,7 @@
/* GUC variables */
int max_locks_per_xact; /* used to set the lock table size */
-bool log_lock_failure = false;
+bool log_lock_failures = false;
#define NLOCKENTS() \
mul_size(max_locks_per_xact, add_size(MaxBackends, max_prepared_xacts))
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
index 5148ef982e3..46f44bc4511 100644
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -178,8 +178,6 @@ static const char *const BuiltinTrancheNames[] = {
[LWTRANCHE_XACT_SLRU] = "XactSLRU",
[LWTRANCHE_PARALLEL_VACUUM_DSA] = "ParallelVacuumDSA",
[LWTRANCHE_AIO_URING_COMPLETION] = "AioUringCompletion",
- [LWTRANCHE_MEMORY_CONTEXT_REPORTING_STATE] = "MemoryContextReportingState",
- [LWTRANCHE_MEMORY_CONTEXT_REPORTING_PROC] = "MemoryContextReportingPerProcess",
};
StaticAssertDecl(lengthof(BuiltinTrancheNames) ==
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index f194e6b3dcc..e9ef0fbfe32 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -50,7 +50,6 @@
#include "storage/procsignal.h"
#include "storage/spin.h"
#include "storage/standby.h"
-#include "utils/memutils.h"
#include "utils/timeout.h"
#include "utils/timestamp.h"
diff --git a/src/backend/tcop/backend_startup.c b/src/backend/tcop/backend_startup.c
index a7d1fec981f..ad0af5edc1f 100644
--- a/src/backend/tcop/backend_startup.c
+++ b/src/backend/tcop/backend_startup.c
@@ -881,7 +881,7 @@ ProcessCancelRequestPacket(Port *port, void *pkt, int pktlen)
{
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid length of query cancel packet")));
+ errmsg("invalid length of cancel request packet")));
return;
}
len = pktlen - offsetof(CancelRequestPacket, cancelAuthCode);
@@ -889,7 +889,7 @@ ProcessCancelRequestPacket(Port *port, void *pkt, int pktlen)
{
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid length of query cancel key")));
+ errmsg("invalid length of cancel key in cancel request packet")));
return;
}
@@ -1077,7 +1077,7 @@ check_log_connections(char **newval, void **extra, GucSource source)
if (!SplitIdentifierString(rawstring, ',', &elemlist))
{
- GUC_check_errdetail("Invalid list syntax in parameter \"log_connections\".");
+ GUC_check_errdetail("Invalid list syntax in parameter \"%s\".", "log_connections");
pfree(rawstring);
list_free(elemlist);
return false;
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 1ae51b1b391..2f8c3d5f918 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -1226,7 +1226,6 @@ exec_simple_query(const char *query_string)
query_string,
commandTag,
plantree_list,
- NULL,
NULL);
/*
@@ -1683,7 +1682,7 @@ exec_bind_message(StringInfo input_message)
{
Query *query = lfirst_node(Query, lc);
- if (query->queryId != UINT64CONST(0))
+ if (query->queryId != INT64CONST(0))
{
pgstat_report_query_id(query->queryId, false);
break;
@@ -2028,15 +2027,14 @@ exec_bind_message(StringInfo input_message)
query_string,
psrc->commandTag,
cplan->stmt_list,
- cplan,
- psrc);
+ cplan);
/* Portal is defined, set the plan ID based on its contents. */
foreach(lc, portal->stmts)
{
PlannedStmt *plan = lfirst_node(PlannedStmt, lc);
- if (plan->planId != UINT64CONST(0))
+ if (plan->planId != INT64CONST(0))
{
pgstat_report_plan_id(plan->planId, false);
break;
@@ -2176,7 +2174,7 @@ exec_execute_message(const char *portal_name, long max_rows)
{
PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);
- if (stmt->queryId != UINT64CONST(0))
+ if (stmt->queryId != INT64CONST(0))
{
pgstat_report_query_id(stmt->queryId, false);
break;
@@ -2187,7 +2185,7 @@ exec_execute_message(const char *portal_name, long max_rows)
{
PlannedStmt *stmt = lfirst_node(PlannedStmt, lc);
- if (stmt->planId != UINT64CONST(0))
+ if (stmt->planId != INT64CONST(0))
{
pgstat_report_plan_id(stmt->planId, false);
break;
@@ -3535,9 +3533,6 @@ ProcessInterrupts(void)
if (LogMemoryContextPending)
ProcessLogMemoryContextInterrupt();
- if (PublishMemoryContextPending)
- ProcessGetMemoryContextInterrupt();
-
if (ParallelApplyMessagePending)
ProcessParallelApplyMessages();
}
@@ -3695,7 +3690,7 @@ set_debug_options(int debug_flag, GucContext context, GucSource source)
if (debug_flag >= 1 && context == PGC_POSTMASTER)
{
- SetConfigOption("log_connections", "true", context, source);
+ SetConfigOption("log_connections", "all", context, source);
SetConfigOption("log_disconnections", "true", context, source);
}
if (debug_flag >= 2)
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index 8164d0fbb4f..d1593f38b35 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -19,7 +19,6 @@
#include "access/xact.h"
#include "commands/prepare.h"
-#include "executor/execdesc.h"
#include "executor/executor.h"
#include "executor/tstoreReceiver.h"
#include "miscadmin.h"
@@ -38,9 +37,6 @@ Portal ActivePortal = NULL;
static void ProcessQuery(PlannedStmt *plan,
- CachedPlan *cplan,
- CachedPlanSource *plansource,
- int query_index,
const char *sourceText,
ParamListInfo params,
QueryEnvironment *queryEnv,
@@ -70,7 +66,6 @@ static void DoPortalRewind(Portal portal);
*/
QueryDesc *
CreateQueryDesc(PlannedStmt *plannedstmt,
- CachedPlan *cplan,
const char *sourceText,
Snapshot snapshot,
Snapshot crosscheck_snapshot,
@@ -83,7 +78,6 @@ CreateQueryDesc(PlannedStmt *plannedstmt,
qd->operation = plannedstmt->commandType; /* operation */
qd->plannedstmt = plannedstmt; /* plan */
- qd->cplan = cplan; /* CachedPlan supplying the plannedstmt */
qd->sourceText = sourceText; /* query text */
qd->snapshot = RegisterSnapshot(snapshot); /* snapshot */
/* RI check snapshot */
@@ -129,9 +123,6 @@ FreeQueryDesc(QueryDesc *qdesc)
* PORTAL_ONE_RETURNING, or PORTAL_ONE_MOD_WITH portal
*
* plan: the plan tree for the query
- * cplan: CachedPlan supplying the plan
- * plansource: CachedPlanSource supplying the cplan
- * query_index: index of the query in plansource->query_list
* sourceText: the source text of the query
* params: any parameters needed
* dest: where to send results
@@ -144,9 +135,6 @@ FreeQueryDesc(QueryDesc *qdesc)
*/
static void
ProcessQuery(PlannedStmt *plan,
- CachedPlan *cplan,
- CachedPlanSource *plansource,
- int query_index,
const char *sourceText,
ParamListInfo params,
QueryEnvironment *queryEnv,
@@ -158,23 +146,14 @@ ProcessQuery(PlannedStmt *plan,
/*
* Create the QueryDesc object
*/
- queryDesc = CreateQueryDesc(plan, cplan, sourceText,
+ queryDesc = CreateQueryDesc(plan, sourceText,
GetActiveSnapshot(), InvalidSnapshot,
dest, params, queryEnv, 0);
/*
- * Prepare the plan for execution
+ * Call ExecutorStart to prepare the plan for execution
*/
- if (queryDesc->cplan)
- {
- ExecutorStartCachedPlan(queryDesc, 0, plansource, query_index);
- Assert(queryDesc->planstate);
- }
- else
- {
- if (!ExecutorStart(queryDesc, 0))
- elog(ERROR, "ExecutorStart() failed unexpectedly");
- }
+ ExecutorStart(queryDesc, 0);
/*
* Run the plan to completion.
@@ -515,7 +494,6 @@ PortalStart(Portal portal, ParamListInfo params,
* the destination to DestNone.
*/
queryDesc = CreateQueryDesc(linitial_node(PlannedStmt, portal->stmts),
- portal->cplan,
portal->sourceText,
GetActiveSnapshot(),
InvalidSnapshot,
@@ -535,19 +513,9 @@ PortalStart(Portal portal, ParamListInfo params,
myeflags = eflags;
/*
- * Prepare the plan for execution.
+ * Call ExecutorStart to prepare the plan for execution
*/
- if (portal->cplan)
- {
- ExecutorStartCachedPlan(queryDesc, myeflags,
- portal->plansource, 0);
- Assert(queryDesc->planstate);
- }
- else
- {
- if (!ExecutorStart(queryDesc, myeflags))
- elog(ERROR, "ExecutorStart() failed unexpectedly");
- }
+ ExecutorStart(queryDesc, myeflags);
/*
* This tells PortalCleanup to shut down the executor
@@ -1221,7 +1189,6 @@ PortalRunMulti(Portal portal,
{
bool active_snapshot_set = false;
ListCell *stmtlist_item;
- int query_index = 0;
/*
* If the destination is DestRemoteExecute, change to DestNone. The
@@ -1303,9 +1270,6 @@ PortalRunMulti(Portal portal,
{
/* statement can set tag string */
ProcessQuery(pstmt,
- portal->cplan,
- portal->plansource,
- query_index,
portal->sourceText,
portal->portalParams,
portal->queryEnv,
@@ -1315,9 +1279,6 @@ PortalRunMulti(Portal portal,
{
/* stmt added by rewrite cannot set tag */
ProcessQuery(pstmt,
- portal->cplan,
- portal->plansource,
- query_index,
portal->sourceText,
portal->portalParams,
portal->queryEnv,
@@ -1382,8 +1343,6 @@ PortalRunMulti(Portal portal,
*/
if (lnext(portal->stmts, stmtlist_item) != NULL)
CommandCounterIncrement();
-
- query_index++;
}
/* Pop the snapshot if we pushed one. */
diff --git a/src/backend/utils/activity/backend_status.c b/src/backend/utils/activity/backend_status.c
index e1576e64b6d..a290cc4c975 100644
--- a/src/backend/utils/activity/backend_status.c
+++ b/src/backend/utils/activity/backend_status.c
@@ -320,8 +320,8 @@ pgstat_bestart_initial(void)
lbeentry.st_state = STATE_STARTING;
lbeentry.st_progress_command = PROGRESS_COMMAND_INVALID;
lbeentry.st_progress_command_target = InvalidOid;
- lbeentry.st_query_id = UINT64CONST(0);
- lbeentry.st_plan_id = UINT64CONST(0);
+ lbeentry.st_query_id = INT64CONST(0);
+ lbeentry.st_plan_id = INT64CONST(0);
/*
* we don't zero st_progress_param here to save cycles; nobody should
@@ -599,8 +599,8 @@ pgstat_report_activity(BackendState state, const char *cmd_str)
beentry->st_activity_start_timestamp = 0;
/* st_xact_start_timestamp and wait_event_info are also disabled */
beentry->st_xact_start_timestamp = 0;
- beentry->st_query_id = UINT64CONST(0);
- beentry->st_plan_id = UINT64CONST(0);
+ beentry->st_query_id = INT64CONST(0);
+ beentry->st_plan_id = INT64CONST(0);
proc->wait_event_info = 0;
PGSTAT_END_WRITE_ACTIVITY(beentry);
}
@@ -662,8 +662,8 @@ pgstat_report_activity(BackendState state, const char *cmd_str)
*/
if (state == STATE_RUNNING)
{
- beentry->st_query_id = UINT64CONST(0);
- beentry->st_plan_id = UINT64CONST(0);
+ beentry->st_query_id = INT64CONST(0);
+ beentry->st_plan_id = INT64CONST(0);
}
if (cmd_str != NULL)
@@ -683,7 +683,7 @@ pgstat_report_activity(BackendState state, const char *cmd_str)
* --------
*/
void
-pgstat_report_query_id(uint64 query_id, bool force)
+pgstat_report_query_id(int64 query_id, bool force)
{
volatile PgBackendStatus *beentry = MyBEEntry;
@@ -702,7 +702,7 @@ pgstat_report_query_id(uint64 query_id, bool force)
* command, so ignore the one provided unless it's an explicit call to
* reset the identifier.
*/
- if (beentry->st_query_id != 0 && !force)
+ if (beentry->st_query_id != INT64CONST(0) && !force)
return;
/*
@@ -722,7 +722,7 @@ pgstat_report_query_id(uint64 query_id, bool force)
* --------
*/
void
-pgstat_report_plan_id(uint64 plan_id, bool force)
+pgstat_report_plan_id(int64 plan_id, bool force)
{
volatile PgBackendStatus *beentry = MyBEEntry;
@@ -1134,7 +1134,7 @@ pgstat_get_crashed_backend_activity(int pid, char *buffer, int buflen)
*
* Return current backend's query identifier.
*/
-uint64
+int64
pgstat_get_my_query_id(void)
{
if (!MyBEEntry)
@@ -1154,7 +1154,7 @@ pgstat_get_my_query_id(void)
*
* Return current backend's plan identifier.
*/
-uint64
+int64
pgstat_get_my_plan_id(void)
{
if (!MyBEEntry)
diff --git a/src/backend/utils/activity/pgstat_shmem.c b/src/backend/utils/activity/pgstat_shmem.c
index 2e33293b000..53e7d534270 100644
--- a/src/backend/utils/activity/pgstat_shmem.c
+++ b/src/backend/utils/activity/pgstat_shmem.c
@@ -183,7 +183,7 @@ StatsShmemInit(void)
p += MAXALIGN(pgstat_dsa_init_size());
dsa = dsa_create_in_place(ctl->raw_dsa_area,
pgstat_dsa_init_size(),
- LWTRANCHE_PGSTATS_DSA, 0);
+ LWTRANCHE_PGSTATS_DSA, NULL);
dsa_pin(dsa);
/*
@@ -255,7 +255,8 @@ pgstat_attach_shmem(void)
dsa_pin_mapping(pgStatLocal.dsa);
pgStatLocal.shared_hash = dshash_attach(pgStatLocal.dsa, &dsh_params,
- pgStatLocal.shmem->hash_handle, 0);
+ pgStatLocal.shmem->hash_handle,
+ NULL);
MemoryContextSwitchTo(oldcontext);
}
diff --git a/src/backend/utils/activity/wait_event_names.txt b/src/backend/utils/activity/wait_event_names.txt
index 930321905f1..4da68312b5f 100644
--- a/src/backend/utils/activity/wait_event_names.txt
+++ b/src/backend/utils/activity/wait_event_names.txt
@@ -161,7 +161,6 @@ WAL_RECEIVER_EXIT "Waiting for the WAL receiver to exit."
WAL_RECEIVER_WAIT_START "Waiting for startup process to send initial data for streaming replication."
WAL_SUMMARY_READY "Waiting for a new WAL summary to be generated."
XACT_GROUP_UPDATE "Waiting for the group leader to update transaction status at transaction end."
-MEM_CXT_PUBLISH "Waiting for a process to publish memory information."
ABI_compatibility:
@@ -402,6 +401,7 @@ SerialSLRU "Waiting to access the serializable transaction conflict SLRU cache."
SubtransSLRU "Waiting to access the sub-transaction SLRU cache."
XactSLRU "Waiting to access the transaction status SLRU cache."
ParallelVacuumDSA "Waiting for parallel vacuum dynamic shared memory allocation."
+AioUringCompletion "Waiting for another process to complete IO via io_uring."
# No "ABI_compatibility" region here as WaitEventLWLock has its own C code.
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index 793d8a9adcc..680fee2a844 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -702,9 +702,18 @@ ParseFraction(char *cp, double *frac)
}
else
{
+ /*
+ * On the other hand, let's reject anything that's not digits after
+ * the ".". strtod is happy with input like ".123e9", but that'd
+ * break callers' expectation that the result is in 0..1. (It's quite
+ * difficult to get here with such input, but not impossible.)
+ */
+ if (strspn(cp + 1, "0123456789") != strlen(cp + 1))
+ return DTERR_BAD_FORMAT;
+
errno = 0;
*frac = strtod(cp, &cp);
- /* check for parse failure */
+ /* check for parse failure (probably redundant given prior check) */
if (*cp != '\0' || errno != 0)
return DTERR_BAD_FORMAT;
}
@@ -2959,30 +2968,27 @@ DecodeNumberField(int len, char *str, int fmask,
char *cp;
/*
+ * This function was originally meant to cope only with DTK_NUMBER fields,
+ * but we now sometimes abuse it to parse (parts of) DTK_DATE fields,
+ * which can contain letters and other punctuation. Reject if it's not a
+ * valid DTK_NUMBER, that is, digits and decimal point(s). (ParseFraction
+ * will reject if there's more than one decimal point.)
+ */
+ if (strspn(str, "0123456789.") != len)
+ return DTERR_BAD_FORMAT;
+
+ /*
* Have a decimal point? Then this is a date or something with a seconds
* field...
*/
if ((cp = strchr(str, '.')) != NULL)
{
- /*
- * Can we use ParseFractionalSecond here? Not clear whether trailing
- * junk should be rejected ...
- */
- if (cp[1] == '\0')
- {
- /* avoid assuming that strtod will accept "." */
- *fsec = 0;
- }
- else
- {
- double frac;
+ int dterr;
- errno = 0;
- frac = strtod(cp, NULL);
- if (errno != 0)
- return DTERR_BAD_FORMAT;
- *fsec = rint(frac * 1000000);
- }
+ /* Convert the fraction and store at *fsec */
+ dterr = ParseFractionalSecond(cp, fsec);
+ if (dterr)
+ return dterr;
/* Now truncate off the fraction for further processing */
*cp = '\0';
len = strlen(str);
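
The new strspn() checks narrow the accepted alphabet before strtod() ever runs, so input such as ".123e9" is rejected up front rather than parsed as an out-of-range fraction. A standalone sketch of the same validation:

    #include <stdio.h>
    #include <string.h>

    static int
    is_valid_number_field(const char *str)
    {
        /* Reject anything that is not digits or a decimal point */
        return strspn(str, "0123456789.") == strlen(str);
    }

    int
    main(void)
    {
        printf("%d\n", is_valid_number_field("20250101.5"));  /* 1: accepted */
        printf("%d\n", is_valid_number_field(".123e9"));      /* 0: rejected */
        return 0;
    }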
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index 6d20ae07ae7..ba66a9c4ce6 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -4065,8 +4065,8 @@ float84ge(PG_FUNCTION_ARGS)
* in the histogram. width_bucket() returns an integer indicating the
* bucket number that 'operand' belongs to in an equiwidth histogram
* with the specified characteristics. An operand smaller than the
- * lower bound is assigned to bucket 0. An operand greater than the
- * upper bound is assigned to an additional bucket (with number
+ * lower bound is assigned to bucket 0. An operand greater than or equal
+ * to the upper bound is assigned to an additional bucket (with number
* count+1). We don't allow "NaN" for any of the float8 inputs, and we
* don't allow either of the histogram bounds to be +/- infinity.
*/
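
The corrected comment states the boundary rule: an operand equal to the upper bound lands in the overflow bucket count+1, not in bucket count. A simplified standalone illustration of that rule, ignoring the NaN and infinity checks the real function performs:

    #include <stdio.h>

    static int
    width_bucket(double operand, double low, double high, int count)
    {
        if (operand < low)
            return 0;                   /* below the histogram */
        if (operand >= high)
            return count + 1;           /* at or above the upper bound */
        return (int) ((operand - low) / (high - low) * count) + 1;
    }

    int
    main(void)
    {
        printf("%d\n", width_bucket(10.0, 0.0, 10.0, 5));   /* 6: overflow bucket */
        printf("%d\n", width_bucket(9.99, 0.0, 10.0, 5));   /* 5: last real bucket */
        return 0;
    }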
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 5bd1e01f7e4..1d05481181d 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -3590,14 +3590,15 @@ DCH_from_char(FormatNode *node, const char *in, TmFromChar *out,
if (matched < 2)
ereturn(escontext,,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("invalid input string for \"Y,YYY\"")));
+ errmsg("invalid value \"%s\" for \"%s\"",
+ s, "Y,YYY")));
/* years += (millennia * 1000); */
if (pg_mul_s32_overflow(millennia, 1000, &millennia) ||
pg_add_s32_overflow(years, millennia, &years))
ereturn(escontext,,
(errcode(ERRCODE_DATETIME_FIELD_OVERFLOW),
- errmsg("value for \"Y,YYY\" in source string is out of range")));
+ errmsg("value for \"%s\" in source string is out of range", "Y,YYY")));
if (!from_char_set_int(&out->year, years, n, escontext))
return;
diff --git a/src/backend/utils/adt/mcxtfuncs.c b/src/backend/utils/adt/mcxtfuncs.c
index 7ec2c225016..fe6dce9cba3 100644
--- a/src/backend/utils/adt/mcxtfuncs.c
+++ b/src/backend/utils/adt/mcxtfuncs.c
@@ -15,27 +15,30 @@
#include "postgres.h"
-#include "access/twophase.h"
-#include "catalog/pg_authid_d.h"
#include "funcapi.h"
#include "mb/pg_wchar.h"
-#include "miscadmin.h"
#include "storage/proc.h"
#include "storage/procarray.h"
-#include "utils/acl.h"
#include "utils/array.h"
#include "utils/builtins.h"
#include "utils/hsearch.h"
-#include "utils/memutils.h"
-#include "utils/wait_event_types.h"
/* ----------
* The max bytes for showing identifiers of MemoryContext.
* ----------
*/
#define MEMORY_CONTEXT_IDENT_DISPLAY_SIZE 1024
-struct MemoryStatsBackendState *memCxtState = NULL;
-struct MemoryStatsCtl *memCxtArea = NULL;
+
+/*
+ * MemoryContextId
+ * Used for storage of transient identifiers for
+ * pg_get_backend_memory_contexts.
+ */
+typedef struct MemoryContextId
+{
+ MemoryContext context;
+ int context_id;
+} MemoryContextId;
/*
* int_list_to_array
@@ -86,7 +89,7 @@ PutMemoryContextsStatsTupleStore(Tuplestorestate *tupstore,
*/
for (MemoryContext cur = context; cur != NULL; cur = cur->parent)
{
- MemoryStatsContextId *entry;
+ MemoryContextId *entry;
bool found;
entry = hash_search(context_id_lookup, &cur, HASH_FIND, &found);
@@ -140,51 +143,36 @@ PutMemoryContextsStatsTupleStore(Tuplestorestate *tupstore,
else
nulls[1] = true;
- type = ContextTypeToString(context->type);
-
- values[2] = CStringGetTextDatum(type);
- values[3] = Int32GetDatum(list_length(path)); /* level */
- values[4] = int_list_to_array(path);
- values[5] = Int64GetDatum(stat.totalspace);
- values[6] = Int64GetDatum(stat.nblocks);
- values[7] = Int64GetDatum(stat.freespace);
- values[8] = Int64GetDatum(stat.freechunks);
- values[9] = Int64GetDatum(stat.totalspace - stat.freespace);
-
- tuplestore_putvalues(tupstore, tupdesc, values, nulls);
- list_free(path);
-}
-
-/*
- * ContextTypeToString
- * Returns a textual representation of a context type
- *
- * This should cover the same types as MemoryContextIsValid.
- */
-const char *
-ContextTypeToString(NodeTag type)
-{
- const char *context_type;
-
- switch (type)
+ switch (context->type)
{
case T_AllocSetContext:
- context_type = "AllocSet";
+ type = "AllocSet";
break;
case T_GenerationContext:
- context_type = "Generation";
+ type = "Generation";
break;
case T_SlabContext:
- context_type = "Slab";
+ type = "Slab";
break;
case T_BumpContext:
- context_type = "Bump";
+ type = "Bump";
break;
default:
- context_type = "???";
+ type = "???";
break;
}
- return context_type;
+
+ values[2] = CStringGetTextDatum(type);
+ values[3] = Int32GetDatum(list_length(path)); /* level */
+ values[4] = int_list_to_array(path);
+ values[5] = Int64GetDatum(stat.totalspace);
+ values[6] = Int64GetDatum(stat.nblocks);
+ values[7] = Int64GetDatum(stat.freespace);
+ values[8] = Int64GetDatum(stat.freechunks);
+ values[9] = Int64GetDatum(stat.totalspace - stat.freespace);
+
+ tuplestore_putvalues(tupstore, tupdesc, values, nulls);
+ list_free(path);
}
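/*
 * For illustration only (hypothetical values): with the switch above, a
 * row produced for the pg_backend_memory_contexts view might read
 *   name = 'CacheMemoryContext', type = 'AllocSet',
 *   level = 2, path = {1,2}, total_bytes = 262144
 */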
/*
@@ -201,7 +189,7 @@ pg_get_backend_memory_contexts(PG_FUNCTION_ARGS)
HTAB *context_id_lookup;
ctl.keysize = sizeof(MemoryContext);
- ctl.entrysize = sizeof(MemoryStatsContextId);
+ ctl.entrysize = sizeof(MemoryContextId);
ctl.hcxt = CurrentMemoryContext;
context_id_lookup = hash_create("pg_get_backend_memory_contexts",
@@ -228,7 +216,7 @@ pg_get_backend_memory_contexts(PG_FUNCTION_ARGS)
foreach_ptr(MemoryContextData, cur, contexts)
{
- MemoryStatsContextId *entry;
+ MemoryContextId *entry;
bool found;
/*
@@ -236,8 +224,8 @@ pg_get_backend_memory_contexts(PG_FUNCTION_ARGS)
* PutMemoryContextsStatsTupleStore needs this to populate the "path"
* column with the parent context_ids.
*/
- entry = (MemoryStatsContextId *) hash_search(context_id_lookup, &cur,
- HASH_ENTER, &found);
+ entry = (MemoryContextId *) hash_search(context_id_lookup, &cur,
+ HASH_ENTER, &found);
entry->context_id = context_id++;
Assert(!found);
@@ -317,349 +305,3 @@ pg_log_backend_memory_contexts(PG_FUNCTION_ARGS)
PG_RETURN_BOOL(true);
}
-
-/*
- * pg_get_process_memory_contexts
- * Signal a backend or an auxiliary process to send its memory contexts,
- * wait for the results and display them.
- *
- * By default, only superusers or users with ROLE_PG_READ_ALL_STATS are allowed
- * to signal a process to return the memory contexts. This is because allowing
- * any user to issue this request at an unbounded rate would cause lots of
- * requests to be sent, which can lead to denial of service. Additional roles
- * can be permitted with GRANT.
- *
- * On receipt of this signal, a backend or an auxiliary process sets the flag
- * in the signal handler, which causes the next CHECK_FOR_INTERRUPTS()
- * or process-specific interrupt handler to copy the memory context details
- * to a dynamic shared memory space.
- *
- * We have defined a limit on the DSA memory that can be allocated per
- * process; if the process has more memory contexts than fit in the
- * allocated size, the excess contexts are summarized and represented as a
- * cumulative total at the end of the buffer.
- *
- * After sending the signal, wait on a condition variable. The publishing
- * backend, after copying the data to shared memory, sends signal on that
- * condition variable. There is one condition variable per publishing backend.
- * Once the condition variable is signalled, check if the latest memory context
- * information is available and display.
- *
- * If the publishing backend does not respond before the condition variable
- * times out (MEMSTATS_WAIT_TIMEOUT), retry as long as there is time left
- * within the timeout specified by the user, before giving up and returning
- * previously published statistics, if any. If no previous statistics
- * exist, return NULL.
- */
-#define MEMSTATS_WAIT_TIMEOUT 100
-Datum
-pg_get_process_memory_contexts(PG_FUNCTION_ARGS)
-{
- int pid = PG_GETARG_INT32(0);
- bool summary = PG_GETARG_BOOL(1);
- double timeout = PG_GETARG_FLOAT8(2);
- PGPROC *proc;
- ProcNumber procNumber = INVALID_PROC_NUMBER;
- bool proc_is_aux = false;
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- MemoryStatsEntry *memcxt_info;
- TimestampTz start_timestamp;
-
- /*
- * See if the process with the given pid is a backend or an auxiliary
- * process, and remember the type for when we requery the process later.
- */
- proc = BackendPidGetProc(pid);
- if (proc == NULL)
- {
- proc = AuxiliaryPidGetProc(pid);
- proc_is_aux = true;
- }
-
- /*
- * BackendPidGetProc() and AuxiliaryPidGetProc() return NULL if the pid
- * isn't valid; this is however not a problem, so we just leave with a
- * WARNING. See the comment in pg_log_backend_memory_contexts for a
- * discussion of this.
- */
- if (proc == NULL)
- {
- /*
- * This is just a warning so a loop-through-resultset will not abort
- * if one backend terminated on its own during the run.
- */
- ereport(WARNING,
- errmsg("PID %d is not a PostgreSQL server process", pid));
- PG_RETURN_NULL();
- }
-
- InitMaterializedSRF(fcinfo, 0);
-
- procNumber = GetNumberFromPGProc(proc);
-
- LWLockAcquire(&memCxtState[procNumber].lw_lock, LW_EXCLUSIVE);
- memCxtState[procNumber].summary = summary;
- LWLockRelease(&memCxtState[procNumber].lw_lock);
-
- start_timestamp = GetCurrentTimestamp();
-
- /*
- * Send a signal to a PostgreSQL process, informing it we want it to
- * produce information about its memory contexts.
- */
- if (SendProcSignal(pid, PROCSIG_GET_MEMORY_CONTEXT, procNumber) < 0)
- {
- ereport(WARNING,
- errmsg("could not send signal to process %d: %m", pid));
- PG_RETURN_NULL();
- }
-
- /*
- * Even if the proc has published statistics, they may not be due to the
- * current request, but to previously published stats. Check whether the
- * stats are updated by comparing timestamps: if the stats are newer than
- * our timestamp recorded before sending the procsignal, they must by
- * definition be updated. Wait for the timeout specified by the user,
- * after which display old statistics if available or return NULL.
- */
- while (1)
- {
- long msecs;
-
- /*
- * We expect to come out of sleep when the requested process has
- * finished publishing the statistics, as verified using a valid DSA
- * pointer.
- *
- * Make sure that the information belongs to the pid we requested
- * information for; otherwise loop back and wait for the server
- * process to finish publishing statistics.
- */
- LWLockAcquire(&memCxtState[procNumber].lw_lock, LW_EXCLUSIVE);
-
- /*
- * A note in procnumber.h says that a procNumber can be re-used for a
- * different backend immediately after a backend exits. If an old
- * process's data is still there and was not updated by the current
- * process in the slot identified by the procNumber, the pid of the
- * requested process and the proc_id might not match.
- */
- if (memCxtState[procNumber].proc_id == pid)
- {
- /*
- * Break if the latest stats have been read, indicated by
- * statistics timestamp being newer than the current request
- * timestamp.
- */
- msecs = TimestampDifferenceMilliseconds(start_timestamp,
- memCxtState[procNumber].stats_timestamp);
-
- if (DsaPointerIsValid(memCxtState[procNumber].memstats_dsa_pointer)
- && msecs > 0)
- break;
- }
- LWLockRelease(&memCxtState[procNumber].lw_lock);
-
- /*
- * Recheck the state of the backend before sleeping on the condition
- * variable to ensure the process is still alive. Only check the
- * relevant process type based on the earlier PID check.
- */
- if (proc_is_aux)
- proc = AuxiliaryPidGetProc(pid);
- else
- proc = BackendPidGetProc(pid);
-
- /*
- * The process ending during memory context processing is not an
- * error.
- */
- if (proc == NULL)
- {
- ereport(WARNING,
- errmsg("PID %d is no longer a PostgreSQL server process",
- pid));
- PG_RETURN_NULL();
- }
-
- msecs = TimestampDifferenceMilliseconds(start_timestamp, GetCurrentTimestamp());
-
- /*
- * If we haven't already exceeded the timeout value, sleep for the
- * remainder of the timeout on the condition variable.
- */
- if (msecs > 0 && msecs < (timeout * 1000))
- {
- /*
- * Wait for the timeout as defined by the user. If no updated
- * statistics are available within the allowed time then display
- * previously published statistics if there are any. If no
- * previous statistics are available then return NULL. The timer
- * is defined in milliseconds since that's what the condition
- * variable sleep uses.
- */
- if (ConditionVariableTimedSleep(&memCxtState[procNumber].memcxt_cv,
- ((timeout * 1000) - msecs), WAIT_EVENT_MEM_CXT_PUBLISH))
- {
- LWLockAcquire(&memCxtState[procNumber].lw_lock, LW_EXCLUSIVE);
- /* Displaying previously published statistics if available */
- if (DsaPointerIsValid(memCxtState[procNumber].memstats_dsa_pointer))
- break;
- else
- {
- LWLockRelease(&memCxtState[procNumber].lw_lock);
- PG_RETURN_NULL();
- }
- }
- }
- else
- {
- LWLockAcquire(&memCxtState[procNumber].lw_lock, LW_EXCLUSIVE);
- /* Displaying previously published statistics if available */
- if (DsaPointerIsValid(memCxtState[procNumber].memstats_dsa_pointer))
- break;
- else
- {
- LWLockRelease(&memCxtState[procNumber].lw_lock);
- PG_RETURN_NULL();
- }
- }
- }
-
- /*
- * We should only reach here with a valid DSA handle, either containing
- * updated statistics or previously published statistics (identified by
- * the timestamp).
- */
- Assert(memCxtArea->memstats_dsa_handle != DSA_HANDLE_INVALID);
- /* Attach to the dsa area if we have not already done so */
- if (MemoryStatsDsaArea == NULL)
- {
- MemoryContext oldcontext = CurrentMemoryContext;
-
- MemoryContextSwitchTo(TopMemoryContext);
- MemoryStatsDsaArea = dsa_attach(memCxtArea->memstats_dsa_handle);
- MemoryContextSwitchTo(oldcontext);
- dsa_pin_mapping(MemoryStatsDsaArea);
- }
-
- /*
- * The backend has finished publishing the stats; project them.
- */
- memcxt_info = (MemoryStatsEntry *)
- dsa_get_address(MemoryStatsDsaArea, memCxtState[procNumber].memstats_dsa_pointer);
-
-#define PG_GET_PROCESS_MEMORY_CONTEXTS_COLS 12
- for (int i = 0; i < memCxtState[procNumber].total_stats; i++)
- {
- ArrayType *path_array;
- int path_length;
- Datum values[PG_GET_PROCESS_MEMORY_CONTEXTS_COLS];
- bool nulls[PG_GET_PROCESS_MEMORY_CONTEXTS_COLS];
- char *name;
- char *ident;
- Datum *path_datum = NULL;
- int *path_int = NULL;
-
- memset(values, 0, sizeof(values));
- memset(nulls, 0, sizeof(nulls));
-
- if (DsaPointerIsValid(memcxt_info[i].name))
- {
- name = (char *) dsa_get_address(MemoryStatsDsaArea, memcxt_info[i].name);
- values[0] = CStringGetTextDatum(name);
- }
- else
- nulls[0] = true;
-
- if (DsaPointerIsValid(memcxt_info[i].ident))
- {
- ident = (char *) dsa_get_address(MemoryStatsDsaArea, memcxt_info[i].ident);
- values[1] = CStringGetTextDatum(ident);
- }
- else
- nulls[1] = true;
-
- values[2] = CStringGetTextDatum(ContextTypeToString(memcxt_info[i].type));
-
- path_length = memcxt_info[i].path_length;
- path_datum = (Datum *) palloc(path_length * sizeof(Datum));
- if (DsaPointerIsValid(memcxt_info[i].path))
- {
- path_int = (int *) dsa_get_address(MemoryStatsDsaArea, memcxt_info[i].path);
- for (int j = 0; j < path_length; j++)
- path_datum[j] = Int32GetDatum(path_int[j]);
- path_array = construct_array_builtin(path_datum, path_length, INT4OID);
- values[3] = PointerGetDatum(path_array);
- }
- else
- nulls[3] = true;
-
- values[4] = Int32GetDatum(memcxt_info[i].levels);
- values[5] = Int64GetDatum(memcxt_info[i].totalspace);
- values[6] = Int64GetDatum(memcxt_info[i].nblocks);
- values[7] = Int64GetDatum(memcxt_info[i].freespace);
- values[8] = Int64GetDatum(memcxt_info[i].freechunks);
- values[9] = Int64GetDatum(memcxt_info[i].totalspace -
- memcxt_info[i].freespace);
- values[10] = Int32GetDatum(memcxt_info[i].num_agg_stats);
- values[11] = TimestampTzGetDatum(memCxtState[procNumber].stats_timestamp);
-
- tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc,
- values, nulls);
- }
- LWLockRelease(&memCxtState[procNumber].lw_lock);
-
- ConditionVariableCancelSleep();
-
- PG_RETURN_NULL();
-}
-
-Size
-MemoryContextReportingShmemSize(void)
-{
- Size sz = 0;
- Size TotalProcs = 0;
-
- TotalProcs = add_size(TotalProcs, NUM_AUXILIARY_PROCS);
- TotalProcs = add_size(TotalProcs, MaxBackends);
- sz = add_size(sz, mul_size(TotalProcs, sizeof(MemoryStatsBackendState)));
-
- sz = add_size(sz, sizeof(MemoryStatsCtl));
-
- return sz;
-}
-
-/*
- * Initialize shared memory for displaying memory context statistics
- */
-void
-MemoryContextReportingShmemInit(void)
-{
- bool found;
-
- memCxtArea = (MemoryStatsCtl *)
- ShmemInitStruct("MemoryStatsCtl",
- sizeof(MemoryStatsCtl), &found);
-
- if (!found)
- {
- LWLockInitialize(&memCxtArea->lw_lock, LWTRANCHE_MEMORY_CONTEXT_REPORTING_STATE);
- memCxtArea->memstats_dsa_handle = DSA_HANDLE_INVALID;
- }
-
- memCxtState = (MemoryStatsBackendState *)
- ShmemInitStruct("MemoryStatsBackendState",
- ((MaxBackends + NUM_AUXILIARY_PROCS) * sizeof(MemoryStatsBackendState)),
- &found);
-
- if (found)
- return;
-
- for (int i = 0; i < (MaxBackends + NUM_AUXILIARY_PROCS); i++)
- {
- ConditionVariableInit(&memCxtState[i].memcxt_cv);
- LWLockInitialize(&memCxtState[i].lw_lock, LWTRANCHE_MEMORY_CONTEXT_REPORTING_PROC);
- memCxtState[i].memstats_dsa_pointer = InvalidDsaPointer;
- }
-}
diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c
index f03fcc1147b..9fd211b2d45 100644
--- a/src/backend/utils/adt/network.c
+++ b/src/backend/utils/adt/network.c
@@ -12,8 +12,6 @@
#include <netinet/in.h>
#include <arpa/inet.h>
-#include "access/stratnum.h"
-#include "catalog/pg_opfamily.h"
#include "catalog/pg_type.h"
#include "common/hashfn.h"
#include "common/ip.h"
diff --git a/src/backend/utils/adt/network_spgist.c b/src/backend/utils/adt/network_spgist.c
index a84747d9275..602276a35c3 100644
--- a/src/backend/utils/adt/network_spgist.c
+++ b/src/backend/utils/adt/network_spgist.c
@@ -37,7 +37,6 @@
#include "catalog/pg_type.h"
#include "utils/fmgrprotos.h"
#include "utils/inet.h"
-#include "varatt.h"
static int inet_spg_node_number(const inet *val, int commonbits);
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index 40dcbc7b671..58ad1a65ef7 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -1958,9 +1958,10 @@ generate_series_numeric_support(PG_FUNCTION_ARGS)
* in the histogram. width_bucket() returns an integer indicating the
* bucket number that 'operand' belongs to in an equiwidth histogram
* with the specified characteristics. An operand smaller than the
- * lower bound is assigned to bucket 0. An operand greater than the
- * upper bound is assigned to an additional bucket (with number
- * count+1). We don't allow "NaN" for any of the numeric arguments.
+ * lower bound is assigned to bucket 0. An operand greater than or equal
+ * to the upper bound is assigned to an additional bucket (with number
+ * count+1). We don't allow "NaN" for any of the numeric inputs, and we
+ * don't allow either of the histogram bounds to be +/- infinity.
*/
Datum
width_bucket_numeric(PG_FUNCTION_ARGS)
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index a858f27cadc..bf1afb24d7d 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -41,11 +41,11 @@
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "utils/builtins.h"
-#include "utils/formatting.h"
#include "utils/guc_hooks.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_locale.h"
+#include "utils/relcache.h"
#include "utils/syscache.h"
#ifdef WIN32
diff --git a/src/backend/utils/adt/pg_locale_builtin.c b/src/backend/utils/adt/pg_locale_builtin.c
index f51768830cd..ce4914a76a1 100644
--- a/src/backend/utils/adt/pg_locale_builtin.c
+++ b/src/backend/utils/adt/pg_locale_builtin.c
@@ -18,7 +18,6 @@
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "utils/builtins.h"
-#include "utils/memutils.h"
#include "utils/pg_locale.h"
#include "utils/syscache.h"
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index 97af7c6554f..1c12ddbae49 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -640,10 +640,10 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
values[28] = BoolGetDatum(false); /* GSS credentials not
* delegated */
}
- if (beentry->st_query_id == 0)
+ if (beentry->st_query_id == INT64CONST(0))
nulls[30] = true;
else
- values[30] = UInt64GetDatum(beentry->st_query_id);
+ values[30] = Int64GetDatum(beentry->st_query_id);
}
else
{
@@ -1510,7 +1510,7 @@ pg_stat_io_build_tuples(ReturnSetInfo *rsinfo,
bktype_stats->bytes[io_obj][io_context][io_op];
/* Convert to numeric */
- snprintf(buf, sizeof buf, UINT64_FORMAT, byte);
+ snprintf(buf, sizeof buf, INT64_FORMAT, byte);
values[byte_idx] = DirectFunctionCall3(numeric_in,
CStringGetDatum(buf),
ObjectIdGetDatum(0),
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index edee1f7880b..6e2864cbbda 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -773,8 +773,11 @@ similar_escape_internal(text *pat_text, text *esc_text)
int plen,
elen;
bool afterescape = false;
- bool incharclass = false;
int nquotes = 0;
+ int charclass_depth = 0; /* Nesting level of character classes,
+ * encompassed by square brackets */
+ int charclass_start = 0; /* State of the character class start,
+ * for carets */
p = VARDATA_ANY(pat_text);
plen = VARSIZE_ANY_EXHDR(pat_text);
@@ -904,7 +907,7 @@ similar_escape_internal(text *pat_text, text *esc_text)
/* fast path */
if (afterescape)
{
- if (pchar == '"' && !incharclass) /* escape-double-quote? */
+ if (pchar == '"' && charclass_depth < 1) /* escape-double-quote? */
{
/* emit appropriate part separator, per notes above */
if (nquotes == 0)
@@ -953,18 +956,41 @@ similar_escape_internal(text *pat_text, text *esc_text)
/* SQL escape character; do not send to output */
afterescape = true;
}
- else if (incharclass)
+ else if (charclass_depth > 0)
{
if (pchar == '\\')
*r++ = '\\';
*r++ = pchar;
- if (pchar == ']')
- incharclass = false;
+
+ /*
+ * Ignore a closing bracket at the start of a character class.
+ * Such a bracket is taken literally rather than closing the
+ * class. "charclass_start" is 1 right at the beginning of a
+ * class and 2 after an initial caret.
+ */
+ if (pchar == ']' && charclass_start > 2)
+ charclass_depth--;
+ else if (pchar == '[')
+ charclass_depth++;
+
+ /*
+ * If there is a caret right after the opening bracket, it negates
+ * the character class, but a following closing bracket should
+ * still be treated as a normal character. That holds only for
+ * the first caret, so only the values 1 and 2 mean that closing
+ * brackets should be taken literally.
+ */
+ if (pchar == '^')
+ charclass_start++;
+ else
+ charclass_start = 3; /* definitely past the start */
}
else if (pchar == '[')
{
+ /* start of a character class */
*r++ = pchar;
- incharclass = true;
+ charclass_depth++;
+ charclass_start = 1;
}
else if (pchar == '%')
{
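/*
 * Worked example (illustrative, not in the patch): for the SIMILAR TO
 * class [^]] the ']' right after '^' is literal, so the class matches
 * any single character other than ']':
 *   '['  ->  charclass_depth = 1, charclass_start = 1
 *   '^'  ->  charclass_start = 2 (still at the start of the class)
 *   ']'  ->  literal, since charclass_start <= 2; charclass_start = 3
 *   ']'  ->  charclass_start > 2, so this one closes the class
 */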
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 6239900fa28..059fc5ebf60 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -30,7 +30,6 @@
#include "access/xact.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_constraint.h"
-#include "catalog/pg_proc.h"
#include "commands/trigger.h"
#include "executor/executor.h"
#include "executor/spi.h"
@@ -46,7 +45,6 @@
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
-#include "utils/rangetypes.h"
#include "utils/rel.h"
#include "utils/rls.h"
#include "utils/ruleutils.h"
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 467b08198b8..3d6e6bdbfd2 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -5956,9 +5956,19 @@ get_select_query_def(Query *query, deparse_context *context)
{
if (query->limitOption == LIMIT_OPTION_WITH_TIES)
{
+ /*
+ * The limitCount arg is a c_expr, so it needs parens. Simple
+ * literals and function expressions would not need parens, but
+ * unfortunately it's hard to tell if the expression will be
+ * printed as a simple literal like 123 or as a typecast
+ * expression, like '-123'::int4. The grammar accepts the former
+ * without parens, but not the latter.
+ */
appendContextKeyword(context, " FETCH FIRST ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 0);
+ appendStringInfoChar(buf, '(');
get_rule_expr(query->limitCount, context, false);
+ appendStringInfoChar(buf, ')');
appendStringInfoString(buf, " ROWS WITH TIES");
}
else
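/*
 * Example (assumed deparse output): a saved query using LIMIT ... WITH
 * TIES now prints its count inside parens, e.g.
 *   FETCH FIRST ('-123'::int4) ROWS WITH TIES
 * The parenthesized form is re-parseable as a c_expr, whereas the bare
 * typecast form would not be.
 */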
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index a96b1b9c0bc..1e0f2de0336 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -103,7 +103,6 @@
#include "access/table.h"
#include "access/tableam.h"
#include "access/visibilitymap.h"
-#include "catalog/pg_am.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_statistic.h"
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index db8d0d6a7e8..a4150bff2ea 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -754,6 +754,7 @@ xmltotext_with_options(xmltype *data, XmlOptionType xmloption_arg, bool indent)
* content nodes, and then iterate over the nodes.
*/
xmlNodePtr root;
+ xmlNodePtr oldroot;
xmlNodePtr newline;
root = xmlNewNode(NULL, (const xmlChar *) "content-root");
@@ -761,8 +762,14 @@ xmltotext_with_options(xmltype *data, XmlOptionType xmloption_arg, bool indent)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
"could not allocate xml node");
- /* This attaches root to doc, so we need not free it separately. */
- xmlDocSetRootElement(doc, root);
+ /*
+ * This attaches root to doc, so we need not free it separately...
+ * but instead, we have to free the old root if there was one.
+ */
+ oldroot = xmlDocSetRootElement(doc, root);
+ if (oldroot != NULL)
+ xmlFreeNode(oldroot);
+
xmlAddChildList(root, content_nodes);
/*
@@ -1850,6 +1857,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
else
{
xmlNodePtr root;
+ xmlNodePtr oldroot PG_USED_FOR_ASSERTS_ONLY;
/* set up document with empty root node to be the context node */
doc = xmlNewDoc(version);
@@ -1868,8 +1876,13 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
if (root == NULL || xmlerrcxt->err_occurred)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
"could not allocate xml node");
- /* This attaches root to doc, so we need not free it separately. */
- xmlDocSetRootElement(doc, root);
+
+ /*
+ * This attaches root to doc, so we need not free it separately;
+ * and there can't yet be any old root to free.
+ */
+ oldroot = xmlDocSetRootElement(doc, root);
+ Assert(oldroot == NULL);
/* allow empty content */
if (*(utf8string + count))
diff --git a/src/backend/utils/cache/funccache.c b/src/backend/utils/cache/funccache.c
index 150c502a612..afc048a051e 100644
--- a/src/backend/utils/cache/funccache.c
+++ b/src/backend/utils/cache/funccache.c
@@ -491,6 +491,7 @@ cached_function_compile(FunctionCallInfo fcinfo,
CachedFunctionHashKey hashkey;
bool function_valid = false;
bool hashkey_valid = false;
+ bool new_function = false;
/*
* Lookup the pg_proc tuple by Oid; we'll need it in any case
@@ -570,13 +571,15 @@ recheck:
/*
* Create the new function struct, if not done already. The function
- * structs are never thrown away, so keep them in TopMemoryContext.
+ * cache entry will be kept for the life of the backend, so put it in
+ * TopMemoryContext.
*/
Assert(cacheEntrySize >= sizeof(CachedFunction));
if (function == NULL)
{
function = (CachedFunction *)
MemoryContextAllocZero(TopMemoryContext, cacheEntrySize);
+ new_function = true;
}
else
{
@@ -585,17 +588,36 @@ recheck:
}
/*
- * Fill in the CachedFunction part. fn_hashkey and use_count remain
- * zeroes for now.
+ * However, if function compilation fails, we'd like not to leak the
+ * function struct, so use a PG_TRY block to prevent that. (It's up
+ * to the compile callback function to avoid its own internal leakage
+ * in such cases.) Unfortunately, freeing the struct is only safe if
+ * we just allocated it: otherwise there are probably fn_extra
+ * pointers to it.
*/
- function->fn_xmin = HeapTupleHeaderGetRawXmin(procTup->t_data);
- function->fn_tid = procTup->t_self;
- function->dcallback = dcallback;
+ PG_TRY();
+ {
+ /*
+ * Do the hard, language-specific part.
+ */
+ ccallback(fcinfo, procTup, &hashkey, function, forValidator);
+ }
+ PG_CATCH();
+ {
+ if (new_function)
+ pfree(function);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
/*
- * Do the hard, language-specific part.
+ * Fill in the CachedFunction part. (We do this last to prevent the
+ * function from looking valid before it's fully built.) fn_hashkey
+ * will be set by cfunc_hashtable_insert; use_count remains zero.
*/
- ccallback(fcinfo, procTup, &hashkey, function, forValidator);
+ function->fn_xmin = HeapTupleHeaderGetRawXmin(procTup->t_data);
+ function->fn_tid = procTup->t_self;
+ function->dcallback = dcallback;
/*
* Add the completed struct to the hash table.
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 9bcbc4c3e97..89a1c79e984 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -92,8 +92,7 @@ static void ReleaseGenericPlan(CachedPlanSource *plansource);
static bool StmtPlanRequiresRevalidation(CachedPlanSource *plansource);
static bool BuildingPlanRequiresSnapshot(CachedPlanSource *plansource);
static List *RevalidateCachedQuery(CachedPlanSource *plansource,
- QueryEnvironment *queryEnv,
- bool release_generic);
+ QueryEnvironment *queryEnv);
static bool CheckCachedPlan(CachedPlanSource *plansource);
static CachedPlan *BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
ParamListInfo boundParams, QueryEnvironment *queryEnv);
@@ -663,17 +662,10 @@ BuildingPlanRequiresSnapshot(CachedPlanSource *plansource)
* The result value is the transient analyzed-and-rewritten query tree if we
* had to do re-analysis, and NIL otherwise. (This is returned just to save
* a tree copying step in a subsequent BuildCachedPlan call.)
- *
- * This also releases and drops the generic plan (plansource->gplan), if any,
- * as most callers will typically build a new CachedPlan for the plansource
- * right after this. However, when called from UpdateCachedPlan(), the
- * function does not release the generic plan, as UpdateCachedPlan() updates
- * an existing CachedPlan in place.
*/
static List *
RevalidateCachedQuery(CachedPlanSource *plansource,
- QueryEnvironment *queryEnv,
- bool release_generic)
+ QueryEnvironment *queryEnv)
{
bool snapshot_set;
List *tlist; /* transient query-tree list */
@@ -772,9 +764,8 @@ RevalidateCachedQuery(CachedPlanSource *plansource,
MemoryContextDelete(qcxt);
}
- /* Drop the generic plan reference, if any, and if requested */
- if (release_generic)
- ReleaseGenericPlan(plansource);
+ /* Drop the generic plan reference if any */
+ ReleaseGenericPlan(plansource);
/*
* Now re-do parse analysis and rewrite. This not incidentally acquires
@@ -937,10 +928,8 @@ RevalidateCachedQuery(CachedPlanSource *plansource,
* Caller must have already called RevalidateCachedQuery to verify that the
* querytree is up to date.
*
- * On a "true" return, we have acquired locks on the "unprunableRelids" set
- * for all plans in plansource->stmt_list. However, the plans are not fully
- * race-condition-free until the executor acquires locks on the prunable
- * relations that survive initial runtime pruning during InitPlan().
+ * On a "true" return, we have acquired the locks needed to run the plan.
+ * (We must do this for the "true" result to be race-condition-free.)
*/
static bool
CheckCachedPlan(CachedPlanSource *plansource)
@@ -1025,8 +1014,6 @@ CheckCachedPlan(CachedPlanSource *plansource)
* Planning work is done in the caller's memory context. The finished plan
* is in a child memory context, which typically should get reparented
* (unless this is a one-shot plan, in which case we don't copy the plan).
- *
- * Note: When changing this, you should also look at UpdateCachedPlan().
*/
static CachedPlan *
BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
@@ -1037,7 +1024,6 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
bool snapshot_set;
bool is_transient;
MemoryContext plan_context;
- MemoryContext stmt_context = NULL;
MemoryContext oldcxt = CurrentMemoryContext;
ListCell *lc;
@@ -1055,7 +1041,7 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
* let's treat it as real and redo the RevalidateCachedQuery call.
*/
if (!plansource->is_valid)
- qlist = RevalidateCachedQuery(plansource, queryEnv, true);
+ qlist = RevalidateCachedQuery(plansource, queryEnv);
/*
* If we don't already have a copy of the querytree list that can be
@@ -1093,19 +1079,10 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
PopActiveSnapshot();
/*
- * Normally, we create a dedicated memory context for the CachedPlan and
- * its subsidiary data. Although it's usually not very large, the context
- * is designed to allow growth if necessary.
- *
- * The PlannedStmts are stored in a separate child context (stmt_context)
- * of the CachedPlan's memory context. This separation allows
- * UpdateCachedPlan() to free and replace the PlannedStmts without
- * affecting the CachedPlan structure or its stmt_list List.
- *
- * For one-shot plans, we instead use the caller's memory context, as the
- * CachedPlan will not persist. stmt_context will be set to NULL in this
- * case, because UpdateCachedPlan() should never get called on a one-shot
- * plan.
+ * Normally we make a dedicated memory context for the CachedPlan and its
+ * subsidiary data. (It's probably not going to be large, but just in
+ * case, allow it to grow large. It's transient for the moment.) But for
+ * a one-shot plan, we just leave it in the caller's memory context.
*/
if (!plansource->is_oneshot)
{
@@ -1114,17 +1091,12 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
ALLOCSET_START_SMALL_SIZES);
MemoryContextCopyAndSetIdentifier(plan_context, plansource->query_string);
- stmt_context = AllocSetContextCreate(CurrentMemoryContext,
- "CachedPlan PlannedStmts",
- ALLOCSET_START_SMALL_SIZES);
- MemoryContextCopyAndSetIdentifier(stmt_context, plansource->query_string);
- MemoryContextSetParent(stmt_context, plan_context);
+ /*
+ * Copy plan into the new context.
+ */
+ MemoryContextSwitchTo(plan_context);
- MemoryContextSwitchTo(stmt_context);
plist = copyObject(plist);
-
- MemoryContextSwitchTo(plan_context);
- plist = list_copy(plist);
}
else
plan_context = CurrentMemoryContext;
@@ -1165,10 +1137,8 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
plan->saved_xmin = InvalidTransactionId;
plan->refcount = 0;
plan->context = plan_context;
- plan->stmt_context = stmt_context;
plan->is_oneshot = plansource->is_oneshot;
plan->is_saved = false;
- plan->is_reused = false;
plan->is_valid = true;
/* assign generation number to new plan */
@@ -1180,113 +1150,6 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
}
/*
- * UpdateCachedPlan
- * Create fresh plans for all queries in the CachedPlanSource, replacing
- * those in the generic plan's stmt_list, and return the plan for the
- * query_index'th query.
- *
- * This function is primarily used by ExecutorStartCachedPlan() to handle
- * cases where the original generic CachedPlan becomes invalid. Such
- * invalidation may occur when prunable relations in the old plan for the
- * query_index'th query are locked in preparation for execution.
- *
- * Note that invalidations received during the execution of the query_index'th
- * query can affect both the queries that have already finished execution
- * (e.g., due to concurrent modifications on prunable relations that were not
- * locked during their execution) and also the queries that have not yet been
- * executed. As a result, this function updates all plans to ensure
- * CachedPlan.is_valid is safely set to true.
- *
- * The old PlannedStmts in plansource->gplan->stmt_list are freed here, so
- * the caller and any of its callers must not rely on them remaining accessible
- * after this function is called.
- */
-PlannedStmt *
-UpdateCachedPlan(CachedPlanSource *plansource, int query_index,
- QueryEnvironment *queryEnv)
-{
- List *query_list = plansource->query_list,
- *plan_list;
- ListCell *l1,
- *l2;
- CachedPlan *plan = plansource->gplan;
- MemoryContext oldcxt;
-
- Assert(ActiveSnapshotSet());
-
- /* Sanity checks (XXX can be Asserts?) */
- if (plan == NULL)
- elog(ERROR, "UpdateCachedPlan() called in the wrong context: plansource->gplan is NULL");
- else if (plan->is_valid)
- elog(ERROR, "UpdateCachedPlan() called in the wrong context: plansource->gplan->is_valid is true");
- else if (plan->is_oneshot)
- elog(ERROR, "UpdateCachedPlan() called in the wrong context: plansource->gplan->is_oneshot is true");
-
- /*
- * The plansource might have become invalid since GetCachedPlan() returned
- * the CachedPlan. See the comment in BuildCachedPlan() for details on why
- * this might happen. Although invalidation is likely a false positive as
- * stated there, we make the plan valid to ensure the query list used for
- * planning is up to date.
- *
- * The risk of catching an invalidation is higher here than when
- * BuildCachedPlan() is called from GetCachedPlan(), because this function
- * is normally called long after GetCachedPlan() returns the CachedPlan,
- * so much more processing could have occurred including things that mark
- * the CachedPlanSource invalid.
- *
- * Note: Do not release plansource->gplan, because the upstream callers
- * (such as the callers of ExecutorStartCachedPlan()) would still be
- * referencing it.
- */
- if (!plansource->is_valid)
- query_list = RevalidateCachedQuery(plansource, queryEnv, false);
- Assert(query_list != NIL);
-
- /*
- * Build a new generic plan for all the queries after making a copy to be
- * scribbled on by the planner.
- */
- query_list = copyObject(query_list);
-
- /*
- * Planning work is done in the caller's memory context. The resulting
- * PlannedStmt is then copied into plan->stmt_context after throwing away
- * the old ones.
- */
- plan_list = pg_plan_queries(query_list, plansource->query_string,
- plansource->cursor_options, NULL);
- Assert(list_length(plan_list) == list_length(plan->stmt_list));
-
- MemoryContextReset(plan->stmt_context);
- oldcxt = MemoryContextSwitchTo(plan->stmt_context);
- forboth(l1, plan_list, l2, plan->stmt_list)
- {
- PlannedStmt *plannedstmt = lfirst(l1);
-
- lfirst(l2) = copyObject(plannedstmt);
- }
- MemoryContextSwitchTo(oldcxt);
-
- /*
- * XXX Should this also (re)set the properties of the CachedPlan that are
- * set in BuildCachedPlan() after creating the fresh plans such as
- * planRoleId, dependsOnRole, and saved_xmin?
- */
-
- /*
- * We've updated all the plans that might have been invalidated, so mark
- * the CachedPlan as valid.
- */
- plan->is_valid = true;
-
- /* Also update generic_cost because we just created a new generic plan. */
- plansource->generic_cost = cached_plan_cost(plan, false);
-
- return list_nth_node(PlannedStmt, plan->stmt_list, query_index);
-}
-
-/*
* choose_custom_plan: choose whether to use custom or generic plan
*
* This defines the policy followed by GetCachedPlan.
@@ -1402,13 +1265,8 @@ cached_plan_cost(CachedPlan *plan, bool include_planner)
* plan or a custom plan for the given parameters: the caller does not know
* which it will get.
*
- * On return, the plan is valid, but if it is a reused generic plan, not all
- * locks are acquired. In such cases, CheckCachedPlan() does not take locks
- * on relations subject to initial runtime pruning; instead, these locks are
- * deferred until execution startup, when ExecDoInitialPruning() performs
- * initial pruning. The plan's "is_reused" flag is set to indicate that
- * CachedPlanRequiresLocking() should return true when called by
- * ExecDoInitialPruning().
+ * On return, the plan is valid and we have sufficient locks to begin
+ * execution.
*
* On return, the refcount of the plan has been incremented; a later
* ReleaseCachedPlan() call is expected. If "owner" is not NULL then
@@ -1434,7 +1292,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
elog(ERROR, "cannot apply ResourceOwner to non-saved cached plan");
/* Make sure the querytree list is valid and we have parse-time locks */
- qlist = RevalidateCachedQuery(plansource, queryEnv, true);
+ qlist = RevalidateCachedQuery(plansource, queryEnv);
/* Decide whether to use a custom plan */
customplan = choose_custom_plan(plansource, boundParams);
@@ -1446,8 +1304,6 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
/* We want a generic plan, and we already have a valid one */
plan = plansource->gplan;
Assert(plan->magic == CACHEDPLAN_MAGIC);
- /* Reusing the existing plan, so not all locks may be acquired. */
- plan->is_reused = true;
}
else
{
@@ -1913,7 +1769,7 @@ CachedPlanGetTargetList(CachedPlanSource *plansource,
return NIL;
/* Make sure the querytree list is valid and we have parse-time locks */
- RevalidateCachedQuery(plansource, queryEnv, true);
+ RevalidateCachedQuery(plansource, queryEnv);
/* Get the primary statement and find out what it returns */
pstmt = QueryListGetPrimaryStmt(plansource->query_list);
@@ -2035,7 +1891,7 @@ AcquireExecutorLocks(List *stmt_list, bool acquire)
foreach(lc1, stmt_list)
{
PlannedStmt *plannedstmt = lfirst_node(PlannedStmt, lc1);
- int rtindex;
+ ListCell *lc2;
if (plannedstmt->commandType == CMD_UTILITY)
{
@@ -2053,16 +1909,13 @@ AcquireExecutorLocks(List *stmt_list, bool acquire)
continue;
}
- rtindex = -1;
- while ((rtindex = bms_next_member(plannedstmt->unprunableRelids,
- rtindex)) >= 0)
+ foreach(lc2, plannedstmt->rtable)
{
- RangeTblEntry *rte = list_nth_node(RangeTblEntry,
- plannedstmt->rtable,
- rtindex - 1);
+ RangeTblEntry *rte = (RangeTblEntry *) lfirst(lc2);
- Assert(rte->rtekind == RTE_RELATION ||
- (rte->rtekind == RTE_SUBQUERY && OidIsValid(rte->relid)));
+ if (!(rte->rtekind == RTE_RELATION ||
+ (rte->rtekind == RTE_SUBQUERY && OidIsValid(rte->relid))))
+ continue;
/*
* Acquire the appropriate type of lock on each relation OID. Note
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index 95a1d0a2749..f944453a1d8 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -396,7 +396,7 @@ SearchSysCacheCopy(int cacheId,
/*
* SearchSysCacheLockedCopy1
*
- * Meld SearchSysCacheLockedCopy1 with SearchSysCacheCopy(). After the
+ * Meld SearchSysCacheLocked1 with SearchSysCacheCopy(). After the
* caller's heap_update(), it should UnlockTuple(InplaceUpdateTupleLock) and
* heap_freetuple().
*/
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index 603632581d0..4bb84ff7087 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -99,6 +99,14 @@ load_external_function(const char *filename, const char *funcname,
void *lib_handle;
void *retval;
+ /*
+ * If the value starts with "$libdir/", strip that. This is because many
+ * extensions have hardcoded '$libdir/foo' as their library name, which
+ * prevents using the path.
+ */
+ if (strncmp(filename, "$libdir/", 8) == 0)
+ filename += 8;
+
/* Expand the possibly-abbreviated filename to an exact path name */
fullname = expand_dynamic_library_name(filename);
@@ -456,14 +464,6 @@ expand_dynamic_library_name(const char *name)
Assert(name);
- /*
- * If the value starts with "$libdir/", strip that. This is because many
- * extensions have hardcoded '$libdir/foo' as their library name, which
- * prevents using the path.
- */
- if (strncmp(name, "$libdir/", 8) == 0)
- name += 8;
-
have_slash = (first_dir_separator(name) != NULL);
if (!have_slash)
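/*
 * Usage sketch (assumption): with the strip done in
 * load_external_function() itself, both of these spellings resolve to
 * the same library, and a dynamic_library_path entry can take effect
 * even for extensions that hardcode the prefix:
 *   load_external_function("$libdir/auto_explain", "_PG_init", false, NULL);
 *   load_external_function("auto_explain", "_PG_init", false, NULL);
 */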
diff --git a/src/backend/utils/init/globals.c b/src/backend/utils/init/globals.c
index 92b0446b80c..d31cb45a058 100644
--- a/src/backend/utils/init/globals.c
+++ b/src/backend/utils/init/globals.c
@@ -39,7 +39,6 @@ volatile sig_atomic_t TransactionTimeoutPending = false;
volatile sig_atomic_t IdleSessionTimeoutPending = false;
volatile sig_atomic_t ProcSignalBarrierPending = false;
volatile sig_atomic_t LogMemoryContextPending = false;
-volatile sig_atomic_t PublishMemoryContextPending = false;
volatile sig_atomic_t IdleStatsUpdateTimeoutPending = false;
volatile uint32 InterruptHoldoffCount = 0;
volatile uint32 QueryCancelHoldoffCount = 0;
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 89d72cdd5ff..c86ceefda94 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -663,13 +663,6 @@ BaseInit(void)
* drop ephemeral slots, which in turn triggers stats reporting.
*/
ReplicationSlotInitialize();
-
- /*
- * The before shmem exit callback frees the DSA memory occupied by the
- * latest memory context statistics that could be published by this proc
- * if requested.
- */
- before_shmem_exit(AtProcExit_memstats_cleanup, 0);
}
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index 308016d7763..886ecbad871 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -39,6 +39,7 @@
#include "mb/pg_wchar.h"
#include "utils/fmgrprotos.h"
#include "utils/memutils.h"
+#include "utils/relcache.h"
#include "varatt.h"
/*
diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c
index 2f8cbd86759..511dc32d519 100644
--- a/src/backend/utils/misc/guc_tables.c
+++ b/src/backend/utils/misc/guc_tables.c
@@ -1028,7 +1028,7 @@ struct config_bool ConfigureNamesBool[] =
},
{
{"enable_distinct_reordering", PGC_USERSET, QUERY_TUNING_METHOD,
- gettext_noop("Enables reordering of DISTINCT pathkeys."),
+ gettext_noop("Enables reordering of DISTINCT keys."),
NULL,
GUC_EXPLAIN
},
@@ -1602,11 +1602,11 @@ struct config_bool ConfigureNamesBool[] =
NULL, NULL, NULL
},
{
- {"log_lock_failure", PGC_SUSET, LOGGING_WHAT,
+ {"log_lock_failures", PGC_SUSET, LOGGING_WHAT,
gettext_noop("Logs lock failures."),
NULL
},
- &log_lock_failure,
+ &log_lock_failures,
false,
NULL, NULL, NULL
},
@@ -4837,7 +4837,7 @@ struct config_string ConfigureNamesString[] =
{
{"ssl_groups", PGC_SIGHUP, CONN_AUTH_SSL,
gettext_noop("Sets the group(s) to use for Diffie-Hellman key exchange."),
- gettext_noop("Multiple groups can be specified using colon-separated list."),
+ gettext_noop("Multiple groups can be specified using a colon-separated list."),
GUC_SUPERUSER_ONLY
},
&SSLECDHCurve,
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index 34826d01380..341f88adc87 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -97,6 +97,7 @@
#password_encryption = scram-sha-256 # scram-sha-256 or md5
#scram_iterations = 4096
#md5_password_warnings = on
+#oauth_validator_libraries = '' # comma-separated list of trusted validator modules
# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
@@ -121,9 +122,6 @@
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off
-# OAuth
-#oauth_validator_libraries = '' # comma-separated list of trusted validator modules
-
#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
@@ -180,13 +178,11 @@
#temp_file_limit = -1 # limits per-process temp file space
# in kilobytes, or -1 for no limit
+#file_copy_method = copy # copy, clone (if supported by OS)
+
#max_notify_queue_pages = 1048576 # limits the number of SLRU pages allocated
# for NOTIFY / LISTEN queue
-#file_copy_method = copy # the default is the first option
- # copy
- # clone (if system support is available)
-
# - Kernel Resources -
#max_files_per_process = 1000 # min 64
@@ -628,7 +624,7 @@
# %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off # log lock waits >= deadlock_timeout
-#log_lock_failure = off # log lock failures
+#log_lock_failures = off # log lock failures
#log_recovery_conflict_waits = off # log standby recovery conflict waits
# >= deadlock_timeout
#log_parameter_max_length = -1 # when logging statements, limit logged
diff --git a/src/backend/utils/mmgr/alignedalloc.c b/src/backend/utils/mmgr/alignedalloc.c
index 85aee389d6b..7eea695de62 100644
--- a/src/backend/utils/mmgr/alignedalloc.c
+++ b/src/backend/utils/mmgr/alignedalloc.c
@@ -45,6 +45,7 @@ AlignedAllocFree(void *pointer)
GetMemoryChunkContext(unaligned)->name, chunk);
#endif
+ /* Recursively pfree the unaligned chunk */
pfree(unaligned);
}
@@ -96,18 +97,32 @@ AlignedAllocRealloc(void *pointer, Size size, int flags)
Assert(old_size >= redirchunk->requested_size);
#endif
+ /*
+ * To keep things simple, we always allocate a new aligned chunk and copy
+ * data into it. Because of the above inaccuracy, this may end up copying
+ * more data than the original allocation request size, but that should
+ * be OK.
+ */
ctx = GetMemoryChunkContext(unaligned);
newptr = MemoryContextAllocAligned(ctx, size, alignto, flags);
- /*
- * We may memcpy beyond the end of the original allocation request size,
- * so we must mark the entire allocation as defined.
- */
- if (likely(newptr != NULL))
+ /* Cope cleanly with OOM */
+ if (unlikely(newptr == NULL))
{
- VALGRIND_MAKE_MEM_DEFINED(pointer, old_size);
- memcpy(newptr, pointer, Min(size, old_size));
+ VALGRIND_MAKE_MEM_NOACCESS(redirchunk, sizeof(MemoryChunk));
+ return MemoryContextAllocationFailure(ctx, size, flags);
}
+
+ /*
+ * We may memcpy more than the original allocation request size, which
+ * would result in trying to copy trailing bytes that the original
+ * MemoryContextAllocAligned call marked NOACCESS. So we must mark the
+ * entire old_size as defined. That's slightly annoying, but probably not
+ * worth improving.
+ */
+ VALGRIND_MAKE_MEM_DEFINED(pointer, old_size);
+ memcpy(newptr, pointer, Min(size, old_size));
+
pfree(unaligned);
return newptr;
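/*
 * Usage sketch (hypothetical sizes): repalloc() on an aligned chunk
 * dispatches to AlignedAllocRealloc(); the data lands in a freshly
 * aligned allocation and OOM handling follows 'flags'.
 */
{
	char	   *p = MemoryContextAllocAligned(CurrentMemoryContext,
											  64, 4096, 0);

	memset(p, 'x', 64);
	p = repalloc(p, 128);		/* still 4096-byte aligned, bytes kept */
	pfree(p);
}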
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index 7d28ca706eb..15fa4d0a55e 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -23,11 +23,6 @@
#include "mb/pg_wchar.h"
#include "miscadmin.h"
-#include "nodes/pg_list.h"
-#include "storage/lwlock.h"
-#include "storage/ipc.h"
-#include "utils/dsa.h"
-#include "utils/hsearch.h"
#include "utils/memdebug.h"
#include "utils/memutils.h"
#include "utils/memutils_internal.h"
@@ -140,17 +135,6 @@ static const MemoryContextMethods mcxt_methods[] = {
};
#undef BOGUS_MCTX
-/*
- * This is passed to MemoryContextStatsInternal to determine whether
- * to print context statistics or not and where to print them logs or
- * stderr.
- */
-typedef enum PrintDestination
-{
- PRINT_STATS_TO_STDERR = 0,
- PRINT_STATS_TO_LOGS,
- PRINT_STATS_NONE
-} PrintDestination;
/*
* CurrentMemoryContext
@@ -172,31 +156,16 @@ MemoryContext CurTransactionContext = NULL;
/* This is a transient link to the active portal's memory context: */
MemoryContext PortalContext = NULL;
-dsa_area *MemoryStatsDsaArea = NULL;
static void MemoryContextDeleteOnly(MemoryContext context);
static void MemoryContextCallResetCallbacks(MemoryContext context);
static void MemoryContextStatsInternal(MemoryContext context, int level,
int max_level, int max_children,
MemoryContextCounters *totals,
- PrintDestination print_location,
- int *num_contexts);
+ bool print_to_stderr);
static void MemoryContextStatsPrint(MemoryContext context, void *passthru,
const char *stats_string,
bool print_to_stderr);
-static void PublishMemoryContext(MemoryStatsEntry *memcxt_info,
- int curr_id, MemoryContext context,
- List *path,
- MemoryContextCounters stat,
- int num_contexts, dsa_area *area,
- int max_levels);
-static void compute_contexts_count_and_ids(List *contexts, HTAB *context_id_lookup,
- int *stats_count,
- bool summary);
-static List *compute_context_path(MemoryContext c, HTAB *context_id_lookup);
-static void free_memorycontextstate_dsa(dsa_area *area, int total_stats,
- dsa_pointer prev_dsa_pointer);
-static void end_memorycontext_reporting(void);
/*
* You should not do memory allocations within a critical section, because
@@ -862,19 +831,11 @@ MemoryContextStatsDetail(MemoryContext context,
bool print_to_stderr)
{
MemoryContextCounters grand_totals;
- int num_contexts;
- PrintDestination print_location;
memset(&grand_totals, 0, sizeof(grand_totals));
- if (print_to_stderr)
- print_location = PRINT_STATS_TO_STDERR;
- else
- print_location = PRINT_STATS_TO_LOGS;
-
- /* num_contexts report number of contexts aggregated in the output */
MemoryContextStatsInternal(context, 1, max_level, max_children,
- &grand_totals, print_location, &num_contexts);
+ &grand_totals, print_to_stderr);
if (print_to_stderr)
fprintf(stderr,
@@ -909,14 +870,13 @@ MemoryContextStatsDetail(MemoryContext context,
* One recursion level for MemoryContextStats
*
* Print stats for this context if possible, but in any case accumulate counts
- * into *totals (if not NULL). The callers should make sure that print_location
- * is set to PRINT_STATS_TO_STDERR or PRINT_STATS_TO_LOGS or PRINT_STATS_NONE.
+ * into *totals (if not NULL).
*/
static void
MemoryContextStatsInternal(MemoryContext context, int level,
int max_level, int max_children,
MemoryContextCounters *totals,
- PrintDestination print_location, int *num_contexts)
+ bool print_to_stderr)
{
MemoryContext child;
int ichild;
@@ -924,39 +884,10 @@ MemoryContextStatsInternal(MemoryContext context, int level,
Assert(MemoryContextIsValid(context));
/* Examine the context itself */
- switch (print_location)
- {
- case PRINT_STATS_TO_STDERR:
- context->methods->stats(context,
- MemoryContextStatsPrint,
- &level,
- totals, true);
- break;
-
- case PRINT_STATS_TO_LOGS:
- context->methods->stats(context,
- MemoryContextStatsPrint,
- &level,
- totals, false);
- break;
-
- case PRINT_STATS_NONE:
-
- /*
- * Do not print the statistics if print_location is
- * PRINT_STATS_NONE, only compute totals. This is used in
- * reporting of memory context statistics via a sql function. Last
- * parameter is not relevant.
- */
- context->methods->stats(context,
- NULL,
- NULL,
- totals, false);
- break;
- }
-
- /* Increment the context count for each of the recursive call */
- *num_contexts = *num_contexts + 1;
+ context->methods->stats(context,
+ MemoryContextStatsPrint,
+ &level,
+ totals, print_to_stderr);
/*
* Examine children.
@@ -976,7 +907,7 @@ MemoryContextStatsInternal(MemoryContext context, int level,
MemoryContextStatsInternal(child, level + 1,
max_level, max_children,
totals,
- print_location, num_contexts);
+ print_to_stderr);
}
}
@@ -995,13 +926,7 @@ MemoryContextStatsInternal(MemoryContext context, int level,
child = MemoryContextTraverseNext(child, context);
}
- /*
- * Add the count of children contexts which are traversed in the
- * non-recursive manner.
- */
- *num_contexts = *num_contexts + ichild;
-
- if (print_location == PRINT_STATS_TO_STDERR)
+ if (print_to_stderr)
{
for (int i = 0; i < level; i++)
fprintf(stderr, " ");
@@ -1014,7 +939,7 @@ MemoryContextStatsInternal(MemoryContext context, int level,
local_totals.freechunks,
local_totals.totalspace - local_totals.freespace);
}
- else if (print_location == PRINT_STATS_TO_LOGS)
+ else
ereport(LOG_SERVER_ONLY,
(errhidestmt(true),
errhidecontext(true),
@@ -1356,22 +1281,6 @@ HandleLogMemoryContextInterrupt(void)
}
/*
- * HandleGetMemoryContextInterrupt
- * Handle receipt of an interrupt indicating a request to publish memory
- * contexts statistics.
- *
- * All the actual work is deferred to ProcessGetMemoryContextInterrupt() as
- * this cannot be performed in a signal handler.
- */
-void
-HandleGetMemoryContextInterrupt(void)
-{
- InterruptPending = true;
- PublishMemoryContextPending = true;
- /* latch will be set by procsignal_sigusr1_handler */
-}
-
-/*
* ProcessLogMemoryContextInterrupt
* Perform logging of memory contexts of this backend process.
*
@@ -1408,539 +1317,6 @@ ProcessLogMemoryContextInterrupt(void)
MemoryContextStatsDetail(TopMemoryContext, 100, 100, false);
}
-/*
- * ProcessGetMemoryContextInterrupt
- * Generate information about memory contexts used by the process.
- *
- * Performs a breadth-first search on the memory context tree, so parents'
- * statistics are reported before their children's in the monitoring
- * function output.
- *
- * Statistics for all the processes are shared via the same dynamic shared
- * area. Statistics written by each process are tracked independently in
- * per-process DSA pointers. These pointers are stored in static shared memory.
- *
- * We calculate the maximum number of contexts whose statistics can be
- * displayed using a pre-determined limit on the memory available per
- * process for this utility and the maximum size of each context's
- * statistics. Any remaining context statistics are captured as a
- * cumulative total at the end of the individual contexts' statistics.
- *
- * If summary is true, we capture statistics for the level-1 and level-2
- * contexts. For that we traverse the memory context tree recursively in
- * depth-first-search manner, covering all the children of a parent context
- * so that we can display a cumulative total of memory consumption for a
- * level-2 parent and all its children.
- */
-void
-ProcessGetMemoryContextInterrupt(void)
-{
- List *contexts;
- HASHCTL ctl;
- HTAB *context_id_lookup;
- int context_id = 0;
- MemoryStatsEntry *meminfo;
- bool summary = false;
- int max_stats;
- int idx = MyProcNumber;
- int stats_count = 0;
- int stats_num = 0;
- MemoryContextCounters stat;
- int num_individual_stats = 0;
-
- PublishMemoryContextPending = false;
-
- /*
- * The hash table is used for constructing "path" column of the view,
- * similar to its local backend counterpart.
- */
- ctl.keysize = sizeof(MemoryContext);
- ctl.entrysize = sizeof(MemoryStatsContextId);
- ctl.hcxt = CurrentMemoryContext;
-
- context_id_lookup = hash_create("pg_get_remote_backend_memory_contexts",
- 256,
- &ctl,
- HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
-
- /* List of contexts to process in the next round - start at the top. */
- contexts = list_make1(TopMemoryContext);
-
- /* Compute the number of stats that can fit in the defined limit */
- max_stats =
- MEMORY_CONTEXT_REPORT_MAX_PER_BACKEND / MAX_MEMORY_CONTEXT_STATS_SIZE;
- LWLockAcquire(&memCxtState[idx].lw_lock, LW_EXCLUSIVE);
- summary = memCxtState[idx].summary;
- LWLockRelease(&memCxtState[idx].lw_lock);
-
- /*
- * Traverse the memory context tree to find the total number of contexts.
- * If summary is requested, report the total number of contexts at levels
- * 1 and 2 from the top. Also, populate the hash table of context ids.
- */
- compute_contexts_count_and_ids(contexts, context_id_lookup, &stats_count,
- summary);
-
- /*
- * Allocate memory in this process's DSA for storing statistics of up to
- * max_stats memory contexts; for contexts that don't fit within that
- * limit, a cumulative total is written as the last record in the DSA
- * segment.
- */
- stats_num = Min(stats_count, max_stats);
-
- LWLockAcquire(&memCxtArea->lw_lock, LW_EXCLUSIVE);
-
- /*
- * Create a DSA and send its handle to the client process after storing
- * the context statistics. If the number of contexts exceeds the
- * predefined limit (1MB), a cumulative total is stored for the excess
- * contexts.
- */
- if (memCxtArea->memstats_dsa_handle == DSA_HANDLE_INVALID)
- {
- MemoryContext oldcontext = CurrentMemoryContext;
- dsa_handle handle;
-
- MemoryContextSwitchTo(TopMemoryContext);
-
- MemoryStatsDsaArea = dsa_create(memCxtArea->lw_lock.tranche);
-
- handle = dsa_get_handle(MemoryStatsDsaArea);
- MemoryContextSwitchTo(oldcontext);
-
- dsa_pin_mapping(MemoryStatsDsaArea);
-
- /*
- * Pin the DSA area to make sure it remains attachable even if the
- * backend that created it exits, so that the statistics are published
- * even if the process exits while a client is waiting. Also, other
- * processes that publish statistics will use the same area.
- */
- dsa_pin(MemoryStatsDsaArea);
-
- /* Set the handle in shared memory */
- memCxtArea->memstats_dsa_handle = handle;
- }
-
- /*
- * If the DSA already exists, having been created by another process
- * publishing statistics, attach to it.
- */
- else if (MemoryStatsDsaArea == NULL)
- {
- MemoryContext oldcontext = CurrentMemoryContext;
-
- MemoryContextSwitchTo(TopMemoryContext);
- MemoryStatsDsaArea = dsa_attach(memCxtArea->memstats_dsa_handle);
- MemoryContextSwitchTo(oldcontext);
- dsa_pin_mapping(MemoryStatsDsaArea);
- }
- LWLockRelease(&memCxtArea->lw_lock);
-
- /*
- * Hold the per-process lock to protect writes to process-specific
- * memory. Two processes publishing statistics do not block each other.
- */
- LWLockAcquire(&memCxtState[idx].lw_lock, LW_EXCLUSIVE);
- memCxtState[idx].proc_id = MyProcPid;
-
- if (DsaPointerIsValid(memCxtState[idx].memstats_dsa_pointer))
- {
- /*
- * Free any previous allocations: free the name, ident and path
- * pointers before freeing the pointer that contains them.
- */
- free_memorycontextstate_dsa(MemoryStatsDsaArea, memCxtState[idx].total_stats,
- memCxtState[idx].memstats_dsa_pointer);
- }
-
- /*
- * Assign total_stats before allocating memory, so that cleanup can run
- * if any subsequent dsa_allocate call to allocate name/ident/path
- * fails.
- */
- memCxtState[idx].total_stats = stats_num;
- memCxtState[idx].memstats_dsa_pointer =
- dsa_allocate0(MemoryStatsDsaArea, stats_num * sizeof(MemoryStatsEntry));
-
- meminfo = (MemoryStatsEntry *)
- dsa_get_address(MemoryStatsDsaArea, memCxtState[idx].memstats_dsa_pointer);
-
- if (summary)
- {
- int cxt_id = 0;
- List *path = NIL;
-
- /* Copy TopMemoryContext statistics to DSA */
- memset(&stat, 0, sizeof(stat));
- (*TopMemoryContext->methods->stats) (TopMemoryContext, NULL, NULL,
- &stat, true);
- path = lcons_int(1, path);
- PublishMemoryContext(meminfo, cxt_id, TopMemoryContext, path, stat,
- 1, MemoryStatsDsaArea, 100);
- cxt_id = cxt_id + 1;
-
- /*
- * Copy statistics for each of TopMemoryContext's children. This
- * includes statistics of at most 100 children per node, with each
- * child node limited to a depth of 100 in its subtree.
- */
- for (MemoryContext c = TopMemoryContext->firstchild; c != NULL;
- c = c->nextchild)
- {
- MemoryContextCounters grand_totals;
- int num_contexts = 0;
-
- path = NIL;
- memset(&grand_totals, 0, sizeof(grand_totals));
-
- MemoryContextStatsInternal(c, 1, 100, 100, &grand_totals,
- PRINT_STATS_NONE, &num_contexts);
-
- path = compute_context_path(c, context_id_lookup);
-
- /*
- * Register the stats entry first; that way the cleanup handler
- * can reach it in case of allocation failures of one or more
- * members.
- */
- memCxtState[idx].total_stats = cxt_id++;
- PublishMemoryContext(meminfo, cxt_id, c, path,
- grand_totals, num_contexts, MemoryStatsDsaArea, 100);
- }
- memCxtState[idx].total_stats = cxt_id;
-
- /* Notify waiting backends and return */
- end_memorycontext_reporting();
-
- hash_destroy(context_id_lookup);
-
- return;
- }
-
- foreach_ptr(MemoryContextData, cur, contexts)
- {
- List *path = NIL;
-
- /*
- * Figure out the transient context_id of this context and each of its
- * ancestors, to compute a path for this context.
- */
- path = compute_context_path(cur, context_id_lookup);
-
- /* Examine the context stats */
- memset(&stat, 0, sizeof(stat));
- (*cur->methods->stats) (cur, NULL, NULL, &stat, true);
-
- /* Account for the one statistics slot reserved for cumulative reporting */
- if (context_id < (max_stats - 1) || stats_count <= max_stats)
- {
- /* Copy statistics to DSA memory */
- PublishMemoryContext(meminfo, context_id, cur, path, stat, 1, MemoryStatsDsaArea, 100);
- }
- else
- {
- meminfo[max_stats - 1].totalspace += stat.totalspace;
- meminfo[max_stats - 1].nblocks += stat.nblocks;
- meminfo[max_stats - 1].freespace += stat.freespace;
- meminfo[max_stats - 1].freechunks += stat.freechunks;
- }
-
- /*
- * The per-process DSA limit is reached; write an aggregate of the
- * remaining statistics.
- *
- * We can store contexts from 0 to max_stats - 1. When stats_count is
- * greater than max_stats, we stop reporting individual statistics
- * once context_id equals max_stats - 2, since the max_stats - 1 array
- * slot is used for the cumulative statistics, the "Remaining Totals".
- */
- if (stats_count > max_stats && context_id == (max_stats - 2))
- {
- char *nameptr;
- int namelen = strlen("Remaining Totals");
-
- num_individual_stats = context_id + 1;
- meminfo[max_stats - 1].name = dsa_allocate(MemoryStatsDsaArea, namelen + 1);
- nameptr = dsa_get_address(MemoryStatsDsaArea, meminfo[max_stats - 1].name);
- strlcpy(nameptr, "Remaining Totals", namelen + 1);
- meminfo[max_stats - 1].ident = InvalidDsaPointer;
- meminfo[max_stats - 1].path = InvalidDsaPointer;
- meminfo[max_stats - 1].type = 0;
- }
- context_id++;
- }
-
- /*
- * Statistics are not aggregated, i.e. individual statistics are
- * reported, when stats_count <= max_stats.
- */
- if (stats_count <= max_stats)
- {
- memCxtState[idx].total_stats = context_id;
- }
- /* Report number of aggregated memory contexts */
- else
- {
- meminfo[max_stats - 1].num_agg_stats = context_id -
- num_individual_stats;
-
- /*
- * Total stats equals num_individual_stats plus one record for the
- * cumulative statistics.
- */
- memCxtState[idx].total_stats = num_individual_stats + 1;
- }
-
- /* Notify waiting backends and return */
- end_memorycontext_reporting();
-
- hash_destroy(context_id_lookup);
-}
-
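To make the slot arithmetic in the function above easier to follow, here is a minimal standalone sketch; the two constants are illustrative stand-ins for MEMORY_CONTEXT_REPORT_MAX_PER_BACKEND and MAX_MEMORY_CONTEXT_STATS_SIZE, whose real values live in the backend headers.

/* Hypothetical budget and entry size, for illustration only. */
#define REPORT_BUDGET	(1024 * 1024)	/* assume 1MB per backend */
#define ENTRY_SIZE		1024			/* assume 1kB per stats entry */

static int
remaining_totals_slot(int stats_count)
{
	int			max_stats = REPORT_BUDGET / ENTRY_SIZE; /* 1024 slots */

	/* Everything fits: each context gets its own slot, no aggregate. */
	if (stats_count <= max_stats)
		return -1;

	/*
	 * Otherwise slots 0 .. max_stats - 2 hold individual entries and the
	 * last slot (1023 here) accumulates the rest as "Remaining Totals".
	 */
	return max_stats - 1;
}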
-/*
- * Update timestamp and signal all the waiting client backends after copying
- * all the statistics.
- */
-static void
-end_memorycontext_reporting(void)
-{
- memCxtState[MyProcNumber].stats_timestamp = GetCurrentTimestamp();
- LWLockRelease(&memCxtState[MyProcNumber].lw_lock);
- ConditionVariableBroadcast(&memCxtState[MyProcNumber].memcxt_cv);
-}
-
-/*
- * compute_context_path
- *
- * Append the transient context_id of this context and each of its ancestors
- * to a list, in order to compute a path.
- */
-static List *
-compute_context_path(MemoryContext c, HTAB *context_id_lookup)
-{
- bool found;
- List *path = NIL;
- MemoryContext cur_context;
-
- for (cur_context = c; cur_context != NULL; cur_context = cur_context->parent)
- {
- MemoryStatsContextId *cur_entry;
-
- cur_entry = hash_search(context_id_lookup, &cur_context, HASH_FIND, &found);
-
- if (!found)
- elog(ERROR, "hash table corrupted, can't construct path value");
-
- path = lcons_int(cur_entry->context_id, path);
- }
-
- return path;
-}
-
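A worked example of the resulting path value may help; the ids below are hypothetical, since they depend on the order in which compute_contexts_count_and_ids() numbered the tree.

/*
 * Suppose the traversal assigned:
 *     TopMemoryContext       -> id 1
 *     CacheMemoryContext     -> id 2    (hypothetical)
 *     one of its children    -> id 15   (hypothetical)
 *
 * compute_context_path(child, lookup) walks child -> CacheMemoryContext
 * -> TopMemoryContext and prepends each id with lcons_int(), so the
 * caller gets the root-first list {1, 2, 15}, which is what the "path"
 * column of the view shows.
 */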
-/*
- * Return the number of contexts currently allocated by the backend,
- * and assign a context id to each of them.
- */
-static void
-compute_contexts_count_and_ids(List *contexts, HTAB *context_id_lookup,
- int *stats_count, bool summary)
-{
- foreach_ptr(MemoryContextData, cur, contexts)
- {
- MemoryStatsContextId *entry;
- bool found;
-
- entry = (MemoryStatsContextId *) hash_search(context_id_lookup, &cur,
- HASH_ENTER, &found);
- Assert(!found);
-
- /*
- * Context ids start at 1, so increment stats_count before
- * assigning.
- */
- entry->context_id = ++(*stats_count);
-
- /* Append the children of the current context to the main list. */
- for (MemoryContext c = cur->firstchild; c != NULL; c = c->nextchild)
- {
- if (summary)
- {
- entry = (MemoryStatsContextId *) hash_search(context_id_lookup, &c,
- HASH_ENTER, &found);
- Assert(!found);
-
- entry->context_id = ++(*stats_count);
- }
-
- contexts = lappend(contexts, c);
- }
-
- /*
- * In summary mode, only the contexts in the first two levels (from
- * the top) are displayed.
- */
- if (summary)
- break;
- }
-}
-
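One non-obvious detail, sketched below: appending children to the list being iterated turns the loop into a breadth-first traversal, because foreach_ptr() re-checks the list length on every step.

/*
 * With a tree Top -> {A, B} and A -> {C}, the visit (and id) order is
 *
 *     Top (1), A (2), B (3), C (4)
 *
 * i.e. breadth-first. In summary mode the loop breaks after Top, but
 * Top's direct children were already entered into the hash table, so
 * the first two levels still receive ids.
 */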
-/*
- * PublishMemoryContext
- *
- * Copy the memory context statistics of a single context to DSA memory.
- */
-static void
-PublishMemoryContext(MemoryStatsEntry *memcxt_info, int curr_id,
- MemoryContext context, List *path,
- MemoryContextCounters stat, int num_contexts,
- dsa_area *area, int max_levels)
-{
- const char *ident = context->ident;
- const char *name = context->name;
- int *path_list;
-
- /*
- * To be consistent with logging output, we label dynahash contexts with
- * just the hash table name, as MemoryContextStatsPrint() does.
- */
- if (context->ident && strncmp(context->name, "dynahash", 8) == 0)
- {
- name = context->ident;
- ident = NULL;
- }
-
- if (name != NULL)
- {
- int namelen = strlen(name);
- char *nameptr;
-
- if (strlen(name) >= MEMORY_CONTEXT_IDENT_SHMEM_SIZE)
- namelen = pg_mbcliplen(name, namelen,
- MEMORY_CONTEXT_IDENT_SHMEM_SIZE - 1);
-
- memcxt_info[curr_id].name = dsa_allocate(area, namelen + 1);
- nameptr = (char *) dsa_get_address(area, memcxt_info[curr_id].name);
- strlcpy(nameptr, name, namelen + 1);
- }
- else
- memcxt_info[curr_id].name = InvalidDsaPointer;
-
- /* Trim and copy the identifier if it is not NULL */
- if (ident != NULL)
- {
- int idlen = strlen(context->ident);
- char *identptr;
-
- /*
- * Some identifiers, such as SQL query strings, can be very long;
- * truncate oversized identifiers.
- */
- if (idlen >= MEMORY_CONTEXT_IDENT_SHMEM_SIZE)
- idlen = pg_mbcliplen(ident, idlen,
- MEMORY_CONTEXT_IDENT_SHMEM_SIZE - 1);
-
- memcxt_info[curr_id].ident = dsa_allocate(area, idlen + 1);
- identptr = (char *) dsa_get_address(area, memcxt_info[curr_id].ident);
- strlcpy(identptr, ident, idlen + 1);
- }
- else
- memcxt_info[curr_id].ident = InvalidDsaPointer;
-
- /* Allocate DSA memory for storing path information */
- if (path == NIL)
- memcxt_info[curr_id].path = InvalidDsaPointer;
- else
- {
- int levels = Min(list_length(path), max_levels);
-
- memcxt_info[curr_id].path_length = levels;
- memcxt_info[curr_id].path = dsa_allocate0(area, levels * sizeof(int));
- memcxt_info[curr_id].levels = list_length(path);
- path_list = (int *) dsa_get_address(area, memcxt_info[curr_id].path);
-
- foreach_int(i, path)
- {
- path_list[foreach_current_index(i)] = i;
- if (--levels == 0)
- break;
- }
- }
- memcxt_info[curr_id].type = context->type;
- memcxt_info[curr_id].totalspace = stat.totalspace;
- memcxt_info[curr_id].nblocks = stat.nblocks;
- memcxt_info[curr_id].freespace = stat.freespace;
- memcxt_info[curr_id].freechunks = stat.freechunks;
- memcxt_info[curr_id].num_agg_stats = num_contexts;
-}
-
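The path clipping deserves a worked example: path_length records how many ids were copied, while levels keeps the full depth, so a consumer can detect truncation. The numbers below are assumptions for illustration.

/*
 * Assume a context nested 150 levels deep and max_levels = 100:
 *
 *     memcxt_info[id].levels      = 150   (list_length(path), true depth)
 *     memcxt_info[id].path_length = 100   (ids actually copied)
 *
 * The copy loop stops after 100 entries, so path[] holds the 100 ids
 * closest to the root; comparing path_length with levels reveals that
 * the tail of the path was dropped.
 */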
-/*
- * free_memorycontextstate_dsa
- *
- * Worker for freeing resources from a MemoryStatsEntry. Callers are
- * responsible for ensuring that the DSA pointer is valid.
- */
-static void
-free_memorycontextstate_dsa(dsa_area *area, int total_stats,
- dsa_pointer prev_dsa_pointer)
-{
- MemoryStatsEntry *meminfo;
-
- meminfo = (MemoryStatsEntry *) dsa_get_address(area, prev_dsa_pointer);
- Assert(meminfo != NULL);
- for (int i = 0; i < total_stats; i++)
- {
- if (DsaPointerIsValid(meminfo[i].name))
- dsa_free(area, meminfo[i].name);
-
- if (DsaPointerIsValid(meminfo[i].ident))
- dsa_free(area, meminfo[i].ident);
-
- if (DsaPointerIsValid(meminfo[i].path))
- dsa_free(area, meminfo[i].path);
- }
-
- dsa_free(area, memCxtState[MyProcNumber].memstats_dsa_pointer);
- memCxtState[MyProcNumber].memstats_dsa_pointer = InvalidDsaPointer;
-}
-
-/*
- * Free the memory context statistics stored by this process
- * in the DSA area.
- */
-void
-AtProcExit_memstats_cleanup(int code, Datum arg)
-{
- int idx = MyProcNumber;
-
- if (memCxtArea->memstats_dsa_handle == DSA_HANDLE_INVALID)
- return;
-
- LWLockAcquire(&memCxtState[idx].lw_lock, LW_EXCLUSIVE);
-
- if (!DsaPointerIsValid(memCxtState[idx].memstats_dsa_pointer))
- {
- LWLockRelease(&memCxtState[idx].lw_lock);
- return;
- }
-
- /* If the DSA mapping could not be found, attach to the area */
- if (MemoryStatsDsaArea == NULL)
- MemoryStatsDsaArea = dsa_attach(memCxtArea->memstats_dsa_handle);
-
- /*
- * Free the memory context statistics: free the name, ident and path
- * pointers before freeing the pointer that contains these pointers and
- * the integer statistics.
- */
- free_memorycontextstate_dsa(MemoryStatsDsaArea, memCxtState[idx].total_stats,
- memCxtState[idx].memstats_dsa_pointer);
-
- dsa_detach(MemoryStatsDsaArea);
- LWLockRelease(&memCxtState[idx].lw_lock);
-}
-
void *
palloc(Size size)
{
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index e3526e78064..0be1c2b0fff 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -284,8 +284,7 @@ PortalDefineQuery(Portal portal,
const char *sourceText,
CommandTag commandTag,
List *stmts,
- CachedPlan *cplan,
- CachedPlanSource *plansource)
+ CachedPlan *cplan)
{
Assert(PortalIsValid(portal));
Assert(portal->status == PORTAL_NEW);
@@ -300,7 +299,6 @@ PortalDefineQuery(Portal portal,
portal->commandTag = commandTag;
portal->stmts = stmts;
portal->cplan = cplan;
- portal->plansource = plansource;
portal->status = PORTAL_DEFINED;
}
diff --git a/src/bin/initdb/t/001_initdb.pl b/src/bin/initdb/t/001_initdb.pl
index 15dd10ce40a..b7ef7ed8d06 100644
--- a/src/bin/initdb/t/001_initdb.pl
+++ b/src/bin/initdb/t/001_initdb.pl
@@ -76,7 +76,8 @@ command_like(
'checksums are enabled in control file');
command_ok([ 'initdb', '--sync-only', $datadir ], 'sync only');
-command_ok([ 'initdb', '--sync-only', '--no-sync-data-files', $datadir ], '--no-sync-data-files');
+command_ok([ 'initdb', '--sync-only', '--no-sync-data-files', $datadir ],
+ '--no-sync-data-files');
command_fails([ 'initdb', $datadir ], 'existing data directory');
if ($supports_syncfs)
diff --git a/src/bin/pg_basebackup/pg_createsubscriber.c b/src/bin/pg_basebackup/pg_createsubscriber.c
index f65acc7cb11..11f71c03801 100644
--- a/src/bin/pg_basebackup/pg_createsubscriber.c
+++ b/src/bin/pg_basebackup/pg_createsubscriber.c
@@ -46,7 +46,7 @@ struct CreateSubscriberOptions
SimpleStringList replslot_names; /* list of replication slot names */
int recovery_timeout; /* stop recovery after this time */
bool all_dbs; /* all option */
- SimpleStringList objecttypes_to_remove; /* list of object types to remove */
+ SimpleStringList objecttypes_to_clean; /* list of object types to cleanup */
};
/* per-database publication/subscription info */
@@ -71,8 +71,8 @@ struct LogicalRepInfos
{
struct LogicalRepInfo *dbinfo;
bool two_phase; /* enable-two-phase option */
- bits32 objecttypes_to_remove; /* flags indicating which object types
- * to remove on subscriber */
+ bits32 objecttypes_to_clean; /* flags indicating which object types
+ * to clean up on subscriber */
};
static void cleanup_objects_atexit(void);
@@ -247,19 +247,19 @@ usage(void)
printf(_(" %s [OPTION]...\n"), progname);
printf(_("\nOptions:\n"));
printf(_(" -a, --all create subscriptions for all databases except template\n"
- " databases or databases that don't allow connections\n"));
+ " databases and databases that don't allow connections\n"));
printf(_(" -d, --database=DBNAME database in which to create a subscription\n"));
printf(_(" -D, --pgdata=DATADIR location for the subscriber data directory\n"));
printf(_(" -n, --dry-run dry run, just show what would be done\n"));
printf(_(" -p, --subscriber-port=PORT subscriber port number (default %s)\n"), DEFAULT_SUB_PORT);
printf(_(" -P, --publisher-server=CONNSTR publisher connection string\n"));
- printf(_(" -R, --remove=OBJECTTYPE remove all objects of the specified type from specified\n"
- " databases on the subscriber; accepts: publications\n"));
printf(_(" -s, --socketdir=DIR socket directory to use (default current dir.)\n"));
printf(_(" -t, --recovery-timeout=SECS seconds to wait for recovery to end\n"));
printf(_(" -T, --enable-two-phase enable two-phase commit for all subscriptions\n"));
printf(_(" -U, --subscriber-username=NAME user name for subscriber connection\n"));
printf(_(" -v, --verbose output verbose messages\n"));
+ printf(_(" --clean=OBJECTTYPE drop all objects of the specified type from specified\n"
+ " databases on the subscriber; accepts: \"%s\"\n"), "publications");
printf(_(" --config-file=FILENAME use specified main server configuration\n"
" file when running target cluster\n"));
printf(_(" --publication=NAME publication name\n"));
@@ -973,7 +973,7 @@ check_publisher(const struct LogicalRepInfo *dbinfo)
pg_log_warning("two_phase option will not be enabled for replication slots");
pg_log_warning_detail("Subscriptions will be created with the two_phase option disabled. "
"Prepared transactions will be replicated at COMMIT PREPARED.");
- pg_log_warning_hint("You can use --enable-two-phase switch to enable two_phase.");
+ pg_log_warning_hint("You can use the command-line option --enable-two-phase to enable two_phase.");
}
/*
@@ -1730,7 +1730,7 @@ static void
check_and_drop_publications(PGconn *conn, struct LogicalRepInfo *dbinfo)
{
PGresult *res;
- bool drop_all_pubs = dbinfos.objecttypes_to_remove & OBJECTTYPE_PUBLICATIONS;
+ bool drop_all_pubs = dbinfos.objecttypes_to_clean & OBJECTTYPE_PUBLICATIONS;
Assert(conn != NULL);
@@ -2026,7 +2026,6 @@ main(int argc, char **argv)
{"dry-run", no_argument, NULL, 'n'},
{"subscriber-port", required_argument, NULL, 'p'},
{"publisher-server", required_argument, NULL, 'P'},
- {"remove", required_argument, NULL, 'R'},
{"socketdir", required_argument, NULL, 's'},
{"recovery-timeout", required_argument, NULL, 't'},
{"enable-two-phase", no_argument, NULL, 'T'},
@@ -2038,6 +2037,7 @@ main(int argc, char **argv)
{"publication", required_argument, NULL, 2},
{"replication-slot", required_argument, NULL, 3},
{"subscription", required_argument, NULL, 4},
+ {"clean", required_argument, NULL, 5},
{NULL, 0, NULL, 0}
};
@@ -2109,7 +2109,7 @@ main(int argc, char **argv)
get_restricted_token();
- while ((c = getopt_long(argc, argv, "ad:D:np:P:R:s:t:TU:v",
+ while ((c = getopt_long(argc, argv, "ad:D:np:P:s:t:TU:v",
long_options, &option_index)) != -1)
{
switch (c)
@@ -2139,12 +2139,6 @@ main(int argc, char **argv)
case 'P':
opt.pub_conninfo_str = pg_strdup(optarg);
break;
- case 'R':
- if (!simple_string_list_member(&opt.objecttypes_to_remove, optarg))
- simple_string_list_append(&opt.objecttypes_to_remove, optarg);
- else
- pg_fatal("object type \"%s\" is specified more than once for -R/--remove", optarg);
- break;
case 's':
opt.socket_dir = pg_strdup(optarg);
canonicalize_path(opt.socket_dir);
@@ -2191,6 +2185,12 @@ main(int argc, char **argv)
else
pg_fatal("subscription \"%s\" specified more than once for --subscription", optarg);
break;
+ case 5:
+ if (!simple_string_list_member(&opt.objecttypes_to_clean, optarg))
+ simple_string_list_append(&opt.objecttypes_to_clean, optarg);
+ else
+ pg_fatal("object type \"%s\" specified more than once for --clean", optarg);
+ break;
default:
/* getopt_long already emitted a complaint */
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
@@ -2214,7 +2214,7 @@ main(int argc, char **argv)
if (bad_switch)
{
- pg_log_error("%s cannot be used with -a/--all", bad_switch);
+ pg_log_error("options %s and -a/--all cannot be used together", bad_switch);
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
@@ -2334,14 +2334,14 @@ main(int argc, char **argv)
}
/* Verify the object types specified for removal from the subscriber */
- for (SimpleStringListCell *cell = opt.objecttypes_to_remove.head; cell; cell = cell->next)
+ for (SimpleStringListCell *cell = opt.objecttypes_to_clean.head; cell; cell = cell->next)
{
if (pg_strcasecmp(cell->val, "publications") == 0)
- dbinfos.objecttypes_to_remove |= OBJECTTYPE_PUBLICATIONS;
+ dbinfos.objecttypes_to_clean |= OBJECTTYPE_PUBLICATIONS;
else
{
- pg_log_error("invalid object type \"%s\" specified for -R/--remove", cell->val);
- pg_log_error_hint("The valid option is: \"publications\"");
+ pg_log_error("invalid object type \"%s\" specified for --clean", cell->val);
+ pg_log_error_hint("The valid value is: \"%s\"", "publications");
exit(1);
}
}
diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c
index e6810efe5f0..fb7a6a1d05d 100644
--- a/src/bin/pg_basebackup/pg_recvlogical.c
+++ b/src/bin/pg_basebackup/pg_recvlogical.c
@@ -41,8 +41,8 @@ typedef enum
/* Global Options */
static char *outfile = NULL;
static int verbose = 0;
-static bool two_phase = false;
-static bool failover = false;
+static bool two_phase = false; /* enable-two-phase option */
+static bool failover = false; /* enable-failover option */
static int noloop = 0;
static int standby_message_timeout = 10 * 1000; /* 10 sec = default */
static int fsync_interval = 10 * 1000; /* 10 sec = default */
@@ -89,9 +89,9 @@ usage(void)
printf(_(" --drop-slot drop the replication slot (for the slot's name see --slot)\n"));
printf(_(" --start start streaming in a replication slot (for the slot's name see --slot)\n"));
printf(_("\nOptions:\n"));
+ printf(_(" --enable-failover enable replication slot synchronization to standby servers when\n"
+ " creating a replication slot\n"));
printf(_(" -E, --endpos=LSN exit after receiving the specified LSN\n"));
- printf(_(" --failover enable replication slot synchronization to standby servers when\n"
- " creating a slot\n"));
printf(_(" -f, --file=FILE receive log into this file, - for stdout\n"));
printf(_(" -F --fsync-interval=SECS\n"
" time between fsyncs to the output file (default: %d)\n"), (fsync_interval / 1000));
@@ -105,7 +105,8 @@ usage(void)
printf(_(" -s, --status-interval=SECS\n"
" time between status packets sent to server (default: %d)\n"), (standby_message_timeout / 1000));
printf(_(" -S, --slot=SLOTNAME name of the logical replication slot\n"));
- printf(_(" -t, --two-phase enable decoding of prepared transactions when creating a slot\n"));
+ printf(_(" -t, --enable-two-phase enable decoding of prepared transactions when creating a slot\n"));
+ printf(_(" --two-phase (same as --enable-two-phase, deprecated)\n"));
printf(_(" -v, --verbose output verbose messages\n"));
printf(_(" -V, --version output version information, then exit\n"));
printf(_(" -?, --help show this help, then exit\n"));
@@ -698,9 +699,10 @@ main(int argc, char **argv)
{"file", required_argument, NULL, 'f'},
{"fsync-interval", required_argument, NULL, 'F'},
{"no-loop", no_argument, NULL, 'n'},
- {"failover", no_argument, NULL, 5},
+ {"enable-failover", no_argument, NULL, 5},
+ {"enable-two-phase", no_argument, NULL, 't'},
+ {"two-phase", no_argument, NULL, 't'}, /* deprecated */
{"verbose", no_argument, NULL, 'v'},
- {"two-phase", no_argument, NULL, 't'},
{"version", no_argument, NULL, 'V'},
{"help", no_argument, NULL, '?'},
/* connection options */
@@ -928,14 +930,14 @@ main(int argc, char **argv)
{
if (two_phase)
{
- pg_log_error("--two-phase may only be specified with --create-slot");
+ pg_log_error("%s may only be specified with --create-slot", "--enable-two-phase");
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
if (failover)
{
- pg_log_error("--failover may only be specified with --create-slot");
+ pg_log_error("%s may only be specified with --create-slot", "--enable-failover");
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
exit(1);
}
diff --git a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
index c82e78847b3..1b7a6f6f43f 100644
--- a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
+++ b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl
@@ -110,7 +110,7 @@ $node->command_fails(
'--dbname' => $node->connstr('postgres'),
'--start',
'--endpos' => $nextlsn,
- '--two-phase', '--no-loop',
+ '--enable-two-phase', '--no-loop',
'--file' => '-',
],
'incorrect usage');
@@ -142,12 +142,13 @@ $node->command_ok(
'--slot' => 'test',
'--dbname' => $node->connstr('postgres'),
'--create-slot',
- '--failover',
+ '--enable-failover',
],
'slot with failover created');
my $result = $node->safe_psql('postgres',
- "SELECT failover FROM pg_catalog.pg_replication_slots WHERE slot_name = 'test'");
+ "SELECT failover FROM pg_catalog.pg_replication_slots WHERE slot_name = 'test'"
+);
is($result, 't', "failover is enabled for the new slot");
done_testing();
diff --git a/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl b/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl
index 2d532fee567..229fef5b3b5 100644
--- a/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl
+++ b/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl
@@ -331,7 +331,7 @@ $node_p->safe_psql($db1,
$node_p->wait_for_replay_catchup($node_s);
# Create user-defined publications, wait for streaming replication to sync them
-# to the standby, then verify that '--remove'
+# to the standby, then verify that '--clean'
# removes them.
$node_p->safe_psql(
$db1, qq(
@@ -399,7 +399,7 @@ command_fails_like(
'--database' => $db1,
'--all',
],
- qr/--database cannot be used with -a\/--all/,
+ qr/options --database and -a\/--all cannot be used together/,
'fail if --database is used with --all');
# run pg_createsubscriber with '--publication' and '--all' and verify
@@ -416,7 +416,7 @@ command_fails_like(
'--all',
'--publication' => 'pub1',
],
- qr/--publication cannot be used with -a\/--all/,
+ qr/options --publication and -a\/--all cannot be used together/,
'fail if --publication is used with --all');
# run pg_createsubscriber with '--all' option
@@ -446,7 +446,7 @@ is(scalar(() = $stderr =~ /creating subscription/g),
# Run pg_createsubscriber on node S. --verbose is used twice
# to show more information.
# In passing, also test the --enable-two-phase option and
-# --remove option
+# --clean option
command_ok(
[
'pg_createsubscriber',
@@ -463,7 +463,7 @@ command_ok(
'--database' => $db1,
'--database' => $db2,
'--enable-two-phase',
- '--remove' => 'publications',
+ '--clean' => 'publications',
],
'run pg_createsubscriber on node S');
diff --git a/src/bin/pg_combinebackup/t/010_hardlink.pl b/src/bin/pg_combinebackup/t/010_hardlink.pl
index a0ee419090c..4f92d6676bd 100644
--- a/src/bin/pg_combinebackup/t/010_hardlink.pl
+++ b/src/bin/pg_combinebackup/t/010_hardlink.pl
@@ -56,7 +56,7 @@ $primary->command_ok(
'--pgdata' => $backup1path,
'--no-sync',
'--checkpoint' => 'fast',
- '--wal-method' => 'none'
+ '--wal-method' => 'none'
],
"full backup");
@@ -74,7 +74,7 @@ $primary->command_ok(
'--pgdata' => $backup2path,
'--no-sync',
'--checkpoint' => 'fast',
- '--wal-method' => 'none',
+ '--wal-method' => 'none',
'--incremental' => $backup1path . '/backup_manifest'
],
"incremental backup");
@@ -112,45 +112,45 @@ done_testing();
# of the given data file.
sub check_data_file
{
- my ($data_file, $last_segment_nlinks) = @_;
-
- my @data_file_segments = ($data_file);
-
- # Start checking for additional segments
- my $segment_number = 1;
-
- while (1)
- {
- my $next_segment = $data_file . '.' . $segment_number;
-
- # If the file exists and is a regular file, add it to the list
- if (-f $next_segment)
- {
- push @data_file_segments, $next_segment;
- $segment_number++;
- }
- # Stop the loop if the file doesn't exist
- else
- {
- last;
- }
- }
-
- # All segments of the given data file should contain 2 hard links, except
- # for the last one, which should match the given number of links.
- my $last_segment = pop @data_file_segments;
-
- for my $segment (@data_file_segments)
- {
- # Get the file's stat information of each segment
- my $nlink_count = get_hard_link_count($segment);
- ok($nlink_count == 2, "File '$segment' has 2 hard links");
- }
-
- # Get the file's stat information of the last segment
- my $nlink_count = get_hard_link_count($last_segment);
- ok($nlink_count == $last_segment_nlinks,
- "File '$last_segment' has $last_segment_nlinks hard link(s)");
+ my ($data_file, $last_segment_nlinks) = @_;
+
+ my @data_file_segments = ($data_file);
+
+ # Start checking for additional segments
+ my $segment_number = 1;
+
+ while (1)
+ {
+ my $next_segment = $data_file . '.' . $segment_number;
+
+ # If the file exists and is a regular file, add it to the list
+ if (-f $next_segment)
+ {
+ push @data_file_segments, $next_segment;
+ $segment_number++;
+ }
+ # Stop the loop if the file doesn't exist
+ else
+ {
+ last;
+ }
+ }
+
+ # All segments of the given data file should contain 2 hard links, except
+ # for the last one, which should match the given number of links.
+ my $last_segment = pop @data_file_segments;
+
+ for my $segment (@data_file_segments)
+ {
+ # Get the file's stat information of each segment
+ my $nlink_count = get_hard_link_count($segment);
+ ok($nlink_count == 2, "File '$segment' has 2 hard links");
+ }
+
+ # Get the file's stat information of the last segment
+ my $nlink_count = get_hard_link_count($last_segment);
+ ok($nlink_count == $last_segment_nlinks,
+ "File '$last_segment' has $last_segment_nlinks hard link(s)");
}
@@ -159,11 +159,11 @@ sub check_data_file
# that file.
sub get_hard_link_count
{
- my ($file) = @_;
+ my ($file) = @_;
- # Get file stats
- my @stats = stat($file);
- my $nlink = $stats[3]; # Number of hard links
+ # Get file stats
+ my @stats = stat($file);
+ my $nlink = $stats[3]; # Number of hard links
- return $nlink;
+ return $nlink;
}
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index afa42337b11..197c1295d93 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -152,7 +152,7 @@ InitDumpOptions(DumpOptions *opts)
opts->dumpSections = DUMP_UNSECTIONED;
opts->dumpSchema = true;
opts->dumpData = true;
- opts->dumpStatistics = true;
+ opts->dumpStatistics = false;
}
/*
@@ -2655,7 +2655,7 @@ WriteToc(ArchiveHandle *AH)
pg_fatal("unexpected TOC entry in WriteToc(): %d %s %s",
te->dumpId, te->desc, te->tag);
- if (fseeko(AH->FH, te->defnLen, SEEK_CUR != 0))
+ if (fseeko(AH->FH, te->defnLen, SEEK_CUR) != 0)
pg_fatal("error during file seek: %m");
}
else if (te->defnDumper)
diff --git a/src/bin/pg_dump/pg_backup_directory.c b/src/bin/pg_dump/pg_backup_directory.c
index 21b00792a8a..bc2a2fb4797 100644
--- a/src/bin/pg_dump/pg_backup_directory.c
+++ b/src/bin/pg_dump/pg_backup_directory.c
@@ -412,10 +412,15 @@ _LoadLOs(ArchiveHandle *AH, TocEntry *te)
/*
* Note: before archive v16, there was always only one BLOBS TOC entry,
- * now there can be multiple. We don't need to worry what version we are
- * reading though, because tctx->filename should be correct either way.
+ * now there can be multiple. Furthermore, although the actual filename
+ * was always "blobs.toc" before v16, the value of tctx->filename did not
+ * match that before commit 548e50976 fixed it. For simplicity we assume
+ * it must be "blobs.toc" in all archives before v16.
*/
- setFilePath(AH, tocfname, tctx->filename);
+ if (AH->version < K_VERS_1_16)
+ setFilePath(AH, tocfname, "blobs.toc");
+ else
+ setFilePath(AH, tocfname, tctx->filename);
CFH = ctx->LOsTocFH = InitDiscoverCompressFileHandle(tocfname, PG_BINARY_R);
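A condensed view of the branch just added; the pre-v16 name is fixed by the comment above, while the post-v16 filename shown is only a hypothetical example of a per-entry name.

/*
 * Archive version     LO TOC filename used
 * ---------------     --------------------
 * < K_VERS_1_16       "blobs.toc" (always; tctx->filename may be stale)
 * >= K_VERS_1_16      tctx->filename (per entry, e.g. "blobs_16384.toc",
 *                     hypothetical example)
 */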
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index e2e7975b34e..1937997ea67 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -350,7 +350,9 @@ static void buildMatViewRefreshDependencies(Archive *fout);
static void getTableDataFKConstraints(void);
static void determineNotNullFlags(Archive *fout, PGresult *res, int r,
TableInfo *tbinfo, int j,
- int i_notnull_name, int i_notnull_invalidoid,
+ int i_notnull_name,
+ int i_notnull_comment,
+ int i_notnull_invalidoid,
int i_notnull_noinherit,
int i_notnull_islocal,
PQExpBuffer *invalidnotnulloids);
@@ -1235,7 +1237,7 @@ main(int argc, char **argv)
static void
help(const char *progname)
{
- printf(_("%s dumps a database as a text file or to other formats.\n\n"), progname);
+ printf(_("%s exports a PostgreSQL database as an SQL script or to other formats.\n\n"), progname);
printf(_("Usage:\n"));
printf(_(" %s [OPTION]... [DBNAME]\n"), progname);
@@ -6890,7 +6892,8 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages,
(relkind == RELKIND_PARTITIONED_TABLE) ||
(relkind == RELKIND_INDEX) ||
(relkind == RELKIND_PARTITIONED_INDEX) ||
- (relkind == RELKIND_MATVIEW))
+ (relkind == RELKIND_MATVIEW ||
+ relkind == RELKIND_FOREIGN_TABLE))
{
RelStatsInfo *info = pg_malloc0(sizeof(RelStatsInfo));
DumpableObject *dobj = &info->dobj;
@@ -6929,6 +6932,7 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages,
case RELKIND_RELATION:
case RELKIND_PARTITIONED_TABLE:
case RELKIND_MATVIEW:
+ case RELKIND_FOREIGN_TABLE:
info->section = SECTION_DATA;
break;
case RELKIND_INDEX:
@@ -6936,7 +6940,7 @@ getRelationStatistics(Archive *fout, DumpableObject *rel, int32 relpages,
info->section = SECTION_POST_DATA;
break;
default:
- pg_fatal("cannot dump statistics for relation kind '%c'",
+ pg_fatal("cannot dump statistics for relation kind \"%c\"",
info->relkind);
}
@@ -9004,6 +9008,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
int i_attalign;
int i_attislocal;
int i_notnull_name;
+ int i_notnull_comment;
int i_notnull_noinherit;
int i_notnull_islocal;
int i_notnull_invalidoid;
@@ -9087,7 +9092,8 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
/*
* Find out any NOT NULL markings for each column. In 18 and up we read
- * pg_constraint to obtain the constraint name. notnull_noinherit is set
+ * pg_constraint to obtain the constraint name, and for valid constraints
+ * also pg_description to obtain its comment. notnull_noinherit is set
* according to the NO INHERIT property. For versions prior to 18, we
* store an empty string as the name when a constraint is marked as
* attnotnull (this cues dumpTableSchema to print the NOT NULL clause
@@ -9095,7 +9101,8 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
*
* For invalid constraints, we need to store their OIDs for processing
* elsewhere, so we bring the pg_constraint.oid value when the constraint
- * is invalid, and NULL otherwise.
+ * is invalid, and NULL otherwise. Their comments are handled not here
+ * but by collectComments, because they're their own dumpable object.
*
* We track in notnull_islocal whether the constraint was defined directly
* in this table or via an ancestor, for binary upgrade. flagInhAttrs
@@ -9105,6 +9112,8 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
if (fout->remoteVersion >= 180000)
appendPQExpBufferStr(q,
"co.conname AS notnull_name,\n"
+ "CASE WHEN co.convalidated THEN pt.description"
+ " ELSE NULL END AS notnull_comment,\n"
"CASE WHEN NOT co.convalidated THEN co.oid "
"ELSE NULL END AS notnull_invalidoid,\n"
"co.connoinherit AS notnull_noinherit,\n"
@@ -9112,6 +9121,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
else
appendPQExpBufferStr(q,
"CASE WHEN a.attnotnull THEN '' ELSE NULL END AS notnull_name,\n"
+ "NULL AS notnull_comment,\n"
"NULL AS notnull_invalidoid,\n"
"false AS notnull_noinherit,\n"
"a.attislocal AS notnull_islocal,\n");
@@ -9155,15 +9165,16 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
/*
* In versions 18 and up, we need pg_constraint for explicit NOT NULL
- * entries. Also, we need to know if the NOT NULL for each column is
- * backing a primary key.
+ * entries and pg_description to get their comments.
*/
if (fout->remoteVersion >= 180000)
appendPQExpBufferStr(q,
" LEFT JOIN pg_catalog.pg_constraint co ON "
"(a.attrelid = co.conrelid\n"
" AND co.contype = 'n' AND "
- "co.conkey = array[a.attnum])\n");
+ "co.conkey = array[a.attnum])\n"
+ " LEFT JOIN pg_catalog.pg_description pt ON "
+ "(pt.classoid = co.tableoid AND pt.objoid = co.oid)\n");
appendPQExpBufferStr(q,
"WHERE a.attnum > 0::pg_catalog.int2\n"
@@ -9187,6 +9198,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
i_attalign = PQfnumber(res, "attalign");
i_attislocal = PQfnumber(res, "attislocal");
i_notnull_name = PQfnumber(res, "notnull_name");
+ i_notnull_comment = PQfnumber(res, "notnull_comment");
i_notnull_invalidoid = PQfnumber(res, "notnull_invalidoid");
i_notnull_noinherit = PQfnumber(res, "notnull_noinherit");
i_notnull_islocal = PQfnumber(res, "notnull_islocal");
@@ -9255,6 +9267,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
tbinfo->attfdwoptions = (char **) pg_malloc(numatts * sizeof(char *));
tbinfo->attmissingval = (char **) pg_malloc(numatts * sizeof(char *));
tbinfo->notnull_constrs = (char **) pg_malloc(numatts * sizeof(char *));
+ tbinfo->notnull_comment = (char **) pg_malloc(numatts * sizeof(char *));
tbinfo->notnull_invalid = (bool *) pg_malloc(numatts * sizeof(bool));
tbinfo->notnull_noinh = (bool *) pg_malloc(numatts * sizeof(bool));
tbinfo->notnull_islocal = (bool *) pg_malloc(numatts * sizeof(bool));
@@ -9286,11 +9299,14 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
determineNotNullFlags(fout, res, r,
tbinfo, j,
i_notnull_name,
+ i_notnull_comment,
i_notnull_invalidoid,
i_notnull_noinherit,
i_notnull_islocal,
&invalidnotnulloids);
+ tbinfo->notnull_comment[j] = PQgetisnull(res, r, i_notnull_comment) ?
+ NULL : pg_strdup(PQgetvalue(res, r, i_notnull_comment));
tbinfo->attoptions[j] = pg_strdup(PQgetvalue(res, r, i_attoptions));
tbinfo->attcollation[j] = atooid(PQgetvalue(res, r, i_attcollation));
tbinfo->attcompression[j] = *(PQgetvalue(res, r, i_attcompression));
@@ -9461,7 +9477,7 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
int i_consrc;
int i_conislocal;
- pg_log_info("finding invalid not null constraints");
+ pg_log_info("finding invalid not-null constraints");
resetPQExpBuffer(q);
appendPQExpBuffer(q,
@@ -9702,8 +9718,9 @@ getTableAttrs(Archive *fout, TableInfo *tblinfo, int numTables)
* 4) The column has a constraint with a known name; in that case
* notnull_constrs carries that name and dumpTableSchema will print
* "CONSTRAINT the_name NOT NULL". However, if the name is the default
- * (table_column_not_null), there's no need to print that name in the dump,
- * so notnull_constrs is set to the empty string and it behaves as case 2.
+ * (table_column_not_null) and there's no comment on the constraint,
+ * there's no need to print that name in the dump, so notnull_constrs
+ * is set to the empty string and it behaves as case 2.
*
* In a child table that inherits from a parent already containing NOT NULL
* constraints and the columns in the child don't have their own NOT NULL
@@ -9730,6 +9747,7 @@ static void
determineNotNullFlags(Archive *fout, PGresult *res, int r,
TableInfo *tbinfo, int j,
int i_notnull_name,
+ int i_notnull_comment,
int i_notnull_invalidoid,
int i_notnull_noinherit,
int i_notnull_islocal,
@@ -9803,11 +9821,13 @@ determineNotNullFlags(Archive *fout, PGresult *res, int r,
{
/*
* In binary upgrade of inheritance child tables, must have a
- * constraint name that we can UPDATE later.
+ * constraint name that we can UPDATE later; same if there's a
+ * comment on the constraint.
*/
- if (dopt->binary_upgrade &&
- !tbinfo->ispartition &&
- !tbinfo->notnull_islocal)
+ if ((dopt->binary_upgrade &&
+ !tbinfo->ispartition &&
+ !tbinfo->notnull_islocal) ||
+ !PQgetisnull(res, r, i_notnull_comment))
{
tbinfo->notnull_constrs[j] =
pstrdup(PQgetvalue(res, r, i_notnull_name));
@@ -10765,6 +10785,9 @@ fetchAttributeStats(Archive *fout)
restarted = true;
}
+ appendPQExpBufferChar(nspnames, '{');
+ appendPQExpBufferChar(relnames, '{');
+
/*
* Scan the TOC for the next set of relevant stats entries. We assume
* that statistics are dumped in the order they are listed in the TOC.
@@ -10776,23 +10799,25 @@ fetchAttributeStats(Archive *fout)
if ((te->reqs & REQ_STATS) != 0 &&
strcmp(te->desc, "STATISTICS DATA") == 0)
{
- appendPQExpBuffer(nspnames, "%s%s", count ? "," : "",
- fmtId(te->namespace));
- appendPQExpBuffer(relnames, "%s%s", count ? "," : "",
- fmtId(te->tag));
+ appendPGArray(nspnames, te->namespace);
+ appendPGArray(relnames, te->tag);
count++;
}
}
+ appendPQExpBufferChar(nspnames, '}');
+ appendPQExpBufferChar(relnames, '}');
+
/* Execute the query for the next batch of relations. */
if (count > 0)
{
PQExpBuffer query = createPQExpBuffer();
- appendPQExpBuffer(query, "EXECUTE getAttributeStats("
- "'{%s}'::pg_catalog.name[],"
- "'{%s}'::pg_catalog.name[])",
- nspnames->data, relnames->data);
+ appendPQExpBufferStr(query, "EXECUTE getAttributeStats(");
+ appendStringLiteralAH(query, nspnames->data, fout);
+ appendPQExpBufferStr(query, "::pg_catalog.name[],");
+ appendStringLiteralAH(query, relnames->data, fout);
+ appendPQExpBufferStr(query, "::pg_catalog.name[])");
res = ExecuteSqlQuery(fout, query->data, PGRES_TUPLES_OK);
destroyPQExpBuffer(query);
}
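To make the quoting change concrete, this is roughly what the buffers and the query now look like for a hypothetical batch of two relations, one of which has a comma in its name (something the old comma-joined fmtId() format could not pass through an array literal safely):

/*
 * nspnames->data:  {public,public}
 * relnames->data:  {t1,"we,ird"}
 *
 * query->data:     EXECUTE getAttributeStats(
 *                      '{public,public}'::pg_catalog.name[],
 *                      '{t1,"we,ird"}'::pg_catalog.name[])
 *
 * appendPGArray() takes care of per-element quoting and comma
 * separation (the braces come from the caller, as in the hunk above),
 * and appendStringLiteralAH() then escapes each whole array as a
 * single SQL string literal.
 */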
@@ -10850,7 +10875,7 @@ dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te)
expected_te = expected_te->next;
if (te != expected_te)
- pg_fatal("stats dumped out of order (current: %d %s %s) (expected: %d %s %s)",
+ pg_fatal("statistics dumped out of order (current: %d %s %s, expected: %d %s %s)",
te->dumpId, te->desc, te->tag,
expected_te->dumpId, expected_te->desc, expected_te->tag);
@@ -10924,7 +10949,20 @@ dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te)
appendStringLiteralAH(out, rsinfo->dobj.name, fout);
appendPQExpBufferStr(out, ",\n");
appendPQExpBuffer(out, "\t'relpages', '%d'::integer,\n", rsinfo->relpages);
- appendPQExpBuffer(out, "\t'reltuples', '%s'::real,\n", rsinfo->reltuples);
+
+ /*
+ * Before v14, a reltuples value of 0 was ambiguous: it could either mean
+ * the relation is empty, or it could mean that it hadn't yet been
+ * vacuumed or analyzed. (Newer versions use -1 for the latter case.)
+ * This ambiguity allegedly can cause the planner to choose inefficient
+ * plans after restoring to v18 or newer. To deal with this, let's just
+ * set reltuples to -1 in that case.
+ */
+ if (fout->remoteVersion < 140000 && strcmp("0", rsinfo->reltuples) == 0)
+ appendPQExpBufferStr(out, "\t'reltuples', '-1'::real,\n");
+ else
+ appendPQExpBuffer(out, "\t'reltuples', '%s'::real,\n", rsinfo->reltuples);
+
appendPQExpBuffer(out, "\t'relallvisible', '%d'::integer",
rsinfo->relallvisible);
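Concretely, for a never-analyzed table dumped from a hypothetical v13 server:

/*
 * Source pg_class row (v13):  relpages = 0, reltuples = 0   (ambiguous)
 * SQL emitted on dump:        ..., 'reltuples', '-1'::real, ...
 *
 * A genuinely empty, already-analyzed v13 table also maps to -1; per
 * the rationale above, "not yet vacuumed/analyzed" is preferred over a
 * possibly-misleading 0.
 */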
@@ -10978,7 +11016,7 @@ dumpRelationStats_dumper(Archive *fout, const void *userArg, const TocEntry *te)
appendStringLiteralAH(out, rsinfo->dobj.name, fout);
if (PQgetisnull(res, rownum, i_attname))
- pg_fatal("attname cannot be NULL");
+ pg_fatal("unexpected null attname");
attname = PQgetvalue(res, rownum, i_attname);
/*
@@ -17666,6 +17704,56 @@ dumpTableSchema(Archive *fout, const TableInfo *tbinfo)
if (tbinfo->dobj.dump & DUMP_COMPONENT_SECLABEL)
dumpTableSecLabel(fout, tbinfo, reltypename);
+ /*
+ * Dump comments for not-null constraints that aren't to be dumped
+ * separately (those are processed by collectComments/dumpComment).
+ */
+ if (!fout->dopt->no_comments && dopt->dumpSchema &&
+ fout->remoteVersion >= 180000)
+ {
+ PQExpBuffer comment = NULL;
+ PQExpBuffer tag = NULL;
+
+ for (j = 0; j < tbinfo->numatts; j++)
+ {
+ if (tbinfo->notnull_constrs[j] != NULL &&
+ tbinfo->notnull_comment[j] != NULL)
+ {
+ if (comment == NULL)
+ {
+ comment = createPQExpBuffer();
+ tag = createPQExpBuffer();
+ }
+ else
+ {
+ resetPQExpBuffer(comment);
+ resetPQExpBuffer(tag);
+ }
+
+ appendPQExpBuffer(comment, "COMMENT ON CONSTRAINT %s ON %s IS ",
+ fmtId(tbinfo->notnull_constrs[j]), qualrelname);
+ appendStringLiteralAH(comment, tbinfo->notnull_comment[j], fout);
+ appendPQExpBufferStr(comment, ";\n");
+
+ appendPQExpBuffer(tag, "CONSTRAINT %s ON %s",
+ fmtId(tbinfo->notnull_constrs[j]), qrelname);
+
+ ArchiveEntry(fout, nilCatalogId, createDumpId(),
+ ARCHIVE_OPTS(.tag = tag->data,
+ .namespace = tbinfo->dobj.namespace->dobj.name,
+ .owner = tbinfo->rolname,
+ .description = "COMMENT",
+ .section = SECTION_NONE,
+ .createStmt = comment->data,
+ .deps = &(tbinfo->dobj.dumpId),
+ .nDeps = 1));
+ }
+ }
+
+ destroyPQExpBuffer(comment);
+ destroyPQExpBuffer(tag);
+ }
+
/* Dump comments on inlined table constraints */
for (j = 0; j < tbinfo->ncheck; j++)
{
diff --git a/src/bin/pg_dump/pg_dump.h b/src/bin/pg_dump/pg_dump.h
index 7417eab6aef..39eef1d6617 100644
--- a/src/bin/pg_dump/pg_dump.h
+++ b/src/bin/pg_dump/pg_dump.h
@@ -365,6 +365,7 @@ typedef struct _tableInfo
* there isn't one on this column. If
* empty string, unnamed constraint
* (pre-v17) */
+ char **notnull_comment; /* comment thereof */
bool *notnull_invalid; /* true for NOT NULL NOT VALID */
bool *notnull_noinh; /* NOT NULL is NO INHERIT */
bool *notnull_islocal; /* true if NOT NULL has local definition */
diff --git a/src/bin/pg_dump/pg_dumpall.c b/src/bin/pg_dump/pg_dumpall.c
index 7f9c302b719..3cbcad65c5f 100644
--- a/src/bin/pg_dump/pg_dumpall.c
+++ b/src/bin/pg_dump/pg_dumpall.c
@@ -525,7 +525,7 @@ main(int argc, char *argv[])
OPF = fopen(global_path, PG_BINARY_W);
if (!OPF)
- pg_fatal("could not open \"%s\": %m", global_path);
+ pg_fatal("could not open file \"%s\": %m", global_path);
}
else if (filename)
{
@@ -699,7 +699,7 @@ main(int argc, char *argv[])
static void
help(void)
{
- printf(_("%s extracts a PostgreSQL database cluster based on specified dump format.\n\n"), progname);
+ printf(_("%s exports a PostgreSQL database cluster as an SQL script or to other formats.\n\n"), progname);
printf(_("Usage:\n"));
printf(_(" %s [OPTION]...\n"), progname);
@@ -1659,14 +1659,14 @@ dumpDatabases(PGconn *conn, ArchiveFormat archDumpFormat)
/* Create a subdirectory with 'databases' name under main directory. */
if (mkdir(db_subdir, pg_dir_create_mode) != 0)
- pg_fatal("could not create subdirectory \"%s\": %m", db_subdir);
+ pg_fatal("could not create directory \"%s\": %m", db_subdir);
snprintf(map_file_path, MAXPGPATH, "%s/map.dat", filename);
/* Create a map file (to store dboid and dbname) */
map_file = fopen(map_file_path, PG_BINARY_W);
if (!map_file)
- pg_fatal("could not open map file: %s", strerror(errno));
+ pg_fatal("could not open file \"%s\": %m", map_file_path);
}
for (i = 0; i < PQntuples(res); i++)
@@ -1976,7 +1976,7 @@ parseDumpFormat(const char *format)
else if (pg_strcasecmp(format, "tar") == 0)
archDumpFormat = archTar;
else
- pg_fatal("unrecognized archive format \"%s\"; please specify \"c\", \"d\", \"p\", or \"t\"",
+ pg_fatal("unrecognized output format \"%s\"; please specify \"c\", \"d\", \"p\", or \"t\"",
format);
return archDumpFormat;
diff --git a/src/bin/pg_dump/pg_restore.c b/src/bin/pg_dump/pg_restore.c
index f2182e91825..6ef789cb06d 100644
--- a/src/bin/pg_dump/pg_restore.c
+++ b/src/bin/pg_dump/pg_restore.c
@@ -523,7 +523,7 @@ main(int argc, char **argv)
*/
if (!globals_only && opts->createDB != 1)
{
- pg_log_error("-C/--create option should be specified when restoring an archive created by pg_dumpall");
+ pg_log_error("option -C/--create must be specified when restoring an archive created by pg_dumpall");
pg_log_error_hint("Try \"%s --help\" for more information.", progname);
pg_log_error_hint("Individual databases can be restored using their specific archives.");
exit_nicely(1);
@@ -557,7 +557,7 @@ main(int argc, char **argv)
if (conn)
PQfinish(conn);
- pg_log_info("database restoring skipped as -g/--globals-only option was specified");
+ pg_log_info("database restoring skipped because option -g/--globals-only was specified");
}
else
{
@@ -712,9 +712,9 @@ usage(const char *progname)
printf(_(" --use-set-session-authorization\n"
" use SET SESSION AUTHORIZATION commands instead of\n"
" ALTER OWNER commands to set ownership\n"));
- printf(_(" --with-data dump the data\n"));
- printf(_(" --with-schema dump the schema\n"));
- printf(_(" --with-statistics dump the statistics\n"));
+ printf(_(" --with-data restore the data\n"));
+ printf(_(" --with-schema restore the schema\n"));
+ printf(_(" --with-statistics restore the statistics\n"));
printf(_("\nConnection options:\n"));
printf(_(" -h, --host=HOSTNAME database server host or socket directory\n"));
@@ -725,8 +725,8 @@ usage(const char *progname)
printf(_(" --role=ROLENAME do SET ROLE before restore\n"));
printf(_("\n"
- "The options -I, -n, -N, -P, -t, -T, --section, and --exclude-database can be combined\n"
- "and specified multiple times to select multiple objects.\n"));
+ "The options -I, -n, -N, -P, -t, -T, --section, and --exclude-database can be\n"
+ "combined and specified multiple times to select multiple objects.\n"));
printf(_("\nIf no input file name is supplied, then standard input is used.\n\n"));
printf(_("Report bugs to <%s>.\n"), PACKAGE_BUGREPORT);
printf(_("%s home page: <%s>\n"), PACKAGE_NAME, PACKAGE_URL);
@@ -946,7 +946,7 @@ get_dbnames_list_to_restore(PGconn *conn,
query = createPQExpBuffer();
if (!conn)
- pg_log_info("considering PATTERN as NAME for --exclude-database option as no db connection while doing pg_restore.");
+ pg_log_info("considering PATTERN as NAME for --exclude-database option as no database connection while doing pg_restore");
/*
* Process one by one all dbnames and if specified to skip restoring, then
@@ -992,7 +992,7 @@ get_dbnames_list_to_restore(PGconn *conn,
if ((PQresultStatus(res) == PGRES_TUPLES_OK) && PQntuples(res))
{
skip_db_restore = true;
- pg_log_info("database \"%s\" matches exclude pattern: \"%s\"", dbidname->str, pat_cell->val);
+ pg_log_info("database name \"%s\" matches exclude pattern \"%s\"", dbidname->str, pat_cell->val);
}
PQclear(res);
@@ -1048,7 +1048,7 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi
*/
if (!file_exists_in_directory(dumpdirpath, "map.dat"))
{
- pg_log_info("database restoring is skipped as \"map.dat\" is not present in \"%s\"", dumpdirpath);
+ pg_log_info("database restoring is skipped because file \"%s\" does not exist in directory \"%s\"", "map.dat", dumpdirpath);
return 0;
}
@@ -1058,7 +1058,7 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi
pfile = fopen(map_file_path, PG_BINARY_R);
if (pfile == NULL)
- pg_fatal("could not open \"%s\": %m", map_file_path);
+ pg_fatal("could not open file \"%s\": %m", map_file_path);
initStringInfo(&linebuf);
@@ -1086,10 +1086,10 @@ get_dbname_oid_list_from_mfile(const char *dumpdirpath, SimplePtrList *dbname_oi
/* Report error and exit if the file has any corrupted data. */
if (!OidIsValid(db_oid) || namelen <= 1)
- pg_fatal("invalid entry in \"%s\" at line: %d", map_file_path,
+ pg_fatal("invalid entry in file \"%s\" on line %d", map_file_path,
count + 1);
- pg_log_info("found database \"%s\" (OID: %u) in \"%s\"",
+ pg_log_info("found database \"%s\" (OID: %u) in file \"%s\"",
dbname, db_oid, map_file_path);
dbidname = pg_malloc(offsetof(DbOidName, str) + namelen + 1);
@@ -1142,11 +1142,14 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath,
if (dbname_oid_list.head == NULL)
return process_global_sql_commands(conn, dumpdirpath, opts->filename);
- pg_log_info("found %d database names in \"map.dat\"", num_total_db);
+ pg_log_info(ngettext("found %d database name in \"%s\"",
+ "found %d database names in \"%s\"",
+ num_total_db),
+ num_total_db, "map.dat");
if (!conn)
{
- pg_log_info("trying to connect database \"postgres\"");
+ pg_log_info("trying to connect to database \"%s\"", "postgres");
conn = ConnectDatabase("postgres", NULL, opts->cparams.pghost,
opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
@@ -1155,7 +1158,7 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath,
/* Try with template1. */
if (!conn)
{
- pg_log_info("trying to connect database \"template1\"");
+ pg_log_info("trying to connect to database \"%s\"", "template1");
conn = ConnectDatabase("template1", NULL, opts->cparams.pghost,
opts->cparams.pgport, opts->cparams.username, TRI_DEFAULT,
@@ -1179,7 +1182,9 @@ restore_all_databases(PGconn *conn, const char *dumpdirpath,
/* Exit if no db needs to be restored. */
if (dbname_oid_list.head == NULL || num_db_restore == 0)
{
- pg_log_info("no database needs to restore out of %d databases", num_total_db);
+ pg_log_info(ngettext("no database needs restoring out of %d database",
+ "no database needs restoring out of %d databases", num_total_db),
+ num_total_db);
return n_errors_total;
}
@@ -1314,7 +1319,7 @@ process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *o
pfile = fopen(global_file_path, PG_BINARY_R);
if (pfile == NULL)
- pg_fatal("could not open \"%s\": %m", global_file_path);
+ pg_fatal("could not open file \"%s\": %m", global_file_path);
/*
* If outfile is given, then just copy all global.dat file data into
@@ -1354,15 +1359,17 @@ process_global_sql_commands(PGconn *conn, const char *dumpdirpath, const char *o
break;
default:
n_errors++;
- pg_log_error("could not execute query: \"%s\" \nCommand was: \"%s\"", PQerrorMessage(conn), sqlstatement.data);
+ pg_log_error("could not execute query: %s", PQerrorMessage(conn));
+ pg_log_error_detail("Command was: %s", sqlstatement.data);
}
PQclear(result);
}
/* Print a summary of ignored errors during global.dat. */
if (n_errors)
- pg_log_warning("ignored %d errors in \"%s\"", n_errors, global_file_path);
-
+ pg_log_warning(ngettext("ignored %d error in file \"%s\"",
+ "ignored %d errors in file \"%s\"", n_errors),
+ n_errors, global_file_path);
fclose(pfile);
return n_errors;
diff --git a/src/bin/pg_dump/t/001_basic.pl b/src/bin/pg_dump/t/001_basic.pl
index 84ca25e17d6..c3c5fae11ea 100644
--- a/src/bin/pg_dump/t/001_basic.pl
+++ b/src/bin/pg_dump/t/001_basic.pl
@@ -240,17 +240,20 @@ command_fails_like(
command_fails_like(
[ 'pg_restore', '--exclude-database=foo', '--globals-only', '-d', 'xxx' ],
qr/\Qpg_restore: error: option --exclude-database cannot be used together with -g\/--globals-only\E/,
- 'pg_restore: option --exclude-database cannot be used together with -g/--globals-only');
+ 'pg_restore: option --exclude-database cannot be used together with -g/--globals-only'
+);
command_fails_like(
[ 'pg_restore', '--exclude-database=foo', '-d', 'xxx', 'dumpdir' ],
qr/\Qpg_restore: error: option --exclude-database can be used only when restoring an archive created by pg_dumpall\E/,
- 'When option --exclude-database is used in pg_restore with dump of pg_dump');
+ 'When option --exclude-database is used in pg_restore with dump of pg_dump'
+);
command_fails_like(
[ 'pg_restore', '--globals-only', '-d', 'xxx', 'dumpdir' ],
qr/\Qpg_restore: error: option -g\/--globals-only can be used only when restoring an archive created by pg_dumpall\E/,
- 'When option --globals-only is not used in pg_restore with dump of pg_dump');
+ 'When option --globals-only is not used in pg_restore with dump of pg_dump'
+);
# also fails for -r and -t, but it seems pointless to add more tests for those.
command_fails_like(
@@ -261,6 +264,6 @@ command_fails_like(
command_fails_like(
[ 'pg_dumpall', '--format', 'x' ],
- qr/\Qpg_dumpall: error: unrecognized archive format "x";\E/,
- 'pg_dumpall: unrecognized archive format');
+ qr/\Qpg_dumpall: error: unrecognized output format "x";\E/,
+ 'pg_dumpall: unrecognized output format');
done_testing();
diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl
index 55d892d9c16..2485d8f360e 100644
--- a/src/bin/pg_dump/t/002_pg_dump.pl
+++ b/src/bin/pg_dump/t/002_pg_dump.pl
@@ -68,6 +68,7 @@ my %pgdump_runs = (
'--no-data',
'--sequence-data',
'--binary-upgrade',
+ '--with-statistics',
'--dbname' => 'postgres', # alternative way to specify database
],
restore_cmd => [
@@ -75,6 +76,7 @@ my %pgdump_runs = (
'--format' => 'custom',
'--verbose',
'--file' => "$tempdir/binary_upgrade.sql",
+ '--with-statistics',
"$tempdir/binary_upgrade.dump",
],
},
@@ -88,11 +90,13 @@ my %pgdump_runs = (
'--format' => 'custom',
'--compress' => '1',
'--file' => "$tempdir/compression_gzip_custom.dump",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/compression_gzip_custom.sql",
+ '--with-statistics',
"$tempdir/compression_gzip_custom.dump",
],
command_like => {
@@ -115,6 +119,7 @@ my %pgdump_runs = (
'--format' => 'directory',
'--compress' => 'gzip:1',
'--file' => "$tempdir/compression_gzip_dir",
+ '--with-statistics',
'postgres',
],
# Give coverage for manually compressed blobs.toc files during
@@ -132,6 +137,7 @@ my %pgdump_runs = (
'pg_restore',
'--jobs' => '2',
'--file' => "$tempdir/compression_gzip_dir.sql",
+ '--with-statistics',
"$tempdir/compression_gzip_dir",
],
},
@@ -144,6 +150,7 @@ my %pgdump_runs = (
'--format' => 'plain',
'--compress' => '1',
'--file' => "$tempdir/compression_gzip_plain.sql.gz",
+ '--with-statistics',
'postgres',
],
# Decompress the generated file to run through the tests.
@@ -162,11 +169,13 @@ my %pgdump_runs = (
'--format' => 'custom',
'--compress' => 'lz4',
'--file' => "$tempdir/compression_lz4_custom.dump",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/compression_lz4_custom.sql",
+ '--with-statistics',
"$tempdir/compression_lz4_custom.dump",
],
command_like => {
@@ -189,6 +198,7 @@ my %pgdump_runs = (
'--format' => 'directory',
'--compress' => 'lz4:1',
'--file' => "$tempdir/compression_lz4_dir",
+ '--with-statistics',
'postgres',
],
# Verify that data files were compressed
@@ -200,6 +210,7 @@ my %pgdump_runs = (
'pg_restore',
'--jobs' => '2',
'--file' => "$tempdir/compression_lz4_dir.sql",
+ '--with-statistics',
"$tempdir/compression_lz4_dir",
],
},
@@ -212,6 +223,7 @@ my %pgdump_runs = (
'--format' => 'plain',
'--compress' => 'lz4',
'--file' => "$tempdir/compression_lz4_plain.sql.lz4",
+ '--with-statistics',
'postgres',
],
# Decompress the generated file to run through the tests.
@@ -233,11 +245,13 @@ my %pgdump_runs = (
'--format' => 'custom',
'--compress' => 'zstd',
'--file' => "$tempdir/compression_zstd_custom.dump",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/compression_zstd_custom.sql",
+ '--with-statistics',
"$tempdir/compression_zstd_custom.dump",
],
command_like => {
@@ -259,6 +273,7 @@ my %pgdump_runs = (
'--format' => 'directory',
'--compress' => 'zstd:1',
'--file' => "$tempdir/compression_zstd_dir",
+ '--with-statistics',
'postgres',
],
# Give coverage for manually compressed blobs.toc files during
@@ -279,6 +294,7 @@ my %pgdump_runs = (
'pg_restore',
'--jobs' => '2',
'--file' => "$tempdir/compression_zstd_dir.sql",
+ '--with-statistics',
"$tempdir/compression_zstd_dir",
],
},
@@ -292,6 +308,7 @@ my %pgdump_runs = (
'--format' => 'plain',
'--compress' => 'zstd:long',
'--file' => "$tempdir/compression_zstd_plain.sql.zst",
+ '--with-statistics',
'postgres',
],
# Decompress the generated file to run through the tests.
@@ -310,6 +327,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/clean.sql",
'--clean',
+ '--with-statistics',
'--dbname' => 'postgres', # alternative way to specify database
],
},
@@ -320,6 +338,7 @@ my %pgdump_runs = (
'--clean',
'--if-exists',
'--encoding' => 'UTF8', # no-op, just for testing
+ '--with-statistics',
'postgres',
],
},
@@ -338,6 +357,7 @@ my %pgdump_runs = (
'--create',
'--no-reconnect', # no-op, just for testing
'--verbose',
+ '--with-statistics',
'postgres',
],
},
@@ -348,7 +368,7 @@ my %pgdump_runs = (
'--data-only',
'--superuser' => 'test_superuser',
'--disable-triggers',
- '--verbose', # no-op, just make sure it works
+ '--verbose', # no-op, just make sure it works
'postgres',
],
},
@@ -356,6 +376,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
'--file' => "$tempdir/defaults.sql",
+ '--with-statistics',
'postgres',
],
},
@@ -364,6 +385,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
'--file' => "$tempdir/defaults_no_public.sql",
+ '--with-statistics',
'regress_pg_dump_test',
],
},
@@ -373,6 +395,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--clean',
'--file' => "$tempdir/defaults_no_public_clean.sql",
+ '--with-statistics',
'regress_pg_dump_test',
],
},
@@ -381,6 +404,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
'--file' => "$tempdir/defaults_public_owner.sql",
+ '--with-statistics',
'regress_public_owner',
],
},
@@ -395,12 +419,14 @@ my %pgdump_runs = (
'pg_dump',
'--format' => 'custom',
'--file' => "$tempdir/defaults_custom_format.dump",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--format' => 'custom',
'--file' => "$tempdir/defaults_custom_format.sql",
+ '--with-statistics',
"$tempdir/defaults_custom_format.dump",
],
command_like => {
@@ -425,12 +451,14 @@ my %pgdump_runs = (
'pg_dump',
'--format' => 'directory',
'--file' => "$tempdir/defaults_dir_format",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--format' => 'directory',
'--file' => "$tempdir/defaults_dir_format.sql",
+ '--with-statistics',
"$tempdir/defaults_dir_format",
],
command_like => {
@@ -456,11 +484,13 @@ my %pgdump_runs = (
'--format' => 'directory',
'--jobs' => 2,
'--file' => "$tempdir/defaults_parallel",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/defaults_parallel.sql",
+ '--with-statistics',
"$tempdir/defaults_parallel",
],
},
@@ -472,12 +502,14 @@ my %pgdump_runs = (
'pg_dump',
'--format' => 'tar',
'--file' => "$tempdir/defaults_tar_format.tar",
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--format' => 'tar',
'--file' => "$tempdir/defaults_tar_format.sql",
+ '--with-statistics',
"$tempdir/defaults_tar_format.tar",
],
},
@@ -486,6 +518,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/exclude_dump_test_schema.sql",
'--exclude-schema' => 'dump_test',
+ '--with-statistics',
'postgres',
],
},
@@ -494,6 +527,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/exclude_test_table.sql",
'--exclude-table' => 'dump_test.test_table',
+ '--with-statistics',
'postgres',
],
},
@@ -502,6 +536,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/exclude_measurement.sql",
'--exclude-table-and-children' => 'dump_test.measurement',
+ '--with-statistics',
'postgres',
],
},
@@ -511,6 +546,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/exclude_measurement_data.sql",
'--exclude-table-data-and-children' => 'dump_test.measurement',
'--no-unlogged-table-data',
+ '--with-statistics',
'postgres',
],
},
@@ -520,6 +556,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/exclude_test_table_data.sql",
'--exclude-table-data' => 'dump_test.test_table',
'--no-unlogged-table-data',
+ '--with-statistics',
'postgres',
],
},
@@ -538,6 +575,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/pg_dumpall_globals.sql",
'--globals-only',
'--no-sync',
+ '--with-statistics',
],
},
pg_dumpall_globals_clean => {
@@ -547,12 +585,14 @@ my %pgdump_runs = (
'--globals-only',
'--clean',
'--no-sync',
+ '--with-statistics',
],
},
pg_dumpall_dbprivs => {
dump_cmd => [
'pg_dumpall', '--no-sync',
'--file' => "$tempdir/pg_dumpall_dbprivs.sql",
+ '--with-statistics',
],
},
pg_dumpall_exclude => {
@@ -562,6 +602,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/pg_dumpall_exclude.sql",
'--exclude-database' => '*dump_test*',
'--no-sync',
+ '--with-statistics',
],
},
no_toast_compression => {
@@ -569,6 +610,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_toast_compression.sql",
'--no-toast-compression',
+ '--with-statistics',
'postgres',
],
},
@@ -577,6 +619,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_large_objects.sql",
'--no-large-objects',
+ '--with-statistics',
'postgres',
],
},
@@ -585,6 +628,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_policies.sql",
'--no-policies',
+ '--with-statistics',
'postgres',
],
},
@@ -593,6 +637,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_privs.sql",
'--no-privileges',
+ '--with-statistics',
'postgres',
],
},
@@ -601,6 +646,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_owner.sql",
'--no-owner',
+ '--with-statistics',
'postgres',
],
},
@@ -609,6 +655,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/no_table_access_method.sql",
'--no-table-access-method',
+ '--with-statistics',
'postgres',
],
},
@@ -617,6 +664,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/only_dump_test_schema.sql",
'--schema' => 'dump_test',
+ '--with-statistics',
'postgres',
],
},
@@ -627,6 +675,7 @@ my %pgdump_runs = (
'--table' => 'dump_test.test_table',
'--lock-wait-timeout' =>
(1000 * $PostgreSQL::Test::Utils::timeout_default),
+ '--with-statistics',
'postgres',
],
},
@@ -637,6 +686,7 @@ my %pgdump_runs = (
'--table-and-children' => 'dump_test.measurement',
'--lock-wait-timeout' =>
(1000 * $PostgreSQL::Test::Utils::timeout_default),
+ '--with-statistics',
'postgres',
],
},
@@ -646,6 +696,7 @@ my %pgdump_runs = (
'--file' => "$tempdir/role.sql",
'--role' => 'regress_dump_test_role',
'--schema' => 'dump_test_second_schema',
+ '--with-statistics',
'postgres',
],
},
@@ -658,11 +709,13 @@ my %pgdump_runs = (
'--file' => "$tempdir/role_parallel",
'--role' => 'regress_dump_test_role',
'--schema' => 'dump_test_second_schema',
+ '--with-statistics',
'postgres',
],
restore_cmd => [
'pg_restore',
'--file' => "$tempdir/role_parallel.sql",
+ '--with-statistics',
"$tempdir/role_parallel",
],
},
@@ -691,6 +744,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/section_pre_data.sql",
'--section' => 'pre-data',
+ '--with-statistics',
'postgres',
],
},
@@ -699,6 +753,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/section_data.sql",
'--section' => 'data',
+ '--with-statistics',
'postgres',
],
},
@@ -707,6 +762,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
'--file' => "$tempdir/section_post_data.sql",
'--section' => 'post-data',
+ '--with-statistics',
'postgres',
],
},
@@ -717,6 +773,7 @@ my %pgdump_runs = (
'--schema' => 'dump_test',
'--large-objects',
'--no-large-objects',
+ '--with-statistics',
'postgres',
],
},
@@ -732,6 +789,7 @@ my %pgdump_runs = (
'pg_dump', '--no-sync',
"--file=$tempdir/no_data_no_schema.sql", '--no-data',
'--no-schema', 'postgres',
+ '--with-statistics',
],
},
statistics_only => {
@@ -752,7 +810,7 @@ my %pgdump_runs = (
dump_cmd => [
'pg_dump', '--no-sync',
"--file=$tempdir/no_schema.sql", '--no-schema',
- 'postgres',
+ '--with-statistics', 'postgres',
],
},);
@@ -1132,7 +1190,9 @@ my %tests = (
) INHERITS (dump_test.test_table_nn, dump_test.test_table_nn_2);
ALTER TABLE dump_test.test_table_nn ADD CONSTRAINT nn NOT NULL col1 NOT VALID;
ALTER TABLE dump_test.test_table_nn_chld1 VALIDATE CONSTRAINT nn;
- ALTER TABLE dump_test.test_table_nn_chld2 VALIDATE CONSTRAINT nn;',
+ ALTER TABLE dump_test.test_table_nn_chld2 VALIDATE CONSTRAINT nn;
+ COMMENT ON CONSTRAINT nn ON dump_test.test_table_nn IS \'nn comment is valid\';
+ COMMENT ON CONSTRAINT nn ON dump_test.test_table_nn_chld2 IS \'nn_chld2 comment is valid\';',
regexp => qr/^
\QALTER TABLE dump_test.test_table_nn\E \n^\s+
\QADD CONSTRAINT nn NOT NULL col1 NOT VALID;\E
@@ -1146,6 +1206,34 @@ my %tests = (
},
},
+ # This constraint is invalid, therefore it goes in SECTION_POST_DATA
+ 'COMMENT ON CONSTRAINT ON test_table_nn' => {
+ regexp => qr/^
+ \QCOMMENT ON CONSTRAINT nn ON dump_test.test_table_nn IS\E
+ /xm,
+ like => {
+ %full_runs, %dump_test_schema_runs, section_post_data => 1,
+ },
+ unlike => {
+ exclude_dump_test_schema => 1,
+ only_dump_measurement => 1,
+ },
+ },
+
+ # This constraint is valid, therefore it goes in SECTION_PRE_DATA
+ 'COMMENT ON CONSTRAINT ON test_table_chld2' => {
+ regexp => qr/^
+ \QCOMMENT ON CONSTRAINT nn ON dump_test.test_table_nn_chld2 IS\E
+ /xm,
+ like => {
+ %full_runs, %dump_test_schema_runs, section_pre_data => 1,
+ },
+ unlike => {
+ exclude_dump_test_schema => 1,
+ only_dump_measurement => 1,
+ },
+ },
+
'CONSTRAINT NOT NULL / NOT VALID (child1)' => {
regexp => qr/^
\QCREATE TABLE dump_test.test_table_nn_chld1 (\E\n
@@ -4834,13 +4922,13 @@ my %tests = (
CREATE TABLE dump_test.has_stats
AS SELECT g.g AS x, g.g / 2 AS y FROM generate_series(1,100) AS g(g);
CREATE MATERIALIZED VIEW dump_test.has_stats_mv AS SELECT * FROM dump_test.has_stats;
- CREATE INDEX dup_test_post_data_ix ON dump_test.has_stats(x, (x - 1));
+ CREATE INDEX """dump_test""\'s post-data index" ON dump_test.has_stats(x, (x - 1));
ANALYZE dump_test.has_stats, dump_test.has_stats_mv;',
regexp => qr/^
\QSELECT * FROM pg_catalog.pg_restore_relation_stats(\E\s+
'version',\s'\d+'::integer,\s+
'schemaname',\s'dump_test',\s+
- 'relname',\s'dup_test_post_data_ix',\s+
+ 'relname',\s'"dump_test"''s\ post-data\ index',\s+
'relpages',\s'\d+'::integer,\s+
'reltuples',\s'\d+'::real,\s+
'relallvisible',\s'\d+'::integer,\s+
@@ -4849,7 +4937,7 @@ my %tests = (
\QSELECT * FROM pg_catalog.pg_restore_attribute_stats(\E\s+
'version',\s'\d+'::integer,\s+
'schemaname',\s'dump_test',\s+
- 'relname',\s'dup_test_post_data_ix',\s+
+ 'relname',\s'"dump_test"''s\ post-data\ index',\s+
'attnum',\s'2'::smallint,\s+
'inherited',\s'f'::boolean,\s+
'null_frac',\s'0'::real,\s+
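A note on the quoting exercised just above: the new index name embeds both a double quote and a single quote, so when the name appears inside the single-quoted SQL literal of the restored stats call, its embedded single quote must be doubled. A minimal Perl sketch of that doubling, with the name taken from the hunk and everything else illustrative:

    # The index name from the test above; q{} avoids escaping the quotes.
    my $relname = q{"dump_test"'s post-data index};
    # Inside a single-quoted SQL literal, embedded single quotes double up,
    # which is exactly what the regexp above matches ('"dump_test"''s ...').
    (my $sql_literal = $relname) =~ s/'/''/g;
    print "'$sql_literal'\n";    # prints '"dump_test"''s post-data index'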
diff --git a/src/bin/pg_dump/t/006_pg_dumpall.pl b/src/bin/pg_dump/t/006_pg_dumpall.pl
index 5acd49f1559..c274b777586 100644
--- a/src/bin/pg_dump/t/006_pg_dumpall.pl
+++ b/src/bin/pg_dump/t/006_pg_dumpall.pl
@@ -294,17 +294,17 @@ my %pgdumpall_runs = (
'--format' => 'directory',
'--globals-only',
'--file' => "$tempdir/dump_globals_only",
- ],
- restore_cmd => [
- 'pg_restore', '-C', '--globals-only',
- '--format' => 'directory',
- '--file' => "$tempdir/dump_globals_only.sql",
- "$tempdir/dump_globals_only",
- ],
- like => qr/
+ ],
+ restore_cmd => [
+ 'pg_restore', '-C', '--globals-only',
+ '--format' => 'directory',
+ '--file' => "$tempdir/dump_globals_only.sql",
+ "$tempdir/dump_globals_only",
+ ],
+ like => qr/
^\s*\QCREATE ROLE dumpall;\E\s*\n
/xm
- }, );
+ },);
# First execute the setup_sql
foreach my $run (sort keys %pgdumpall_runs)
@@ -339,7 +339,8 @@ foreach my $run (sort keys %pgdumpall_runs)
# pg_restore --file output file.
my $output_file = slurp_file("$tempdir/${run}.sql");
- if (!($pgdumpall_runs{$run}->{like}) && !($pgdumpall_runs{$run}->{unlike}))
+ if ( !($pgdumpall_runs{$run}->{like})
+ && !($pgdumpall_runs{$run}->{unlike}))
{
die "missing \"like\" or \"unlike\" in test \"$run\"";
}
@@ -361,30 +362,38 @@ foreach my $run (sort keys %pgdumpall_runs)
# Some negative test case with dump of pg_dumpall and restore using pg_restore
# test case 1: when -C is not used in pg_restore with dump of pg_dumpall
$node->command_fails_like(
- [ 'pg_restore',
- "$tempdir/format_custom",
- '--format' => 'custom',
- '--file' => "$tempdir/error_test.sql", ],
- qr/\Qpg_restore: error: -C\/--create option should be specified when restoring an archive created by pg_dumpall\E/,
- 'When -C is not used in pg_restore with dump of pg_dumpall');
+ [
+ 'pg_restore',
+ "$tempdir/format_custom",
+ '--format' => 'custom',
+ '--file' => "$tempdir/error_test.sql",
+ ],
+ qr/\Qpg_restore: error: option -C\/--create must be specified when restoring an archive created by pg_dumpall\E/,
+ 'When -C is not used in pg_restore with dump of pg_dumpall');
# test case 2: When --list option is used with dump of pg_dumpall
$node->command_fails_like(
- [ 'pg_restore',
+ [
+ 'pg_restore',
"$tempdir/format_custom", '-C',
- '--format' => 'custom', '--list',
- '--file' => "$tempdir/error_test.sql", ],
+ '--format' => 'custom',
+ '--list',
+ '--file' => "$tempdir/error_test.sql",
+ ],
qr/\Qpg_restore: error: option -l\/--list cannot be used when restoring an archive created by pg_dumpall\E/,
'When --list is used in pg_restore with dump of pg_dumpall');
# test case 3: When non-exist database is given with -d option
$node->command_fails_like(
- [ 'pg_restore',
+ [
+ 'pg_restore',
"$tempdir/format_custom", '-C',
'--format' => 'custom',
- '-d' => 'dbpq', ],
+ '-d' => 'dbpq',
+ ],
qr/\Qpg_restore: error: could not connect to database "dbpq"\E/,
- 'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall');
+ 'When non-existent database is given with -d option in pg_restore with dump of pg_dumpall'
+);
$node->stop('fast');
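The three cases above are all negative tests. A happy-path counterpart would be a sketch like the following, assuming the same $node and $tempdir and the directory-format archive produced by the dump_globals_only run earlier in the file; command_ok is the standard helper from PostgreSQL::Test::Utils:

    # Hypothetical positive test: with -C/--create supplied, pg_restore
    # accepts an archive created by pg_dumpall.
    command_ok(
        [
            'pg_restore', '-C', '--globals-only',
            '--format' => 'directory',
            '--file' => "$tempdir/globals_roundtrip.sql",
            "$tempdir/dump_globals_only",
        ],
        'pg_restore -C succeeds on a pg_dumpall archive');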
diff --git a/src/bin/pg_rewind/t/RewindTest.pm b/src/bin/pg_rewind/t/RewindTest.pm
index 3efab831797..b0234ebfaf2 100644
--- a/src/bin/pg_rewind/t/RewindTest.pm
+++ b/src/bin/pg_rewind/t/RewindTest.pm
@@ -285,7 +285,7 @@ sub run_pg_rewind
# Check that pg_rewind with dbname and --write-recovery-conf
# wrote the dbname in the generated primary_conninfo value.
like(slurp_file("$primary_pgdata/postgresql.auto.conf"),
- qr/dbname=postgres/m, 'recovery conf file sets dbname');
+ qr/dbname=postgres/m, 'recovery conf file sets dbname');
# Check that standby.signal is here as recovery configuration
# was requested.
diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c
index 940fc77fc2e..81865cd3e48 100644
--- a/src/bin/pg_upgrade/check.c
+++ b/src/bin/pg_upgrade/check.c
@@ -885,7 +885,7 @@ check_cluster_versions(void)
*/
if (GET_MAJOR_VERSION(old_cluster.major_version) >= 1800 &&
user_opts.char_signedness != -1)
- pg_fatal("%s option cannot be used to upgrade from PostgreSQL %s and later.",
+ pg_fatal("The option %s cannot be used for upgrades from PostgreSQL %s and later.",
"--set-char-signedness", "18");
check_ok();
@@ -1934,7 +1934,7 @@ check_for_unicode_update(ClusterInfo *cluster)
{
fclose(report.file);
report_status(PG_WARNING, "warning");
- pg_log(PG_WARNING, "Your installation contains relations that may be affected by a new version of Unicode.\n"
+ pg_log(PG_WARNING, "Your installation contains relations that might be affected by a new version of Unicode.\n"
"A list of potentially-affected relations is in the file:\n"
" %s", report.path);
}
diff --git a/src/bin/pg_upgrade/dump.c b/src/bin/pg_upgrade/dump.c
index 23cb08e8347..183f08ce1e8 100644
--- a/src/bin/pg_upgrade/dump.c
+++ b/src/bin/pg_upgrade/dump.c
@@ -58,7 +58,7 @@ generate_old_dump(void)
(user_opts.transfer_mode == TRANSFER_MODE_SWAP) ?
"" : "--sequence-data",
log_opts.verbose ? "--verbose" : "",
- user_opts.do_statistics ? "" : "--no-statistics",
+ user_opts.do_statistics ? "--with-statistics" : "--no-statistics",
log_opts.dumpdir,
sql_file_name, escaped_connstr.data);
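The behavioral point of this hunk: previously a true do_statistics left the flag list empty and relied on pg_dump's default, whereas now pg_upgrade always passes one explicit statistics flag. An illustrative Perl rendering of the selection (the variable names are stand-ins for user_opts.do_statistics and the generated command line, not real code from the tree):

    # Stand-in sketch: exactly one statistics flag is now always emitted.
    my $stats_flag = $do_statistics ? '--with-statistics' : '--no-statistics';
    push @pg_dump_cmd, $stats_flag;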
diff --git a/src/bin/pg_upgrade/relfilenumber.c b/src/bin/pg_upgrade/relfilenumber.c
index 2959c07f0b8..8d8e816a01f 100644
--- a/src/bin/pg_upgrade/relfilenumber.c
+++ b/src/bin/pg_upgrade/relfilenumber.c
@@ -290,19 +290,19 @@ prepare_for_swap(const char *old_tablespace, Oid db_oid,
/* Create directory for stuff that is moved aside. */
if (pg_mkdir_p(moved_tblspc, pg_dir_create_mode) != 0 && errno != EEXIST)
- pg_fatal("could not create directory \"%s\"", moved_tblspc);
+ pg_fatal("could not create directory \"%s\": %m", moved_tblspc);
/* Create directory for old catalog files. */
if (pg_mkdir_p(old_catalog_dir, pg_dir_create_mode) != 0)
- pg_fatal("could not create directory \"%s\"", old_catalog_dir);
+ pg_fatal("could not create directory \"%s\": %m", old_catalog_dir);
/* Move the new cluster's database directory aside. */
if (rename(new_db_dir, moved_db_dir) != 0)
- pg_fatal("could not rename \"%s\" to \"%s\"", new_db_dir, moved_db_dir);
+ pg_fatal("could not rename directory \"%s\" to \"%s\": %m", new_db_dir, moved_db_dir);
/* Move the old cluster's database directory into place. */
if (rename(old_db_dir, new_db_dir) != 0)
- pg_fatal("could not rename \"%s\" to \"%s\"", old_db_dir, new_db_dir);
+ pg_fatal("could not rename directory \"%s\" to \"%s\": %m", old_db_dir, new_db_dir);
return true;
}
@@ -390,7 +390,7 @@ swap_catalog_files(FileNameMap *maps, int size, const char *old_catalog_dir,
snprintf(dest, sizeof(dest), "%s/%s", old_catalog_dir, de->d_name);
if (rename(path, dest) != 0)
- pg_fatal("could not rename \"%s\" to \"%s\": %m", path, dest);
+ pg_fatal("could not rename file \"%s\" to \"%s\": %m", path, dest);
}
if (errno)
pg_fatal("could not read directory \"%s\": %m", new_db_dir);
@@ -417,7 +417,7 @@ swap_catalog_files(FileNameMap *maps, int size, const char *old_catalog_dir,
snprintf(dest, sizeof(dest), "%s/%s", new_db_dir, de->d_name);
if (rename(path, dest) != 0)
- pg_fatal("could not rename \"%s\" to \"%s\": %m", path, dest);
+ pg_fatal("could not rename file \"%s\" to \"%s\": %m", path, dest);
/*
* We don't fsync() the database files in the file synchronization
diff --git a/src/bin/pg_upgrade/t/004_subscription.pl b/src/bin/pg_upgrade/t/004_subscription.pl
index c545abf6581..e46f02c6cc6 100644
--- a/src/bin/pg_upgrade/t/004_subscription.pl
+++ b/src/bin/pg_upgrade/t/004_subscription.pl
@@ -53,7 +53,8 @@ $old_sub->safe_psql('postgres',
$old_sub->stop;
-$new_sub->append_conf('postgresql.conf', "max_active_replication_origins = 0");
+$new_sub->append_conf('postgresql.conf',
+ "max_active_replication_origins = 0");
# pg_upgrade will fail because the new cluster has insufficient
# max_active_replication_origins.
@@ -80,7 +81,8 @@ command_checks_all(
);
# Reset max_active_replication_origins
-$new_sub->append_conf('postgresql.conf', "max_active_replication_origins = 10");
+$new_sub->append_conf('postgresql.conf',
+ "max_active_replication_origins = 10");
# Cleanup
$publisher->safe_psql('postgres', "DROP PUBLICATION regress_pub1");
diff --git a/src/bin/pg_upgrade/t/005_char_signedness.pl b/src/bin/pg_upgrade/t/005_char_signedness.pl
index 17fa0d48b15..cd8cff6f513 100644
--- a/src/bin/pg_upgrade/t/005_char_signedness.pl
+++ b/src/bin/pg_upgrade/t/005_char_signedness.pl
@@ -65,7 +65,7 @@ command_checks_all(
$mode
],
1,
- [qr/--set-char-signedness option cannot be used/],
+ [qr/option --set-char-signedness cannot be used/],
[],
'--set-char-signedness option cannot be used for upgrading from v18 or later'
);
diff --git a/src/bin/pg_upgrade/t/006_transfer_modes.pl b/src/bin/pg_upgrade/t/006_transfer_modes.pl
index 550a63fdf7d..58fe8a8c7dc 100644
--- a/src/bin/pg_upgrade/t/006_transfer_modes.pl
+++ b/src/bin/pg_upgrade/t/006_transfer_modes.pl
@@ -13,7 +13,8 @@ sub test_mode
{
my ($mode) = @_;
- my $old = PostgreSQL::Test::Cluster->new('old', install_path => $ENV{oldinstall});
+ my $old =
+ PostgreSQL::Test::Cluster->new('old', install_path => $ENV{oldinstall});
my $new = PostgreSQL::Test::Cluster->new('new');
# --swap can't be used to upgrade from versions older than 10, so just skip
@@ -40,9 +41,11 @@ sub test_mode
# Create a small variety of simple test objects on the old cluster. We'll
# check that these reach the new version after upgrading.
$old->start;
- $old->safe_psql('postgres', "CREATE TABLE test1 AS SELECT generate_series(1, 100)");
+ $old->safe_psql('postgres',
+ "CREATE TABLE test1 AS SELECT generate_series(1, 100)");
$old->safe_psql('postgres', "CREATE DATABASE testdb1");
- $old->safe_psql('testdb1', "CREATE TABLE test2 AS SELECT generate_series(200, 300)");
+ $old->safe_psql('testdb1',
+ "CREATE TABLE test2 AS SELECT generate_series(200, 300)");
$old->safe_psql('testdb1', "VACUUM FULL test2");
$old->safe_psql('testdb1', "CREATE SEQUENCE testseq START 5432");
@@ -51,10 +54,15 @@ sub test_mode
if (defined($ENV{oldinstall}))
{
my $tblspc = PostgreSQL::Test::Utils::tempdir_short();
- $old->safe_psql('postgres', "CREATE TABLESPACE test_tblspc LOCATION '$tblspc'");
- $old->safe_psql('postgres', "CREATE DATABASE testdb2 TABLESPACE test_tblspc");
- $old->safe_psql('postgres', "CREATE TABLE test3 TABLESPACE test_tblspc AS SELECT generate_series(300, 401)");
- $old->safe_psql('testdb2', "CREATE TABLE test4 AS SELECT generate_series(400, 502)");
+ $old->safe_psql('postgres',
+ "CREATE TABLESPACE test_tblspc LOCATION '$tblspc'");
+ $old->safe_psql('postgres',
+ "CREATE DATABASE testdb2 TABLESPACE test_tblspc");
+ $old->safe_psql('postgres',
+ "CREATE TABLE test3 TABLESPACE test_tblspc AS SELECT generate_series(300, 401)"
+ );
+ $old->safe_psql('testdb2',
+ "CREATE TABLE test4 AS SELECT generate_series(400, 502)");
}
$old->stop;
@@ -90,9 +98,11 @@ sub test_mode
# tablespace.
if (defined($ENV{oldinstall}))
{
- $result = $new->safe_psql('postgres', "SELECT COUNT(*) FROM test3");
+ $result =
+ $new->safe_psql('postgres', "SELECT COUNT(*) FROM test3");
is($result, '102', "test3 data after pg_upgrade $mode");
- $result = $new->safe_psql('testdb2', "SELECT COUNT(*) FROM test4");
+ $result =
+ $new->safe_psql('testdb2', "SELECT COUNT(*) FROM test4");
is($result, '103', "test4 data after pg_upgrade $mode");
}
$new->stop;
diff --git a/src/bin/pg_upgrade/task.c b/src/bin/pg_upgrade/task.c
index a48d5691390..ee0e2457152 100644
--- a/src/bin/pg_upgrade/task.c
+++ b/src/bin/pg_upgrade/task.c
@@ -192,8 +192,7 @@ start_conn(const ClusterInfo *cluster, UpgradeTaskSlot *slot)
slot->conn = PQconnectStart(conn_opts.data);
if (!slot->conn)
- pg_fatal("failed to create connection with connection string: \"%s\"",
- conn_opts.data);
+ pg_fatal("out of memory");
termPQExpBuffer(&conn_opts);
}
@@ -402,7 +401,7 @@ wait_on_slots(UpgradeTaskSlot *slots, int numslots)
* If we found socket(s) to wait on, wait.
*/
if (select_loop(maxFd, &input, &output) == -1)
- pg_fatal("select() failed: %m");
+ pg_fatal("%s() failed: %m", "select");
/*
* Mark which sockets appear to be ready.
diff --git a/src/bin/pgbench/t/002_pgbench_no_server.pl b/src/bin/pgbench/t/002_pgbench_no_server.pl
index f975c73dd75..2cc59cc8140 100644
--- a/src/bin/pgbench/t/002_pgbench_no_server.pl
+++ b/src/bin/pgbench/t/002_pgbench_no_server.pl
@@ -233,21 +233,9 @@ for my $o (@options)
'pgbench option error: ' . $name);
}
-# Help
-pgbench(
- '--help', 0,
- [
- qr{benchmarking tool for PostgreSQL},
- qr{Usage},
- qr{Initialization options:},
- qr{Common options:},
- qr{Report bugs to}
- ],
- [qr{^$}],
- 'pgbench help');
-
-# Version
-pgbench('-V', 0, [qr{^pgbench .PostgreSQL. }], [qr{^$}], 'pgbench version');
+program_help_ok('pgbench');
+program_version_ok('pgbench');
+program_options_handling_ok('pgbench');
# list of builtins
pgbench(
diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c
index 81a5ba844ba..9fcd2db8326 100644
--- a/src/bin/psql/command.c
+++ b/src/bin/psql/command.c
@@ -67,8 +67,8 @@ static backslashResult exec_command_C(PsqlScanState scan_state, bool active_bran
static backslashResult exec_command_connect(PsqlScanState scan_state, bool active_branch);
static backslashResult exec_command_cd(PsqlScanState scan_state, bool active_branch,
const char *cmd);
-static backslashResult exec_command_close(PsqlScanState scan_state, bool active_branch,
- const char *cmd);
+static backslashResult exec_command_close_prepared(PsqlScanState scan_state,
+ bool active_branch, const char *cmd);
static backslashResult exec_command_conninfo(PsqlScanState scan_state, bool active_branch);
static backslashResult exec_command_copy(PsqlScanState scan_state, bool active_branch);
static backslashResult exec_command_copyright(PsqlScanState scan_state, bool active_branch);
@@ -330,8 +330,8 @@ exec_command(const char *cmd,
status = exec_command_connect(scan_state, active_branch);
else if (strcmp(cmd, "cd") == 0)
status = exec_command_cd(scan_state, active_branch, cmd);
- else if (strcmp(cmd, "close") == 0)
- status = exec_command_close(scan_state, active_branch, cmd);
+ else if (strcmp(cmd, "close_prepared") == 0)
+ status = exec_command_close_prepared(scan_state, active_branch, cmd);
else if (strcmp(cmd, "conninfo") == 0)
status = exec_command_conninfo(scan_state, active_branch);
else if (pg_strcasecmp(cmd, "copy") == 0)
@@ -728,10 +728,10 @@ exec_command_cd(PsqlScanState scan_state, bool active_branch, const char *cmd)
}
/*
- * \close -- close a previously prepared statement
+ * \close_prepared -- close a previously prepared statement
*/
static backslashResult
-exec_command_close(PsqlScanState scan_state, bool active_branch, const char *cmd)
+exec_command_close_prepared(PsqlScanState scan_state, bool active_branch, const char *cmd)
{
backslashResult status = PSQL_CMD_SKIP_LINE;
@@ -778,6 +778,7 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch)
int ssl_in_use,
password_used,
gssapi_used;
+ int version_num;
char *paramval;
if (!active_branch)
@@ -793,7 +794,9 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch)
/* Get values for the parameters */
host = PQhost(pset.db);
hostaddr = PQhostaddr(pset.db);
- protocol_version = psprintf("%d", PQprotocolVersion(pset.db));
+ version_num = PQfullProtocolVersion(pset.db);
+ protocol_version = psprintf("%d.%d", version_num / 10000,
+ version_num % 10000);
ssl_in_use = PQsslInUse(pset.db);
password_used = PQconnectionUsedPassword(pset.db);
gssapi_used = PQconnectionUsedGSSAPI(pset.db);
@@ -874,11 +877,11 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch)
printTableAddCell(&cont, _("Backend PID"), false, false);
printTableAddCell(&cont, backend_pid, false, false);
- /* TLS Connection */
- printTableAddCell(&cont, _("TLS Connection"), false, false);
+ /* SSL Connection */
+ printTableAddCell(&cont, _("SSL Connection"), false, false);
printTableAddCell(&cont, ssl_in_use ? _("true") : _("false"), false, false);
- /* TLS Information */
+ /* SSL Information */
if (ssl_in_use)
{
char *library,
@@ -895,19 +898,19 @@ exec_command_conninfo(PsqlScanState scan_state, bool active_branch)
compression = (char *) PQsslAttribute(pset.db, "compression");
alpn = (char *) PQsslAttribute(pset.db, "alpn");
- printTableAddCell(&cont, _("TLS Library"), false, false);
+ printTableAddCell(&cont, _("SSL Library"), false, false);
printTableAddCell(&cont, library ? library : _("unknown"), false, false);
- printTableAddCell(&cont, _("TLS Protocol"), false, false);
+ printTableAddCell(&cont, _("SSL Protocol"), false, false);
printTableAddCell(&cont, protocol ? protocol : _("unknown"), false, false);
- printTableAddCell(&cont, _("TLS Key Bits"), false, false);
+ printTableAddCell(&cont, _("SSL Key Bits"), false, false);
printTableAddCell(&cont, key_bits ? key_bits : _("unknown"), false, false);
- printTableAddCell(&cont, _("TLS Cipher"), false, false);
+ printTableAddCell(&cont, _("SSL Cipher"), false, false);
printTableAddCell(&cont, cipher ? cipher : _("unknown"), false, false);
- printTableAddCell(&cont, _("TLS Compression"), false, false);
+ printTableAddCell(&cont, _("SSL Compression"), false, false);
printTableAddCell(&cont, (compression && strcmp(compression, "off") != 0) ?
_("true") : _("false"), false, false);
@@ -1946,7 +1949,7 @@ exec_command_gexec(PsqlScanState scan_state, bool active_branch)
{
if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF)
{
- pg_log_error("\\gexec not allowed in pipeline mode");
+ pg_log_error("\\%s not allowed in pipeline mode", "gexec");
clean_extended_state();
return PSQL_CMD_ERROR;
}
@@ -1972,7 +1975,7 @@ exec_command_gset(PsqlScanState scan_state, bool active_branch)
if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF)
{
- pg_log_error("\\gset not allowed in pipeline mode");
+ pg_log_error("\\%s not allowed in pipeline mode", "gset");
clean_extended_state();
return PSQL_CMD_ERROR;
}
@@ -3284,7 +3287,7 @@ exec_command_watch(PsqlScanState scan_state, bool active_branch,
if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF)
{
- pg_log_error("\\watch not allowed in pipeline mode");
+ pg_log_error("\\%s not allowed in pipeline mode", "watch");
clean_extended_state();
success = false;
}
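PQfullProtocolVersion() reports the protocol version as major * 10000 + minor, so a protocol-3.2 connection yields 30002, and the psprintf above splits that back into its two components. The same arithmetic as a quick Perl check (the sample value is assumed):

    my $version_num = 30002;    # assumed result for a protocol-3.2 connection
    printf "%d.%d\n", int($version_num / 10000), $version_num % 10000;    # prints 3.2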
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index 3e4e444f3fd..d2c0a49c46c 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -1867,6 +1867,33 @@ ExecQueryAndProcessResults(const char *query,
{
FILE *copy_stream = NULL;
+ if (PQpipelineStatus(pset.db) != PQ_PIPELINE_OFF)
+ {
+ /*
+ * Running COPY within a pipeline can break the protocol
+ * synchronisation in multiple ways, and psql shows its limits
+ * when it comes to tracking this information.
+ *
+ * While in COPY mode, the backend process ignores additional
+ * Sync messages and will not send the matching ReadyForQuery
+ * expected by the frontend.
+ *
+ * Additionally, libpq automatically sends a Sync with the
+ * Copy message, creating an unexpected synchronisation point.
+ * A failure during COPY would leave the pipeline in an
+ * aborted state while the backend would be in a clean state,
+ * ready to process commands.
+ *
+ * Improving those issues would require modifications in how
+ * libpq handles pipelines and COPY. Hence, for the time
+ * being, we forbid the use of COPY within a pipeline,
+ * aborting the connection to avoid an inconsistent state on
+ * psql side if trying to use a COPY command.
+ */
+ pg_log_info("COPY in a pipeline is not supported, aborting connection");
+ exit(EXIT_BADCONN);
+ }
+
/*
* For COPY OUT, direct the output to the default place (probably
* a pager pipe) for \watch, or to pset.copyStream for \copy,
@@ -2601,7 +2628,7 @@ clean_extended_state(void)
switch (pset.send_mode)
{
- case PSQL_SEND_EXTENDED_CLOSE: /* \close */
+ case PSQL_SEND_EXTENDED_CLOSE: /* \close_prepared */
free(pset.stmtName);
break;
case PSQL_SEND_EXTENDED_PARSE: /* \parse */
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index 1d08268393e..dd25d2fe7b8 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -296,6 +296,7 @@ describeFunctions(const char *functypes, const char *func_pattern,
char **arg_patterns, int num_arg_patterns,
bool verbose, bool showSystem)
{
+ const char *df_options = "anptwSx+";
bool showAggregate = strchr(functypes, 'a') != NULL;
bool showNormal = strchr(functypes, 'n') != NULL;
bool showProcedure = strchr(functypes, 'p') != NULL;
@@ -310,9 +311,9 @@ describeFunctions(const char *functypes, const char *func_pattern,
/* No "Parallel" column before 9.6 */
static const bool translate_columns_pre_96[] = {false, false, false, false, true, true, false, true, true, false, false, false, false};
- if (strlen(functypes) != strspn(functypes, "anptwSx+"))
+ if (strlen(functypes) != strspn(functypes, df_options))
{
- pg_log_error("\\df only takes [anptwSx+] as options");
+ pg_log_error("\\df only takes [%s] as options", df_options);
return true;
}
@@ -6188,8 +6189,8 @@ listExtensions(const char *pattern)
"FROM pg_catalog.pg_extension e "
"LEFT JOIN pg_catalog.pg_namespace n ON n.oid = e.extnamespace "
"LEFT JOIN pg_catalog.pg_description d ON d.objoid = e.oid "
- "LEFT JOIN pg_catalog.pg_available_extensions() ae(name, default_version, comment) ON ae.name = e.extname "
- "AND d.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass\n",
+ "AND d.classoid = 'pg_catalog.pg_extension'::pg_catalog.regclass "
+ "LEFT JOIN pg_catalog.pg_available_extensions() ae(name, default_version, comment) ON ae.name = e.extname\n",
gettext_noop("Name"),
gettext_noop("Version"),
gettext_noop("Default version"),
diff --git a/src/bin/psql/help.c b/src/bin/psql/help.c
index 403b51325a7..a2e009ab9be 100644
--- a/src/bin/psql/help.c
+++ b/src/bin/psql/help.c
@@ -252,7 +252,8 @@ slashUsage(unsigned short int pager)
HELP0(" \\dO[Sx+] [PATTERN] list collations\n");
HELP0(" \\dp[Sx] [PATTERN] list table, view, and sequence access privileges\n");
HELP0(" \\dP[itnx+] [PATTERN] list [only index/table] partitioned relations [n=nested]\n");
- HELP0(" \\drds[x] [ROLEPTRN [DBPTRN]] list per-database role settings\n");
+ HELP0(" \\drds[x] [ROLEPTRN [DBPTRN]]\n"
+ " list per-database role settings\n");
HELP0(" \\drg[Sx] [PATTERN] list role grants\n");
HELP0(" \\dRp[x+] [PATTERN] list replication publications\n");
HELP0(" \\dRs[x+] [PATTERN] list replication subscriptions\n");
@@ -330,12 +331,12 @@ slashUsage(unsigned short int pager)
HELP0(" \\bind [PARAM]... set query parameters\n");
HELP0(" \\bind_named STMT_NAME [PARAM]...\n"
" set query parameters for an existing prepared statement\n");
- HELP0(" \\close STMT_NAME close an existing prepared statement\n");
+ HELP0(" \\close_prepared STMT_NAME\n"
+ " close an existing prepared statement\n");
HELP0(" \\endpipeline exit pipeline mode\n");
HELP0(" \\flush flush output data to the server\n");
HELP0(" \\flushrequest send request to the server to flush its output buffer\n");
- HELP0(" \\getresults [NUM_RES] read NUM_RES pending results. All pending results are\n"
- " read if no argument is provided\n");
+ HELP0(" \\getresults [NUM_RES] read NUM_RES pending results, or all if no argument\n");
HELP0(" \\parse STMT_NAME create a prepared statement\n");
HELP0(" \\sendpipeline send an extended query to an ongoing pipeline\n");
HELP0(" \\startpipeline enter pipeline mode\n");
@@ -463,8 +464,9 @@ helpVariables(unsigned short int pager)
" VERSION_NAME\n"
" VERSION_NUM\n"
" psql's version (in verbose string, short string, or numeric format)\n");
- HELP0(" WATCH_INTERVAL\n"
- " if set to a number, overrides the default two second \\watch interval\n");
+ HELPN(" WATCH_INTERVAL\n"
+ " number of seconds \\watch waits between executions (default %s)\n",
+ DEFAULT_WATCH_INTERVAL);
HELP0("\nDisplay settings:\n");
HELP0("Usage:\n");
diff --git a/src/bin/psql/t/001_basic.pl b/src/bin/psql/t/001_basic.pl
index 4050f9a5e3e..f42c3961e09 100644
--- a/src/bin/psql/t/001_basic.pl
+++ b/src/bin/psql/t/001_basic.pl
@@ -483,8 +483,8 @@ psql_like($node, "copy (values ('foo'),('bar')) to stdout \\g | $pipe_cmd",
my $c4 = slurp_file($g_file);
like($c4, qr/foo.*bar/s);
-# Tests with pipelines. These trigger FATAL failures in the backend,
-# so they cannot be tested via SQL.
+# Test COPY within pipelines. These abort the connection from
+# the frontend so they cannot be tested via SQL.
$node->safe_psql('postgres', 'CREATE TABLE psql_pipeline()');
my $log_location = -s $node->logfile;
psql_fails_like(
@@ -493,35 +493,41 @@ psql_fails_like(
COPY psql_pipeline FROM STDIN;
SELECT 'val1';
\\syncpipeline
-\\getresults
\\endpipeline},
- qr/server closed the connection unexpectedly/,
- 'protocol sync loss in pipeline: direct COPY, SELECT, sync and getresult'
-);
+ qr/COPY in a pipeline is not supported, aborting connection/,
+ 'COPY FROM in pipeline: fails');
$node->wait_for_log(
qr/FATAL: .*terminating connection because protocol synchronization was lost/,
$log_location);
+# Remove \syncpipeline here.
psql_fails_like(
$node,
qq{\\startpipeline
-COPY psql_pipeline FROM STDIN \\bind \\sendpipeline
-SELECT 'val1' \\bind \\sendpipeline
-\\syncpipeline
-\\getresults
+COPY psql_pipeline TO STDOUT;
+SELECT 'val1';
\\endpipeline},
- qr/server closed the connection unexpectedly/,
- 'protocol sync loss in pipeline: bind COPY, SELECT, sync and getresult');
+ qr/COPY in a pipeline is not supported, aborting connection/,
+ 'COPY TO in pipeline: fails');
-# This time, test without the \getresults.
psql_fails_like(
$node,
qq{\\startpipeline
-COPY psql_pipeline FROM STDIN;
+\\copy psql_pipeline from stdin;
SELECT 'val1';
\\syncpipeline
\\endpipeline},
- qr/server closed the connection unexpectedly/,
- 'protocol sync loss in pipeline: COPY, SELECT and sync');
+ qr/COPY in a pipeline is not supported, aborting connection/,
+ '\copy from in pipeline: fails');
+
+# Sync attempt after a COPY TO/FROM.
+psql_fails_like(
+ $node,
+ qq{\\startpipeline
+\\copy psql_pipeline to stdout;
+\\syncpipeline
+\\endpipeline},
+ qr/COPY in a pipeline is not supported, aborting connection/,
+ '\copy to in pipeline: fails');
done_testing();
diff --git a/src/bin/psql/tab-complete.in.c b/src/bin/psql/tab-complete.in.c
index ec65ab79fec..8c2ea0b9587 100644
--- a/src/bin/psql/tab-complete.in.c
+++ b/src/bin/psql/tab-complete.in.c
@@ -889,6 +889,14 @@ static const SchemaQuery Query_for_list_of_analyzables = {
.result = "c.relname",
};
+/*
+ * Relations supporting COPY TO/FROM are currently almost the same as
+ * those supporting ANALYZE. Although views with INSTEAD OF INSERT triggers
+ * can be used with COPY FROM, they are rarely used for this purpose,
+ * so plain views are intentionally excluded from this tab completion.
+ */
+#define Query_for_list_of_tables_for_copy Query_for_list_of_analyzables
+
/* Relations supporting index creation */
static const SchemaQuery Query_for_list_of_indexables = {
.catname = "pg_catalog.pg_class c",
@@ -1875,7 +1883,7 @@ psql_completion(const char *text, int start, int end)
static const char *const backslash_commands[] = {
"\\a",
"\\bind", "\\bind_named",
- "\\connect", "\\conninfo", "\\C", "\\cd", "\\close", "\\copy",
+ "\\connect", "\\conninfo", "\\C", "\\cd", "\\close_prepared", "\\copy",
"\\copyright", "\\crosstabview",
"\\d", "\\da", "\\dA", "\\dAc", "\\dAf", "\\dAo", "\\dAp",
"\\db", "\\dc", "\\dconfig", "\\dC", "\\dd", "\\ddp", "\\dD",
@@ -3255,7 +3263,7 @@ match_previous_words(int pattern_id,
* backslash command).
*/
else if (Matches("COPY|\\copy"))
- COMPLETE_WITH_SCHEMA_QUERY_PLUS(Query_for_list_of_tables, "(");
+ COMPLETE_WITH_SCHEMA_QUERY_PLUS(Query_for_list_of_tables_for_copy, "(");
/* Complete COPY ( with legal query commands */
else if (Matches("COPY|\\copy", "("))
COMPLETE_WITH("SELECT", "TABLE", "VALUES", "INSERT INTO", "UPDATE", "DELETE FROM", "MERGE INTO", "WITH");
@@ -3289,7 +3297,7 @@ match_previous_words(int pattern_id,
COMPLETE_WITH("FORMAT", "FREEZE", "DELIMITER", "NULL",
"HEADER", "QUOTE", "ESCAPE", "FORCE_QUOTE",
"FORCE_NOT_NULL", "FORCE_NULL", "ENCODING", "DEFAULT",
- "ON_ERROR", "LOG_VERBOSITY");
+ "ON_ERROR", "LOG_VERBOSITY", "REJECT_LIMIT");
/* Complete COPY <sth> FROM|TO filename WITH (FORMAT */
else if (Matches("COPY|\\copy", MatchAny, "FROM|TO", MatchAny, "WITH", "(", "FORMAT"))
@@ -3664,9 +3672,10 @@ match_previous_words(int pattern_id,
TailMatches("CREATE", "TEMP|TEMPORARY|UNLOGGED", "TABLE", MatchAny, "(*)", "AS"))
COMPLETE_WITH("EXECUTE", "SELECT", "TABLE", "VALUES", "WITH");
/* Complete CREATE TABLE name (...) with supported options */
- else if (TailMatches("CREATE", "TABLE", MatchAny, "(*)") ||
- TailMatches("CREATE", "UNLOGGED", "TABLE", MatchAny, "(*)"))
+ else if (TailMatches("CREATE", "TABLE", MatchAny, "(*)"))
COMPLETE_WITH("AS", "INHERITS (", "PARTITION BY", "USING", "TABLESPACE", "WITH (");
+ else if (TailMatches("CREATE", "UNLOGGED", "TABLE", MatchAny, "(*)"))
+ COMPLETE_WITH("AS", "INHERITS (", "USING", "TABLESPACE", "WITH (");
else if (TailMatches("CREATE", "TEMP|TEMPORARY", "TABLE", MatchAny, "(*)"))
COMPLETE_WITH("AS", "INHERITS (", "ON COMMIT", "PARTITION BY", "USING",
"TABLESPACE", "WITH (");
diff --git a/src/bin/psql/variables.c b/src/bin/psql/variables.c
index ae2d0e5ed3f..6b64302ebca 100644
--- a/src/bin/psql/variables.c
+++ b/src/bin/psql/variables.c
@@ -204,7 +204,7 @@ ParseVariableDouble(const char *value, const char *name, double *result, double
if ((value == NULL) || (*value == '\0'))
{
if (name)
- pg_log_error("invalid input syntax for \"%s\"", name);
+ pg_log_error("invalid input syntax for variable \"%s\"", name);
return false;
}
@@ -215,14 +215,14 @@ ParseVariableDouble(const char *value, const char *name, double *result, double
if (dblval < min)
{
if (name)
- pg_log_error("invalid value \"%s\" for \"%s\": must be greater than %.2f",
+ pg_log_error("invalid value \"%s\" for variable \"%s\": must be greater than %.2f",
value, name, min);
return false;
}
else if (dblval > max)
{
if (name)
- pg_log_error("invalid value \"%s\" for \"%s\": must be less than %.2f",
+ pg_log_error("invalid value \"%s\" for variable \"%s\": must be less than %.2f",
value, name, max);
}
*result = dblval;
@@ -238,13 +238,13 @@ ParseVariableDouble(const char *value, const char *name, double *result, double
(dblval == 0.0 || dblval >= HUGE_VAL || dblval <= -HUGE_VAL))
{
if (name)
- pg_log_error("\"%s\" is out of range for \"%s\"", value, name);
+ pg_log_error("value \"%s\" is out of range for variable \"%s\"", value, name);
return false;
}
else
{
if (name)
- pg_log_error("invalid value \"%s\" for \"%s\"", value, name);
+ pg_log_error("invalid value \"%s\" for variable \"%s\"", value, name);
return false;
}
}
diff --git a/src/bin/scripts/t/100_vacuumdb.pl b/src/bin/scripts/t/100_vacuumdb.pl
index 75ac24a7a55..ff56a13b46b 100644
--- a/src/bin/scripts/t/100_vacuumdb.pl
+++ b/src/bin/scripts/t/100_vacuumdb.pl
@@ -238,62 +238,105 @@ $node->command_fails_like(
'cannot use option --all and a dbname as argument at the same time');
$node->safe_psql('postgres',
- 'CREATE TABLE regression_vacuumdb_test AS select generate_series(1, 10) a, generate_series(2, 11) b;');
+ 'CREATE TABLE regression_vacuumdb_test AS select generate_series(1, 10) a, generate_series(2, 11) b;'
+);
$node->issues_sql_like(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with missing stats');
$node->issues_sql_unlike(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with no missing stats');
$node->safe_psql('postgres',
- 'CREATE INDEX regression_vacuumdb_test_idx ON regression_vacuumdb_test (mod(a, 2));');
+ 'CREATE INDEX regression_vacuumdb_test_idx ON regression_vacuumdb_test (mod(a, 2));'
+);
$node->issues_sql_like(
- [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-in-stages',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with missing index expression stats');
$node->issues_sql_unlike(
- [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-in-stages',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with no missing index expression stats');
$node->safe_psql('postgres',
- 'CREATE STATISTICS regression_vacuumdb_test_stat ON a, b FROM regression_vacuumdb_test;');
+ 'CREATE STATISTICS regression_vacuumdb_test_stat ON a, b FROM regression_vacuumdb_test;'
+);
$node->issues_sql_like(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with missing extended stats');
$node->issues_sql_unlike(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with no missing extended stats');
$node->safe_psql('postgres',
"CREATE TABLE regression_vacuumdb_child (a INT) INHERITS (regression_vacuumdb_test);\n"
- . "INSERT INTO regression_vacuumdb_child VALUES (1, 2);\n"
- . "ANALYZE regression_vacuumdb_child;\n");
+ . "INSERT INTO regression_vacuumdb_child VALUES (1, 2);\n"
+ . "ANALYZE regression_vacuumdb_child;\n");
$node->issues_sql_like(
- [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-in-stages',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with missing inherited stats');
$node->issues_sql_unlike(
- [ 'vacuumdb', '--analyze-in-stages', '--missing-stats-only', '-t', 'regression_vacuumdb_test', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-in-stages',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_test', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with no missing inherited stats');
$node->safe_psql('postgres',
"CREATE TABLE regression_vacuumdb_parted (a INT) PARTITION BY LIST (a);\n"
- . "CREATE TABLE regression_vacuumdb_part1 PARTITION OF regression_vacuumdb_parted FOR VALUES IN (1);\n"
- . "INSERT INTO regression_vacuumdb_parted VALUES (1);\n"
- . "ANALYZE regression_vacuumdb_part1;\n");
+ . "CREATE TABLE regression_vacuumdb_part1 PARTITION OF regression_vacuumdb_parted FOR VALUES IN (1);\n"
+ . "INSERT INTO regression_vacuumdb_parted VALUES (1);\n"
+ . "ANALYZE regression_vacuumdb_part1;\n");
$node->issues_sql_like(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_parted', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_parted', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with missing partition stats');
$node->issues_sql_unlike(
- [ 'vacuumdb', '--analyze-only', '--missing-stats-only', '-t', 'regression_vacuumdb_parted', 'postgres' ],
+ [
+ 'vacuumdb', '--analyze-only',
+ '--missing-stats-only', '-t',
+ 'regression_vacuumdb_parted', 'postgres'
+ ],
qr/statement:\ ANALYZE/sx,
'--missing-stats-only with no missing partition stats');
diff --git a/src/include/access/gin_tuple.h b/src/include/access/gin_tuple.h
index 4a50565960f..702f7d12889 100644
--- a/src/include/access/gin_tuple.h
+++ b/src/include/access/gin_tuple.h
@@ -2,7 +2,7 @@
* gin.h
* Public header file for Generalized Inverted Index access method.
*
- * Copyright (c) 2006-2024, PostgreSQL Global Development Group
+ * Copyright (c) 2006-2025, PostgreSQL Global Development Group
*
* src/include/access/gin.h
*--------------------------------------------------------------------------
diff --git a/src/include/access/gist.h b/src/include/access/gist.h
index db78e60eeab..b3f4e02cbfd 100644
--- a/src/include/access/gist.h
+++ b/src/include/access/gist.h
@@ -40,7 +40,7 @@
#define GIST_FETCH_PROC 9
#define GIST_OPTIONS_PROC 10
#define GIST_SORTSUPPORT_PROC 11
-#define GIST_STRATNUM_PROC 12
+#define GIST_TRANSLATE_CMPTYPE_PROC 12
#define GISTNProcs 12
/*
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index e48fe434cd3..a2bd5a897f8 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -21,6 +21,7 @@
#include "access/skey.h"
#include "access/table.h" /* for backward compatibility */
#include "access/tableam.h"
+#include "commands/vacuum.h"
#include "nodes/lockoptions.h"
#include "nodes/primnodes.h"
#include "storage/bufpage.h"
@@ -96,7 +97,7 @@ typedef struct HeapScanDescData
uint32 rs_cindex; /* current tuple's index in vistuples */
uint32 rs_ntuples; /* number of visible tuples on page */
OffsetNumber rs_vistuples[MaxHeapTuplesPerPage]; /* their offsets */
-} HeapScanDescData;
+} HeapScanDescData;
typedef struct HeapScanDescData *HeapScanDesc;
typedef struct BitmapHeapScanDescData
@@ -396,9 +397,8 @@ extern void log_heap_prune_and_freeze(Relation relation, Buffer buffer,
OffsetNumber *unused, int nunused);
/* in heap/vacuumlazy.c */
-struct VacuumParams;
extern void heap_vacuum_rel(Relation rel,
- struct VacuumParams *params, BufferAccessStrategy bstrategy);
+ const VacuumParams params, BufferAccessStrategy bstrategy);
/* in heap/heapam_visibility.c */
extern bool HeapTupleSatisfiesVisibility(HeapTuple htup, Snapshot snapshot,
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index ebca02588d3..e709d2e0afe 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -939,7 +939,7 @@ typedef BTVacuumPostingData *BTVacuumPosting;
* processing. This approach minimizes lock/unlock traffic. We must always
* drop the lock to make it okay for caller to process the returned items.
* Whether or not we can also release the pin during this window will vary.
- * We drop the pin eagerly (when safe) to avoid blocking progress by VACUUM
+ * We drop the pin (when so->dropPin) to avoid blocking progress by VACUUM
* (see nbtree/README section about making concurrent TID recycling safe).
* We'll always release both the lock and the pin on the current page before
* moving on to its sibling page.
@@ -967,7 +967,7 @@ typedef struct BTScanPosData
BlockNumber currPage; /* page referenced by items array */
BlockNumber prevPage; /* currPage's left link */
BlockNumber nextPage; /* currPage's right link */
- XLogRecPtr lsn; /* currPage's LSN */
+ XLogRecPtr lsn; /* currPage's LSN (when so->dropPin) */
/* scan direction for the saved position's call to _bt_readpage */
ScanDirection dir;
@@ -1070,6 +1070,7 @@ typedef struct BTScanOpaqueData
/* info about killed items if any (killedItems is NULL if never used) */
int *killedItems; /* currPos.items indexes of killed items */
int numKilled; /* number of currently stored items */
+ bool dropPin; /* drop leaf pin before btgettuple returns? */
/*
* If we are doing an index-only scan, these are the tuple storage
diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h
index 8713e12cbfb..1c9e802a6b1 100644
--- a/src/include/access/tableam.h
+++ b/src/include/access/tableam.h
@@ -20,6 +20,7 @@
#include "access/relscan.h"
#include "access/sdir.h"
#include "access/xact.h"
+#include "commands/vacuum.h"
#include "executor/tuptable.h"
#include "storage/read_stream.h"
#include "utils/rel.h"
@@ -36,7 +37,6 @@ extern PGDLLIMPORT bool synchronize_seqscans;
struct BulkInsertStateData;
struct IndexInfo;
struct SampleScanState;
-struct VacuumParams;
struct ValidateIndexState;
/*
@@ -645,7 +645,7 @@ typedef struct TableAmRoutine
* integrate with autovacuum's scheduling.
*/
void (*relation_vacuum) (Relation rel,
- struct VacuumParams *params,
+ const VacuumParams params,
BufferAccessStrategy bstrategy);
/*
@@ -1664,7 +1664,7 @@ table_relation_copy_for_cluster(Relation OldTable, Relation NewTable,
* routine, even if (for ANALYZE) it is part of the same VACUUM command.
*/
static inline void
-table_relation_vacuum(Relation rel, struct VacuumParams *params,
+table_relation_vacuum(Relation rel, const VacuumParams params,
BufferAccessStrategy bstrategy)
{
rel->rd_tableam->relation_vacuum(rel, params, bstrategy);
diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h
index 82988d24433..479629825f5 100644
--- a/src/include/catalog/catversion.h
+++ b/src/include/catalog/catversion.h
@@ -57,6 +57,6 @@
*/
/* yyyymmddN */
-#define CATALOG_VERSION_NO 202505071
+#define CATALOG_VERSION_NO 202506291
#endif
diff --git a/src/include/catalog/pg_amproc.dat b/src/include/catalog/pg_amproc.dat
index 92505148998..e3477500baa 100644
--- a/src/include/catalog/pg_amproc.dat
+++ b/src/include/catalog/pg_amproc.dat
@@ -533,7 +533,7 @@
amprocrighttype => 'box', amprocnum => '8', amproc => 'gist_box_distance' },
{ amprocfamily => 'gist/box_ops', amproclefttype => 'any',
amprocrighttype => 'any', amprocnum => '12',
- amproc => 'gist_stratnum_common' },
+ amproc => 'gist_translate_cmptype_common' },
{ amprocfamily => 'gist/poly_ops', amproclefttype => 'polygon',
amprocrighttype => 'polygon', amprocnum => '1',
amproc => 'gist_poly_consistent' },
@@ -555,7 +555,7 @@
amproc => 'gist_poly_distance' },
{ amprocfamily => 'gist/poly_ops', amproclefttype => 'any',
amprocrighttype => 'any', amprocnum => '12',
- amproc => 'gist_stratnum_common' },
+ amproc => 'gist_translate_cmptype_common' },
{ amprocfamily => 'gist/circle_ops', amproclefttype => 'circle',
amprocrighttype => 'circle', amprocnum => '1',
amproc => 'gist_circle_consistent' },
@@ -576,7 +576,7 @@
amproc => 'gist_circle_distance' },
{ amprocfamily => 'gist/circle_ops', amproclefttype => 'any',
amprocrighttype => 'any', amprocnum => '12',
- amproc => 'gist_stratnum_common' },
+ amproc => 'gist_translate_cmptype_common' },
{ amprocfamily => 'gist/tsvector_ops', amproclefttype => 'tsvector',
amprocrighttype => 'tsvector', amprocnum => '1',
amproc => 'gtsvector_consistent(internal,tsvector,int2,oid,internal)' },
@@ -636,7 +636,7 @@
amproc => 'range_sortsupport' },
{ amprocfamily => 'gist/range_ops', amproclefttype => 'any',
amprocrighttype => 'any', amprocnum => '12',
- amproc => 'gist_stratnum_common' },
+ amproc => 'gist_translate_cmptype_common' },
{ amprocfamily => 'gist/network_ops', amproclefttype => 'inet',
amprocrighttype => 'inet', amprocnum => '1',
amproc => 'inet_gist_consistent' },
@@ -655,7 +655,7 @@
amprocrighttype => 'inet', amprocnum => '9', amproc => 'inet_gist_fetch' },
{ amprocfamily => 'gist/network_ops', amproclefttype => 'any',
amprocrighttype => 'any', amprocnum => '12',
- amproc => 'gist_stratnum_common' },
+ amproc => 'gist_translate_cmptype_common' },
{ amprocfamily => 'gist/multirange_ops', amproclefttype => 'anymultirange',
amprocrighttype => 'anymultirange', amprocnum => '1',
amproc => 'multirange_gist_consistent' },
@@ -676,7 +676,7 @@
amproc => 'range_gist_same' },
{ amprocfamily => 'gist/multirange_ops', amproclefttype => 'any',
amprocrighttype => 'any', amprocnum => '12',
- amproc => 'gist_stratnum_common' },
+ amproc => 'gist_translate_cmptype_common' },
# gin
{ amprocfamily => 'gin/array_ops', amproclefttype => 'anyarray',
diff --git a/src/include/catalog/pg_authid.dat b/src/include/catalog/pg_authid.dat
index eb4dab5c6aa..c881c13adf1 100644
--- a/src/include/catalog/pg_authid.dat
+++ b/src/include/catalog/pg_authid.dat
@@ -99,7 +99,7 @@
rolcreaterole => 'f', rolcreatedb => 'f', rolcanlogin => 'f',
rolreplication => 'f', rolbypassrls => 'f', rolconnlimit => '-1',
rolpassword => '_null_', rolvaliduntil => '_null_' },
-{ oid => '8916', oid_symbol => 'ROLE_PG_SIGNAL_AUTOVACUUM_WORKER',
+{ oid => '6392', oid_symbol => 'ROLE_PG_SIGNAL_AUTOVACUUM_WORKER',
rolname => 'pg_signal_autovacuum_worker', rolsuper => 'f', rolinherit => 't',
rolcreaterole => 'f', rolcreatedb => 'f', rolcanlogin => 'f',
rolreplication => 'f', rolbypassrls => 'f', rolconnlimit => '-1',
diff --git a/src/include/catalog/pg_collation.dat b/src/include/catalog/pg_collation.dat
index fb76c421931..8cfd09f0314 100644
--- a/src/include/catalog/pg_collation.dat
+++ b/src/include/catalog/pg_collation.dat
@@ -33,7 +33,8 @@
descr => 'sorts by Unicode code point; Unicode and POSIX character semantics',
collname => 'pg_c_utf8', collprovider => 'b', collencoding => '6',
colllocale => 'C.UTF-8', collversion => '1' },
-{ oid => '9535', descr => 'sorts by Unicode code point; Unicode character semantics',
+{ oid => '6411',
+ descr => 'sorts by Unicode code point; Unicode character semantics',
collname => 'pg_unicode_fast', collprovider => 'b', collencoding => '6',
colllocale => 'PG_UNICODE_FAST', collversion => '1' },
diff --git a/src/include/catalog/pg_index.h b/src/include/catalog/pg_index.h
index 4392b9d221d..731d3938169 100644
--- a/src/include/catalog/pg_index.h
+++ b/src/include/catalog/pg_index.h
@@ -69,7 +69,7 @@ CATALOG(pg_index,2610,IndexRelationId) BKI_SCHEMA_MACRO
*/
typedef FormData_pg_index *Form_pg_index;
-DECLARE_TOAST_WITH_MACRO(pg_index, 8149, 8150, PgIndexToastTable, PgIndexToastIndex);
+DECLARE_TOAST_WITH_MACRO(pg_index, 6351, 6352, PgIndexToastTable, PgIndexToastIndex);
DECLARE_INDEX(pg_index_indrelid_index, 2678, IndexIndrelidIndexId, pg_index, btree(indrelid oid_ops));
DECLARE_UNIQUE_INDEX_PKEY(pg_index_indexrelid_index, 2679, IndexRelidIndexId, pg_index, btree(indexrelid oid_ops));
diff --git a/src/include/catalog/pg_proc.dat b/src/include/catalog/pg_proc.dat
index 62beb71da28..fb4f7f50350 100644
--- a/src/include/catalog/pg_proc.dat
+++ b/src/include/catalog/pg_proc.dat
@@ -1004,7 +1004,7 @@
{ oid => '3129', descr => 'sort support',
proname => 'btint2sortsupport', prorettype => 'void',
proargtypes => 'internal', prosrc => 'btint2sortsupport' },
-{ oid => '9290', descr => 'skip support',
+{ oid => '6402', descr => 'skip support',
proname => 'btint2skipsupport', prorettype => 'void',
proargtypes => 'internal', prosrc => 'btint2skipsupport' },
{ oid => '351', descr => 'less-equal-greater',
@@ -1013,7 +1013,7 @@
{ oid => '3130', descr => 'sort support',
proname => 'btint4sortsupport', prorettype => 'void',
proargtypes => 'internal', prosrc => 'btint4sortsupport' },
-{ oid => '9291', descr => 'skip support',
+{ oid => '6403', descr => 'skip support',
proname => 'btint4skipsupport', prorettype => 'void',
proargtypes => 'internal', prosrc => 'btint4skipsupport' },
{ oid => '842', descr => 'less-equal-greater',
@@ -1022,7 +1022,7 @@
{ oid => '3131', descr => 'sort support',
proname => 'btint8sortsupport', prorettype => 'void',
proargtypes => 'internal', prosrc => 'btint8sortsupport' },
-{ oid => '9292', descr => 'skip support',
+{ oid => '6404', descr => 'skip support',
proname => 'btint8skipsupport', prorettype => 'void',
proargtypes => 'internal', prosrc => 'btint8skipsupport' },
{ oid => '354', descr => 'less-equal-greater',
@@ -1043,7 +1043,7 @@
{ oid => '3134', descr => 'sort support',
proname => 'btoidsortsupport', prorettype => 'void',
proargtypes => 'internal', prosrc => 'btoidsortsupport' },
-{ oid => '9293', descr => 'skip support',
+{ oid => '6405', descr => 'skip support',
proname => 'btoidskipsupport', prorettype => 'void',
proargtypes => 'internal', prosrc => 'btoidskipsupport' },
{ oid => '404', descr => 'less-equal-greater',
@@ -1052,7 +1052,7 @@
{ oid => '358', descr => 'less-equal-greater',
proname => 'btcharcmp', proleakproof => 't', prorettype => 'int4',
proargtypes => 'char char', prosrc => 'btcharcmp' },
-{ oid => '9294', descr => 'skip support',
+{ oid => '6406', descr => 'skip support',
proname => 'btcharskipsupport', prorettype => 'void',
proargtypes => 'internal', prosrc => 'btcharskipsupport' },
{ oid => '359', descr => 'less-equal-greater',
@@ -1180,24 +1180,24 @@
proname => 'name', proleakproof => 't', prorettype => 'name',
proargtypes => 'bpchar', prosrc => 'bpchar_name' },
-{ oid => '8577', descr => 'convert int2 to bytea',
+{ oid => '6367', descr => 'convert int2 to bytea',
proname => 'bytea', proleakproof => 't', prorettype => 'bytea',
proargtypes => 'int2', prosrc => 'int2_bytea' },
-{ oid => '8578', descr => 'convert int4 to bytea',
+{ oid => '6368', descr => 'convert int4 to bytea',
proname => 'bytea', proleakproof => 't', prorettype => 'bytea',
proargtypes => 'int4', prosrc => 'int4_bytea' },
-{ oid => '8579', descr => 'convert int8 to bytea',
+{ oid => '6369', descr => 'convert int8 to bytea',
proname => 'bytea', proleakproof => 't', prorettype => 'bytea',
proargtypes => 'int8', prosrc => 'int8_bytea' },
-{ oid => '8580', descr => 'convert bytea to int2',
- proname => 'int2', prorettype => 'int2',
- proargtypes => 'bytea', prosrc => 'bytea_int2' },
-{ oid => '8581', descr => 'convert bytea to int4',
- proname => 'int4', prorettype => 'int4',
- proargtypes => 'bytea', prosrc => 'bytea_int4' },
-{ oid => '8582', descr => 'convert bytea to int8',
- proname => 'int8', prorettype => 'int8',
- proargtypes => 'bytea', prosrc => 'bytea_int8' },
+{ oid => '6370', descr => 'convert bytea to int2',
+ proname => 'int2', prorettype => 'int2', proargtypes => 'bytea',
+ prosrc => 'bytea_int2' },
+{ oid => '6371', descr => 'convert bytea to int4',
+ proname => 'int4', prorettype => 'int4', proargtypes => 'bytea',
+ prosrc => 'bytea_int4' },
+{ oid => '6372', descr => 'convert bytea to int8',
+ proname => 'int8', prorettype => 'int8', proargtypes => 'bytea',
+ prosrc => 'bytea_int8' },
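# (Several entries in this hunk and below change only their line layout:
# reformat_dat_file.pl rewraps catalog entries into its canonical form,
# so renumbering one field can reflow the rest of the entry.)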
{ oid => '449', descr => 'hash',
proname => 'hashint2', prorettype => 'int4', proargtypes => 'int2',
@@ -1259,10 +1259,10 @@
{ oid => '772', descr => 'hash',
proname => 'hashvarlenaextended', prorettype => 'int8',
proargtypes => 'internal int8', prosrc => 'hashvarlenaextended' },
-{ oid => '9708', descr => 'hash',
+{ oid => '6413', descr => 'hash',
proname => 'hashbytea', prorettype => 'int4', proargtypes => 'bytea',
prosrc => 'hashbytea' },
-{ oid => '9709', descr => 'hash',
+{ oid => '6414', descr => 'hash',
proname => 'hashbyteaextended', prorettype => 'int8',
proargtypes => 'bytea int8', prosrc => 'hashbyteaextended' },
{ oid => '457', descr => 'hash',
@@ -1301,34 +1301,34 @@
{ oid => '781', descr => 'hash',
proname => 'hashmacaddr8extended', prorettype => 'int8',
proargtypes => 'macaddr8 int8', prosrc => 'hashmacaddr8extended' },
-{ oid => '9710', descr => 'hash',
+{ oid => '6415', descr => 'hash',
proname => 'hashdate', prorettype => 'int4', proargtypes => 'date',
prosrc => 'hashdate' },
-{ oid => '9711', descr => 'hash',
+{ oid => '6416', descr => 'hash',
proname => 'hashdateextended', prorettype => 'int8',
proargtypes => 'date int8', prosrc => 'hashdateextended' },
-{ oid => '9712', descr => 'hash',
+{ oid => '6417', descr => 'hash',
proname => 'hashbool', prorettype => 'int4', proargtypes => 'bool',
prosrc => 'hashbool' },
-{ oid => '9713', descr => 'hash',
+{ oid => '6418', descr => 'hash',
proname => 'hashboolextended', prorettype => 'int8',
proargtypes => 'bool int8', prosrc => 'hashboolextended' },
-{ oid => '9714', descr => 'hash',
+{ oid => '6419', descr => 'hash',
proname => 'hashxid', prorettype => 'int4', proargtypes => 'xid',
prosrc => 'hashxid' },
-{ oid => '9715', descr => 'hash',
+{ oid => '6420', descr => 'hash',
proname => 'hashxidextended', prorettype => 'int8', proargtypes => 'xid int8',
prosrc => 'hashxidextended' },
-{ oid => '9716', descr => 'hash',
+{ oid => '6421', descr => 'hash',
proname => 'hashxid8', prorettype => 'int4', proargtypes => 'xid8',
prosrc => 'hashxid8' },
-{ oid => '9717', descr => 'hash',
+{ oid => '6422', descr => 'hash',
proname => 'hashxid8extended', prorettype => 'int8',
proargtypes => 'xid8 int8', prosrc => 'hashxid8extended' },
-{ oid => '9718', descr => 'hash',
+{ oid => '6423', descr => 'hash',
proname => 'hashcid', prorettype => 'int4', proargtypes => 'cid',
prosrc => 'hashcid' },
-{ oid => '9719', descr => 'hash',
+{ oid => '6424', descr => 'hash',
proname => 'hashcidextended', prorettype => 'int8', proargtypes => 'cid int8',
prosrc => 'hashcidextended' },
@@ -1348,10 +1348,10 @@
proname => 'text_smaller', proleakproof => 't', prorettype => 'text',
proargtypes => 'text text', prosrc => 'text_smaller' },
-{ oid => '8920', descr => 'larger of two',
+{ oid => '6393', descr => 'larger of two',
proname => 'bytea_larger', proleakproof => 't', prorettype => 'bytea',
proargtypes => 'bytea bytea', prosrc => 'bytea_larger' },
-{ oid => '8921', descr => 'smaller of two',
+{ oid => '6394', descr => 'smaller of two',
proname => 'bytea_smaller', proleakproof => 't', prorettype => 'bytea',
proargtypes => 'bytea bytea', prosrc => 'bytea_smaller' },
@@ -1533,7 +1533,7 @@
{ oid => '6163', descr => 'number of set bits',
proname => 'bit_count', prorettype => 'int8', proargtypes => 'bytea',
prosrc => 'bytea_bit_count' },
-{ oid => '8694', descr => 'reverse bytea',
+{ oid => '6382', descr => 'reverse bytea',
proname => 'reverse', prorettype => 'bytea', proargtypes => 'bytea',
prosrc => 'bytea_reverse' },
@@ -1638,7 +1638,7 @@
proname => 'array_append', prosupport => 'array_append_support',
proisstrict => 'f', prorettype => 'anycompatiblearray',
proargtypes => 'anycompatiblearray anycompatible', prosrc => 'array_append' },
-{ oid => '8680', descr => 'planner support for array_append',
+{ oid => '6378', descr => 'planner support for array_append',
proname => 'array_append_support', prorettype => 'internal',
proargtypes => 'internal', prosrc => 'array_append_support' },
{ oid => '379', descr => 'prepend element onto front of array',
@@ -1646,7 +1646,7 @@
proisstrict => 'f', prorettype => 'anycompatiblearray',
proargtypes => 'anycompatible anycompatiblearray',
prosrc => 'array_prepend' },
-{ oid => '8681', descr => 'planner support for array_prepend',
+{ oid => '6379', descr => 'planner support for array_prepend',
proname => 'array_prepend_support', prorettype => 'internal',
proargtypes => 'internal', prosrc => 'array_prepend_support' },
{ oid => '383',
@@ -1784,17 +1784,17 @@
{ oid => '6216', descr => 'take samples from array',
proname => 'array_sample', provolatile => 'v', prorettype => 'anyarray',
proargtypes => 'anyarray int4', prosrc => 'array_sample' },
-{ oid => '8686', descr => 'reverse array',
+{ oid => '6381', descr => 'reverse array',
proname => 'array_reverse', prorettype => 'anyarray',
proargtypes => 'anyarray', prosrc => 'array_reverse' },
-{ oid => '8810', descr => 'sort array',
+{ oid => '6388', descr => 'sort array',
proname => 'array_sort', prorettype => 'anyarray', proargtypes => 'anyarray',
prosrc => 'array_sort' },
-{ oid => '8811', descr => 'sort array',
+{ oid => '6389', descr => 'sort array',
proname => 'array_sort', prorettype => 'anyarray',
proargtypes => 'anyarray bool', proargnames => '{array,descending}',
prosrc => 'array_sort_order' },
-{ oid => '8812', descr => 'sort array',
+{ oid => '6390', descr => 'sort array',
proname => 'array_sort', prorettype => 'anyarray',
proargtypes => 'anyarray bool bool',
proargnames => '{array,descending,nulls_first}',
@@ -2315,7 +2315,7 @@
{ oid => '3136', descr => 'sort support',
proname => 'date_sortsupport', prorettype => 'void',
proargtypes => 'internal', prosrc => 'date_sortsupport' },
-{ oid => '9295', descr => 'skip support',
+{ oid => '6407', descr => 'skip support',
proname => 'date_skipsupport', prorettype => 'void',
proargtypes => 'internal', prosrc => 'date_skipsupport' },
{ oid => '4133', descr => 'window RANGE support',
@@ -3433,7 +3433,7 @@
proname => 'pg_sequence_last_value', provolatile => 'v', proparallel => 'u',
prorettype => 'int8', proargtypes => 'regclass',
prosrc => 'pg_sequence_last_value' },
-{ oid => '9876', descr => 'return sequence tuple, for use by pg_dump',
+{ oid => '6427', descr => 'return sequence tuple, for use by pg_dump',
proname => 'pg_get_sequence_data', provolatile => 'v', proparallel => 'u',
prorettype => 'record', proargtypes => 'regclass',
proallargtypes => '{regclass,int8,bool}', proargmodes => '{i,o,o}',
@@ -3594,10 +3594,11 @@
proname => 'erfc', prorettype => 'float8', proargtypes => 'float8',
prosrc => 'derfc' },
-{ oid => '8702', descr => 'gamma function',
+{ oid => '6383', descr => 'gamma function',
proname => 'gamma', prorettype => 'float8', proargtypes => 'float8',
prosrc => 'dgamma' },
-{ oid => '8703', descr => 'natural logarithm of absolute value of gamma function',
+{ oid => '6384',
+ descr => 'natural logarithm of absolute value of gamma function',
proname => 'lgamma', prorettype => 'float8', proargtypes => 'float8',
prosrc => 'dlgamma' },
@@ -3688,7 +3689,7 @@
{ oid => '872', descr => 'capitalize each word',
proname => 'initcap', prorettype => 'text', proargtypes => 'text',
prosrc => 'initcap' },
-{ oid => '9569', descr => 'fold case',
+{ oid => '6412', descr => 'fold case',
proname => 'casefold', prorettype => 'text', proargtypes => 'text',
prosrc => 'casefold' },
{ oid => '873', descr => 'left-pad string to length',
@@ -4515,7 +4516,7 @@
{ oid => '1693', descr => 'less-equal-greater',
proname => 'btboolcmp', proleakproof => 't', prorettype => 'int4',
proargtypes => 'bool bool', prosrc => 'btboolcmp' },
-{ oid => '9296', descr => 'skip support',
+{ oid => '6408', descr => 'skip support',
proname => 'btboolskipsupport', prorettype => 'void',
proargtypes => 'internal', prosrc => 'btboolskipsupport' },
@@ -5450,17 +5451,17 @@
prorettype => 'bool', proargtypes => 'oid text',
prosrc => 'has_any_column_privilege_id' },
-{ oid => '8048',
+{ oid => '6348',
descr => 'user privilege on large object by username, large object oid',
proname => 'has_largeobject_privilege', procost => '10', provolatile => 's',
prorettype => 'bool', proargtypes => 'name oid text',
prosrc => 'has_largeobject_privilege_name_id' },
-{ oid => '8049',
+{ oid => '6349',
descr => 'current user privilege on large object by large object oid',
proname => 'has_largeobject_privilege', procost => '10', provolatile => 's',
prorettype => 'bool', proargtypes => 'oid text',
prosrc => 'has_largeobject_privilege_id' },
-{ oid => '8050',
+{ oid => '6350',
descr => 'user privilege on large object by user oid, large object oid',
proname => 'has_largeobject_privilege', procost => '10', provolatile => 's',
prorettype => 'bool', proargtypes => 'oid oid text',
@@ -5611,19 +5612,19 @@
proname => 'pg_stat_get_autoanalyze_count', provolatile => 's',
proparallel => 'r', prorettype => 'int8', proargtypes => 'oid',
prosrc => 'pg_stat_get_autoanalyze_count' },
-{ oid => '8406', descr => 'total vacuum time, in milliseconds',
+{ oid => '6358', descr => 'total vacuum time, in milliseconds',
proname => 'pg_stat_get_total_vacuum_time', provolatile => 's',
proparallel => 'r', prorettype => 'float8', proargtypes => 'oid',
prosrc => 'pg_stat_get_total_vacuum_time' },
-{ oid => '8407', descr => 'total autovacuum time, in milliseconds',
+{ oid => '6359', descr => 'total autovacuum time, in milliseconds',
proname => 'pg_stat_get_total_autovacuum_time', provolatile => 's',
proparallel => 'r', prorettype => 'float8', proargtypes => 'oid',
prosrc => 'pg_stat_get_total_autovacuum_time' },
-{ oid => '8408', descr => 'total analyze time, in milliseconds',
+{ oid => '6360', descr => 'total analyze time, in milliseconds',
proname => 'pg_stat_get_total_analyze_time', provolatile => 's',
proparallel => 'r', prorettype => 'float8', proargtypes => 'oid',
prosrc => 'pg_stat_get_total_analyze_time' },
-{ oid => '8409', descr => 'total autoanalyze time, in milliseconds',
+{ oid => '6361', descr => 'total autoanalyze time, in milliseconds',
proname => 'pg_stat_get_total_autoanalyze_time', provolatile => 's',
proparallel => 'r', prorettype => 'float8', proargtypes => 'oid',
prosrc => 'pg_stat_get_total_autoanalyze_time' },
@@ -5900,12 +5901,12 @@
proname => 'pg_stat_get_db_sessions_killed', provolatile => 's',
proparallel => 'r', prorettype => 'int8', proargtypes => 'oid',
prosrc => 'pg_stat_get_db_sessions_killed' },
-{ oid => '8403',
+{ oid => '6355',
descr => 'statistics: number of parallel workers planned to be launched by queries',
proname => 'pg_stat_get_db_parallel_workers_to_launch', provolatile => 's',
proparallel => 'r', prorettype => 'int8', proargtypes => 'oid',
prosrc => 'pg_stat_get_db_parallel_workers_to_launch' },
-{ oid => '8404',
+{ oid => '6356',
descr => 'statistics: number of parallel workers effectively launched by queries',
proname => 'pg_stat_get_db_parallel_workers_launched', provolatile => 's',
proparallel => 'r', prorettype => 'int8', proargtypes => 'oid',
@@ -5927,7 +5928,7 @@
proname => 'pg_stat_get_checkpointer_num_requested', provolatile => 's',
proparallel => 'r', prorettype => 'int8', proargtypes => '',
prosrc => 'pg_stat_get_checkpointer_num_requested' },
-{ oid => '8599',
+{ oid => '6377',
descr => 'statistics: number of checkpoints performed by the checkpointer',
proname => 'pg_stat_get_checkpointer_num_performed', provolatile => 's',
proparallel => 'r', prorettype => 'int8', proargtypes => '',
@@ -5954,7 +5955,7 @@
proname => 'pg_stat_get_checkpointer_buffers_written', provolatile => 's',
proparallel => 'r', prorettype => 'int8', proargtypes => '',
prosrc => 'pg_stat_get_checkpointer_buffers_written' },
-{ oid => '8573',
+{ oid => '6366',
descr => 'statistics: number of SLRU buffers written during checkpoints and restartpoints',
proname => 'pg_stat_get_checkpointer_slru_written', provolatile => 's',
proparallel => 'r', prorettype => 'int8', proargtypes => '',
@@ -6000,7 +6001,7 @@
proargnames => '{backend_type,object,context,reads,read_bytes,read_time,writes,write_bytes,write_time,writebacks,writeback_time,extends,extend_bytes,extend_time,hits,evictions,reuses,fsyncs,fsync_time,stats_reset}',
prosrc => 'pg_stat_get_io' },
-{ oid => '8806', descr => 'statistics: backend IO statistics',
+{ oid => '6386', descr => 'statistics: backend IO statistics',
proname => 'pg_stat_get_backend_io', prorows => '5', proretset => 't',
provolatile => 'v', proparallel => 'r', prorettype => 'record',
proargtypes => 'int4',
@@ -6016,7 +6017,7 @@
proargmodes => '{o,o,o,o,o}',
proargnames => '{wal_records,wal_fpi,wal_bytes,wal_buffers_full,stats_reset}',
prosrc => 'pg_stat_get_wal' },
-{ oid => '8037', descr => 'statistics: backend WAL activity',
+{ oid => '6313', descr => 'statistics: backend WAL activity',
proname => 'pg_stat_get_backend_wal', provolatile => 'v', proparallel => 'r',
prorettype => 'record', proargtypes => 'int4',
proallargtypes => '{int4,int8,int8,numeric,int8,timestamptz}',
@@ -6155,7 +6156,7 @@
proname => 'pg_stat_reset_single_function_counters', provolatile => 'v',
prorettype => 'void', proargtypes => 'oid',
prosrc => 'pg_stat_reset_single_function_counters' },
-{ oid => '8807', descr => 'statistics: reset statistics for a single backend',
+{ oid => '6387', descr => 'statistics: reset statistics for a single backend',
proname => 'pg_stat_reset_backend_stats', provolatile => 'v',
prorettype => 'void', proargtypes => 'int4',
prosrc => 'pg_stat_reset_backend_stats' },
@@ -6369,10 +6370,10 @@
{ oid => '3411', descr => 'hash',
proname => 'timestamp_hash_extended', prorettype => 'int8',
proargtypes => 'timestamp int8', prosrc => 'timestamp_hash_extended' },
-{ oid => '9720', descr => 'hash',
+{ oid => '6425', descr => 'hash',
proname => 'timestamptz_hash', prorettype => 'int4',
proargtypes => 'timestamptz', prosrc => 'timestamptz_hash' },
-{ oid => '9721', descr => 'hash',
+{ oid => '6426', descr => 'hash',
proname => 'timestamptz_hash_extended', prorettype => 'int8',
proargtypes => 'timestamptz int8', prosrc => 'timestamptz_hash_extended' },
{ oid => '2041', descr => 'intervals overlap?',
@@ -6397,7 +6398,7 @@
{ oid => '3137', descr => 'sort support',
proname => 'timestamp_sortsupport', prorettype => 'void',
proargtypes => 'internal', prosrc => 'timestamp_sortsupport' },
-{ oid => '9297', descr => 'skip support',
+{ oid => '6409', descr => 'skip support',
proname => 'timestamp_skipsupport', prorettype => 'void',
proargtypes => 'internal', prosrc => 'timestamp_skipsupport' },
@@ -6593,7 +6594,7 @@
proname => 'pg_describe_object', provolatile => 's', prorettype => 'text',
proargtypes => 'oid oid int4', prosrc => 'pg_describe_object' },
-{ oid => '8730', descr => 'get ACL for SQL object',
+{ oid => '6385', descr => 'get ACL for SQL object',
proname => 'pg_get_acl', provolatile => 's', prorettype => '_aclitem',
proargtypes => 'oid oid int4', proargnames => '{classid,objid,objsubid}',
prosrc => 'pg_get_acl' },
@@ -6792,7 +6793,7 @@
proargnames => '{rm_id, rm_name, rm_builtin}',
prosrc => 'pg_get_wal_resource_managers' },
-{ oid => '8303', descr => 'get info about loaded modules',
+{ oid => '6353', descr => 'get info about loaded modules',
proname => 'pg_get_loaded_modules', prorows => '10', proretset => 't',
provolatile => 'v', proparallel => 'r', prorettype => 'record',
proargtypes => '', proallargtypes => '{text,text,text}',
@@ -6992,7 +6993,7 @@
proname => 'max', prokind => 'a', proisstrict => 'f',
prorettype => 'anyarray', proargtypes => 'anyarray',
prosrc => 'aggregate_dummy' },
-{ oid => '8595', descr => 'maximum value of all record input values',
+{ oid => '6373', descr => 'maximum value of all record input values',
proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'record',
proargtypes => 'record', prosrc => 'aggregate_dummy' },
{ oid => '2244', descr => 'maximum value of all bpchar input values',
@@ -7010,7 +7011,7 @@
{ oid => '5099', descr => 'maximum value of all xid8 input values',
proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'xid8',
proargtypes => 'xid8', prosrc => 'aggregate_dummy' },
-{ oid => '8922', descr => 'maximum value of all bytea input values',
+{ oid => '6395', descr => 'maximum value of all bytea input values',
proname => 'max', prokind => 'a', proisstrict => 'f', prorettype => 'bytea',
proargtypes => 'bytea', prosrc => 'aggregate_dummy' },
@@ -7068,7 +7069,7 @@
proname => 'min', prokind => 'a', proisstrict => 'f',
prorettype => 'anyarray', proargtypes => 'anyarray',
prosrc => 'aggregate_dummy' },
-{ oid => '8596', descr => 'minimum value of all record input values',
+{ oid => '6374', descr => 'minimum value of all record input values',
proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'record',
proargtypes => 'record', prosrc => 'aggregate_dummy' },
{ oid => '2245', descr => 'minimum value of all bpchar input values',
@@ -7086,7 +7087,7 @@
{ oid => '5100', descr => 'minimum value of all xid8 input values',
proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'xid8',
proargtypes => 'xid8', prosrc => 'aggregate_dummy' },
-{ oid => '8923', descr => 'minimum value of all bytea input values',
+{ oid => '6396', descr => 'minimum value of all bytea input values',
proname => 'min', prokind => 'a', proisstrict => 'f', prorettype => 'bytea',
proargtypes => 'bytea', prosrc => 'aggregate_dummy' },
@@ -7949,10 +7950,10 @@
proargtypes => 'internal', prosrc => 'tsm_system_handler' },
# CRC variants
-{ oid => '8571', descr => 'CRC-32 value',
+{ oid => '6364', descr => 'CRC-32 value',
proname => 'crc32', proleakproof => 't', prorettype => 'int8',
proargtypes => 'bytea', prosrc => 'crc32_bytea' },
-{ oid => '8572', descr => 'CRC-32C value',
+{ oid => '6365', descr => 'CRC-32C value',
proname => 'crc32c', proleakproof => 't', prorettype => 'int8',
proargtypes => 'bytea', prosrc => 'crc32c_bytea' },
@@ -8496,7 +8497,7 @@
proargmodes => '{o,o,o,o,o,o}',
proargnames => '{name,statement,is_holdable,is_binary,is_scrollable,creation_time}',
prosrc => 'pg_cursor' },
-{ oid => '9221', descr => 'get abbreviations from current timezone',
+{ oid => '6401', descr => 'get abbreviations from current timezone',
proname => 'pg_timezone_abbrevs_zone', prorows => '10', proretset => 't',
provolatile => 's', prorettype => 'record', proargtypes => '',
proallargtypes => '{text,interval,bool}', proargmodes => '{o,o,o}',
@@ -8571,16 +8572,6 @@
prorettype => 'bool', proargtypes => 'int4',
prosrc => 'pg_log_backend_memory_contexts' },
-# publishing memory contexts of the specified postgres process
-{ oid => '2173', descr => 'publish memory contexts of the specified backend',
- proname => 'pg_get_process_memory_contexts', provolatile => 'v',
- prorows => '100', proretset => 't', proparallel => 'r',
- prorettype => 'record', proargtypes => 'int4 bool float8',
- proallargtypes => '{int4,bool,float8,text,text,text,_int4,int4,int8,int8,int8,int8,int8,int4,timestamptz}',
- proargmodes => '{i,i,i,o,o,o,o,o,o,o,o,o,o,o,o}',
- proargnames => '{pid, summary, timeout, name, ident, type, path, level, total_bytes, total_nblocks, free_bytes, free_chunks, used_bytes, num_agg_contexts, stats_timestamp}',
- prosrc => 'pg_get_process_memory_contexts' },
-
# non-persistent series generator
{ oid => '1066', descr => 'non-persistent series generator',
proname => 'generate_series', prorows => '1000',
@@ -8618,7 +8609,7 @@
prosupport => 'generate_series_numeric_support', proretset => 't',
prorettype => 'numeric', proargtypes => 'numeric numeric',
prosrc => 'generate_series_numeric' },
-{ oid => '8405', descr => 'planner support for generate_series',
+{ oid => '6357', descr => 'planner support for generate_series',
proname => 'generate_series_numeric_support', prorettype => 'internal',
proargtypes => 'internal', prosrc => 'generate_series_numeric_support' },
{ oid => '938', descr => 'non-persistent series generator',
@@ -8638,7 +8629,7 @@
prorettype => 'timestamptz',
proargtypes => 'timestamptz timestamptz interval text',
prosrc => 'generate_series_timestamptz_at_zone' },
-{ oid => '8402', descr => 'planner support for generate_series',
+{ oid => '6354', descr => 'planner support for generate_series',
proname => 'generate_series_timestamp_support', prorettype => 'internal',
proargtypes => 'internal', prosrc => 'generate_series_timestamp_support' },
@@ -9370,8 +9361,8 @@
proname => 'to_json', provolatile => 's', prorettype => 'json',
proargtypes => 'anyelement', prosrc => 'to_json' },
{ oid => '3261', descr => 'remove object fields with null values from json',
- proname => 'json_strip_nulls', prorettype => 'json', proargtypes => 'json bool',
- prosrc => 'json_strip_nulls' },
+ proname => 'json_strip_nulls', prorettype => 'json',
+ proargtypes => 'json bool', prosrc => 'json_strip_nulls' },
{ oid => '3947',
proname => 'json_object_field', prorettype => 'json',
@@ -9477,7 +9468,7 @@
{ oid => '3300', descr => 'sort support',
proname => 'uuid_sortsupport', prorettype => 'void',
proargtypes => 'internal', prosrc => 'uuid_sortsupport' },
-{ oid => '9298', descr => 'skip support',
+{ oid => '6410', descr => 'skip support',
proname => 'uuid_skipsupport', prorettype => 'void',
proargtypes => 'internal', prosrc => 'uuid_skipsupport' },
{ oid => '2961', descr => 'I/O',
@@ -9493,17 +9484,19 @@
proname => 'uuid_hash_extended', prorettype => 'int8',
proargtypes => 'uuid int8', prosrc => 'uuid_hash_extended' },
{ oid => '3432', descr => 'generate random UUID',
- proname => 'gen_random_uuid', provolatile => 'v',
- prorettype => 'uuid', proargtypes => '', prosrc => 'gen_random_uuid' },
-{ oid => '9895', descr => 'generate UUID version 4',
- proname => 'uuidv4', provolatile => 'v',
- prorettype => 'uuid', proargtypes => '', prosrc => 'gen_random_uuid' },
-{ oid => '9896', descr => 'generate UUID version 7',
- proname => 'uuidv7', provolatile => 'v',
- prorettype => 'uuid', proargtypes => '', prosrc => 'uuidv7' },
-{ oid => '9897', descr => 'generate UUID version 7 with a timestamp shifted by specified interval',
- proname => 'uuidv7', provolatile => 'v', proargnames => '{shift}',
- prorettype => 'uuid', proargtypes => 'interval', prosrc => 'uuidv7_interval' },
+ proname => 'gen_random_uuid', provolatile => 'v', prorettype => 'uuid',
+ proargtypes => '', prosrc => 'gen_random_uuid' },
+{ oid => '6428', descr => 'generate UUID version 4',
+ proname => 'uuidv4', provolatile => 'v', prorettype => 'uuid',
+ proargtypes => '', prosrc => 'gen_random_uuid' },
+{ oid => '6429', descr => 'generate UUID version 7',
+ proname => 'uuidv7', provolatile => 'v', prorettype => 'uuid',
+ proargtypes => '', prosrc => 'uuidv7' },
+{ oid => '6430',
+ descr => 'generate UUID version 7 with a timestamp shifted by specified interval',
+ proname => 'uuidv7', provolatile => 'v', prorettype => 'uuid',
+ proargtypes => 'interval', proargnames => '{shift}',
+ prosrc => 'uuidv7_interval' },
{ oid => '6342', descr => 'extract timestamp from UUID',
proname => 'uuid_extract_timestamp', proleakproof => 't',
prorettype => 'timestamptz', proargtypes => 'uuid',
@@ -10309,8 +10302,8 @@
prorettype => 'jsonb', proargtypes => '',
prosrc => 'jsonb_build_object_noargs' },
{ oid => '3262', descr => 'remove object fields with null values from jsonb',
- proname => 'jsonb_strip_nulls', prorettype => 'jsonb', proargtypes => 'jsonb bool',
- prosrc => 'jsonb_strip_nulls' },
+ proname => 'jsonb_strip_nulls', prorettype => 'jsonb',
+ proargtypes => 'jsonb bool', prosrc => 'jsonb_strip_nulls' },
{ oid => '3478',
proname => 'jsonb_object_field', prorettype => 'jsonb',
@@ -10661,10 +10654,10 @@
{ oid => '2987', descr => 'less-equal-greater',
proname => 'btrecordcmp', prorettype => 'int4',
proargtypes => 'record record', prosrc => 'btrecordcmp' },
-{ oid => '8597', descr => 'larger of two',
+{ oid => '6375', descr => 'larger of two',
proname => 'record_larger', prorettype => 'record',
proargtypes => 'record record', prosrc => 'record_larger' },
-{ oid => '8598', descr => 'smaller of two',
+{ oid => '6376', descr => 'smaller of two',
proname => 'record_smaller', prorettype => 'record',
proargtypes => 'record record', prosrc => 'record_smaller' },
@@ -10904,7 +10897,7 @@
{ oid => '3870', descr => 'less-equal-greater',
proname => 'range_cmp', prorettype => 'int4',
proargtypes => 'anyrange anyrange', prosrc => 'range_cmp' },
-{ oid => '8849', descr => 'sort support',
+{ oid => '6391', descr => 'sort support',
proname => 'range_sortsupport', prorettype => 'void',
proargtypes => 'internal', prosrc => 'range_sortsupport' },
{ oid => '3871',
@@ -12323,7 +12316,7 @@
proname => 'array_subscript_handler',
prosupport => 'array_subscript_handler_support', prorettype => 'internal',
proargtypes => 'internal', prosrc => 'array_subscript_handler' },
-{ oid => '8682', descr => 'planner support for array_subscript_handler',
+{ oid => '6380', descr => 'planner support for array_subscript_handler',
proname => 'array_subscript_handler_support', prorettype => 'internal',
proargtypes => 'internal', prosrc => 'array_subscript_handler_support' },
{ oid => '6180', descr => 'raw array subscripting support',
@@ -12362,7 +12355,7 @@
provolatile => 'v', prorettype => 'record', proargtypes => '',
proallargtypes => '{text,int8,timestamptz}', proargmodes => '{o,o,o}',
proargnames => '{name,size,modification}', prosrc => 'pg_ls_waldir' },
-{ oid => '9220', descr => 'list of files in the pg_wal/summaries directory',
+{ oid => '6400', descr => 'list of files in the pg_wal/summaries directory',
proname => 'pg_ls_summariesdir', procost => '10', prorows => '20',
proretset => 't', provolatile => 'v', prorettype => 'record',
proargtypes => '', proallargtypes => '{text,int8,timestamptz}',
@@ -12518,49 +12511,37 @@
proargnames => '{summarized_tli,summarized_lsn,pending_lsn,summarizer_pid}',
prosrc => 'pg_get_wal_summarizer_state' },
# Statistics Import
-{ oid => '8459',
- descr => 'restore statistics on relation',
- proname => 'pg_restore_relation_stats', provolatile => 'v', proisstrict => 'f',
- provariadic => 'any',
- proparallel => 'u', prorettype => 'bool',
- proargtypes => 'any',
- proargnames => '{kwargs}',
- proargmodes => '{v}',
- prosrc => 'pg_restore_relation_stats' },
-{ oid => '9160',
- descr => 'clear statistics on relation',
- proname => 'pg_clear_relation_stats', provolatile => 'v', proisstrict => 'f',
- proparallel => 'u', prorettype => 'void',
- proargtypes => 'text text',
- proargnames => '{schemaname,relname}',
- prosrc => 'pg_clear_relation_stats' },
-{ oid => '8461',
- descr => 'restore statistics on attribute',
- proname => 'pg_restore_attribute_stats', provolatile => 'v', proisstrict => 'f',
- provariadic => 'any',
- proparallel => 'u', prorettype => 'bool',
- proargtypes => 'any',
- proargnames => '{kwargs}',
- proargmodes => '{v}',
- prosrc => 'pg_restore_attribute_stats' },
-{ oid => '9162',
- descr => 'clear statistics on attribute',
- proname => 'pg_clear_attribute_stats', provolatile => 'v', proisstrict => 'f',
+{ oid => '6362', descr => 'restore statistics on relation',
+ proname => 'pg_restore_relation_stats', provariadic => 'any',
+ proisstrict => 'f', provolatile => 'v', proparallel => 'u',
+ prorettype => 'bool', proargtypes => 'any', proargmodes => '{v}',
+ proargnames => '{kwargs}', prosrc => 'pg_restore_relation_stats' },
+{ oid => '6397', descr => 'clear statistics on relation',
+ proname => 'pg_clear_relation_stats', proisstrict => 'f', provolatile => 'v',
+ proparallel => 'u', prorettype => 'void', proargtypes => 'text text',
+ proargnames => '{schemaname,relname}', prosrc => 'pg_clear_relation_stats' },
+{ oid => '6363', descr => 'restore statistics on attribute',
+ proname => 'pg_restore_attribute_stats', provariadic => 'any',
+ proisstrict => 'f', provolatile => 'v', proparallel => 'u',
+ prorettype => 'bool', proargtypes => 'any', proargmodes => '{v}',
+ proargnames => '{kwargs}', prosrc => 'pg_restore_attribute_stats' },
+{ oid => '6398', descr => 'clear statistics on attribute',
+ proname => 'pg_clear_attribute_stats', proisstrict => 'f', provolatile => 'v',
proparallel => 'u', prorettype => 'void',
proargtypes => 'text text text bool',
proargnames => '{schemaname,relname,attname,inherited}',
prosrc => 'pg_clear_attribute_stats' },
# GiST stratnum implementations
-{ oid => '8047', descr => 'GiST support',
- proname => 'gist_stratnum_common', prorettype => 'int2',
- proargtypes => 'int4',
- prosrc => 'gist_stratnum_common' },
+{ oid => '6347', descr => 'GiST support',
+ proname => 'gist_translate_cmptype_common', prorettype => 'int2',
+ proargtypes => 'int4', prosrc => 'gist_translate_cmptype_common' },
# AIO related functions
-{ oid => '9200', descr => 'information about in-progress asynchronous IOs',
+{ oid => '6399', descr => 'information about in-progress asynchronous IOs',
proname => 'pg_get_aios', prorows => '100', proretset => 't',
- provolatile => 'v', proparallel => 'r', prorettype => 'record', proargtypes => '',
+ provolatile => 'v', proparallel => 'r', prorettype => 'record',
+ proargtypes => '',
proallargtypes => '{int4,int4,int8,text,text,int8,int8,text,int2,int4,text,text,bool,bool,bool}',
proargmodes => '{o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}',
proargnames => '{pid,io_id,io_generation,state,operation,off,length,target,handle_data_len,raw_result,result,target_desc,f_sync,f_localmem,f_buffered}',
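The renamed entries above track the C-level change: GiST support function 12 is no longer handed a raw RT strategy number but a CompareType that it translates into the opclass's own strategy numbering, hence gist_translate_cmptype_common. A minimal sketch of an opclass-specific implementation following that convention (my_ names are hypothetical, and the header/enum names reflect my reading of the tree rather than this diff):

#include "postgres.h"
#include "access/cmptype.h"
#include "access/stratnum.h"
#include "fmgr.h"

PG_FUNCTION_INFO_V1(my_gist_translate_cmptype);

Datum
my_gist_translate_cmptype(PG_FUNCTION_ARGS)
{
	CompareType	cmptype = (CompareType) PG_GETARG_INT32(0);

	/* map the generic comparison kind to this opclass's strategies */
	switch (cmptype)
	{
		case COMPARE_EQ:
			PG_RETURN_INT16(RTEqualStrategyNumber);
		case COMPARE_LT:
			PG_RETURN_INT16(RTLessStrategyNumber);
		case COMPARE_LE:
			PG_RETURN_INT16(RTLessEqualStrategyNumber);
		case COMPARE_GE:
			PG_RETURN_INT16(RTGreaterEqualStrategyNumber);
		case COMPARE_GT:
			PG_RETURN_INT16(RTGreaterStrategyNumber);
		default:
			PG_RETURN_INT16(InvalidStrategy);
	}
}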
diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h
index 03c5b3d73e5..3b122f79ed8 100644
--- a/src/include/commands/explain.h
+++ b/src/include/commands/explain.h
@@ -63,10 +63,8 @@ extern void ExplainOneUtility(Node *utilityStmt, IntoClause *into,
struct ExplainState *es, ParseState *pstate,
ParamListInfo params);
-extern void ExplainOnePlan(PlannedStmt *plannedstmt, CachedPlan *cplan,
- CachedPlanSource *plansource, int query_index,
- IntoClause *into, struct ExplainState *es,
- const char *queryString,
+extern void ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into,
+ struct ExplainState *es, const char *queryString,
ParamListInfo params, QueryEnvironment *queryEnv,
const instr_time *planduration,
const BufferUsage *bufusage,
diff --git a/src/include/commands/trigger.h b/src/include/commands/trigger.h
index 4180601dcd4..2ed2c4bb378 100644
--- a/src/include/commands/trigger.h
+++ b/src/include/commands/trigger.h
@@ -258,7 +258,6 @@ extern void ExecASTruncateTriggers(EState *estate,
extern void AfterTriggerBeginXact(void);
extern void AfterTriggerBeginQuery(void);
extern void AfterTriggerEndQuery(EState *estate);
-extern void AfterTriggerAbortQuery(void);
extern void AfterTriggerFireDeferred(void);
extern void AfterTriggerEndXact(bool isCommit);
extern void AfterTriggerBeginSubXact(void);
diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h
index bc37a80dc74..14eeccbd718 100644
--- a/src/include/commands/vacuum.h
+++ b/src/include/commands/vacuum.h
@@ -336,7 +336,7 @@ extern PGDLLIMPORT int64 parallel_vacuum_worker_delay_ns;
/* in commands/vacuum.c */
extern void ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel);
-extern void vacuum(List *relations, VacuumParams *params,
+extern void vacuum(List *relations, const VacuumParams params,
BufferAccessStrategy bstrategy, MemoryContext vac_context,
bool isTopLevel);
extern void vac_open_indexes(Relation relation, LOCKMODE lockmode,
@@ -357,7 +357,7 @@ extern void vac_update_relstats(Relation relation,
bool *frozenxid_updated,
bool *minmulti_updated,
bool in_outer_xact);
-extern bool vacuum_get_cutoffs(Relation rel, const VacuumParams *params,
+extern bool vacuum_get_cutoffs(Relation rel, const VacuumParams params,
struct VacuumCutoffs *cutoffs);
extern bool vacuum_xid_failsafe_check(const struct VacuumCutoffs *cutoffs);
extern void vac_update_datfrozenxid(void);
@@ -398,7 +398,7 @@ extern void parallel_vacuum_main(dsm_segment *seg, shm_toc *toc);
/* in commands/analyze.c */
extern void analyze_rel(Oid relid, RangeVar *relation,
- VacuumParams *params, List *va_cols, bool in_outer_xact,
+ const VacuumParams params, List *va_cols, bool in_outer_xact,
BufferAccessStrategy bstrategy);
extern bool std_typanalyze(VacAttrStats *stats);
diff --git a/src/include/executor/execdesc.h b/src/include/executor/execdesc.h
index ba53305ad42..86db3dc8d0d 100644
--- a/src/include/executor/execdesc.h
+++ b/src/include/executor/execdesc.h
@@ -35,7 +35,6 @@ typedef struct QueryDesc
/* These fields are provided by CreateQueryDesc */
CmdType operation; /* CMD_SELECT, CMD_UPDATE, etc. */
PlannedStmt *plannedstmt; /* planner's output (could be utility, too) */
- CachedPlan *cplan; /* CachedPlan that supplies the plannedstmt */
const char *sourceText; /* source text of the query */
Snapshot snapshot; /* snapshot to use for query */
Snapshot crosscheck_snapshot; /* crosscheck for RI update/delete */
@@ -58,7 +57,6 @@ typedef struct QueryDesc
/* in pquery.c */
extern QueryDesc *CreateQueryDesc(PlannedStmt *plannedstmt,
- CachedPlan *cplan,
const char *sourceText,
Snapshot snapshot,
Snapshot crosscheck_snapshot,
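With the cplan parameter removed, building a QueryDesc reverts to the plain form. A sketch that assumes the trailing parameters (dest, params, queryEnv, instrument_options) are unchanged from pquery.c, and that stmt and query_string are already in scope:

QueryDesc  *qdesc;

qdesc = CreateQueryDesc(stmt,				/* PlannedStmt to run */
						query_string,		/* source text */
						GetActiveSnapshot(),
						InvalidSnapshot,	/* no crosscheck snapshot */
						None_Receiver,		/* discard any output */
						NULL,				/* no bound params */
						NULL,				/* no query environment */
						0);					/* no instrumentation */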
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index ae99407db89..104b059544d 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -19,7 +19,6 @@
#include "nodes/lockoptions.h"
#include "nodes/parsenodes.h"
#include "utils/memutils.h"
-#include "utils/plancache.h"
/*
@@ -73,7 +72,7 @@
/* Hook for plugins to get control in ExecutorStart() */
-typedef bool (*ExecutorStart_hook_type) (QueryDesc *queryDesc, int eflags);
+typedef void (*ExecutorStart_hook_type) (QueryDesc *queryDesc, int eflags);
extern PGDLLIMPORT ExecutorStart_hook_type ExecutorStart_hook;
/* Hook for plugins to get control in ExecutorRun() */
@@ -229,11 +228,8 @@ ExecGetJunkAttribute(TupleTableSlot *slot, AttrNumber attno, bool *isNull)
/*
* prototypes from functions in execMain.c
*/
-extern bool ExecutorStart(QueryDesc *queryDesc, int eflags);
-extern void ExecutorStartCachedPlan(QueryDesc *queryDesc, int eflags,
- CachedPlanSource *plansource,
- int query_index);
-extern bool standard_ExecutorStart(QueryDesc *queryDesc, int eflags);
+extern void ExecutorStart(QueryDesc *queryDesc, int eflags);
+extern void standard_ExecutorStart(QueryDesc *queryDesc, int eflags);
extern void ExecutorRun(QueryDesc *queryDesc,
ScanDirection direction, uint64 count);
extern void standard_ExecutorRun(QueryDesc *queryDesc,
@@ -300,30 +296,6 @@ extern void ExecEndNode(PlanState *node);
extern void ExecShutdownNode(PlanState *node);
extern void ExecSetTupleBound(int64 tuples_needed, PlanState *child_node);
-/*
- * Is the CachedPlan in es_cachedplan still valid?
- *
- * Called from InitPlan() because invalidation messages that affect the plan
- * might be received after locks have been taken on runtime-prunable relations.
- * The caller should take appropriate action if the plan has become invalid.
- */
-static inline bool
-ExecPlanStillValid(EState *estate)
-{
- return estate->es_cachedplan == NULL ? true :
- CachedPlanValid(estate->es_cachedplan);
-}
-
-/*
- * Locks are needed only if running a cached plan that might contain unlocked
- * relations, such as a reused generic plan.
- */
-static inline bool
-ExecShouldLockRelations(EState *estate)
-{
- return estate->es_cachedplan == NULL ? false :
- CachedPlanRequiresLocking(estate->es_cachedplan);
-}
/* ----------------------------------------------------------------
* ExecProcNode
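The hook and standard_ExecutorStart now return void, and the CachedPlan revalidation helpers (ExecutorStartCachedPlan, ExecPlanStillValid, ExecShouldLockRelations) disappear from the extension-visible surface, so a hook goes back to plain wrapping. A minimal sketch against the declarations above (my_ names are hypothetical):

#include "postgres.h"
#include "fmgr.h"
#include "executor/executor.h"

PG_MODULE_MAGIC;

static ExecutorStart_hook_type prev_ExecutorStart = NULL;

static void
my_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
	/* do any pre-startup work here, e.g. force instrumentation flags */
	if (prev_ExecutorStart)
		prev_ExecutorStart(queryDesc, eflags);
	else
		standard_ExecutorStart(queryDesc, eflags);
}

void
_PG_init(void)
{
	prev_ExecutorStart = ExecutorStart_hook;
	ExecutorStart_hook = my_ExecutorStart;
}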
diff --git a/src/include/executor/nodeAgg.h b/src/include/executor/nodeAgg.h
index 34b82d0f5d1..6c4891bbaeb 100644
--- a/src/include/executor/nodeAgg.h
+++ b/src/include/executor/nodeAgg.h
@@ -264,7 +264,7 @@ typedef struct AggStatePerGroupData
* NULL and not auto-replace it with a later input value. Only the first
* non-NULL input will be auto-substituted.
*/
-}			AggStatePerGroupData;
+} AggStatePerGroupData;
/*
* AggStatePerPhaseData - per-grouping-set-phase state
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h
index 1e59a7f910f..1bef98471c3 100644
--- a/src/include/miscadmin.h
+++ b/src/include/miscadmin.h
@@ -96,7 +96,6 @@ extern PGDLLIMPORT volatile sig_atomic_t IdleSessionTimeoutPending;
extern PGDLLIMPORT volatile sig_atomic_t ProcSignalBarrierPending;
extern PGDLLIMPORT volatile sig_atomic_t LogMemoryContextPending;
extern PGDLLIMPORT volatile sig_atomic_t IdleStatsUpdateTimeoutPending;
-extern PGDLLIMPORT volatile sig_atomic_t PublishMemoryContextPending;
extern PGDLLIMPORT volatile sig_atomic_t CheckClientConnectionPending;
extern PGDLLIMPORT volatile sig_atomic_t ClientConnectionLost;
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 5b6cadb5a6c..2492282213f 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -42,7 +42,6 @@
#include "storage/condition_variable.h"
#include "utils/hsearch.h"
#include "utils/queryenvironment.h"
-#include "utils/plancache.h"
#include "utils/reltrigger.h"
#include "utils/sharedtuplestore.h"
#include "utils/snapshot.h"
@@ -664,7 +663,6 @@ typedef struct EState
* ExecRowMarks, or NULL if none */
List *es_rteperminfos; /* List of RTEPermissionInfo */
PlannedStmt *es_plannedstmt; /* link to top of plan tree */
- CachedPlan *es_cachedplan; /* CachedPlan providing the plan tree */
List *es_part_prune_infos; /* List of PartitionPruneInfo */
List *es_part_prune_states; /* List of PartitionPruneState */
List *es_part_prune_results; /* List of Bitmapset */
@@ -717,7 +715,6 @@ typedef struct EState
int es_top_eflags; /* eflags passed to ExecutorStart */
int es_instrument; /* OR of InstrumentOption flags */
bool es_finished; /* true when ExecutorFinish is done */
- bool es_aborted; /* true when execution was aborted */
List *es_exprcontexts; /* List of ExprContexts within EState */
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 4610fc61293..ba12678d1cb 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -127,8 +127,13 @@ typedef struct Query
* query identifier (can be set by plugins); ignored for equal, as it
* might not be set; also not stored. This is the result of the query
* jumble, hence ignored.
+ *
+ * We store this as a signed value because that is the form in which it's
+ * displayed to users in places such as EXPLAIN and pg_stat_statements,
+ * primarily for lack of an SQL type that can represent the full range of
+ * uint64.
*/
- uint64 queryId pg_node_attr(equal_ignore, query_jumble_ignore, read_write_ignore, read_as(0));
+ int64 queryId pg_node_attr(equal_ignore, query_jumble_ignore, read_write_ignore, read_as(0));
/* do I set the command result tag? */
bool canSetTag pg_node_attr(query_jumble_ignore);
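The type change is purely representational: the jumble still produces a 64-bit hash, but carrying it as int64 matches how it reaches users (SQL bigint). An illustrative fragment with hypothetical values:

#include "postgres.h"

uint64		hash = UINT64CONST(0xD3E1A9F00000002A);	/* jumble output */
int64		queryId = (int64) hash;	/* what Query.queryId now stores */

/*
 * Same 64 bits either way; a hash with the top bit set simply displays
 * as a negative bigint in EXPLAIN, pg_stat_activity and
 * pg_stat_statements.
 */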
@@ -346,6 +351,14 @@ typedef struct A_Expr
List *name; /* possibly-qualified name of operator */
Node *lexpr; /* left argument, or NULL if none */
Node *rexpr; /* right argument, or NULL if none */
+
+ /*
+ * If rexpr is a list of some kind, we separately track its starting and
+ * ending location; it's not the same as the starting and ending location
+ * of the token itself.
+ */
+ ParseLoc rexpr_list_start;
+ ParseLoc rexpr_list_end;
ParseLoc location; /* token location, or -1 if unknown */
} A_Expr;
@@ -501,6 +514,8 @@ typedef struct A_ArrayExpr
{
NodeTag type;
List *elements; /* array element expressions */
+ ParseLoc list_start; /* start of the elements list */
+ ParseLoc list_end; /* end of the elements list */
ParseLoc location; /* token location, or -1 if unknown */
} A_ArrayExpr;
@@ -2095,8 +2110,6 @@ typedef struct InsertStmt
ReturningClause *returningClause; /* RETURNING clause */
WithClause *withClause; /* WITH clause */
OverridingKind override; /* OVERRIDING clause */
- ParseLoc stmt_location; /* start location, or -1 if unknown */
- ParseLoc stmt_len; /* length in bytes; 0 means "rest of string" */
} InsertStmt;
/* ----------------------
@@ -2111,8 +2124,6 @@ typedef struct DeleteStmt
Node *whereClause; /* qualifications */
ReturningClause *returningClause; /* RETURNING clause */
WithClause *withClause; /* WITH clause */
- ParseLoc stmt_location; /* start location, or -1 if unknown */
- ParseLoc stmt_len; /* length in bytes; 0 means "rest of string" */
} DeleteStmt;
/* ----------------------
@@ -2128,8 +2139,6 @@ typedef struct UpdateStmt
List *fromClause; /* optional from clause for more tables */
ReturningClause *returningClause; /* RETURNING clause */
WithClause *withClause; /* WITH clause */
- ParseLoc stmt_location; /* start location, or -1 if unknown */
- ParseLoc stmt_len; /* length in bytes; 0 means "rest of string" */
} UpdateStmt;
/* ----------------------
@@ -2145,8 +2154,6 @@ typedef struct MergeStmt
List *mergeWhenClauses; /* list of MergeWhenClause(es) */
ReturningClause *returningClause; /* RETURNING clause */
WithClause *withClause; /* WITH clause */
- ParseLoc stmt_location; /* start location, or -1 if unknown */
- ParseLoc stmt_len; /* length in bytes; 0 means "rest of string" */
} MergeStmt;
/* ----------------------
@@ -2216,8 +2223,6 @@ typedef struct SelectStmt
bool all; /* ALL specified? */
struct SelectStmt *larg; /* left child */
struct SelectStmt *rarg; /* right child */
- ParseLoc stmt_location; /* start location, or -1 if unknown */
- ParseLoc stmt_len; /* length in bytes; 0 means "rest of string" */
/* Eventually add fields for CORRESPONDING spec here */
} SelectStmt;
diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h
index 1dd2d1560cb..6567759595d 100644
--- a/src/include/nodes/pathnodes.h
+++ b/src/include/nodes/pathnodes.h
@@ -138,9 +138,6 @@ typedef struct PlannerGlobal
/* "flat" list of integer RT indexes */
List *resultRelations;
- /* "flat" list of integer RT indexes (one per ModifyTable node) */
- List *firstResultRels;
-
/* "flat" list of AppendRelInfos */
List *appendRelations;
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 658d76225e4..4f59e30d62d 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -53,10 +53,10 @@ typedef struct PlannedStmt
CmdType commandType;
/* query identifier (copied from Query) */
- uint64 queryId;
+ int64 queryId;
/* plan identifier (can be set by plugins) */
- uint64 planId;
+ int64 planId;
/* is it insert|update|delete|merge RETURNING? */
bool hasReturning;
@@ -105,13 +105,6 @@ typedef struct PlannedStmt
/* integer list of RT indexes, or NIL */
List *resultRelations;
- /*
- * rtable indexes of first target relation in each ModifyTable node in the
- * plan for INSERT/UPDATE/DELETE/MERGE
- */
- /* integer list of RT indexes, or NIL */
- List *firstResultRels;
-
/* list of AppendRelInfo nodes */
List *appendRelations;
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index 7d3b4198f26..6dfca3cb35b 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -389,14 +389,16 @@ typedef enum ParamKind
typedef struct Param
{
+ pg_node_attr(custom_query_jumble)
+
Expr xpr;
ParamKind paramkind; /* kind of parameter. See above */
int paramid; /* numeric ID for parameter */
Oid paramtype; /* pg_type OID of parameter's datatype */
/* typmod value, if known */
- int32 paramtypmod pg_node_attr(query_jumble_ignore);
+ int32 paramtypmod;
/* OID of collation, or InvalidOid if none */
- Oid paramcollid pg_node_attr(query_jumble_ignore);
+ Oid paramcollid;
/* token location, or -1 if unknown */
ParseLoc location;
} Param;
@@ -1397,6 +1399,10 @@ typedef struct ArrayExpr
List *elements pg_node_attr(query_jumble_squash);
/* true if elements are sub-arrays */
bool multidims pg_node_attr(query_jumble_ignore);
+ /* location of the start of the elements list */
+ ParseLoc list_start;
+ /* location of the end of the elements list */
+ ParseLoc list_end;
/* token location, or -1 if unknown */
ParseLoc location;
} ArrayExpr;
diff --git a/src/include/nodes/queryjumble.h b/src/include/nodes/queryjumble.h
index da7c7abed2e..dcb36dcb44f 100644
--- a/src/include/nodes/queryjumble.h
+++ b/src/include/nodes/queryjumble.h
@@ -24,11 +24,11 @@ typedef struct LocationLen
int location; /* start offset in query text */
int length; /* length in bytes, or -1 to ignore */
- /*
- * Indicates that this location represents the beginning or end of a run
- * of squashed constants.
- */
+ /* Does this location represent a squashed list? */
bool squashed;
+
+ /* Is this location a PARAM_EXTERN parameter? */
+ bool extern_param;
} LocationLen;
/*
@@ -52,9 +52,18 @@ typedef struct JumbleState
/* Current number of valid entries in clocations array */
int clocations_count;
- /* highest Param id we've seen, in order to start normalization correctly */
+ /*
+ * ID of the highest PARAM_EXTERN parameter we've seen in the query; used
+ * to start normalization correctly. However, if there are any squashed
+ * lists in the query, we disregard query-supplied parameter numbers and
+ * renumber everything, to avoid gaps in the numbering that squashing
+ * could otherwise introduce.
+ */
int highest_extern_param_id;
+ /* Whether squashable lists are present */
+ bool has_squashed_lists;
+
/*
* Count of the number of NULL nodes seen since last appending a value.
* These are flushed out to the jumble buffer before subsequent appends
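Together with the rexpr_list_start/rexpr_list_end and list_start/list_end fields added to A_Expr and ArrayExpr above, these flags let the normalizer treat an entire constant list as a single location while tracking client-supplied parameters separately. A hypothetical illustration of the bookkeeping (not the actual generation code): for

    SELECT * FROM t WHERE x IN (1, 2, 3) AND y = $1

one LocationLen with squashed = true spans the whole IN-list, and one with extern_param = true covers $1. Because a squashed list hides how many constants it replaced, the query-supplied parameter numbers cannot be reused, and normalization renumbers every placeholder.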
diff --git a/src/include/optimizer/paramassign.h b/src/include/optimizer/paramassign.h
index 59dcb1ff053..bbf7214289b 100644
--- a/src/include/optimizer/paramassign.h
+++ b/src/include/optimizer/paramassign.h
@@ -30,7 +30,8 @@ extern Param *replace_nestloop_param_placeholdervar(PlannerInfo *root,
extern void process_subquery_nestloop_params(PlannerInfo *root,
List *subplan_params);
extern List *identify_current_nestloop_params(PlannerInfo *root,
- Relids leftrelids);
+ Relids leftrelids,
+ Relids outerrelids);
extern Param *generate_new_exec_param(PlannerInfo *root, Oid paramtype,
int32 paramtypmod, Oid paramcollation);
extern int assign_special_exec_param(PlannerInfo *root);
diff --git a/src/include/optimizer/paths.h b/src/include/optimizer/paths.h
index a48c9721797..8410531f2d6 100644
--- a/src/include/optimizer/paths.h
+++ b/src/include/optimizer/paths.h
@@ -109,8 +109,6 @@ extern Relids add_outer_joins_to_relids(PlannerInfo *root, Relids input_relids,
List **pushed_down_joins);
extern bool have_join_order_restriction(PlannerInfo *root,
RelOptInfo *rel1, RelOptInfo *rel2);
-extern bool have_dangerous_phv(PlannerInfo *root,
- Relids outer_relids, Relids inner_params);
extern void mark_dummy_rel(RelOptInfo *rel);
extern void init_dummy_sjinfo(SpecialJoinInfo *sjinfo, Relids left_relids,
Relids right_relids);
diff --git a/src/include/optimizer/placeholder.h b/src/include/optimizer/placeholder.h
index d351045e2e0..db92d8861ba 100644
--- a/src/include/optimizer/placeholder.h
+++ b/src/include/optimizer/placeholder.h
@@ -30,5 +30,7 @@ extern void add_placeholders_to_joinrel(PlannerInfo *root, RelOptInfo *joinrel,
SpecialJoinInfo *sjinfo);
extern bool contain_placeholder_references_to(PlannerInfo *root, Node *clause,
int relid);
+extern Relids get_placeholder_nulling_relids(PlannerInfo *root,
+ PlaceHolderInfo *phinfo);
#endif /* PLACEHOLDER_H */
diff --git a/src/include/parser/parse_node.h b/src/include/parser/parse_node.h
index 994284019fb..f7d07c84542 100644
--- a/src/include/parser/parse_node.h
+++ b/src/include/parser/parse_node.h
@@ -108,20 +108,6 @@ typedef Node *(*CoerceParamHook) (ParseState *pstate, Param *param,
* byte-wise locations in parse structures to character-wise cursor
* positions.)
*
- * p_stmt_location: location of the top level RawStmt's start. During
- * transformation, the Query's location will be set to the statement's
- * location if available. Otherwise, the RawStmt's start location will
- * be used. Propagating the location through ParseState is needed for
- * the Query length calculation (see p_stmt_len below).
- *
- * p_stmt_len: length of the top level RawStmt. Most of the time, the
- * statement's length is not provided by the parser, with the exception
- * of SelectStmt within parentheses and PreparableStmt in COPY. If the
- * statement's location is provided by the parser, the top-level location
- * and length are needed to accurately compute the Query's length. If the
- * statement's location is not provided, the RawStmt's length can be used
- * directly.
- *
* p_rtable: list of RTEs that will become the rangetable of the query.
* Note that neither relname nor refname of these entries are necessarily
* unique; searching the rtable by name is a bad idea.
@@ -207,8 +193,6 @@ struct ParseState
{
ParseState *parentParseState; /* stack link */
const char *p_sourcetext; /* source text, or NULL if not available */
- ParseLoc p_stmt_location; /* start location, or -1 if unknown */
- ParseLoc p_stmt_len; /* length in bytes; 0 means "rest of string" */
List *p_rtable; /* range table so far */
List *p_rteperminfos; /* list of RTEPermissionInfo nodes for each
* RTE_RELATION entry in rtable */
diff --git a/src/include/pg_config.h.in b/src/include/pg_config.h.in
index c3cc9fa856d..726a7c1be1f 100644
--- a/src/include/pg_config.h.in
+++ b/src/include/pg_config.h.in
@@ -91,6 +91,10 @@
`LLVMCreatePerfJITEventListener', and to 0 if you don't. */
#undef HAVE_DECL_LLVMCREATEPERFJITEVENTLISTENER
+/* Define to 1 if you have the declaration of `memset_s', and to 0 if you
+ don't. */
+#undef HAVE_DECL_MEMSET_S
+
/* Define to 1 if you have the declaration of `posix_fadvise', and to 0 if you
don't. */
#undef HAVE_DECL_POSIX_FADVISE
@@ -291,9 +295,6 @@
/* Define to 1 if you have the <memory.h> header file. */
#undef HAVE_MEMORY_H
-/* Define to 1 if you have the `memset_s' function. */
-#undef HAVE_MEMSET_S
-
/* Define to 1 if you have the `mkdtemp' function. */
#undef HAVE_MKDTEMP
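Switching from a function check to a declaration check matters on platforms such as macOS, where memset_s() exists in libc but <string.h> only declares it under the C11 Annex K feature macro: a link-only probe passes while compilation fails. A hedged sketch of the usage pattern this enables (secure_zero and its fallback are hypothetical, not PostgreSQL's actual explicit_bzero code):

#define __STDC_WANT_LIB_EXT1__ 1	/* expose memset_s() on Annex K systems */
#include "postgres.h"
#include <string.h>

static void
secure_zero(void *buf, size_t len)
{
#if HAVE_DECL_MEMSET_S
	(void) memset_s(buf, len, 0, len);
#else
	/* fallback the compiler can't elide as a dead store */
	volatile unsigned char *p = (volatile unsigned char *) buf;

	while (len-- > 0)
		*p++ = 0;
#endif
}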
diff --git a/src/include/replication/reorderbuffer.h b/src/include/replication/reorderbuffer.h
index 24e88c409ba..fa0745552f8 100644
--- a/src/include/replication/reorderbuffer.h
+++ b/src/include/replication/reorderbuffer.h
@@ -176,6 +176,7 @@ typedef struct ReorderBufferChange
#define RBTXN_SENT_PREPARE 0x0200
#define RBTXN_IS_COMMITTED 0x0400
#define RBTXN_IS_ABORTED 0x0800
+#define RBTXN_DISTR_INVAL_OVERFLOWED 0x1000
#define RBTXN_PREPARE_STATUS_MASK (RBTXN_IS_PREPARED | RBTXN_SKIPPED_PREPARE | RBTXN_SENT_PREPARE)
@@ -265,6 +266,12 @@ typedef struct ReorderBufferChange
((txn)->txn_flags & RBTXN_SKIPPED_PREPARE) != 0 \
)
+/* Is the array of distributed inval messages overflowed? */
+#define rbtxn_distr_inval_overflowed(txn) \
+( \
+ ((txn)->txn_flags & RBTXN_DISTR_INVAL_OVERFLOWED) != 0 \
+)
+
/* Is this a top-level transaction? */
#define rbtxn_is_toptxn(txn) \
( \
@@ -422,6 +429,12 @@ typedef struct ReorderBufferTXN
uint32 ninvalidations;
SharedInvalidationMessage *invalidations;
+ /*
+ * Stores cache invalidation messages distributed by other transactions.
+ */
+ uint32 ninvalidations_distributed;
+ SharedInvalidationMessage *invalidations_distributed;
+
/* ---
* Position in one of two lists:
* * list of subtransactions if we are *known* to be subxact
@@ -738,6 +751,9 @@ extern void ReorderBufferAddNewTupleCids(ReorderBuffer *rb, TransactionId xid,
CommandId cmin, CommandId cmax, CommandId combocid);
extern void ReorderBufferAddInvalidations(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn,
Size nmsgs, SharedInvalidationMessage *msgs);
+extern void ReorderBufferAddDistributedInvalidations(ReorderBuffer *rb, TransactionId xid,
+ XLogRecPtr lsn, Size nmsgs,
+ SharedInvalidationMessage *msgs);
extern void ReorderBufferImmediateInvalidation(ReorderBuffer *rb, uint32 ninvalidations,
SharedInvalidationMessage *invalidations);
extern void ReorderBufferProcessXid(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn);
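The overflow flag mirrors the existing pattern for per-transaction invalidations: the array of distributed messages is bounded, and once it overflows the only safe option is a full cache reset. A sketch of the consumer-side logic these declarations imply (apply_distributed_invals is a hypothetical helper; the real handling lives in reorderbuffer.c):

#include "postgres.h"
#include "replication/reorderbuffer.h"
#include "utils/inval.h"

static void
apply_distributed_invals(ReorderBufferTXN *txn)
{
	if (rbtxn_distr_inval_overflowed(txn))
	{
		/* the bounded array overflowed; only a full reset is safe */
		InvalidateSystemCaches();
		return;
	}

	for (uint32 i = 0; i < txn->ninvalidations_distributed; i++)
		LocalExecuteInvalidationMessage(&txn->invalidations_distributed[i]);
}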
diff --git a/src/include/replication/slot.h b/src/include/replication/slot.h
index eb0b93b1114..ffacba9d2ae 100644
--- a/src/include/replication/slot.h
+++ b/src/include/replication/slot.h
@@ -215,6 +215,14 @@ typedef struct ReplicationSlot
* recently stopped.
*/
TimestampTz inactive_since;
+
+ /*
+ * Latest restart_lsn that has been flushed to disk. For persistent
+ * slots, the flushed LSN should be taken into account when computing the
+ * oldest LSN needed for WAL segment removal.
+ */
+ XLogRecPtr last_saved_restart_lsn;
+
} ReplicationSlot;
#define SlotIsPhysical(slot) ((slot)->data.database == InvalidOid)
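A hypothetical helper illustrating the rule the new field states: for a persistent slot, WAL removal must honor the restart_lsn last flushed to disk, not just the in-memory value, or a crash could leave the slot pointing at removed WAL. (The actual computation is folded into ReplicationSlotsComputeRequiredLSN() and friends in slot.c.)

#include "postgres.h"
#include "replication/slot.h"

static XLogRecPtr
slot_keep_lsn(ReplicationSlot *s)
{
	XLogRecPtr	keep = s->data.restart_lsn;

	if (s->data.persistency == RS_PERSISTENT &&
		!XLogRecPtrIsInvalid(s->last_saved_restart_lsn) &&
		s->last_saved_restart_lsn < keep)
		keep = s->last_saved_restart_lsn;

	return keep;
}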
diff --git a/src/include/storage/aio.h b/src/include/storage/aio.h
index f3726bc3dc5..e7a0a234b6c 100644
--- a/src/include/storage/aio.h
+++ b/src/include/storage/aio.h
@@ -36,7 +36,7 @@ typedef enum IoMethod
#ifdef IOMETHOD_IO_URING_ENABLED
IOMETHOD_IO_URING,
#endif
-}			IoMethod;
+} IoMethod;
/* We'll default to worker based execution. */
#define DEFAULT_IO_METHOD IOMETHOD_WORKER
diff --git a/src/include/storage/copydir.h b/src/include/storage/copydir.h
index 940d74462d1..f1d7beeed1a 100644
--- a/src/include/storage/copydir.h
+++ b/src/include/storage/copydir.h
@@ -17,7 +17,7 @@ typedef enum FileCopyMethod
{
FILE_COPY_METHOD_COPY,
FILE_COPY_METHOD_CLONE,
-}			FileCopyMethod;
+} FileCopyMethod;
/* GUC parameters */
extern PGDLLIMPORT int file_copy_method;
diff --git a/src/include/storage/lock.h b/src/include/storage/lock.h
index 6f2108a44e8..4862b80eec3 100644
--- a/src/include/storage/lock.h
+++ b/src/include/storage/lock.h
@@ -30,7 +30,7 @@ typedef struct PGPROC PGPROC;
/* GUC variables */
extern PGDLLIMPORT int max_locks_per_xact;
-extern PGDLLIMPORT bool log_lock_failure;
+extern PGDLLIMPORT bool log_lock_failures;
#ifdef LOCK_DEBUG
extern PGDLLIMPORT int Trace_lock_oidmin;
diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h
index 2b4cbda39a5..08a72569ae5 100644
--- a/src/include/storage/lwlock.h
+++ b/src/include/storage/lwlock.h
@@ -221,8 +221,6 @@ typedef enum BuiltinTrancheIds
LWTRANCHE_XACT_SLRU,
LWTRANCHE_PARALLEL_VACUUM_DSA,
LWTRANCHE_AIO_URING_COMPLETION,
- LWTRANCHE_MEMORY_CONTEXT_REPORTING_STATE,
- LWTRANCHE_MEMORY_CONTEXT_REPORTING_PROC,
LWTRANCHE_FIRST_USER_DEFINED,
} BuiltinTrancheIds;
diff --git a/src/include/storage/procsignal.h b/src/include/storage/procsignal.h
index 345d5a0ecb1..afeeb1ca019 100644
--- a/src/include/storage/procsignal.h
+++ b/src/include/storage/procsignal.h
@@ -35,7 +35,6 @@ typedef enum
PROCSIG_WALSND_INIT_STOPPING, /* ask walsenders to prepare for shutdown */
PROCSIG_BARRIER, /* global barrier interrupt */
PROCSIG_LOG_MEMORY_CONTEXT, /* ask backend to log the memory contexts */
- PROCSIG_GET_MEMORY_CONTEXT, /* ask backend to send the memory contexts */
PROCSIG_PARALLEL_APPLY_MESSAGE, /* Message from parallel apply workers */
/* Recovery conflict reasons */
diff --git a/src/include/storage/sinval.h b/src/include/storage/sinval.h
index 5dc5aafe5c9..845a5851b57 100644
--- a/src/include/storage/sinval.h
+++ b/src/include/storage/sinval.h
@@ -119,7 +119,7 @@ typedef struct
Oid dbId; /* database ID */
Oid relid; /* relation ID, or 0 if whole
* RelationSyncCache */
-} SharedInvalRelSyncMsg;
+} SharedInvalRelSyncMsg;
typedef union
{
diff --git a/src/include/storage/waiteventset.h b/src/include/storage/waiteventset.h
index aa65b7a35e7..dd514d52991 100644
--- a/src/include/storage/waiteventset.h
+++ b/src/include/storage/waiteventset.h
@@ -15,7 +15,7 @@
* functions.
*
*
- * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/storage/waiteventset.h
diff --git a/src/include/tcop/backend_startup.h b/src/include/tcop/backend_startup.h
index dcb9d056643..e8639688c00 100644
--- a/src/include/tcop/backend_startup.h
+++ b/src/include/tcop/backend_startup.h
@@ -86,7 +86,7 @@ typedef enum LogConnectionOption
LOG_CONNECTION_AUTHENTICATION |
LOG_CONNECTION_AUTHORIZATION |
LOG_CONNECTION_SETUP_DURATIONS,
-} LogConnectionOption;
+} LogConnectionOption;
/*
* A collection of timings of various stages of connection establishment and
diff --git a/src/include/utils/backend_status.h b/src/include/utils/backend_status.h
index 430ccd7d78e..3016501ac05 100644
--- a/src/include/utils/backend_status.h
+++ b/src/include/utils/backend_status.h
@@ -170,10 +170,10 @@ typedef struct PgBackendStatus
int64 st_progress_param[PGSTAT_NUM_PROGRESS_PARAM];
/* query identifier, optionally computed using post_parse_analyze_hook */
- uint64 st_query_id;
+ int64 st_query_id;
/* plan identifier, optionally computed using planner_hook */
- uint64 st_plan_id;
+ int64 st_plan_id;
} PgBackendStatus;
@@ -321,16 +321,16 @@ extern void pgstat_clear_backend_activity_snapshot(void);
/* Activity reporting functions */
extern void pgstat_report_activity(BackendState state, const char *cmd_str);
-extern void pgstat_report_query_id(uint64 query_id, bool force);
-extern void pgstat_report_plan_id(uint64 plan_id, bool force);
+extern void pgstat_report_query_id(int64 query_id, bool force);
+extern void pgstat_report_plan_id(int64 plan_id, bool force);
extern void pgstat_report_tempfile(size_t filesize);
extern void pgstat_report_appname(const char *appname);
extern void pgstat_report_xact_timestamp(TimestampTz tstamp);
extern const char *pgstat_get_backend_current_activity(int pid, bool checkUser);
extern const char *pgstat_get_crashed_backend_activity(int pid, char *buffer,
int buflen);
-extern uint64 pgstat_get_my_query_id(void);
-extern uint64 pgstat_get_my_plan_id(void);
+extern int64 pgstat_get_my_query_id(void);
+extern int64 pgstat_get_my_plan_id(void);
extern BackendType pgstat_get_backend_type_by_proc_number(ProcNumber procNumber);
diff --git a/src/include/utils/elog.h b/src/include/utils/elog.h
index 5eac0e16970..675f4f5f469 100644
--- a/src/include/utils/elog.h
+++ b/src/include/utils/elog.h
@@ -485,7 +485,7 @@ typedef enum
PGERROR_TERSE, /* single-line error messages */
PGERROR_DEFAULT, /* recommended style */
PGERROR_VERBOSE, /* all the facts, ma'am */
-} PGErrorVerbosity;
+} PGErrorVerbosity;
extern PGDLLIMPORT int Log_error_verbosity;
extern PGDLLIMPORT char *Log_line_prefix;
diff --git a/src/include/utils/memutils.h b/src/include/utils/memutils.h
index c0987dca155..8abc26abce2 100644
--- a/src/include/utils/memutils.h
+++ b/src/include/utils/memutils.h
@@ -18,9 +18,6 @@
#define MEMUTILS_H
#include "nodes/memnodes.h"
-#include "storage/condition_variable.h"
-#include "storage/lmgr.h"
-#include "utils/dsa.h"
/*
@@ -51,23 +48,6 @@
#define AllocHugeSizeIsValid(size) ((Size) (size) <= MaxAllocHugeSize)
-/*
- * Memory Context reporting size limits.
- */
-
-/* Max length of context name and ident */
-#define MEMORY_CONTEXT_IDENT_SHMEM_SIZE 64
-/* Maximum size (in bytes) of DSA area per process */
-#define MEMORY_CONTEXT_REPORT_MAX_PER_BACKEND ((size_t) (1 * 1024 * 1024))
-
-/*
- * Maximum size per context. Actual size may be lower as this assumes the worst
- * case of deepest path and longest identifiers (name and ident, thus the
- * multiplication by 2). The path depth is limited to 100 like for memory
- * context logging.
- */
-#define MAX_MEMORY_CONTEXT_STATS_SIZE (sizeof(MemoryStatsEntry) + \
- (100 * sizeof(int)) + (2 * MEMORY_CONTEXT_IDENT_SHMEM_SIZE))
/*
* Standard top-level memory contexts.
@@ -339,66 +319,4 @@ pg_memory_is_all_zeros(const void *ptr, size_t len)
return true;
}
-/* Dynamic shared memory state for statistics per context */
-typedef struct MemoryStatsEntry
-{
- dsa_pointer name;
- dsa_pointer ident;
- dsa_pointer path;
- NodeTag type;
- int path_length;
- int levels;
- int64 totalspace;
- int64 nblocks;
- int64 freespace;
- int64 freechunks;
- int num_agg_stats;
-} MemoryStatsEntry;
-
-/*
- * Static shared memory state representing the DSA area created for memory
- * context statistics reporting. A single DSA area is created and used by all
- * the processes, each having its specific DSA allocations for sharing memory
- * statistics, tracked by per backend static shared memory state.
- */
-typedef struct MemoryStatsCtl
-{
- dsa_handle memstats_dsa_handle;
- LWLock lw_lock;
-} MemoryStatsCtl;
-
-/*
- * Per backend static shared memory state for memory context statistics
- * reporting.
- */
-typedef struct MemoryStatsBackendState
-{
- ConditionVariable memcxt_cv;
- LWLock lw_lock;
- int proc_id;
- int total_stats;
- bool summary;
- dsa_pointer memstats_dsa_pointer;
- TimestampTz stats_timestamp;
-} MemoryStatsBackendState;
-
-
-/*
- * Used for storage of transient identifiers for pg_get_backend_memory_contexts
- */
-typedef struct MemoryStatsContextId
-{
- MemoryContext context;
- int context_id;
-} MemoryStatsContextId;
-
-extern PGDLLIMPORT MemoryStatsBackendState *memCxtState;
-extern PGDLLIMPORT MemoryStatsCtl *memCxtArea;
-extern PGDLLIMPORT dsa_area *MemoryStatsDsaArea;
-extern void ProcessGetMemoryContextInterrupt(void);
-extern const char *ContextTypeToString(NodeTag type);
-extern void HandleGetMemoryContextInterrupt(void);
-extern Size MemoryContextReportingShmemSize(void);
-extern void MemoryContextReportingShmemInit(void);
-extern void AtProcExit_memstats_cleanup(int code, Datum arg);
#endif /* MEMUTILS_H */
diff --git a/src/include/utils/plancache.h b/src/include/utils/plancache.h
index 07ec5318db7..1baa6d50bfd 100644
--- a/src/include/utils/plancache.h
+++ b/src/include/utils/plancache.h
@@ -18,8 +18,6 @@
#include "access/tupdesc.h"
#include "lib/ilist.h"
#include "nodes/params.h"
-#include "nodes/parsenodes.h"
-#include "nodes/plannodes.h"
#include "tcop/cmdtag.h"
#include "utils/queryenvironment.h"
#include "utils/resowner.h"
@@ -153,11 +151,10 @@ typedef struct CachedPlanSource
* The reference count includes both the link from the parent CachedPlanSource
* (if any), and any active plan executions, so the plan can be discarded
* exactly when refcount goes to zero. Both the struct itself and the
- * subsidiary data, except the PlannedStmts in stmt_list live in the context
- * denoted by the context field; the PlannedStmts live in the context denoted
- * by stmt_context. Separate contexts makes it easy to free a no-longer-needed
- * cached plan. (However, if is_oneshot is true, the context does not belong
- * solely to the CachedPlan so no freeing is possible.)
+ * subsidiary data live in the context denoted by the context field.
+ * This makes it easy to free a no-longer-needed cached plan. (However,
+ * if is_oneshot is true, the context does not belong solely to the CachedPlan
+ * so no freeing is possible.)
*/
typedef struct CachedPlan
{
@@ -165,7 +162,6 @@ typedef struct CachedPlan
List *stmt_list; /* list of PlannedStmts */
bool is_oneshot; /* is it a "oneshot" plan? */
bool is_saved; /* is CachedPlan in a long-lived context? */
- bool is_reused; /* is it a reused generic plan? */
bool is_valid; /* is the stmt_list currently valid? */
Oid planRoleId; /* Role ID the plan was created for */
bool dependsOnRole; /* is plan specific to that role? */
@@ -174,10 +170,6 @@ typedef struct CachedPlan
int generation; /* parent's generation number for this plan */
int refcount; /* count of live references to this struct */
MemoryContext context; /* context containing this CachedPlan */
- MemoryContext stmt_context; /* context containing the PlannedStmts in
- * stmt_list, but not the List itself which is
- * in the above context; NULL if is_oneshot is
- * true. */
} CachedPlan;
/*
@@ -249,10 +241,6 @@ extern CachedPlan *GetCachedPlan(CachedPlanSource *plansource,
ParamListInfo boundParams,
ResourceOwner owner,
QueryEnvironment *queryEnv);
-extern PlannedStmt *UpdateCachedPlan(CachedPlanSource *plansource,
- int query_index,
- QueryEnvironment *queryEnv);
-
extern void ReleaseCachedPlan(CachedPlan *plan, ResourceOwner owner);
extern bool CachedPlanAllowsSimpleValidityCheck(CachedPlanSource *plansource,
@@ -265,30 +253,4 @@ extern bool CachedPlanIsSimplyValid(CachedPlanSource *plansource,
extern CachedExpression *GetCachedExpression(Node *expr);
extern void FreeCachedExpression(CachedExpression *cexpr);
-/*
- * CachedPlanRequiresLocking: should the executor acquire additional locks?
- *
- * If the plan is a saved generic plan, the executor must acquire locks for
- * relations that are not covered by AcquireExecutorLocks(), such as partitions
- * that are subject to initial runtime pruning.
- */
-static inline bool
-CachedPlanRequiresLocking(CachedPlan *cplan)
-{
- return !cplan->is_oneshot && cplan->is_reused;
-}
-
-/*
- * CachedPlanValid
- * Returns whether a cached generic plan is still valid.
- *
- * Invoked by the executor to check if the plan has not been invalidated after
- * taking locks during the initialization of the plan.
- */
-static inline bool
-CachedPlanValid(CachedPlan *cplan)
-{
- return cplan->is_valid;
-}
-
#endif /* PLANCACHE_H */
diff --git a/src/include/utils/portal.h b/src/include/utils/portal.h
index ddee031f551..0b62143af8b 100644
--- a/src/include/utils/portal.h
+++ b/src/include/utils/portal.h
@@ -138,7 +138,6 @@ typedef struct PortalData
QueryCompletion qc; /* command completion data for executed query */
List *stmts; /* list of PlannedStmts */
CachedPlan *cplan; /* CachedPlan, if stmts are from one */
- CachedPlanSource *plansource; /* CachedPlanSource, for cplan */
ParamListInfo portalParams; /* params to pass to query */
QueryEnvironment *queryEnv; /* environment for query */
@@ -241,8 +240,7 @@ extern void PortalDefineQuery(Portal portal,
const char *sourceText,
CommandTag commandTag,
List *stmts,
- CachedPlan *cplan,
- CachedPlanSource *plansource);
+ CachedPlan *cplan);
extern PlannedStmt *PortalGetPrimaryStmt(Portal portal);
extern void PortalCreateHoldStore(Portal portal);
extern void PortalHashTableDeleteAll(void);
diff --git a/src/include/utils/skipsupport.h b/src/include/utils/skipsupport.h
index bc51847cf61..c42be001fb5 100644
--- a/src/include/utils/skipsupport.h
+++ b/src/include/utils/skipsupport.h
@@ -90,7 +90,7 @@ typedef struct SkipSupportData
*/
SkipSupportIncDec decrement;
SkipSupportIncDec increment;
-} SkipSupportData;
+} SkipSupportData;
extern SkipSupport PrepareSkipSupportFromOpclass(Oid opfamily, Oid opcintype,
bool reverse);
diff --git a/src/interfaces/ecpg/preproc/meson.build b/src/interfaces/ecpg/preproc/meson.build
index c9f4035053d..aa948efc0dc 100644
--- a/src/interfaces/ecpg/preproc/meson.build
+++ b/src/interfaces/ecpg/preproc/meson.build
@@ -98,4 +98,4 @@ tests += {
],
'deps': [ecpg_exe],
},
-}
\ No newline at end of file
+}
diff --git a/src/interfaces/libpq-oauth/.gitignore b/src/interfaces/libpq-oauth/.gitignore
new file mode 100644
index 00000000000..a4afe7c1c68
--- /dev/null
+++ b/src/interfaces/libpq-oauth/.gitignore
@@ -0,0 +1 @@
+/exports.list
diff --git a/src/interfaces/libpq-oauth/oauth-curl.c b/src/interfaces/libpq-oauth/oauth-curl.c
index d13b9cbabb4..dba9a684fa8 100644
--- a/src/interfaces/libpq-oauth/oauth-curl.c
+++ b/src/interfaces/libpq-oauth/oauth-curl.c
@@ -83,6 +83,20 @@
#define MAX_OAUTH_RESPONSE_SIZE (256 * 1024)
/*
+ * Similarly, a limit on the maximum JSON nesting level keeps a server from
+ * running us out of stack space. A common nesting level in practice is 2 (for a
+ * top-level object containing arrays of strings). As of May 2025, the maximum
+ * depth for standard server metadata appears to be 6, if the document contains
+ * a full JSON Web Key Set in its "jwks" parameter.
+ *
+ * Since it's easy to nest JSON, and the number of parameters and key types
+ * keeps growing, take a healthy buffer of 16. (If this ever proves to be a
+ * problem in practice, we may want to switch over to the incremental JSON
+ * parser instead of playing with this parameter.)
+ */
+#define MAX_OAUTH_NESTING_LEVEL 16
+
+/*
* Parsed JSON Representations
*
* As a general rule, we parse and cache only the fields we're currently using.
@@ -495,6 +509,12 @@ oauth_json_object_start(void *state)
}
++ctx->nested;
+ if (ctx->nested > MAX_OAUTH_NESTING_LEVEL)
+ {
+ oauth_parse_set_error(ctx, "JSON is too deeply nested");
+ return JSON_SEM_ACTION_FAILED;
+ }
+
return JSON_SUCCESS;
}
@@ -599,6 +619,12 @@ oauth_json_array_start(void *state)
}
++ctx->nested;
+ if (ctx->nested > MAX_OAUTH_NESTING_LEVEL)
+ {
+ oauth_parse_set_error(ctx, "JSON is too deeply nested");
+ return JSON_SEM_ACTION_FAILED;
+ }
+
return JSON_SUCCESS;
}
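The depth guard added above reduces to a small reusable pattern: bump a counter in the object/array "start" callbacks, fail the parse past a fixed bound, and decrement in the matching "end" callbacks so sibling containers do not accumulate depth. A stripped-down sketch (names are illustrative; the JsonParseErrorType codes come from src/common/jsonapi.h):

#define MAX_NESTING_LEVEL 16

struct depth_ctx
{
	int			nested;
};

static JsonParseErrorType
depth_guard_start(void *state)
{
	struct depth_ctx *ctx = state;

	if (++ctx->nested > MAX_NESTING_LEVEL)
		return JSON_SEM_ACTION_FAILED;	/* aborts pg_parse_json() */
	return JSON_SUCCESS;
}

static JsonParseErrorType
depth_guard_end(void *state)
{
	struct depth_ctx *ctx = state;

	--ctx->nested;
	return JSON_SUCCESS;
}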
diff --git a/src/interfaces/libpq/Makefile b/src/interfaces/libpq/Makefile
index c6fe5fec7f6..853aab4b1b8 100644
--- a/src/interfaces/libpq/Makefile
+++ b/src/interfaces/libpq/Makefile
@@ -98,14 +98,21 @@ SHLIB_PREREQS = submake-libpgport
SHLIB_EXPORTS = exports.txt
+# Appends to a comma-separated list.
+comma := ,
+define add_to_list
+$(eval $1 := $(if $($1),$($1)$(comma) $2,$2))
+endef
+
ifeq ($(with_ssl),openssl)
-PKG_CONFIG_REQUIRES_PRIVATE = libssl, libcrypto
+$(call add_to_list,PKG_CONFIG_REQUIRES_PRIVATE,libssl)
+$(call add_to_list,PKG_CONFIG_REQUIRES_PRIVATE,libcrypto)
endif
ifeq ($(with_libcurl),yes)
# libpq.so doesn't link against libcurl, but libpq.a needs libpq-oauth, and
# libpq-oauth needs libcurl. Put both into *.private.
-PKG_CONFIG_REQUIRES_PRIVATE += libcurl
+$(call add_to_list,PKG_CONFIG_REQUIRES_PRIVATE,libcurl)
%.pc: override SHLIB_LINK_INTERNAL += -lpq-oauth
endif
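Worked example of the macro above: starting from an empty PKG_CONFIG_REQUIRES_PRIVATE, the two openssl calls leave the variable set to "libssl, libcrypto", and the libcurl branch would extend that to "libssl, libcrypto, libcurl"; that is, each call appends ", <item>" only when the list is already non-empty, so no stray leading comma can appear.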
diff --git a/src/interfaces/libpq/fe-auth-oauth.c b/src/interfaces/libpq/fe-auth-oauth.c
index 9fbff89a21d..d146c5f567c 100644
--- a/src/interfaces/libpq/fe-auth-oauth.c
+++ b/src/interfaces/libpq/fe-auth-oauth.c
@@ -157,6 +157,14 @@ client_initial_response(PGconn *conn, bool discover)
#define ERROR_SCOPE_FIELD "scope"
#define ERROR_OPENID_CONFIGURATION_FIELD "openid-configuration"
+/*
+ * Limit the maximum number of nested objects/arrays. Because OAUTHBEARER
+ * doesn't have any defined extensions for its JSON yet, we can be much more
+ * conservative here than with libpq-oauth's MAX_OAUTH_NESTING_LEVEL; we expect
+ * a nesting level of 1 in practice.
+ */
+#define MAX_SASL_NESTING_LEVEL 8
+
struct json_ctx
{
char *errmsg; /* any non-NULL value stops all processing */
@@ -196,6 +204,9 @@ oauth_json_object_start(void *state)
}
++ctx->nested;
+ if (ctx->nested > MAX_SASL_NESTING_LEVEL)
+ oauth_json_set_error(ctx, libpq_gettext("JSON is too deeply nested"));
+
return oauth_json_has_error(ctx) ? JSON_SEM_ACTION_FAILED : JSON_SUCCESS;
}
@@ -254,10 +265,23 @@ oauth_json_array_start(void *state)
ctx->target_field_name);
}
+ ++ctx->nested;
+ if (ctx->nested > MAX_SASL_NESTING_LEVEL)
+ oauth_json_set_error(ctx, libpq_gettext("JSON is too deeply nested"));
+
return oauth_json_has_error(ctx) ? JSON_SEM_ACTION_FAILED : JSON_SUCCESS;
}
static JsonParseErrorType
+oauth_json_array_end(void *state)
+{
+ struct json_ctx *ctx = state;
+
+ --ctx->nested;
+ return JSON_SUCCESS;
+}
+
+static JsonParseErrorType
oauth_json_scalar(void *state, char *token, JsonTokenType type)
{
struct json_ctx *ctx = state;
@@ -519,6 +543,7 @@ handle_oauth_sasl_error(PGconn *conn, const char *msg, int msglen)
sem.object_end = oauth_json_object_end;
sem.object_field_start = oauth_json_object_field_start;
sem.array_start = oauth_json_array_start;
+ sem.array_end = oauth_json_array_end;
sem.scalar = oauth_json_scalar;
err = pg_parse_json(lex, &sem);
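Wiring such symmetric callbacks up looks roughly like the following (a sketch assuming the depth_guard_* helpers sketched earlier; the lexer and parser entry points are the real ones from src/common/jsonapi.h):

	JsonLexContext lex;
	JsonSemAction sem = {0};
	struct depth_ctx ctx = {0};

	makeJsonLexContextCstringLen(&lex, json, strlen(json), PG_UTF8, true);
	sem.semstate = &ctx;
	sem.object_start = depth_guard_start;
	sem.object_end = depth_guard_end;
	sem.array_start = depth_guard_start;
	sem.array_end = depth_guard_end;

	if (pg_parse_json(&lex, &sem) != JSON_SUCCESS)
		/* handle parse failure, including the depth-limit case */ ;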
diff --git a/src/interfaces/libpq/fe-cancel.c b/src/interfaces/libpq/fe-cancel.c
index 8c7c198a530..cd3102346bf 100644
--- a/src/interfaces/libpq/fe-cancel.c
+++ b/src/interfaces/libpq/fe-cancel.c
@@ -114,7 +114,7 @@ PQcancelCreate(PGconn *conn)
if (conn->be_cancel_key != NULL)
{
cancelConn->be_cancel_key = malloc(conn->be_cancel_key_len);
- if (!conn->be_cancel_key)
+ if (cancelConn->be_cancel_key == NULL)
goto oom_error;
memcpy(cancelConn->be_cancel_key, conn->be_cancel_key, conn->be_cancel_key_len);
}
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index 430c0fa4442..51a9c416584 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -2027,13 +2027,11 @@ pqConnectOptions2(PGconn *conn)
if (len < 0)
{
libpq_append_conn_error(conn, "invalid SCRAM client key");
- free(conn->scram_client_key_binary);
return false;
}
if (len != SCRAM_MAX_KEY_LEN)
{
libpq_append_conn_error(conn, "invalid SCRAM client key length: %d", len);
- free(conn->scram_client_key_binary);
return false;
}
conn->scram_client_key_len = len;
@@ -2052,13 +2050,11 @@ pqConnectOptions2(PGconn *conn)
if (len < 0)
{
libpq_append_conn_error(conn, "invalid SCRAM server key");
- free(conn->scram_server_key_binary);
return false;
}
if (len != SCRAM_MAX_KEY_LEN)
{
libpq_append_conn_error(conn, "invalid SCRAM server key length: %d", len);
- free(conn->scram_server_key_binary);
return false;
}
conn->scram_server_key_len = len;
@@ -2145,7 +2141,7 @@ pqConnectOptions2(PGconn *conn)
if (conn->min_pversion > conn->max_pversion)
{
conn->status = CONNECTION_BAD;
- libpq_append_conn_error(conn, "min_protocol_version is greater than max_protocol_version");
+ libpq_append_conn_error(conn, "\"%s\" is greater than \"%s\"", "min_protocol_version", "max_protocol_version");
return false;
}
@@ -5053,21 +5049,19 @@ freePGconn(PGconn *conn)
free(conn->events[i].name);
}
- release_conn_addrinfo(conn);
- pqReleaseConnHosts(conn);
-
- free(conn->client_encoding_initial);
- free(conn->events);
+ /* free everything not freed in pqClosePGconn */
free(conn->pghost);
free(conn->pghostaddr);
free(conn->pgport);
free(conn->connect_timeout);
free(conn->pgtcp_user_timeout);
+ free(conn->client_encoding_initial);
free(conn->pgoptions);
free(conn->appname);
free(conn->fbappname);
free(conn->dbName);
free(conn->replication);
+ free(conn->pgservice);
free(conn->pguser);
if (conn->pgpass)
{
@@ -5082,8 +5076,9 @@ freePGconn(PGconn *conn)
free(conn->keepalives_count);
free(conn->sslmode);
free(conn->sslnegotiation);
- free(conn->sslcert);
+ free(conn->sslcompression);
free(conn->sslkey);
+ free(conn->sslcert);
if (conn->sslpassword)
{
explicit_bzero(conn->sslpassword, strlen(conn->sslpassword));
@@ -5093,32 +5088,40 @@ freePGconn(PGconn *conn)
free(conn->sslrootcert);
free(conn->sslcrl);
free(conn->sslcrldir);
- free(conn->sslcompression);
free(conn->sslsni);
free(conn->requirepeer);
- free(conn->require_auth);
- free(conn->ssl_min_protocol_version);
- free(conn->ssl_max_protocol_version);
free(conn->gssencmode);
free(conn->krbsrvname);
free(conn->gsslib);
free(conn->gssdelegation);
- free(conn->connip);
- /* Note that conn->Pfdebug is not ours to close or free */
- free(conn->write_err_msg);
- free(conn->inBuffer);
- free(conn->outBuffer);
- free(conn->rowBuf);
+ free(conn->min_protocol_version);
+ free(conn->max_protocol_version);
+ free(conn->ssl_min_protocol_version);
+ free(conn->ssl_max_protocol_version);
free(conn->target_session_attrs);
+ free(conn->require_auth);
free(conn->load_balance_hosts);
free(conn->scram_client_key);
free(conn->scram_server_key);
+ free(conn->sslkeylogfile);
free(conn->oauth_issuer);
free(conn->oauth_issuer_id);
free(conn->oauth_discovery_uri);
free(conn->oauth_client_id);
free(conn->oauth_client_secret);
free(conn->oauth_scope);
+ /* Note that conn->Pfdebug is not ours to close or free */
+ free(conn->events);
+ pqReleaseConnHosts(conn);
+ free(conn->connip);
+ release_conn_addrinfo(conn);
+ free(conn->scram_client_key_binary);
+ free(conn->scram_server_key_binary);
+ /* if this is a cancel connection, be_cancel_key may still be allocated */
+ free(conn->be_cancel_key);
+ free(conn->inBuffer);
+ free(conn->outBuffer);
+ free(conn->rowBuf);
termPQExpBuffer(&conn->errorMessage);
termPQExpBuffer(&conn->workBuffer);
@@ -5147,6 +5150,7 @@ pqReleaseConnHosts(PGconn *conn)
}
}
free(conn->connhost);
+ conn->connhost = NULL;
}
}
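Setting conn->connhost to NULL right after freeing it is the behavioral part of this hunk: combined with the existing if (conn->connhost) guard, it turns any repeat call to pqReleaseConnHosts() into a harmless no-op instead of a double free. The general free-and-clear idiom, as a sketch (the helper name is illustrative):

static void
free_and_clear(char **field)
{
	free(*field);
	*field = NULL;		/* a second call is now a no-op */
}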
diff --git a/src/interfaces/libpq/fe-misc.c b/src/interfaces/libpq/fe-misc.c
index c14e3c95250..dca44fdc5d2 100644
--- a/src/interfaces/libpq/fe-misc.c
+++ b/src/interfaces/libpq/fe-misc.c
@@ -553,9 +553,35 @@ pqPutMsgEnd(PGconn *conn)
/* Make message eligible to send */
conn->outCount = conn->outMsgEnd;
+ /* If appropriate, try to push out some data */
if (conn->outCount >= 8192)
{
- int toSend = conn->outCount - (conn->outCount % 8192);
+ int toSend = conn->outCount;
+
+ /*
+ * On Unix-pipe connections, it seems profitable to prefer sending
+ * pipe-buffer-sized packets, not randomly-sized ones, so retain the
+ * last partial-8K chunk in our buffer for now. On TCP connections,
+ * the advantage of that is far less clear. Moreover, it flat out
+ * isn't safe when using SSL or GSSAPI, because those code paths have
+ * API stipulations that if they fail to send all the data that was
+ * offered in the previous write attempt, we mustn't offer less data
+ * in this write attempt. The previous write attempt might've been
+ * pqFlush attempting to send everything in the buffer, so we mustn't
+ * offer less now. (Presently, we won't try to use SSL or GSSAPI on
+ * Unix connections, so those checks are just Asserts. They'll have
+ * to become part of the regular if-test if we ever change that.)
+ */
+ if (conn->raddr.addr.ss_family == AF_UNIX)
+ {
+#ifdef USE_SSL
+ Assert(!conn->ssl_in_use);
+#endif
+#ifdef ENABLE_GSS
+ Assert(!conn->gssenc);
+#endif
+ toSend -= toSend % 8192;
+ }
if (pqSendSome(conn, toSend) < 0)
return EOF;
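A quick worked example of the policy above (numbers are illustrative): with conn->outCount = 20000 at the end of a message, a TCP, SSL, or GSSAPI connection offers all 20000 bytes to pqSendSome(), while a Unix-socket connection offers 16384 bytes (20000 - 20000 % 8192, i.e. two full 8K chunks) and retains the trailing 3616 bytes for the next write attempt.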
diff --git a/src/interfaces/libpq/fe-protocol3.c b/src/interfaces/libpq/fe-protocol3.c
index beb1c889aad..1599de757d1 100644
--- a/src/interfaces/libpq/fe-protocol3.c
+++ b/src/interfaces/libpq/fe-protocol3.c
@@ -1434,7 +1434,7 @@ pqGetNegotiateProtocolVersion3(PGconn *conn)
/* 3.1 never existed, we went straight from 3.0 to 3.2 */
if (their_version == PG_PROTOCOL(3, 1))
{
- libpq_append_conn_error(conn, "received invalid protocol negotiation message: server requests downgrade to non-existent 3.1 protocol version");
+ libpq_append_conn_error(conn, "received invalid protocol negotiation message: server requested downgrade to non-existent 3.1 protocol version");
goto failure;
}
@@ -1452,9 +1452,10 @@ pqGetNegotiateProtocolVersion3(PGconn *conn)
if (their_version < conn->min_pversion)
{
- libpq_append_conn_error(conn, "server only supports protocol version %d.%d, but min_protocol_version was set to %d.%d",
+ libpq_append_conn_error(conn, "server only supports protocol version %d.%d, but \"%s\" was set to %d.%d",
PG_PROTOCOL_MAJOR(their_version),
PG_PROTOCOL_MINOR(their_version),
+ "min_protocol_version",
PG_PROTOCOL_MAJOR(conn->min_pversion),
PG_PROTOCOL_MINOR(conn->min_pversion));
@@ -1476,7 +1477,7 @@ pqGetNegotiateProtocolVersion3(PGconn *conn)
}
if (strncmp(conn->workBuffer.data, "_pq_.", 5) != 0)
{
- libpq_append_conn_error(conn, "received invalid protocol negotiation message: server reported unsupported parameter name without a _pq_. prefix (\"%s\")", conn->workBuffer.data);
+ libpq_append_conn_error(conn, "received invalid protocol negotiation message: server reported unsupported parameter name without a \"%s\" prefix (\"%s\")", "_pq_.", conn->workBuffer.data);
goto failure;
}
libpq_append_conn_error(conn, "received invalid protocol negotiation message: server reported an unsupported parameter that was not requested (\"%s\")", conn->workBuffer.data);
diff --git a/src/interfaces/libpq/fe-secure-gssapi.c b/src/interfaces/libpq/fe-secure-gssapi.c
index ce183bc04b4..bc9e1ce06fa 100644
--- a/src/interfaces/libpq/fe-secure-gssapi.c
+++ b/src/interfaces/libpq/fe-secure-gssapi.c
@@ -47,11 +47,18 @@
* don't want the other side to send arbitrarily huge packets as we
* would have to allocate memory for them to then pass them to GSSAPI.
*
- * Therefore, these two #define's are effectively part of the protocol
+ * Therefore, this #define is effectively part of the protocol
* spec and can't ever be changed.
*/
-#define PQ_GSS_SEND_BUFFER_SIZE 16384
-#define PQ_GSS_RECV_BUFFER_SIZE 16384
+#define PQ_GSS_MAX_PACKET_SIZE 16384 /* includes uint32 header word */
+
+/*
+ * However, during the authentication exchange we must cope with whatever
+ * message size the GSSAPI library wants to send (because our protocol
+ * doesn't support splitting those messages). Depending on configuration
+ * those messages might be as much as 64kB.
+ */
+#define PQ_GSS_AUTH_BUFFER_SIZE 65536 /* includes uint32 header word */
/*
* We need these state variables per-connection. To allow the functions
@@ -105,9 +112,9 @@ pg_GSS_write(PGconn *conn, const void *ptr, size_t len)
* again, so if it offers a len less than that, something is wrong.
*
* Note: it may seem attractive to report partial write completion once
- * we've successfully sent any encrypted packets. However, that can cause
- * problems for callers; notably, pqPutMsgEnd's heuristic to send only
- * full 8K blocks interacts badly with such a hack. We won't save much,
+ * we've successfully sent any encrypted packets. However, doing that
+ * expands the state space of this processing and has been responsible for
+ * bugs in the past (cf. commit d053a879b). We won't save much,
* typically, by letting callers discard data early, so don't risk it.
*/
if (len < PqGSSSendConsumed)
@@ -203,11 +210,11 @@ pg_GSS_write(PGconn *conn, const void *ptr, size_t len)
goto cleanup;
}
- if (output.length > PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32))
+ if (output.length > PQ_GSS_MAX_PACKET_SIZE - sizeof(uint32))
{
libpq_append_conn_error(conn, "client tried to send oversize GSSAPI packet (%zu > %zu)",
(size_t) output.length,
- PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32));
+ PQ_GSS_MAX_PACKET_SIZE - sizeof(uint32));
errno = EIO; /* for lack of a better idea */
goto cleanup;
}
@@ -342,11 +349,11 @@ pg_GSS_read(PGconn *conn, void *ptr, size_t len)
/* Decode the packet length and check for overlength packet */
input.length = pg_ntoh32(*(uint32 *) PqGSSRecvBuffer);
- if (input.length > PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32))
+ if (input.length > PQ_GSS_MAX_PACKET_SIZE - sizeof(uint32))
{
libpq_append_conn_error(conn, "oversize GSSAPI packet sent by the server (%zu > %zu)",
(size_t) input.length,
- PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32));
+ PQ_GSS_MAX_PACKET_SIZE - sizeof(uint32));
errno = EIO; /* for lack of a better idea */
return -1;
}
@@ -485,12 +492,15 @@ pqsecure_open_gss(PGconn *conn)
* initialize state variables. By malloc'ing the buffers separately, we
* ensure that they are sufficiently aligned for the length-word accesses
* that we do in some places in this file.
+ *
+ * We'll use PQ_GSS_AUTH_BUFFER_SIZE-sized buffers until transport
+ * negotiation is complete, then switch to PQ_GSS_MAX_PACKET_SIZE.
*/
if (PqGSSSendBuffer == NULL)
{
- PqGSSSendBuffer = malloc(PQ_GSS_SEND_BUFFER_SIZE);
- PqGSSRecvBuffer = malloc(PQ_GSS_RECV_BUFFER_SIZE);
- PqGSSResultBuffer = malloc(PQ_GSS_RECV_BUFFER_SIZE);
+ PqGSSSendBuffer = malloc(PQ_GSS_AUTH_BUFFER_SIZE);
+ PqGSSRecvBuffer = malloc(PQ_GSS_AUTH_BUFFER_SIZE);
+ PqGSSResultBuffer = malloc(PQ_GSS_AUTH_BUFFER_SIZE);
if (!PqGSSSendBuffer || !PqGSSRecvBuffer || !PqGSSResultBuffer)
{
libpq_append_conn_error(conn, "out of memory");
@@ -564,13 +574,13 @@ pqsecure_open_gss(PGconn *conn)
* so leave a spot at the end for a NULL byte too) and report that
* back to the caller.
*/
- result = gss_read(conn, PqGSSRecvBuffer + PqGSSRecvLength, PQ_GSS_RECV_BUFFER_SIZE - PqGSSRecvLength - 1, &ret);
+ result = gss_read(conn, PqGSSRecvBuffer + PqGSSRecvLength, PQ_GSS_AUTH_BUFFER_SIZE - PqGSSRecvLength - 1, &ret);
if (result != PGRES_POLLING_OK)
return result;
PqGSSRecvLength += ret;
- Assert(PqGSSRecvLength < PQ_GSS_RECV_BUFFER_SIZE);
+ Assert(PqGSSRecvLength < PQ_GSS_AUTH_BUFFER_SIZE);
PqGSSRecvBuffer[PqGSSRecvLength] = '\0';
appendPQExpBuffer(&conn->errorMessage, "%s\n", PqGSSRecvBuffer + 1);
@@ -584,11 +594,11 @@ pqsecure_open_gss(PGconn *conn)
/* Get the length and check for over-length packet */
input.length = pg_ntoh32(*(uint32 *) PqGSSRecvBuffer);
- if (input.length > PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32))
+ if (input.length > PQ_GSS_AUTH_BUFFER_SIZE - sizeof(uint32))
{
libpq_append_conn_error(conn, "oversize GSSAPI packet sent by the server (%zu > %zu)",
(size_t) input.length,
- PQ_GSS_RECV_BUFFER_SIZE - sizeof(uint32));
+ PQ_GSS_AUTH_BUFFER_SIZE - sizeof(uint32));
return PGRES_POLLING_FAILED;
}
@@ -669,11 +679,32 @@ pqsecure_open_gss(PGconn *conn)
gss_release_buffer(&minor, &output);
/*
+ * Release the large authentication buffers and allocate the ones we
+ * want for normal operation. (This maneuver is safe only because
+ * pqDropConnection will drop the buffers; otherwise, during a
+ * reconnection we'd be at risk of using undersized buffers during
+ * negotiation.)
+ */
+ free(PqGSSSendBuffer);
+ free(PqGSSRecvBuffer);
+ free(PqGSSResultBuffer);
+ PqGSSSendBuffer = malloc(PQ_GSS_MAX_PACKET_SIZE);
+ PqGSSRecvBuffer = malloc(PQ_GSS_MAX_PACKET_SIZE);
+ PqGSSResultBuffer = malloc(PQ_GSS_MAX_PACKET_SIZE);
+ if (!PqGSSSendBuffer || !PqGSSRecvBuffer || !PqGSSResultBuffer)
+ {
+ libpq_append_conn_error(conn, "out of memory");
+ return PGRES_POLLING_FAILED;
+ }
+ PqGSSSendLength = PqGSSSendNext = PqGSSSendConsumed = 0;
+ PqGSSRecvLength = PqGSSResultLength = PqGSSResultNext = 0;
+
+ /*
* Determine the max packet size which will fit in our buffer, after
* accounting for the length. pg_GSS_write will need this.
*/
major = gss_wrap_size_limit(&minor, conn->gctx, 1, GSS_C_QOP_DEFAULT,
- PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32),
+ PQ_GSS_MAX_PACKET_SIZE - sizeof(uint32),
&PqGSSMaxPktSize);
if (GSS_ERROR(major))
@@ -687,10 +718,11 @@ pqsecure_open_gss(PGconn *conn)
}
/* Must have output.length > 0 */
- if (output.length > PQ_GSS_SEND_BUFFER_SIZE - sizeof(uint32))
+ if (output.length > PQ_GSS_AUTH_BUFFER_SIZE - sizeof(uint32))
{
- pg_GSS_error(libpq_gettext("GSSAPI context establishment error"),
- conn, major, minor);
+ libpq_append_conn_error(conn, "client tried to send oversize GSSAPI packet (%zu > %zu)",
+ (size_t) output.length,
+ PQ_GSS_AUTH_BUFFER_SIZE - sizeof(uint32));
gss_release_buffer(&minor, &output);
return PGRES_POLLING_FAILED;
}
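In concrete terms (arithmetic from the constants above): during the authentication exchange a token of up to 65532 bytes (PQ_GSS_AUTH_BUFFER_SIZE minus the 4-byte length word) can be sent or accepted, while once transport negotiation completes every encrypted packet is capped at 16380 bytes of wrapped data, because PQ_GSS_MAX_PACKET_SIZE is part of the wire protocol and can never grow.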
diff --git a/src/interfaces/libpq/fe-secure-openssl.c b/src/interfaces/libpq/fe-secure-openssl.c
index 78f9e84eb35..b08b3a6901b 100644
--- a/src/interfaces/libpq/fe-secure-openssl.c
+++ b/src/interfaces/libpq/fe-secure-openssl.c
@@ -711,7 +711,7 @@ SSL_CTX_keylog_cb(const SSL *ssl, const char *line)
if (fd == -1)
{
- libpq_append_conn_error(conn, "could not open ssl keylog file \"%s\": %s",
+ libpq_append_conn_error(conn, "could not open SSL key logging file \"%s\": %s",
conn->sslkeylogfile, pg_strerror(errno));
return;
}
@@ -719,7 +719,7 @@ SSL_CTX_keylog_cb(const SSL *ssl, const char *line)
/* line is guaranteed by OpenSSL to be NUL terminated */
rc = write(fd, line, strlen(line));
if (rc < 0)
- libpq_append_conn_error(conn, "could not write to ssl keylog file \"%s\": %s",
+ libpq_append_conn_error(conn, "could not write to SSL key logging file \"%s\": %s",
conn->sslkeylogfile, pg_strerror(errno));
else
rc = write(fd, "\n", 1);
diff --git a/src/interfaces/libpq/t/005_negotiate_encryption.pl b/src/interfaces/libpq/t/005_negotiate_encryption.pl
index f6a453c1b41..ac6d8bcb4a6 100644
--- a/src/interfaces/libpq/t/005_negotiate_encryption.pl
+++ b/src/interfaces/libpq/t/005_negotiate_encryption.pl
@@ -107,7 +107,7 @@ $node->append_conf(
listen_addresses = '$hostaddr'
# Capturing the EVENTS that occur during tests requires these settings
-log_connections = on
+log_connections = 'receipt,authentication,authorization'
log_disconnections = on
trace_connection_negotiation = on
lc_messages = 'C'
diff --git a/src/makefiles/pgxs.mk b/src/makefiles/pgxs.mk
index 0de3737e789..039cee3dfe5 100644
--- a/src/makefiles/pgxs.mk
+++ b/src/makefiles/pgxs.mk
@@ -376,10 +376,7 @@ endif
ifdef REGRESS
# things created by various check targets
rm -rf $(pg_regress_clean_files)
-ifeq ($(PORTNAME), win)
- rm -f regress.def
endif
-endif # REGRESS
ifdef TAP_TESTS
rm -rf tmp_check/
endif
diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c
index 519f7695d7c..b80c59447fb 100644
--- a/src/pl/plpgsql/src/pl_comp.c
+++ b/src/pl/plpgsql/src/pl_comp.c
@@ -226,8 +226,13 @@ plpgsql_compile_callback(FunctionCallInfo fcinfo,
/*
* All the permanent output of compilation (e.g. parse tree) is kept in a
* per-function memory context, so it can be reclaimed easily.
+ *
+ * While the func_cxt needs to be long-lived, we initially make it a child
+ * of the assumed-short-lived caller's context, and reparent it under
+ * CacheMemoryContext only upon success. This arrangement avoids memory
+ * leakage during compilation of a faulty function.
*/
- func_cxt = AllocSetContextCreate(TopMemoryContext,
+ func_cxt = AllocSetContextCreate(CurrentMemoryContext,
"PL/pgSQL function",
ALLOCSET_DEFAULT_SIZES);
plpgsql_compile_tmp_cxt = MemoryContextSwitchTo(func_cxt);
@@ -704,6 +709,11 @@ plpgsql_compile_callback(FunctionCallInfo fcinfo,
plpgsql_dumptree(function);
/*
+ * All is well, so make the func_cxt long-lived
+ */
+ MemoryContextSetParent(func_cxt, CacheMemoryContext);
+
+ /*
* Pop the error context stack
*/
error_context_stack = plerrcontext.previous;
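The same leak-avoidance pattern in isolation (a sketch, not project code): build under a short-lived parent so that any error reclaims everything automatically, then reparent to a long-lived context only after construction has fully succeeded.

static MemoryContext
build_long_lived_object(void)
{
	MemoryContext cxt = AllocSetContextCreate(CurrentMemoryContext,
											  "example object",
											  ALLOCSET_DEFAULT_SIZES);

	/* ... allocate and initialize inside cxt; may ereport(ERROR) ... */

	/* success: detach from the caller's context so the object survives */
	MemoryContextSetParent(cxt, CacheMemoryContext);
	return cxt;
}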
diff --git a/src/pl/plpython/expected/README b/src/pl/plpython/expected/README
deleted file mode 100644
index 388c553a589..00000000000
--- a/src/pl/plpython/expected/README
+++ /dev/null
@@ -1,3 +0,0 @@
-Guide to alternative expected files:
-
-plpython_error_5.out Python 3.5 and newer
diff --git a/src/pl/plpython/expected/plpython_error.out b/src/pl/plpython/expected/plpython_error.out
index 68722b00097..fd9cd73be74 100644
--- a/src/pl/plpython/expected/plpython_error.out
+++ b/src/pl/plpython/expected/plpython_error.out
@@ -243,7 +243,7 @@ $$
plpy.nonexistent
$$ LANGUAGE plpython3u;
SELECT toplevel_attribute_error();
-ERROR: AttributeError: 'module' object has no attribute 'nonexistent'
+ERROR: AttributeError: module 'plpy' has no attribute 'nonexistent'
CONTEXT: Traceback (most recent call last):
PL/Python function "toplevel_attribute_error", line 2, in <module>
plpy.nonexistent
diff --git a/src/pl/plpython/expected/plpython_error_5.out b/src/pl/plpython/expected/plpython_error_5.out
deleted file mode 100644
index fd9cd73be74..00000000000
--- a/src/pl/plpython/expected/plpython_error_5.out
+++ /dev/null
@@ -1,460 +0,0 @@
--- test error handling, i forgot to restore Warn_restart in
--- the trigger handler once. the errors and subsequent core dump were
--- interesting.
-/* Flat out Python syntax error
- */
-CREATE FUNCTION python_syntax_error() RETURNS text
- AS
-'.syntaxerror'
- LANGUAGE plpython3u;
-ERROR: could not compile PL/Python function "python_syntax_error"
-DETAIL: SyntaxError: invalid syntax (<string>, line 2)
-/* With check_function_bodies = false the function should get defined
- * and the error reported when called
- */
-SET check_function_bodies = false;
-CREATE FUNCTION python_syntax_error() RETURNS text
- AS
-'.syntaxerror'
- LANGUAGE plpython3u;
-SELECT python_syntax_error();
-ERROR: could not compile PL/Python function "python_syntax_error"
-DETAIL: SyntaxError: invalid syntax (<string>, line 2)
-/* Run the function twice to check if the hashtable entry gets cleaned up */
-SELECT python_syntax_error();
-ERROR: could not compile PL/Python function "python_syntax_error"
-DETAIL: SyntaxError: invalid syntax (<string>, line 2)
-RESET check_function_bodies;
-/* Flat out syntax error
- */
-CREATE FUNCTION sql_syntax_error() RETURNS text
- AS
-'plpy.execute("syntax error")'
- LANGUAGE plpython3u;
-SELECT sql_syntax_error();
-ERROR: spiexceptions.SyntaxError: syntax error at or near "syntax"
-LINE 1: syntax error
- ^
-QUERY: syntax error
-CONTEXT: Traceback (most recent call last):
- PL/Python function "sql_syntax_error", line 1, in <module>
- plpy.execute("syntax error")
-PL/Python function "sql_syntax_error"
-/* check the handling of uncaught python exceptions
- */
-CREATE FUNCTION exception_index_invalid(text) RETURNS text
- AS
-'return args[1]'
- LANGUAGE plpython3u;
-SELECT exception_index_invalid('test');
-ERROR: IndexError: list index out of range
-CONTEXT: Traceback (most recent call last):
- PL/Python function "exception_index_invalid", line 1, in <module>
- return args[1]
-PL/Python function "exception_index_invalid"
-/* check handling of nested exceptions
- */
-CREATE FUNCTION exception_index_invalid_nested() RETURNS text
- AS
-'rv = plpy.execute("SELECT test5(''foo'')")
-return rv[0]'
- LANGUAGE plpython3u;
-SELECT exception_index_invalid_nested();
-ERROR: spiexceptions.UndefinedFunction: function test5(unknown) does not exist
-LINE 1: SELECT test5('foo')
- ^
-HINT: No function matches the given name and argument types. You might need to add explicit type casts.
-QUERY: SELECT test5('foo')
-CONTEXT: Traceback (most recent call last):
- PL/Python function "exception_index_invalid_nested", line 1, in <module>
- rv = plpy.execute("SELECT test5('foo')")
-PL/Python function "exception_index_invalid_nested"
-/* a typo
- */
-CREATE FUNCTION invalid_type_uncaught(a text) RETURNS text
- AS
-'if "plan" not in SD:
- q = "SELECT fname FROM users WHERE lname = $1"
- SD["plan"] = plpy.prepare(q, [ "test" ])
-rv = plpy.execute(SD["plan"], [ a ])
-if len(rv):
- return rv[0]["fname"]
-return None
-'
- LANGUAGE plpython3u;
-SELECT invalid_type_uncaught('rick');
-ERROR: spiexceptions.UndefinedObject: type "test" does not exist
-CONTEXT: Traceback (most recent call last):
- PL/Python function "invalid_type_uncaught", line 3, in <module>
- SD["plan"] = plpy.prepare(q, [ "test" ])
-PL/Python function "invalid_type_uncaught"
-/* for what it's worth catch the exception generated by
- * the typo, and return None
- */
-CREATE FUNCTION invalid_type_caught(a text) RETURNS text
- AS
-'if "plan" not in SD:
- q = "SELECT fname FROM users WHERE lname = $1"
- try:
- SD["plan"] = plpy.prepare(q, [ "test" ])
- except plpy.SPIError as ex:
- plpy.notice(str(ex))
- return None
-rv = plpy.execute(SD["plan"], [ a ])
-if len(rv):
- return rv[0]["fname"]
-return None
-'
- LANGUAGE plpython3u;
-SELECT invalid_type_caught('rick');
-NOTICE: type "test" does not exist
- invalid_type_caught
----------------------
-
-(1 row)
-
-/* for what it's worth catch the exception generated by
- * the typo, and reraise it as a plain error
- */
-CREATE FUNCTION invalid_type_reraised(a text) RETURNS text
- AS
-'if "plan" not in SD:
- q = "SELECT fname FROM users WHERE lname = $1"
- try:
- SD["plan"] = plpy.prepare(q, [ "test" ])
- except plpy.SPIError as ex:
- plpy.error(str(ex))
-rv = plpy.execute(SD["plan"], [ a ])
-if len(rv):
- return rv[0]["fname"]
-return None
-'
- LANGUAGE plpython3u;
-SELECT invalid_type_reraised('rick');
-ERROR: plpy.Error: type "test" does not exist
-CONTEXT: Traceback (most recent call last):
- PL/Python function "invalid_type_reraised", line 6, in <module>
- plpy.error(str(ex))
-PL/Python function "invalid_type_reraised"
-/* no typo no messing about
- */
-CREATE FUNCTION valid_type(a text) RETURNS text
- AS
-'if "plan" not in SD:
- SD["plan"] = plpy.prepare("SELECT fname FROM users WHERE lname = $1", [ "text" ])
-rv = plpy.execute(SD["plan"], [ a ])
-if len(rv):
- return rv[0]["fname"]
-return None
-'
- LANGUAGE plpython3u;
-SELECT valid_type('rick');
- valid_type
-------------
-
-(1 row)
-
-/* error in nested functions to get a traceback
-*/
-CREATE FUNCTION nested_error() RETURNS text
- AS
-'def fun1():
- plpy.error("boom")
-
-def fun2():
- fun1()
-
-def fun3():
- fun2()
-
-fun3()
-return "not reached"
-'
- LANGUAGE plpython3u;
-SELECT nested_error();
-ERROR: plpy.Error: boom
-CONTEXT: Traceback (most recent call last):
- PL/Python function "nested_error", line 10, in <module>
- fun3()
- PL/Python function "nested_error", line 8, in fun3
- fun2()
- PL/Python function "nested_error", line 5, in fun2
- fun1()
- PL/Python function "nested_error", line 2, in fun1
- plpy.error("boom")
-PL/Python function "nested_error"
-/* raising plpy.Error is just like calling plpy.error
-*/
-CREATE FUNCTION nested_error_raise() RETURNS text
- AS
-'def fun1():
- raise plpy.Error("boom")
-
-def fun2():
- fun1()
-
-def fun3():
- fun2()
-
-fun3()
-return "not reached"
-'
- LANGUAGE plpython3u;
-SELECT nested_error_raise();
-ERROR: plpy.Error: boom
-CONTEXT: Traceback (most recent call last):
- PL/Python function "nested_error_raise", line 10, in <module>
- fun3()
- PL/Python function "nested_error_raise", line 8, in fun3
- fun2()
- PL/Python function "nested_error_raise", line 5, in fun2
- fun1()
- PL/Python function "nested_error_raise", line 2, in fun1
- raise plpy.Error("boom")
-PL/Python function "nested_error_raise"
-/* using plpy.warning should not produce a traceback
-*/
-CREATE FUNCTION nested_warning() RETURNS text
- AS
-'def fun1():
- plpy.warning("boom")
-
-def fun2():
- fun1()
-
-def fun3():
- fun2()
-
-fun3()
-return "you''ve been warned"
-'
- LANGUAGE plpython3u;
-SELECT nested_warning();
-WARNING: boom
- nested_warning
---------------------
- you've been warned
-(1 row)
-
-/* AttributeError at toplevel used to give segfaults with the traceback
-*/
-CREATE FUNCTION toplevel_attribute_error() RETURNS void AS
-$$
-plpy.nonexistent
-$$ LANGUAGE plpython3u;
-SELECT toplevel_attribute_error();
-ERROR: AttributeError: module 'plpy' has no attribute 'nonexistent'
-CONTEXT: Traceback (most recent call last):
- PL/Python function "toplevel_attribute_error", line 2, in <module>
- plpy.nonexistent
-PL/Python function "toplevel_attribute_error"
-/* Calling PL/Python functions from SQL and vice versa should not lose context.
- */
-CREATE OR REPLACE FUNCTION python_traceback() RETURNS void AS $$
-def first():
- second()
-
-def second():
- third()
-
-def third():
- plpy.execute("select sql_error()")
-
-first()
-$$ LANGUAGE plpython3u;
-CREATE OR REPLACE FUNCTION sql_error() RETURNS void AS $$
-begin
- select 1/0;
-end
-$$ LANGUAGE plpgsql;
-CREATE OR REPLACE FUNCTION python_from_sql_error() RETURNS void AS $$
-begin
- select python_traceback();
-end
-$$ LANGUAGE plpgsql;
-CREATE OR REPLACE FUNCTION sql_from_python_error() RETURNS void AS $$
-plpy.execute("select sql_error()")
-$$ LANGUAGE plpython3u;
-SELECT python_traceback();
-ERROR: spiexceptions.DivisionByZero: division by zero
-CONTEXT: Traceback (most recent call last):
- PL/Python function "python_traceback", line 11, in <module>
- first()
- PL/Python function "python_traceback", line 3, in first
- second()
- PL/Python function "python_traceback", line 6, in second
- third()
- PL/Python function "python_traceback", line 9, in third
- plpy.execute("select sql_error()")
-PL/Python function "python_traceback"
-SELECT sql_error();
-ERROR: division by zero
-CONTEXT: SQL statement "select 1/0"
-PL/pgSQL function sql_error() line 3 at SQL statement
-SELECT python_from_sql_error();
-ERROR: spiexceptions.DivisionByZero: division by zero
-CONTEXT: Traceback (most recent call last):
- PL/Python function "python_traceback", line 11, in <module>
- first()
- PL/Python function "python_traceback", line 3, in first
- second()
- PL/Python function "python_traceback", line 6, in second
- third()
- PL/Python function "python_traceback", line 9, in third
- plpy.execute("select sql_error()")
-PL/Python function "python_traceback"
-SQL statement "select python_traceback()"
-PL/pgSQL function python_from_sql_error() line 3 at SQL statement
-SELECT sql_from_python_error();
-ERROR: spiexceptions.DivisionByZero: division by zero
-CONTEXT: Traceback (most recent call last):
- PL/Python function "sql_from_python_error", line 2, in <module>
- plpy.execute("select sql_error()")
-PL/Python function "sql_from_python_error"
-/* check catching specific types of exceptions
- */
-CREATE TABLE specific (
- i integer PRIMARY KEY
-);
-CREATE FUNCTION specific_exception(i integer) RETURNS void AS
-$$
-from plpy import spiexceptions
-try:
- plpy.execute("insert into specific values (%s)" % (i or "NULL"));
-except spiexceptions.NotNullViolation as e:
- plpy.notice("Violated the NOT NULL constraint, sqlstate %s" % e.sqlstate)
-except spiexceptions.UniqueViolation as e:
- plpy.notice("Violated the UNIQUE constraint, sqlstate %s" % e.sqlstate)
-$$ LANGUAGE plpython3u;
-SELECT specific_exception(2);
- specific_exception
---------------------
-
-(1 row)
-
-SELECT specific_exception(NULL);
-NOTICE: Violated the NOT NULL constraint, sqlstate 23502
- specific_exception
---------------------
-
-(1 row)
-
-SELECT specific_exception(2);
-NOTICE: Violated the UNIQUE constraint, sqlstate 23505
- specific_exception
---------------------
-
-(1 row)
-
-/* SPI errors in PL/Python functions should preserve the SQLSTATE value
- */
-CREATE FUNCTION python_unique_violation() RETURNS void AS $$
-plpy.execute("insert into specific values (1)")
-plpy.execute("insert into specific values (1)")
-$$ LANGUAGE plpython3u;
-CREATE FUNCTION catch_python_unique_violation() RETURNS text AS $$
-begin
- begin
- perform python_unique_violation();
- exception when unique_violation then
- return 'ok';
- end;
- return 'not reached';
-end;
-$$ language plpgsql;
-SELECT catch_python_unique_violation();
- catch_python_unique_violation
--------------------------------
- ok
-(1 row)
-
-/* manually starting subtransactions - a bad idea
- */
-CREATE FUNCTION manual_subxact() RETURNS void AS $$
-plpy.execute("savepoint save")
-plpy.execute("create table foo(x integer)")
-plpy.execute("rollback to save")
-$$ LANGUAGE plpython3u;
-SELECT manual_subxact();
-ERROR: plpy.SPIError: SPI_execute failed: SPI_ERROR_TRANSACTION
-CONTEXT: Traceback (most recent call last):
- PL/Python function "manual_subxact", line 2, in <module>
- plpy.execute("savepoint save")
-PL/Python function "manual_subxact"
-/* same for prepared plans
- */
-CREATE FUNCTION manual_subxact_prepared() RETURNS void AS $$
-save = plpy.prepare("savepoint save")
-rollback = plpy.prepare("rollback to save")
-plpy.execute(save)
-plpy.execute("create table foo(x integer)")
-plpy.execute(rollback)
-$$ LANGUAGE plpython3u;
-SELECT manual_subxact_prepared();
-ERROR: plpy.SPIError: SPI_execute_plan failed: SPI_ERROR_TRANSACTION
-CONTEXT: Traceback (most recent call last):
- PL/Python function "manual_subxact_prepared", line 4, in <module>
- plpy.execute(save)
-PL/Python function "manual_subxact_prepared"
-/* raising plpy.spiexception.* from python code should preserve sqlstate
- */
-CREATE FUNCTION plpy_raise_spiexception() RETURNS void AS $$
-raise plpy.spiexceptions.DivisionByZero()
-$$ LANGUAGE plpython3u;
-DO $$
-BEGIN
- SELECT plpy_raise_spiexception();
-EXCEPTION WHEN division_by_zero THEN
- -- NOOP
-END
-$$ LANGUAGE plpgsql;
-/* setting a custom sqlstate should be handled
- */
-CREATE FUNCTION plpy_raise_spiexception_override() RETURNS void AS $$
-exc = plpy.spiexceptions.DivisionByZero()
-exc.sqlstate = 'SILLY'
-raise exc
-$$ LANGUAGE plpython3u;
-DO $$
-BEGIN
- SELECT plpy_raise_spiexception_override();
-EXCEPTION WHEN SQLSTATE 'SILLY' THEN
- -- NOOP
-END
-$$ LANGUAGE plpgsql;
-/* test the context stack trace for nested execution levels
- */
-CREATE FUNCTION notice_innerfunc() RETURNS int AS $$
-plpy.execute("DO LANGUAGE plpython3u $x$ plpy.notice('inside DO') $x$")
-return 1
-$$ LANGUAGE plpython3u;
-CREATE FUNCTION notice_outerfunc() RETURNS int AS $$
-plpy.execute("SELECT notice_innerfunc()")
-return 1
-$$ LANGUAGE plpython3u;
-\set SHOW_CONTEXT always
-SELECT notice_outerfunc();
-NOTICE: inside DO
-CONTEXT: PL/Python anonymous code block
-SQL statement "DO LANGUAGE plpython3u $x$ plpy.notice('inside DO') $x$"
-PL/Python function "notice_innerfunc"
-SQL statement "SELECT notice_innerfunc()"
-PL/Python function "notice_outerfunc"
- notice_outerfunc
-------------------
- 1
-(1 row)
-
-/* test error logged with an underlying exception that includes a detail
- * string (bug #18070).
- */
-CREATE FUNCTION python_error_detail() RETURNS SETOF text AS $$
- plan = plpy.prepare("SELECT to_date('xy', 'DD') d")
- for row in plpy.cursor(plan):
- yield row['d']
-$$ LANGUAGE plpython3u;
-SELECT python_error_detail();
-ERROR: error fetching next item from iterator
-DETAIL: spiexceptions.InvalidDatetimeFormat: invalid value "xy" for "DD"
-CONTEXT: Traceback (most recent call last):
-PL/Python function "python_error_detail"
diff --git a/src/pl/plpython/plpy_cursorobject.c b/src/pl/plpython/plpy_cursorobject.c
index 37d7efca77c..cc74c4df6ba 100644
--- a/src/pl/plpython/plpy_cursorobject.c
+++ b/src/pl/plpython/plpy_cursorobject.c
@@ -58,9 +58,9 @@ static PyType_Slot PLyCursor_slots[] =
static PyType_Spec PLyCursor_spec =
{
.name = "PLyCursor",
- .basicsize = sizeof(PLyCursorObject),
- .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
- .slots = PLyCursor_slots,
+ .basicsize = sizeof(PLyCursorObject),
+ .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
+ .slots = PLyCursor_slots,
};
static PyTypeObject *PLy_CursorType;
diff --git a/src/pl/plpython/plpy_elog.c b/src/pl/plpython/plpy_elog.c
index ddf3573f0e7..f6d10045e5c 100644
--- a/src/pl/plpython/plpy_elog.c
+++ b/src/pl/plpython/plpy_elog.c
@@ -18,7 +18,8 @@ PyObject *PLy_exc_spi_error = NULL;
static void PLy_traceback(PyObject *e, PyObject *v, PyObject *tb,
- char **xmsg, char **tbmsg, int *tb_depth);
+ char *volatile *xmsg, char *volatile *tbmsg,
+ int *tb_depth);
static void PLy_get_spi_error_data(PyObject *exc, int *sqlerrcode, char **detail,
char **hint, char **query, int *position,
char **schema_name, char **table_name, char **column_name,
@@ -43,78 +44,82 @@ void
PLy_elog_impl(int elevel, const char *fmt,...)
{
int save_errno = errno;
- char *xmsg;
- char *tbmsg;
+ char *volatile xmsg = NULL;
+ char *volatile tbmsg = NULL;
int tb_depth;
StringInfoData emsg;
PyObject *exc,
*val,
*tb;
- const char *primary = NULL;
- int sqlerrcode = 0;
- char *detail = NULL;
- char *hint = NULL;
- char *query = NULL;
- int position = 0;
- char *schema_name = NULL;
- char *table_name = NULL;
- char *column_name = NULL;
- char *datatype_name = NULL;
- char *constraint_name = NULL;
+
+ /* If we'll need emsg, must initialize it before entering PG_TRY */
+ if (fmt)
+ initStringInfo(&emsg);
PyErr_Fetch(&exc, &val, &tb);
- if (exc != NULL)
+ /* Use a PG_TRY block to ensure we release the PyObjects just acquired */
+ PG_TRY();
{
- PyErr_NormalizeException(&exc, &val, &tb);
-
- if (PyErr_GivenExceptionMatches(val, PLy_exc_spi_error))
- PLy_get_spi_error_data(val, &sqlerrcode,
- &detail, &hint, &query, &position,
+ const char *primary = NULL;
+ int sqlerrcode = 0;
+ char *detail = NULL;
+ char *hint = NULL;
+ char *query = NULL;
+ int position = 0;
+ char *schema_name = NULL;
+ char *table_name = NULL;
+ char *column_name = NULL;
+ char *datatype_name = NULL;
+ char *constraint_name = NULL;
+
+ if (exc != NULL)
+ {
+ PyErr_NormalizeException(&exc, &val, &tb);
+
+ if (PyErr_GivenExceptionMatches(val, PLy_exc_spi_error))
+ PLy_get_spi_error_data(val, &sqlerrcode,
+ &detail, &hint, &query, &position,
+ &schema_name, &table_name, &column_name,
+ &datatype_name, &constraint_name);
+ else if (PyErr_GivenExceptionMatches(val, PLy_exc_error))
+ PLy_get_error_data(val, &sqlerrcode, &detail, &hint,
&schema_name, &table_name, &column_name,
&datatype_name, &constraint_name);
- else if (PyErr_GivenExceptionMatches(val, PLy_exc_error))
- PLy_get_error_data(val, &sqlerrcode, &detail, &hint,
- &schema_name, &table_name, &column_name,
- &datatype_name, &constraint_name);
- else if (PyErr_GivenExceptionMatches(val, PLy_exc_fatal))
- elevel = FATAL;
- }
+ else if (PyErr_GivenExceptionMatches(val, PLy_exc_fatal))
+ elevel = FATAL;
+ }
- /* this releases our refcount on tb! */
- PLy_traceback(exc, val, tb,
- &xmsg, &tbmsg, &tb_depth);
+ PLy_traceback(exc, val, tb,
+ &xmsg, &tbmsg, &tb_depth);
- if (fmt)
- {
- initStringInfo(&emsg);
- for (;;)
+ if (fmt)
{
- va_list ap;
- int needed;
-
- errno = save_errno;
- va_start(ap, fmt);
- needed = appendStringInfoVA(&emsg, dgettext(TEXTDOMAIN, fmt), ap);
- va_end(ap);
- if (needed == 0)
- break;
- enlargeStringInfo(&emsg, needed);
- }
- primary = emsg.data;
+ for (;;)
+ {
+ va_list ap;
+ int needed;
+
+ errno = save_errno;
+ va_start(ap, fmt);
+ needed = appendStringInfoVA(&emsg, dgettext(TEXTDOMAIN, fmt), ap);
+ va_end(ap);
+ if (needed == 0)
+ break;
+ enlargeStringInfo(&emsg, needed);
+ }
+ primary = emsg.data;
- /* If there's an exception message, it goes in the detail. */
- if (xmsg)
- detail = xmsg;
- }
- else
- {
- if (xmsg)
- primary = xmsg;
- }
+ /* If there's an exception message, it goes in the detail. */
+ if (xmsg)
+ detail = xmsg;
+ }
+ else
+ {
+ if (xmsg)
+ primary = xmsg;
+ }
- PG_TRY();
- {
ereport(elevel,
(errcode(sqlerrcode ? sqlerrcode : ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
errmsg_internal("%s", primary ? primary : "no exception data"),
@@ -136,14 +141,23 @@ PLy_elog_impl(int elevel, const char *fmt,...)
}
PG_FINALLY();
{
+ Py_XDECREF(exc);
+ Py_XDECREF(val);
+ /* Must release all the objects in the traceback stack */
+ while (tb != NULL && tb != Py_None)
+ {
+ PyObject *tb_prev = tb;
+
+ tb = PyObject_GetAttrString(tb, "tb_next");
+ Py_DECREF(tb_prev);
+ }
+ /* For neatness' sake, also release our string buffers */
if (fmt)
pfree(emsg.data);
if (xmsg)
pfree(xmsg);
if (tbmsg)
pfree(tbmsg);
- Py_XDECREF(exc);
- Py_XDECREF(val);
}
PG_END_TRY();
}
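The volatile qualifiers above follow the standard PG_TRY() rule: locals that are assigned inside the PG_TRY block and then read in PG_CATCH() or PG_FINALLY() must be volatile-qualified, because the underlying setjmp/longjmp lets the compiler keep non-volatile locals in registers whose post-longjmp contents are indeterminate. In miniature (a sketch, not project code):

static void
example_with_cleanup(void)
{
	char	   *volatile buf = NULL;	/* written in TRY, read in FINALLY */

	PG_TRY();
	{
		buf = palloc(128);
		/* ... work that may ereport(ERROR) ... */
	}
	PG_FINALLY();
	{
		if (buf)
			pfree(buf);
	}
	PG_END_TRY();
}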
@@ -154,21 +168,14 @@ PLy_elog_impl(int elevel, const char *fmt,...)
* The exception error message is returned in xmsg, the traceback in
* tbmsg (both as palloc'd strings) and the traceback depth in
* tb_depth.
- *
- * We release refcounts on all the Python objects in the traceback stack,
- * but not on e or v.
*/
static void
PLy_traceback(PyObject *e, PyObject *v, PyObject *tb,
- char **xmsg, char **tbmsg, int *tb_depth)
+ char *volatile *xmsg, char *volatile *tbmsg, int *tb_depth)
{
- PyObject *e_type_o;
- PyObject *e_module_o;
- char *e_type_s = NULL;
- char *e_module_s = NULL;
- PyObject *vob = NULL;
- char *vstr;
- StringInfoData xstr;
+ PyObject *volatile e_type_o = NULL;
+ PyObject *volatile e_module_o = NULL;
+ PyObject *volatile vob = NULL;
StringInfoData tbstr;
/*
@@ -186,47 +193,59 @@ PLy_traceback(PyObject *e, PyObject *v, PyObject *tb,
/*
* Format the exception and its value and put it in xmsg.
*/
-
- e_type_o = PyObject_GetAttrString(e, "__name__");
- e_module_o = PyObject_GetAttrString(e, "__module__");
- if (e_type_o)
- e_type_s = PLyUnicode_AsString(e_type_o);
- if (e_type_s)
- e_module_s = PLyUnicode_AsString(e_module_o);
-
- if (v && ((vob = PyObject_Str(v)) != NULL))
- vstr = PLyUnicode_AsString(vob);
- else
- vstr = "unknown";
-
- initStringInfo(&xstr);
- if (!e_type_s || !e_module_s)
+ PG_TRY();
{
- /* shouldn't happen */
- appendStringInfoString(&xstr, "unrecognized exception");
+ char *e_type_s = NULL;
+ char *e_module_s = NULL;
+ const char *vstr;
+ StringInfoData xstr;
+
+ e_type_o = PyObject_GetAttrString(e, "__name__");
+ e_module_o = PyObject_GetAttrString(e, "__module__");
+ if (e_type_o)
+ e_type_s = PLyUnicode_AsString(e_type_o);
+ if (e_module_o)
+ e_module_s = PLyUnicode_AsString(e_module_o);
+
+ if (v && ((vob = PyObject_Str(v)) != NULL))
+ vstr = PLyUnicode_AsString(vob);
+ else
+ vstr = "unknown";
+
+ initStringInfo(&xstr);
+ if (!e_type_s || !e_module_s)
+ {
+ /* shouldn't happen */
+ appendStringInfoString(&xstr, "unrecognized exception");
+ }
+ /* mimics behavior of traceback.format_exception_only */
+ else if (strcmp(e_module_s, "builtins") == 0
+ || strcmp(e_module_s, "__main__") == 0
+ || strcmp(e_module_s, "exceptions") == 0)
+ appendStringInfoString(&xstr, e_type_s);
+ else
+ appendStringInfo(&xstr, "%s.%s", e_module_s, e_type_s);
+ appendStringInfo(&xstr, ": %s", vstr);
+
+ *xmsg = xstr.data;
}
- /* mimics behavior of traceback.format_exception_only */
- else if (strcmp(e_module_s, "builtins") == 0
- || strcmp(e_module_s, "__main__") == 0
- || strcmp(e_module_s, "exceptions") == 0)
- appendStringInfoString(&xstr, e_type_s);
- else
- appendStringInfo(&xstr, "%s.%s", e_module_s, e_type_s);
- appendStringInfo(&xstr, ": %s", vstr);
-
- *xmsg = xstr.data;
+ PG_FINALLY();
+ {
+ Py_XDECREF(e_type_o);
+ Py_XDECREF(e_module_o);
+ Py_XDECREF(vob);
+ }
+ PG_END_TRY();
/*
* Now format the traceback and put it in tbmsg.
*/
-
*tb_depth = 0;
initStringInfo(&tbstr);
/* Mimic Python traceback reporting as close as possible. */
appendStringInfoString(&tbstr, "Traceback (most recent call last):");
while (tb != NULL && tb != Py_None)
{
- PyObject *volatile tb_prev = NULL;
PyObject *volatile frame = NULL;
PyObject *volatile code = NULL;
PyObject *volatile name = NULL;
@@ -254,84 +273,74 @@ PLy_traceback(PyObject *e, PyObject *v, PyObject *tb,
filename = PyObject_GetAttrString(code, "co_filename");
if (filename == NULL)
elog(ERROR, "could not get file name from Python code object");
+
+ /* The first frame always points at <module>, skip it. */
+ if (*tb_depth > 0)
+ {
+ PLyExecutionContext *exec_ctx = PLy_current_execution_context();
+ char *proname;
+ char *fname;
+ char *line;
+ char *plain_filename;
+ long plain_lineno;
+
+ /*
+ * The second frame points at the internal function, but to
+ * mimic Python error reporting we want to say <module>.
+ */
+ if (*tb_depth == 1)
+ fname = "<module>";
+ else
+ fname = PLyUnicode_AsString(name);
+
+ proname = PLy_procedure_name(exec_ctx->curr_proc);
+ plain_filename = PLyUnicode_AsString(filename);
+ plain_lineno = PyLong_AsLong(lineno);
+
+ if (proname == NULL)
+ appendStringInfo(&tbstr, "\n PL/Python anonymous code block, line %ld, in %s",
+ plain_lineno - 1, fname);
+ else
+ appendStringInfo(&tbstr, "\n PL/Python function \"%s\", line %ld, in %s",
+ proname, plain_lineno - 1, fname);
+
+ /*
+ * function code object was compiled with "<string>" as the
+ * filename
+ */
+ if (exec_ctx->curr_proc && plain_filename != NULL &&
+ strcmp(plain_filename, "<string>") == 0)
+ {
+ /*
+ * If we know the current procedure, append the exact line
+ * from the source, again mimicking Python's traceback.py
+ * module behavior. We could store the already line-split
+ * source to avoid splitting it every time, but producing
+ * a traceback is not the most important scenario to
+ * optimize for. But we do not go as far as traceback.py
+ * in reading the source of imported modules.
+ */
+ line = get_source_line(exec_ctx->curr_proc->src, plain_lineno);
+ if (line)
+ {
+ appendStringInfo(&tbstr, "\n %s", line);
+ pfree(line);
+ }
+ }
+ }
}
- PG_CATCH();
+ PG_FINALLY();
{
Py_XDECREF(frame);
Py_XDECREF(code);
Py_XDECREF(name);
Py_XDECREF(lineno);
Py_XDECREF(filename);
- PG_RE_THROW();
}
PG_END_TRY();
- /* The first frame always points at <module>, skip it. */
- if (*tb_depth > 0)
- {
- PLyExecutionContext *exec_ctx = PLy_current_execution_context();
- char *proname;
- char *fname;
- char *line;
- char *plain_filename;
- long plain_lineno;
-
- /*
- * The second frame points at the internal function, but to mimic
- * Python error reporting we want to say <module>.
- */
- if (*tb_depth == 1)
- fname = "<module>";
- else
- fname = PLyUnicode_AsString(name);
-
- proname = PLy_procedure_name(exec_ctx->curr_proc);
- plain_filename = PLyUnicode_AsString(filename);
- plain_lineno = PyLong_AsLong(lineno);
-
- if (proname == NULL)
- appendStringInfo(&tbstr, "\n PL/Python anonymous code block, line %ld, in %s",
- plain_lineno - 1, fname);
- else
- appendStringInfo(&tbstr, "\n PL/Python function \"%s\", line %ld, in %s",
- proname, plain_lineno - 1, fname);
-
- /*
- * function code object was compiled with "<string>" as the
- * filename
- */
- if (exec_ctx->curr_proc && plain_filename != NULL &&
- strcmp(plain_filename, "<string>") == 0)
- {
- /*
- * If we know the current procedure, append the exact line
- * from the source, again mimicking Python's traceback.py
- * module behavior. We could store the already line-split
- * source to avoid splitting it every time, but producing a
- * traceback is not the most important scenario to optimize
- * for. But we do not go as far as traceback.py in reading
- * the source of imported modules.
- */
- line = get_source_line(exec_ctx->curr_proc->src, plain_lineno);
- if (line)
- {
- appendStringInfo(&tbstr, "\n %s", line);
- pfree(line);
- }
- }
- }
-
- Py_DECREF(frame);
- Py_DECREF(code);
- Py_DECREF(name);
- Py_DECREF(lineno);
- Py_DECREF(filename);
-
- /* Release the current frame and go to the next one. */
- tb_prev = tb;
+ /* Advance to the next frame. */
tb = PyObject_GetAttrString(tb, "tb_next");
- Assert(tb_prev != Py_None);
- Py_DECREF(tb_prev);
if (tb == NULL)
elog(ERROR, "could not traverse Python traceback");
(*tb_depth)++;
@@ -339,10 +348,6 @@ PLy_traceback(PyObject *e, PyObject *v, PyObject *tb,
/* Return the traceback. */
*tbmsg = tbstr.data;
-
- Py_XDECREF(e_type_o);
- Py_XDECREF(e_module_o);
- Py_XDECREF(vob);
}
/*
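
The rewritten error paths above rely on one idiom throughout: every Python object pointer is declared PyObject *volatile, initialized to NULL, and released with Py_XDECREF() inside PG_FINALLY(), so reference counts stay balanced whether or not ereport() throws. The volatile qualifier matters because PG_TRY() is built on sigsetjmp(): locals assigned inside the try block could otherwise come back as stale register copies after a longjmp. A minimal sketch of the idiom, with do_something_that_may_ereport() as a hypothetical helper rather than actual PL/Python code:

    #include "postgres.h"
    #include <Python.h>

    static void
    use_python_attrs(PyObject *obj)
    {
        /* volatile + NULL init: safe to release even after an early error */
        PyObject *volatile name = NULL;
        PyObject *volatile module = NULL;

        PG_TRY();
        {
            name = PyObject_GetAttrString(obj, "__name__");
            module = PyObject_GetAttrString(obj, "__module__");
            do_something_that_may_ereport(name, module);    /* hypothetical */
        }
        PG_FINALLY();
        {
            /* runs on success and on error; Py_XDECREF tolerates NULL */
            Py_XDECREF(name);
            Py_XDECREF(module);
        }
        PG_END_TRY();
    }
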
diff --git a/src/pl/plpython/plpy_planobject.c b/src/pl/plpython/plpy_planobject.c
index 6044893afdd..edfb76c8770 100644
--- a/src/pl/plpython/plpy_planobject.c
+++ b/src/pl/plpython/plpy_planobject.c
@@ -45,9 +45,9 @@ static PyType_Slot PLyPlan_slots[] =
static PyType_Spec PLyPlan_spec =
{
.name = "PLyPlan",
- .basicsize = sizeof(PLyPlanObject),
- .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
- .slots = PLyPlan_slots,
+ .basicsize = sizeof(PLyPlanObject),
+ .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
+ .slots = PLyPlan_slots,
};
static PyTypeObject *PLy_PlanType;
diff --git a/src/pl/plpython/plpy_resultobject.c b/src/pl/plpython/plpy_resultobject.c
index 0d9997cbaa3..d433929b360 100644
--- a/src/pl/plpython/plpy_resultobject.c
+++ b/src/pl/plpython/plpy_resultobject.c
@@ -70,9 +70,9 @@ static PyType_Slot PLyResult_slots[] =
static PyType_Spec PLyResult_spec =
{
.name = "PLyResult",
- .basicsize = sizeof(PLyResultObject),
- .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
- .slots = PLyResult_slots,
+ .basicsize = sizeof(PLyResultObject),
+ .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
+ .slots = PLyResult_slots,
};
static PyTypeObject *PLy_ResultType;
diff --git a/src/pl/plpython/plpy_subxactobject.c b/src/pl/plpython/plpy_subxactobject.c
index c2484a99b4a..c225b652ab4 100644
--- a/src/pl/plpython/plpy_subxactobject.c
+++ b/src/pl/plpython/plpy_subxactobject.c
@@ -46,9 +46,9 @@ static PyType_Slot PLySubtransaction_slots[] =
static PyType_Spec PLySubtransaction_spec =
{
.name = "PLySubtransaction",
- .basicsize = sizeof(PLySubtransactionObject),
- .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
- .slots = PLySubtransaction_slots,
+ .basicsize = sizeof(PLySubtransactionObject),
+ .flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE,
+ .slots = PLySubtransaction_slots,
};
static PyTypeObject *PLy_SubtransactionType;
diff --git a/src/port/explicit_bzero.c b/src/port/explicit_bzero.c
index 1d37b119bab..53766e86e94 100644
--- a/src/port/explicit_bzero.c
+++ b/src/port/explicit_bzero.c
@@ -12,9 +12,11 @@
*-------------------------------------------------------------------------
*/
+#define __STDC_WANT_LIB_EXT1__ 1 /* needed to access memset_s() */
+
#include "c.h"
-#if defined(HAVE_MEMSET_S)
+#if HAVE_DECL_MEMSET_S
void
explicit_bzero(void *buf, size_t len)
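
memset_s() comes from C11's optional Annex K and is only declared when __STDC_WANT_LIB_EXT1__ is defined to 1 before the system headers are included, which is why the define is added above the include of c.h. Unlike a plain memset() on memory that is never read again, a conforming memset_s() may not be optimized away as a dead store. A hedged sketch of the shape this implementation takes (the real file also provides fallbacks for platforms without memset_s()):

    #define __STDC_WANT_LIB_EXT1__ 1    /* must precede <string.h> */
    #include <string.h>

    void
    explicit_bzero(void *buf, size_t len)
    {
        /* memset_s(s, smax, c, n): the write cannot be elided, which is
         * the whole point when wiping passwords and other secrets */
        (void) memset_s(buf, len, 0, len);
    }
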
diff --git a/src/port/pg_crc32c_sse42.c b/src/port/pg_crc32c_sse42.c
index 9af3474a6ca..1a717255355 100644
--- a/src/port/pg_crc32c_sse42.c
+++ b/src/port/pg_crc32c_sse42.c
@@ -123,7 +123,7 @@ pg_comp_crc32c_avx512(pg_crc32c crc, const void *data, size_t len)
__m512i k;
k = _mm512_broadcast_i32x4(_mm_setr_epi32(0x740eef02, 0, 0x9e4addf8, 0));
- x0 = _mm512_xor_si512(_mm512_castsi128_si512(_mm_cvtsi32_si128(crc0)), x0);
+ x0 = _mm512_xor_si512(_mm512_zextsi128_si512(_mm_cvtsi32_si128(crc0)), x0);
buf += 64;
/* Main loop. */
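
The one-line substitution above is a correctness fix, not a cleanup: _mm512_castsi128_si512() merely reinterprets the 128-bit register and leaves bits 128..511 of the 512-bit result undefined, whereas _mm512_zextsi128_si512() guarantees they are zero. XORing the 32-bit CRC into the data vector is only valid when those upper lanes are zero. A minimal illustration (a sketch; building it requires an AVX-512F-enabled target):

    #include <immintrin.h>
    #include <stdint.h>

    /* Fold a 32-bit CRC into the low 32 bits of a 512-bit vector. */
    static inline __m512i
    fold_crc_into_vector(__m512i x0, uint32_t crc0)
    {
        /* zext zeroes bits 128..511, so the XOR touches only the low
         * lane; with the cast intrinsic those bits are undefined */
        return _mm512_xor_si512(_mm512_zextsi128_si512(_mm_cvtsi32_si128((int) crc0)),
                                x0);
    }
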
diff --git a/src/port/pg_localeconv_r.c b/src/port/pg_localeconv_r.c
index 4554ab84e9b..61510b2e0ea 100644
--- a/src/port/pg_localeconv_r.c
+++ b/src/port/pg_localeconv_r.c
@@ -3,7 +3,7 @@
* pg_localeconv_r.c
* Thread-safe implementations of localeconv()
*
- * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
diff --git a/src/test/authentication/t/001_password.pl b/src/test/authentication/t/001_password.pl
index 37d96d95a1a..a16e9a563f3 100644
--- a/src/test/authentication/t/001_password.pl
+++ b/src/test/authentication/t/001_password.pl
@@ -79,39 +79,40 @@ $node->start;
# other tests are added to this file in the future
$node->safe_psql('postgres', "CREATE DATABASE test_log_connections");
-my $log_connections = $node->safe_psql('test_log_connections', q(SHOW log_connections;));
+my $log_connections =
+ $node->safe_psql('test_log_connections', q(SHOW log_connections;));
is($log_connections, 'on', qq(check log connections has expected value 'on'));
-$node->connect_ok('test_log_connections',
+$node->connect_ok(
+ 'test_log_connections',
qq(log_connections 'on' works as expected for backwards compatibility),
log_like => [
qr/connection received/,
qr/connection authenticated/,
qr/connection authorized: user=\S+ database=test_log_connections/,
],
- log_unlike => [
- qr/connection ready/,
- ],);
+ log_unlike => [ qr/connection ready/, ],);
-$node->safe_psql('test_log_connections',
+$node->safe_psql(
+ 'test_log_connections',
q[ALTER SYSTEM SET log_connections = receipt,authorization,setup_durations;
SELECT pg_reload_conf();]);
-$node->connect_ok('test_log_connections',
+$node->connect_ok(
+ 'test_log_connections',
q(log_connections with subset of specified options logs only those aspects),
log_like => [
qr/connection received/,
qr/connection authorized: user=\S+ database=test_log_connections/,
qr/connection ready/,
],
- log_unlike => [
- qr/connection authenticated/,
- ],);
+ log_unlike => [ qr/connection authenticated/, ],);
$node->safe_psql('test_log_connections',
qq(ALTER SYSTEM SET log_connections = 'all'; SELECT pg_reload_conf();));
-$node->connect_ok('test_log_connections',
+$node->connect_ok(
+ 'test_log_connections',
qq(log_connections 'all' logs all available connection aspects),
log_like => [
qr/connection received/,
diff --git a/src/test/authentication/t/003_peer.pl b/src/test/authentication/t/003_peer.pl
index 2879800eacf..f2320b62c87 100644
--- a/src/test/authentication/t/003_peer.pl
+++ b/src/test/authentication/t/003_peer.pl
@@ -71,7 +71,7 @@ sub test_role
my $node = PostgreSQL::Test::Cluster->new('node');
$node->init;
-$node->append_conf('postgresql.conf', "log_connections = on\n");
+$node->append_conf('postgresql.conf', "log_connections = authentication\n");
# Needed to allow connect_fails to inspect postmaster log:
$node->append_conf('postgresql.conf', "log_min_messages = debug2");
$node->start;
diff --git a/src/test/authentication/t/005_sspi.pl b/src/test/authentication/t/005_sspi.pl
index b480b702590..cb3e169002f 100644
--- a/src/test/authentication/t/005_sspi.pl
+++ b/src/test/authentication/t/005_sspi.pl
@@ -18,7 +18,7 @@ if (!$windows_os || $use_unix_sockets)
# Initialize primary node
my $node = PostgreSQL::Test::Cluster->new('primary');
$node->init;
-$node->append_conf('postgresql.conf', "log_connections = on\n");
+$node->append_conf('postgresql.conf', "log_connections = authentication\n");
$node->start;
my $huge_pages_status =
diff --git a/src/test/authentication/t/007_pre_auth.pl b/src/test/authentication/t/007_pre_auth.pl
index 12e40dc722c..7b3765e6d25 100644
--- a/src/test/authentication/t/007_pre_auth.pl
+++ b/src/test/authentication/t/007_pre_auth.pl
@@ -20,7 +20,7 @@ my $node = PostgreSQL::Test::Cluster->new('primary');
$node->init;
$node->append_conf(
'postgresql.conf', q[
-log_connections = on
+log_connections = 'receipt,authentication'
]);
$node->start;
diff --git a/src/test/kerberos/t/001_auth.pl b/src/test/kerberos/t/001_auth.pl
index 2dc6bec9b89..b0be96f2beb 100644
--- a/src/test/kerberos/t/001_auth.pl
+++ b/src/test/kerberos/t/001_auth.pl
@@ -65,7 +65,7 @@ $node->append_conf(
'postgresql.conf', qq{
listen_addresses = '$hostaddr'
krb_server_keyfile = '$krb->{keytab}'
-log_connections = on
+log_connections = all
log_min_messages = debug2
lc_messages = 'C'
});
diff --git a/src/test/ldap/t/001_auth.pl b/src/test/ldap/t/001_auth.pl
index d1315ed5351..440c30b7ddd 100644
--- a/src/test/ldap/t/001_auth.pl
+++ b/src/test/ldap/t/001_auth.pl
@@ -47,7 +47,7 @@ note "setting up PostgreSQL instance";
my $node = PostgreSQL::Test::Cluster->new('node');
$node->init;
-$node->append_conf('postgresql.conf', "log_connections = on\n");
+$node->append_conf('postgresql.conf', "log_connections = all\n");
# Needed to allow connect_fails to inspect postmaster log:
$node->append_conf('postgresql.conf', "log_min_messages = debug2");
$node->start;
diff --git a/src/test/ldap/t/002_bindpasswd.pl b/src/test/ldap/t/002_bindpasswd.pl
index f8beba2b279..642bb2d9a77 100644
--- a/src/test/ldap/t/002_bindpasswd.pl
+++ b/src/test/ldap/t/002_bindpasswd.pl
@@ -43,7 +43,7 @@ note "setting up PostgreSQL instance";
my $node = PostgreSQL::Test::Cluster->new('node');
$node->init;
-$node->append_conf('postgresql.conf', "log_connections = on\n");
+$node->append_conf('postgresql.conf', "log_connections = all\n");
$node->start;
$node->safe_psql('postgres', 'CREATE USER test0;');
diff --git a/src/test/modules/injection_points/Makefile b/src/test/modules/injection_points/Makefile
index e680991f8d4..fc82cd67f6c 100644
--- a/src/test/modules/injection_points/Makefile
+++ b/src/test/modules/injection_points/Makefile
@@ -11,7 +11,7 @@ EXTENSION = injection_points
DATA = injection_points--1.0.sql
PGFILEDESC = "injection_points - facility for injection points"
-REGRESS = injection_points hashagg reindex_conc
+REGRESS = injection_points hashagg reindex_conc vacuum
REGRESS_OPTS = --dlpath=$(top_builddir)/src/test/regress
ISOLATION = basic inplace syscache-update-pruned
diff --git a/src/test/modules/injection_points/expected/vacuum.out b/src/test/modules/injection_points/expected/vacuum.out
new file mode 100644
index 00000000000..58df59fa927
--- /dev/null
+++ b/src/test/modules/injection_points/expected/vacuum.out
@@ -0,0 +1,122 @@
+-- Tests for VACUUM
+CREATE EXTENSION injection_points;
+SELECT injection_points_set_local();
+ injection_points_set_local
+----------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-index-cleanup-auto', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-index-cleanup-disabled', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-index-cleanup-enabled', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-truncate-auto', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-truncate-disabled', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_attach('vacuum-truncate-enabled', 'notice');
+ injection_points_attach
+-------------------------
+
+(1 row)
+
+-- Check state of index_cleanup and truncate in VACUUM.
+CREATE TABLE vac_tab_on_toast_off(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=true, toast.vacuum_index_cleanup=false,
+ vacuum_truncate=true, toast.vacuum_truncate=false);
+CREATE TABLE vac_tab_off_toast_on(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=false, toast.vacuum_index_cleanup=true,
+ vacuum_truncate=false, toast.vacuum_truncate=true);
+-- Multiple relations should use their options in isolation.
+VACUUM vac_tab_on_toast_off, vac_tab_off_toast_on;
+NOTICE: notice triggered for injection point vacuum-index-cleanup-enabled
+NOTICE: notice triggered for injection point vacuum-truncate-enabled
+NOTICE: notice triggered for injection point vacuum-index-cleanup-disabled
+NOTICE: notice triggered for injection point vacuum-truncate-disabled
+NOTICE: notice triggered for injection point vacuum-index-cleanup-disabled
+NOTICE: notice triggered for injection point vacuum-truncate-disabled
+NOTICE: notice triggered for injection point vacuum-index-cleanup-enabled
+NOTICE: notice triggered for injection point vacuum-truncate-enabled
+-- Check "auto" case of index_cleanup and "truncate" controlled by
+-- its GUC.
+CREATE TABLE vac_tab_auto(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=auto, toast.vacuum_index_cleanup=auto);
+SET vacuum_truncate = false;
+VACUUM vac_tab_auto;
+NOTICE: notice triggered for injection point vacuum-index-cleanup-auto
+NOTICE: notice triggered for injection point vacuum-truncate-disabled
+NOTICE: notice triggered for injection point vacuum-index-cleanup-auto
+NOTICE: notice triggered for injection point vacuum-truncate-disabled
+SET vacuum_truncate = true;
+VACUUM vac_tab_auto;
+NOTICE: notice triggered for injection point vacuum-index-cleanup-auto
+NOTICE: notice triggered for injection point vacuum-truncate-enabled
+NOTICE: notice triggered for injection point vacuum-index-cleanup-auto
+NOTICE: notice triggered for injection point vacuum-truncate-enabled
+RESET vacuum_truncate;
+DROP TABLE vac_tab_auto;
+DROP TABLE vac_tab_on_toast_off;
+DROP TABLE vac_tab_off_toast_on;
+-- Cleanup
+SELECT injection_points_detach('vacuum-index-cleanup-auto');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_detach('vacuum-index-cleanup-disabled');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_detach('vacuum-index-cleanup-enabled');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_detach('vacuum-truncate-auto');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_detach('vacuum-truncate-disabled');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+SELECT injection_points_detach('vacuum-truncate-enabled');
+ injection_points_detach
+-------------------------
+
+(1 row)
+
+DROP EXTENSION injection_points;
diff --git a/src/test/modules/injection_points/meson.build b/src/test/modules/injection_points/meson.build
index d61149712fd..ce778ccf9ac 100644
--- a/src/test/modules/injection_points/meson.build
+++ b/src/test/modules/injection_points/meson.build
@@ -37,6 +37,7 @@ tests += {
'injection_points',
'hashagg',
'reindex_conc',
+ 'vacuum',
],
'regress_args': ['--dlpath', meson.build_root() / 'src/test/regress'],
# The injection points are cluster-wide, so disable installcheck
diff --git a/src/test/modules/injection_points/sql/vacuum.sql b/src/test/modules/injection_points/sql/vacuum.sql
new file mode 100644
index 00000000000..23760dd0f38
--- /dev/null
+++ b/src/test/modules/injection_points/sql/vacuum.sql
@@ -0,0 +1,47 @@
+-- Tests for VACUUM
+
+CREATE EXTENSION injection_points;
+
+SELECT injection_points_set_local();
+SELECT injection_points_attach('vacuum-index-cleanup-auto', 'notice');
+SELECT injection_points_attach('vacuum-index-cleanup-disabled', 'notice');
+SELECT injection_points_attach('vacuum-index-cleanup-enabled', 'notice');
+SELECT injection_points_attach('vacuum-truncate-auto', 'notice');
+SELECT injection_points_attach('vacuum-truncate-disabled', 'notice');
+SELECT injection_points_attach('vacuum-truncate-enabled', 'notice');
+
+-- Check state of index_cleanup and truncate in VACUUM.
+CREATE TABLE vac_tab_on_toast_off(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=true, toast.vacuum_index_cleanup=false,
+ vacuum_truncate=true, toast.vacuum_truncate=false);
+CREATE TABLE vac_tab_off_toast_on(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=false, toast.vacuum_index_cleanup=true,
+ vacuum_truncate=false, toast.vacuum_truncate=true);
+-- Multiple relations should use their options in isolation.
+VACUUM vac_tab_on_toast_off, vac_tab_off_toast_on;
+
+-- Check "auto" case of index_cleanup and "truncate" controlled by
+-- its GUC.
+CREATE TABLE vac_tab_auto(i int, j text) WITH
+ (autovacuum_enabled=false,
+ vacuum_index_cleanup=auto, toast.vacuum_index_cleanup=auto);
+SET vacuum_truncate = false;
+VACUUM vac_tab_auto;
+SET vacuum_truncate = true;
+VACUUM vac_tab_auto;
+RESET vacuum_truncate;
+
+DROP TABLE vac_tab_auto;
+DROP TABLE vac_tab_on_toast_off;
+DROP TABLE vac_tab_off_toast_on;
+
+-- Cleanup
+SELECT injection_points_detach('vacuum-index-cleanup-auto');
+SELECT injection_points_detach('vacuum-index-cleanup-disabled');
+SELECT injection_points_detach('vacuum-index-cleanup-enabled');
+SELECT injection_points_detach('vacuum-truncate-auto');
+SELECT injection_points_detach('vacuum-truncate-disabled');
+SELECT injection_points_detach('vacuum-truncate-enabled');
+DROP EXTENSION injection_points;
diff --git a/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl b/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl
index 9b062e1c800..5dc1e442d29 100644
--- a/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl
+++ b/src/test/modules/ldap_password_func/t/001_mutated_bindpasswd.pl
@@ -42,7 +42,8 @@ note "setting up PostgreSQL instance";
my $node = PostgreSQL::Test::Cluster->new('node');
$node->init;
-$node->append_conf('postgresql.conf', "log_connections = on\n");
+$node->append_conf('postgresql.conf',
+ "log_connections = 'receipt,authentication,authorization'\n");
$node->append_conf('postgresql.conf',
"shared_preload_libraries = 'ldap_password_func'");
$node->start;
diff --git a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
index 61524bdbd8f..f9678853070 100644
--- a/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
+++ b/src/test/modules/libpq_pipeline/t/001_libpq_pipeline.pl
@@ -53,7 +53,8 @@ for my $testname (@tests)
$node->command_ok(
[
'libpq_pipeline', @extraargs,
- $testname, $node->connstr('postgres') . " max_protocol_version=latest"
+ $testname,
+ $node->connstr('postgres') . " max_protocol_version=latest"
],
"libpq_pipeline $testname");
@@ -76,7 +77,8 @@ for my $testname (@tests)
# test separately that it still works the old protocol version too.
$node->command_ok(
[
- 'libpq_pipeline', 'cancel', $node->connstr('postgres') . " max_protocol_version=3.0"
+ 'libpq_pipeline', 'cancel',
+ $node->connstr('postgres') . " max_protocol_version=3.0"
],
"libpq_pipeline cancel with protocol 3.0");
diff --git a/src/test/modules/oauth_validator/t/001_server.pl b/src/test/modules/oauth_validator/t/001_server.pl
index 4f035417a40..41672ebd5c6 100644
--- a/src/test/modules/oauth_validator/t/001_server.pl
+++ b/src/test/modules/oauth_validator/t/001_server.pl
@@ -45,7 +45,7 @@ if ($ENV{with_python} ne 'yes')
my $node = PostgreSQL::Test::Cluster->new('primary');
$node->init;
-$node->append_conf('postgresql.conf', "log_connections = on\n");
+$node->append_conf('postgresql.conf', "log_connections = all\n");
$node->append_conf('postgresql.conf',
"oauth_validator_libraries = 'validator'\n");
# Needed to allow connect_fails to inspect postmaster log:
@@ -295,6 +295,26 @@ $node->connect_fails(
expected_stderr =>
qr/failed to obtain access token: response is too large/);
+my $nesting_limit = 16;
+$node->connect_ok(
+ connstr(
+ stage => 'device',
+ nested_array => $nesting_limit,
+ nested_object => $nesting_limit),
+ "nested arrays and objects, up to parse limit",
+ expected_stderr =>
+ qr@Visit https://example\.com/ and enter the code: postgresuser@);
+$node->connect_fails(
+ connstr(stage => 'device', nested_array => $nesting_limit + 1),
+ "bad discovery response: overly nested JSON array",
+ expected_stderr =>
+ qr/failed to parse device authorization: JSON is too deeply nested/);
+$node->connect_fails(
+ connstr(stage => 'device', nested_object => $nesting_limit + 1),
+ "bad discovery response: overly nested JSON object",
+ expected_stderr =>
+ qr/failed to parse device authorization: JSON is too deeply nested/);
+
$node->connect_fails(
connstr(stage => 'device', content_type => 'text/plain'),
"bad device authz response: wrong content type",
diff --git a/src/test/modules/oauth_validator/t/002_client.pl b/src/test/modules/oauth_validator/t/002_client.pl
index 21d4acc1926..aac0220d215 100644
--- a/src/test/modules/oauth_validator/t/002_client.pl
+++ b/src/test/modules/oauth_validator/t/002_client.pl
@@ -26,7 +26,7 @@ if (!$ENV{PG_TEST_EXTRA} || $ENV{PG_TEST_EXTRA} !~ /\boauth\b/)
my $node = PostgreSQL::Test::Cluster->new('primary');
$node->init;
-$node->append_conf('postgresql.conf', "log_connections = on\n");
+$node->append_conf('postgresql.conf', "log_connections = all\n");
$node->append_conf('postgresql.conf',
"oauth_validator_libraries = 'validator'\n");
$node->start;
diff --git a/src/test/modules/oauth_validator/t/oauth_server.py b/src/test/modules/oauth_validator/t/oauth_server.py
index 20b3a9506cb..0f8836aadf3 100755
--- a/src/test/modules/oauth_validator/t/oauth_server.py
+++ b/src/test/modules/oauth_validator/t/oauth_server.py
@@ -7,6 +7,7 @@
#
import base64
+import functools
import http.server
import json
import os
@@ -213,14 +214,32 @@ class OAuthHandler(http.server.BaseHTTPRequestHandler):
@property
def _response_padding(self):
"""
- If the huge_response test parameter is set to True, returns a dict
- containing a gigantic string value, which can then be folded into a JSON
- response.
+ Returns a dict with any additional entries that should be folded into a
+ JSON response, as determined by test parameters provided by the client:
+
+ - huge_response: if set to True, the dict will contain a gigantic string
+ value
+
+ - nested_array: if set to nonzero, the dict will contain a deeply nested
+ array so that the top-level object has the given depth
+
+ - nested_object: if set to nonzero, the dict will contain a deeply
+ nested JSON object so that the top-level object has the given depth
"""
- if not self._get_param("huge_response", False):
- return dict()
+ ret = dict()
+
+ if self._get_param("huge_response", False):
+ ret["_pad_"] = "x" * 1024 * 1024
+
+ depth = self._get_param("nested_array", 0)
+ if depth:
+ ret["_arr_"] = functools.reduce(lambda x, _: [x], range(depth))
+
+ depth = self._get_param("nested_object", 0)
+ if depth:
+ ret["_obj_"] = functools.reduce(lambda x, _: {"": x}, range(depth))
- return {"_pad_": "x" * 1024 * 1024}
+ return ret
@property
def _access_token(self):
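
The helper above fabricates JSON nested to an arbitrary depth so the tests can probe the client's parser limit (16 levels here). The guard being probed is the usual one in a recursive-descent parser: count container depth and bail out once it crosses a cap. A generic, hypothetical sketch of such a guard (names invented; the real limit and error text live in libpq's JSON support):

    /* Hypothetical depth guard for a recursive-descent JSON parser. */
    #define MAX_NESTING_DEPTH 16

    typedef enum
    {
        JSON_OK,
        JSON_TOO_DEEP
    } json_err;

    /* Call on every '{' or '['; pair with a decrement on '}' or ']'. */
    static json_err
    enter_container(int *depth)
    {
        if (++(*depth) > MAX_NESTING_DEPTH)
            return JSON_TOO_DEEP;   /* "JSON is too deeply nested" */
        return JSON_OK;
    }
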
diff --git a/src/test/modules/test_aio/t/001_aio.pl b/src/test/modules/test_aio/t/001_aio.pl
index 4527c70785d..82ffffc058f 100644
--- a/src/test/modules/test_aio/t/001_aio.pl
+++ b/src/test/modules/test_aio/t/001_aio.pl
@@ -1123,7 +1123,8 @@ COMMIT;
{
# Create a corruption and then read the block without waiting for
# completion.
- $psql_a->query(qq(
+ $psql_a->query(
+ qq(
SELECT modify_rel_block('tbl_zero', 1, corrupt_header=>true);
SELECT read_rel_block_ll('tbl_zero', 1, wait_complete=>false, zero_on_error=>true)
));
@@ -1133,7 +1134,8 @@ SELECT read_rel_block_ll('tbl_zero', 1, wait_complete=>false, zero_on_error=>tru
$psql_b,
"$persistency: test completing read by other session doesn't generate warning",
qq(SELECT count(*) > 0 FROM tbl_zero;),
- qr/^t$/, qr/^$/);
+ qr/^t$/,
+ qr/^$/);
}
# Clean up
@@ -1355,18 +1357,24 @@ SELECT modify_rel_block('tbl_cs_fail', 6, corrupt_checksum=>true);
));
$psql->query_safe($invalidate_sql);
- psql_like($io_method, $psql,
+ psql_like(
+ $io_method,
+ $psql,
"reading block w/ wrong checksum with ignore_checksum_failure=off fails",
- $count_sql, qr/^$/, qr/ERROR: invalid page in block/);
+ $count_sql,
+ qr/^$/,
+ qr/ERROR: invalid page in block/);
$psql->query_safe("SET ignore_checksum_failure=on");
$psql->query_safe($invalidate_sql);
- psql_like($io_method, $psql,
- "reading block w/ wrong checksum with ignore_checksum_failure=off succeeds",
- $count_sql,
- qr/^$expect$/,
- qr/WARNING: ignoring (checksum failure|\d checksum failures)/);
+ psql_like(
+ $io_method,
+ $psql,
+ "reading block w/ wrong checksum with ignore_checksum_failure=off succeeds",
+ $count_sql,
+ qr/^$expect$/,
+ qr/WARNING: ignoring (checksum failure|\d checksum failures)/);
# Verify that ignore_checksum_failure=off works in multi-block reads
@@ -1432,19 +1440,22 @@ SELECT read_rel_block_ll('tbl_cs_fail', 1, nblocks=>5, zero_on_error=>true);),
# file.
$node->wait_for_log(qr/LOG: ignoring checksum failure in block 2/,
- $log_location);
+ $log_location);
ok(1, "$io_method: found information about checksum failure in block 2");
- $node->wait_for_log(qr/LOG: invalid page in block 3 of relation base.*; zeroing out page/,
- $log_location);
+ $node->wait_for_log(
+ qr/LOG: invalid page in block 3 of relation base.*; zeroing out page/,
+ $log_location);
ok(1, "$io_method: found information about invalid page in block 3");
- $node->wait_for_log(qr/LOG: invalid page in block 4 of relation base.*; zeroing out page/,
- $log_location);
+ $node->wait_for_log(
+ qr/LOG: invalid page in block 4 of relation base.*; zeroing out page/,
+ $log_location);
ok(1, "$io_method: found information about checksum failure in block 4");
- $node->wait_for_log(qr/LOG: invalid page in block 5 of relation base.*; zeroing out page/,
- $log_location);
+ $node->wait_for_log(
+ qr/LOG: invalid page in block 5 of relation base.*; zeroing out page/,
+ $log_location);
ok(1, "$io_method: found information about checksum failure in block 5");
@@ -1462,8 +1473,7 @@ SELECT modify_rel_block('tbl_cs_fail', 3, corrupt_checksum=>true, corrupt_header
qq(
SELECT read_rel_block_ll('tbl_cs_fail', 3, nblocks=>1, zero_on_error=>false);),
qr/^$/,
- qr/^psql:<stdin>:\d+: ERROR: invalid page in block 3 of relation/
- );
+ qr/^psql:<stdin>:\d+: ERROR: invalid page in block 3 of relation/);
psql_like(
$io_method,
diff --git a/src/test/modules/test_aio/test_aio.c b/src/test/modules/test_aio/test_aio.c
index 5cdfb89210b..c55cf6c0aac 100644
--- a/src/test/modules/test_aio/test_aio.c
+++ b/src/test/modules/test_aio/test_aio.c
@@ -42,9 +42,9 @@ typedef struct InjIoErrorState
bool short_read_result_set;
int short_read_result;
-} InjIoErrorState;
+} InjIoErrorState;
-static InjIoErrorState * inj_io_error_state;
+static InjIoErrorState *inj_io_error_state;
/* Shared memory init callbacks */
static shmem_request_hook_type prev_shmem_request_hook = NULL;
diff --git a/src/test/modules/test_dsm_registry/test_dsm_registry.c b/src/test/modules/test_dsm_registry/test_dsm_registry.c
index 462a80f8790..96a890be228 100644
--- a/src/test/modules/test_dsm_registry/test_dsm_registry.c
+++ b/src/test/modules/test_dsm_registry/test_dsm_registry.c
@@ -54,7 +54,7 @@ set_val_in_shmem(PG_FUNCTION_ARGS)
tdr_attach_shmem();
LWLockAcquire(&tdr_state->lck, LW_EXCLUSIVE);
- tdr_state->val = PG_GETARG_UINT32(0);
+ tdr_state->val = PG_GETARG_INT32(0);
LWLockRelease(&tdr_state->lck);
PG_RETURN_VOID();
@@ -72,5 +72,5 @@ get_val_in_shmem(PG_FUNCTION_ARGS)
ret = tdr_state->val;
LWLockRelease(&tdr_state->lck);
- PG_RETURN_UINT32(ret);
+ PG_RETURN_INT32(ret);
}
diff --git a/src/test/modules/test_shm_mq/worker.c b/src/test/modules/test_shm_mq/worker.c
index 96cd304dbbc..c1d321b69a4 100644
--- a/src/test/modules/test_shm_mq/worker.c
+++ b/src/test/modules/test_shm_mq/worker.c
@@ -77,7 +77,7 @@ test_shm_mq_main(Datum main_arg)
* exit, which is fine. If there were a ResourceOwner, it would acquire
* ownership of the mapping, but we have no need for that.
*/
- seg = dsm_attach(DatumGetInt32(main_arg));
+ seg = dsm_attach(DatumGetUInt32(main_arg));
if (seg == NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
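
This fix and the test_dsm_registry one just above apply the same rule: the PG_GETARG_*/PG_RETURN_* and DatumGet*/*GetDatum macros must match the declared type exactly. test_dsm_registry's functions take and return int4, so the int32 macros are the right ones, while a dsm_handle is a uint32 and crosses the Datum boundary with the unsigned macros. A short sketch of the convention (add_one is a hypothetical example function):

    #include "postgres.h"
    #include "fmgr.h"
    #include "storage/dsm.h"

    /* SQL: CREATE FUNCTION add_one(int) RETURNS int ... (hypothetical) */
    PG_FUNCTION_INFO_V1(add_one);

    Datum
    add_one(PG_FUNCTION_ARGS)
    {
        int32 val = PG_GETARG_INT32(0);     /* int4 argument -> INT32 */

        PG_RETURN_INT32(val + 1);           /* int4 result -> INT32 */
    }

    /* A dsm_handle is a uint32, so it travels through a bgworker's
     * Datum main_arg with the unsigned macros on both sides. */
    static dsm_segment *
    attach_from_datum(Datum main_arg)
    {
        return dsm_attach(DatumGetUInt32(main_arg));
    }
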
diff --git a/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
index 609275e2c26..1725fe2f948 100644
--- a/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
+++ b/src/test/perl/PostgreSQL/Test/AdjustUpgrade.pm
@@ -538,6 +538,7 @@ my @_unused_view_qualifiers = (
{ obj => 'VIEW public.limit_thousand_v_2', qual => 'onek' },
{ obj => 'VIEW public.limit_thousand_v_3', qual => 'onek' },
{ obj => 'VIEW public.limit_thousand_v_4', qual => 'onek' },
+ { obj => 'VIEW public.limit_thousand_v_5', qual => 'onek' },
# Since 14
{ obj => 'MATERIALIZED VIEW public.compressmv', qual => 'cmdata1' });
diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm
index 1c11750ac1d..49b2c86b29c 100644
--- a/src/test/perl/PostgreSQL/Test/Cluster.pm
+++ b/src/test/perl/PostgreSQL/Test/Cluster.pm
@@ -684,7 +684,7 @@ sub init
print $conf "\n# Added by PostgreSQL::Test::Cluster.pm\n";
print $conf "fsync = off\n";
print $conf "restart_after_crash = off\n";
- print $conf "log_line_prefix = '%m [%p] %q%a '\n";
+ print $conf "log_line_prefix = '%m %b[%p] %q%a '\n";
print $conf "log_statement = all\n";
print $conf "log_replication_commands = on\n";
print $conf "wal_retrieve_retry_interval = '500ms'\n";
diff --git a/src/test/postmaster/t/002_connection_limits.pl b/src/test/postmaster/t/002_connection_limits.pl
index 325a00efd47..4a7fb16261f 100644
--- a/src/test/postmaster/t/002_connection_limits.pl
+++ b/src/test/postmaster/t/002_connection_limits.pl
@@ -20,7 +20,8 @@ $node->init(
$node->append_conf('postgresql.conf', "max_connections = 6");
$node->append_conf('postgresql.conf', "reserved_connections = 2");
$node->append_conf('postgresql.conf', "superuser_reserved_connections = 1");
-$node->append_conf('postgresql.conf', "log_connections = on");
+$node->append_conf('postgresql.conf',
+ "log_connections = 'receipt,authentication,authorization'");
$node->append_conf('postgresql.conf', "log_min_messages=debug2");
$node->start;
@@ -67,7 +68,8 @@ sub connect_fails_wait
my $log_location = -s $node->logfile;
$node->connect_fails($connstr, $test_name, %params);
- $node->wait_for_log(qr/DEBUG: (00000: )?client backend.*exited with exit code 1/,
+ $node->wait_for_log(
+ qr/DEBUG: (00000: )?client backend.*exited with exit code 1/,
$log_location);
ok(1, "$test_name: client backend process exited");
}
diff --git a/src/test/postmaster/t/003_start_stop.pl b/src/test/postmaster/t/003_start_stop.pl
index 4dc394139d9..58e7ba6cc42 100644
--- a/src/test/postmaster/t/003_start_stop.pl
+++ b/src/test/postmaster/t/003_start_stop.pl
@@ -33,7 +33,8 @@ $node->append_conf('postgresql.conf', "max_connections = 5");
$node->append_conf('postgresql.conf', "max_wal_senders = 0");
$node->append_conf('postgresql.conf', "autovacuum_max_workers = 1");
$node->append_conf('postgresql.conf', "max_worker_processes = 1");
-$node->append_conf('postgresql.conf', "log_connections = on");
+$node->append_conf('postgresql.conf',
+ "log_connections = 'receipt,authentication,authorization'");
$node->append_conf('postgresql.conf', "log_min_messages = debug2");
$node->append_conf('postgresql.conf',
"authentication_timeout = '$authentication_timeout s'");
diff --git a/src/test/recovery/meson.build b/src/test/recovery/meson.build
index cb983766c67..6e78ff1a030 100644
--- a/src/test/recovery/meson.build
+++ b/src/test/recovery/meson.build
@@ -54,6 +54,8 @@ tests += {
't/043_no_contrecord_switch.pl',
't/044_invalidate_inactive_slots.pl',
't/045_archive_restartpoint.pl',
+ 't/047_checkpoint_physical_slot.pl',
+ 't/048_vacuum_horizon_floor.pl'
],
},
}
diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl
index 4e60806563f..debfa635c36 100644
--- a/src/test/recovery/t/013_crash_restart.pl
+++ b/src/test/recovery/t/013_crash_restart.pl
@@ -27,7 +27,7 @@ $node->start();
$node->safe_psql(
'postgres',
q[ALTER SYSTEM SET restart_after_crash = 1;
- ALTER SYSTEM SET log_connections = 1;
+ ALTER SYSTEM SET log_connections = receipt;
SELECT pg_reload_conf();]);
# Run psql, keeping session alive, so we have an alive backend to kill.
diff --git a/src/test/recovery/t/022_crash_temp_files.pl b/src/test/recovery/t/022_crash_temp_files.pl
index 50def031c96..0b68860bd3e 100644
--- a/src/test/recovery/t/022_crash_temp_files.pl
+++ b/src/test/recovery/t/022_crash_temp_files.pl
@@ -26,7 +26,7 @@ $node->start();
$node->safe_psql(
'postgres',
q[ALTER SYSTEM SET remove_temp_files_after_crash = on;
- ALTER SYSTEM SET log_connections = 1;
+ ALTER SYSTEM SET log_connections = receipt;
ALTER SYSTEM SET work_mem = '64kB';
ALTER SYSTEM SET restart_after_crash = on;
SELECT pg_reload_conf();]);
diff --git a/src/test/recovery/t/032_relfilenode_reuse.pl b/src/test/recovery/t/032_relfilenode_reuse.pl
index 492ef115ba4..0c44883cc34 100644
--- a/src/test/recovery/t/032_relfilenode_reuse.pl
+++ b/src/test/recovery/t/032_relfilenode_reuse.pl
@@ -14,7 +14,7 @@ $node_primary->init(allows_streaming => 1);
$node_primary->append_conf(
'postgresql.conf', q[
allow_in_place_tablespaces = true
-log_connections=on
+log_connections=receipt
# to avoid "repairing" corruption
full_page_writes=off
log_min_messages=debug2
diff --git a/src/test/recovery/t/037_invalid_database.pl b/src/test/recovery/t/037_invalid_database.pl
index bdf39397397..dc52c55c7af 100644
--- a/src/test/recovery/t/037_invalid_database.pl
+++ b/src/test/recovery/t/037_invalid_database.pl
@@ -15,7 +15,7 @@ $node->append_conf(
autovacuum = off
max_prepared_transactions=5
log_min_duration_statement=0
-log_connections=on
+log_connections=receipt
log_disconnections=on
));
diff --git a/src/test/recovery/t/040_standby_failover_slots_sync.pl b/src/test/recovery/t/040_standby_failover_slots_sync.pl
index 9c8b49e942d..2c61c51e914 100644
--- a/src/test/recovery/t/040_standby_failover_slots_sync.pl
+++ b/src/test/recovery/t/040_standby_failover_slots_sync.pl
@@ -941,8 +941,7 @@ is( $standby1->safe_psql(
'synced slot retained on the new primary');
# Commit the prepared transaction
-$standby1->safe_psql('postgres',
- "COMMIT PREPARED 'test_twophase_slotsync';");
+$standby1->safe_psql('postgres', "COMMIT PREPARED 'test_twophase_slotsync';");
$standby1->wait_for_catchup('regress_mysub1');
# Confirm that the prepared transaction is replicated to the subscriber
diff --git a/src/test/recovery/t/047_checkpoint_physical_slot.pl b/src/test/recovery/t/047_checkpoint_physical_slot.pl
new file mode 100644
index 00000000000..a1332b5d44c
--- /dev/null
+++ b/src/test/recovery/t/047_checkpoint_physical_slot.pl
@@ -0,0 +1,132 @@
+# Copyright (c) 2025, PostgreSQL Global Development Group
+#
+# This test covers the case where a physical slot is advanced during a
+# checkpoint. It checks that the physical slot's restart_lsn still refers
+# to an existing WAL segment after an immediate restart.
+#
+use strict;
+use warnings FATAL => 'all';
+
+use PostgreSQL::Test::Cluster;
+use PostgreSQL::Test::Utils;
+
+use Test::More;
+
+if ($ENV{enable_injection_points} ne 'yes')
+{
+ plan skip_all => 'Injection points not supported by this build';
+}
+
+my ($node, $result);
+
+$node = PostgreSQL::Test::Cluster->new('mike');
+$node->init;
+$node->append_conf('postgresql.conf', "wal_level = 'replica'");
+$node->start;
+
+# Check if the extension injection_points is available, as it may be
+# possible that this script is run with installcheck, where the module
+# would not be installed by default.
+if (!$node->check_extension('injection_points'))
+{
+ plan skip_all => 'Extension injection_points not installed';
+}
+
+$node->safe_psql('postgres', q(CREATE EXTENSION injection_points));
+
+# Create a physical replication slot.
+$node->safe_psql('postgres',
+ q{select pg_create_physical_replication_slot('slot_physical', true)});
+
+# Advance slot to the current position, just to have everything "valid".
+$node->safe_psql('postgres',
+ q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Run checkpoint to flush current state to disk and set a baseline.
+$node->safe_psql('postgres', q{checkpoint});
+
+# Advance the WAL by about 20 segments.
+$node->advance_wal(20);
+
+# Advance slot to the current position, just to have everything "valid".
+$node->safe_psql('postgres',
+ q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Run another checkpoint to set a new restart LSN.
+$node->safe_psql('postgres', q{checkpoint});
+
+# Advance the WAL by about another 20 segments.
+$node->advance_wal(20);
+
+my $restart_lsn_init = $node->safe_psql('postgres',
+ q{select restart_lsn from pg_replication_slots where slot_name = 'slot_physical'}
+);
+chomp($restart_lsn_init);
+note("restart lsn before checkpoint: $restart_lsn_init");
+
+# Run another checkpoint, this time in the background, and make it wait
+# on the injection point) so that the checkpoint stops right before
+# removing old WAL segments.
+note('starting checkpoint');
+
+my $checkpoint = $node->background_psql('postgres');
+$checkpoint->query_safe(
+ q{select injection_points_attach('checkpoint-before-old-wal-removal','wait')}
+);
+$checkpoint->query_until(
+ qr/starting_checkpoint/,
+ q(\echo starting_checkpoint
+checkpoint;
+\q
+));
+
+# Wait until the checkpoint stops right before removing WAL segments.
+note('waiting for injection_point');
+$node->wait_for_event('checkpointer', 'checkpoint-before-old-wal-removal');
+note('injection_point is reached');
+
+# OK, we're in the right situation: time to advance the physical slot (which
+# recalculates the required LSN) and then unblock the checkpoint, which
+# removes the WAL still needed by the physical slot.
+$node->safe_psql('postgres',
+ q{select pg_replication_slot_advance('slot_physical', pg_current_wal_lsn())}
+);
+
+# Continue the checkpoint.
+$node->safe_psql('postgres',
+ q{select injection_points_wakeup('checkpoint-before-old-wal-removal')});
+
+my $restart_lsn_old = $node->safe_psql('postgres',
+ q{select restart_lsn from pg_replication_slots where slot_name = 'slot_physical'}
+);
+chomp($restart_lsn_old);
+note("restart lsn before stop: $restart_lsn_old");
+
+# Abruptly stop the server (1 second should be enough for the checkpoint
+# to finish).
+$node->stop('immediate');
+
+$node->start;
+
+# Get the restart_lsn of the slot right after restarting.
+my $restart_lsn = $node->safe_psql('postgres',
+ q{select restart_lsn from pg_replication_slots where slot_name = 'slot_physical'}
+);
+chomp($restart_lsn);
+note("restart lsn: $restart_lsn");
+
+# Get the WAL segment name for the slot's restart_lsn.
+my $restart_lsn_segment = $node->safe_psql('postgres',
+ "SELECT pg_walfile_name('$restart_lsn'::pg_lsn)");
+chomp($restart_lsn_segment);
+
+# Check that the required WAL segment exists.
+note("required by slot segment name: $restart_lsn_segment");
+my $datadir = $node->data_dir;
+ok( -f "$datadir/pg_wal/$restart_lsn_segment",
+ "WAL segment $restart_lsn_segment for physical slot's restart_lsn $restart_lsn exists"
+);
+
+done_testing();
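
The test drives a server-side injection point: the checkpointer declares a named point, the test attaches a 'wait' action to it, detects via wait_for_event() that the checkpointer has reached it, advances the slot, and then wakes the point. On the C side the point is a single macro call; a sketch (hedged — check src/include/utils/injection_point.h for the exact macro signature in this branch, since a runtime-argument parameter was added during this cycle):

    #include "utils/injection_point.h"

    /* In the checkpointer, just before old WAL segments are removed: */
    INJECTION_POINT("checkpoint-before-old-wal-removal", NULL);
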
diff --git a/src/test/recovery/t/048_vacuum_horizon_floor.pl b/src/test/recovery/t/048_vacuum_horizon_floor.pl
new file mode 100644
index 00000000000..e56fce59d58
--- /dev/null
+++ b/src/test/recovery/t/048_vacuum_horizon_floor.pl
@@ -0,0 +1,288 @@
+use strict;
+use warnings;
+use PostgreSQL::Test::Cluster;
+use Test::More;
+
+# Test that vacuum prunes away all dead tuples killed before OldestXmin
+#
+# This test creates a table on a primary, updates the table to generate dead
+# tuples for vacuum, and then, during the vacuum, uses the replica to force
+# GlobalVisState->maybe_needed on the primary to move backwards and precede
+# the value of OldestXmin set at the beginning of vacuuming the table.
+
+# Set up nodes
+my $node_primary = PostgreSQL::Test::Cluster->new('primary');
+$node_primary->init(allows_streaming => 'physical');
+
+# io_combine_limit is set to 1 to avoid pinning more than one buffer at a time
+# to ensure test determinism.
+$node_primary->append_conf(
+ 'postgresql.conf', qq[
+hot_standby_feedback = on
+autovacuum = off
+log_min_messages = INFO
+maintenance_work_mem = 64
+io_combine_limit = 1
+]);
+$node_primary->start;
+
+my $node_replica = PostgreSQL::Test::Cluster->new('standby');
+
+$node_primary->backup('my_backup');
+$node_replica->init_from_backup($node_primary, 'my_backup',
+ has_streaming => 1);
+
+$node_replica->start;
+
+my $test_db = "test_db";
+$node_primary->safe_psql('postgres', "CREATE DATABASE $test_db");
+
+# Save the original connection info for later use
+my $orig_conninfo = $node_primary->connstr();
+
+my $table1 = "vac_horizon_floor_table";
+
+# Long-running Primary Session A
+my $psql_primaryA =
+ $node_primary->background_psql($test_db, on_error_stop => 1);
+
+# Long-running Primary Session B
+my $psql_primaryB =
+ $node_primary->background_psql($test_db, on_error_stop => 1);
+
+# Our test relies on two rounds of index vacuuming for reasons elaborated
+# later. To trigger two rounds of index vacuuming, we must fill up the
+# TIDStore with dead items partway through a vacuum of the table. The number
+# of rows is just enough to ensure we exceed maintenance_work_mem on all
+# supported platforms, while keeping test runtime as short as we can.
+my $nrows = 2000;
+
+# Because vacuum's first pass, pruning, is where we use the GlobalVisState to
+# check tuple visibility, GlobalVisState->maybe_needed must move backwards
+# during pruning before checking the visibility for a tuple which would have
+# been considered HEAPTUPLE_DEAD prior to maybe_needed moving backwards but
+# HEAPTUPLE_RECENTLY_DEAD compared to the new, older value of maybe_needed.
+#
+# We must not only force the horizon on the primary to move backwards but also
+# force the vacuuming backend's GlobalVisState to be updated. GlobalVisState
+# is forced to update during index vacuuming.
+#
+# _bt_pendingfsm_finalize() calls GetOldestNonRemovableTransactionId() at the
+# end of a round of index vacuuming, updating the backend's GlobalVisState
+# and, in our case, moving maybe_needed backwards.
+#
+# Then vacuum's first (pruning) pass will continue and pruning will find our
+# later inserted and updated tuple HEAPTUPLE_RECENTLY_DEAD when compared to
+# maybe_needed but HEAPTUPLE_DEAD when compared to OldestXmin.
+#
+# Thus, we must force at least two rounds of index vacuuming to ensure that
+# some tuple visibility checks will happen after a round of index vacuuming.
+# To accomplish this, we set maintenance_work_mem to its minimum value and
+# insert and delete enough rows that we force at least one round of index
+# vacuuming before getting to a dead tuple which was killed after the standby
+# is disconnected.
+$node_primary->safe_psql(
+ $test_db, qq[
+ CREATE TABLE ${table1}(col1 int)
+ WITH (autovacuum_enabled=false, fillfactor=10);
+ INSERT INTO $table1 VALUES(7);
+ INSERT INTO $table1 SELECT generate_series(1, $nrows) % 3;
+ CREATE INDEX on ${table1}(col1);
+ DELETE FROM $table1 WHERE col1 = 0;
+ INSERT INTO $table1 VALUES(7);
+]);
+
+# We will later move the primary forward while the standby is disconnected.
+# For now, however, there is no reason not to wait for the standby to catch
+# up.
+my $primary_lsn = $node_primary->lsn('flush');
+$node_primary->wait_for_catchup($node_replica, 'replay', $primary_lsn);
+
+# Test that the WAL receiver is up and running.
+$node_replica->poll_query_until(
+ $test_db, qq[
+ SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 't');
+
+# Set primary_conninfo to something invalid on the replica and reload the
+# config. Once the config is reloaded, the startup process will force the WAL
+# receiver to restart and it will be unable to reconnect because of the
+# invalid connection information.
+$node_replica->safe_psql(
+ $test_db, qq[
+ ALTER SYSTEM SET primary_conninfo = '';
+ SELECT pg_reload_conf();
+ ]);
+
+# Wait until the WAL receiver has shut down and been unable to start up again.
+$node_replica->poll_query_until(
+ $test_db, qq[
+ SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 'f');
+
+# Now insert and update a tuple which will be visible to the vacuum on the
+# primary but which will have xmax newer than the oldest xmin on the standby
+# that was recently disconnected.
+my $res = $psql_primaryA->query_safe(
+ qq[
+ INSERT INTO $table1 VALUES (99);
+ UPDATE $table1 SET col1 = 100 WHERE col1 = 99;
+ SELECT 'after_update';
+ ]
+);
+
+# Make sure the UPDATE finished
+like($res, qr/^after_update$/m, "UPDATE occurred on primary session A");
+
+# Open a cursor on the primary whose pin will keep VACUUM from getting a
+# cleanup lock on the first page of the relation. We want VACUUM to be able to
+# start, calculate initial values for OldestXmin and GlobalVisState and then
+# be unable to proceed with pruning our dead tuples. This will allow us to
+# reconnect the standby and push the horizon back before we start actual
+# pruning and vacuuming.
+my $primary_cursor1 = "vac_horizon_floor_cursor1";
+
+# The first value inserted into the table was a 7, so FETCH FORWARD should
+# return a 7. That's how we know the cursor has a pin.
+# Disable index scans so the cursor pins heap pages and not index pages.
+$res = $psql_primaryB->query_safe(
+ qq[
+ BEGIN;
+ SET enable_bitmapscan = off;
+ SET enable_indexscan = off;
+ SET enable_indexonlyscan = off;
+ DECLARE $primary_cursor1 CURSOR FOR SELECT * FROM $table1 WHERE col1 = 7;
+ FETCH $primary_cursor1;
+ ]
+);
+
+is($res, 7, qq[Cursor query returned $res. Expected value 7.]);
+
+# Get the PID of the session which will run the VACUUM FREEZE so that we can
+# use it to filter pg_stat_activity later.
+my $vacuum_pid = $psql_primaryA->query_safe("SELECT pg_backend_pid();");
+
+# Now start a VACUUM FREEZE on the primary. It will call vacuum_get_cutoffs()
+# and establish values of OldestXmin and GlobalVisState which are newer than
+# all of our dead tuples. Then it will be unable to get a cleanup lock to
+# start pruning, so it will hang.
+#
+# We use VACUUM FREEZE because it will wait for a cleanup lock instead of
+# skipping the page pinned by the cursor. Note that works because the target
+# tuple's xmax precedes OldestXmin which ensures that lazy_scan_noprune() will
+# return false and we will wait for the cleanup lock.
+#
+# Disable any prefetching, parallelism, or other concurrent I/O by vacuum. The
+# pages of the heap must be processed in order by a single worker to ensure
+# test stability (PARALLEL 0 shouldn't be necessary but guards against the
+# possibility of parallel heap vacuuming).
+$psql_primaryA->{stdin} .= qq[
+ SET maintenance_io_concurrency = 0;
+ VACUUM (VERBOSE, FREEZE, PARALLEL 0) $table1;
+ \\echo VACUUM
+ ];
+
+# Make sure the VACUUM command makes it to the server.
+$psql_primaryA->{run}->pump_nb();
+
+# Make sure that the VACUUM has already called vacuum_get_cutoffs() and is
+# just waiting on the lock to start vacuuming. We don't want the standby to
+# re-establish a connection to the primary and push the horizon back until
+# we've saved initial values in GlobalVisState and calculated OldestXmin.
+$node_primary->poll_query_until(
+ $test_db,
+ qq[
+ SELECT count(*) >= 1 FROM pg_stat_activity
+ WHERE pid = $vacuum_pid
+ AND wait_event = 'BufferPin';
+ ],
+ 't');
+
+# Ensure the WAL receiver is still not active on the replica.
+$node_replica->poll_query_until(
+ $test_db, qq[
+ SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 'f');
+
+# Allow the WAL receiver connection to re-establish.
+$node_replica->safe_psql(
+ $test_db, qq[
+ ALTER SYSTEM SET primary_conninfo = '$orig_conninfo';
+ SELECT pg_reload_conf();
+ ]);
+
+# Ensure the new WAL receiver has connected.
+$node_replica->poll_query_until(
+ $test_db, qq[
+ SELECT EXISTS (SELECT * FROM pg_stat_wal_receiver);], 't');
+
+# Once the WAL sender is shown on the primary, the replica should have
+# connected with the primary and pushed the horizon backward. Primary Session
+# A won't see that until the VACUUM FREEZE proceeds and does its first round
+# of index vacuuming.
+$node_primary->poll_query_until(
+ $test_db, qq[
+ SELECT EXISTS (SELECT * FROM pg_stat_replication);], 't');
+
+# Move the cursor forward to the next 7. We inserted the 7 much later, so
+# advancing the cursor should allow vacuum to proceed vacuuming most pages of
+# the relation. Because we set maintenance_work_mem sufficiently low, we
+# expect that a round of index vacuuming has happened and that the vacuum is
+# now waiting for the cursor to release its pin on the last page of the
+# relation.
+$res = $psql_primaryB->query_safe("FETCH $primary_cursor1");
+is($res, 7,
+ qq[Cursor query returned $res from second fetch. Expected value 7.]);
+
+# Prevent the test from incorrectly passing by confirming that we did indeed
+# do a pass of index vacuuming.
+$node_primary->poll_query_until(
+ $test_db, qq[
+ SELECT index_vacuum_count > 0
+ FROM pg_stat_progress_vacuum
+ WHERE datname='$test_db' AND relid::regclass = '$table1'::regclass;
+ ], 't');
+
+# Commit the transaction with the open cursor so that the VACUUM can finish.
+$psql_primaryB->query_until(
+ qr/^commit$/m,
+ qq[
+ COMMIT;
+ \\echo commit
+ ]
+);
+
+# VACUUM proceeds with pruning and does a visibility check on each tuple. In
+# older versions of Postgres, pruning found our final dead tuple
+# non-removable (HEAPTUPLE_RECENTLY_DEAD) since its xmax is after the new
+# value of maybe_needed. Then heap_prepare_freeze_tuple() would decide the
+# tuple xmax should be frozen because it precedes OldestXmin. Vacuum would
+# then error out in heap_pre_freeze_checks() with "cannot freeze committed
+# xmax". This was fixed by changing pruning to find all
+# HEAPTUPLE_RECENTLY_DEAD tuples with xmaxes preceding OldestXmin
+# HEAPTUPLE_DEAD and removing them.
+
+# With the fix, VACUUM should finish successfully, incrementing the table
+# vacuum_count.
+$node_primary->poll_query_until(
+ $test_db,
+ qq[
+ SELECT vacuum_count > 0
+ FROM pg_stat_all_tables WHERE relname = '${table1}';
+ ]
+ , 't');
+
+$primary_lsn = $node_primary->lsn('flush');
+
+# Make sure something causes us to flush
+$node_primary->safe_psql($test_db, "INSERT INTO $table1 VALUES (1);");
+
+# Nothing on the replica should cause a recovery conflict, so this should
+# finish successfully.
+$node_primary->wait_for_catchup($node_replica, 'replay', $primary_lsn);
+
+## Shut down psqls
+$psql_primaryA->quit;
+$psql_primaryB->quit;
+
+$node_replica->stop();
+$node_primary->stop();
+
+done_testing();
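
The long comment above states the fix in visibility-code terms; schematically, pruning now clamps its verdict against the OldestXmin this VACUUM established at its start instead of trusting a GlobalVisState whose maybe_needed may have moved backwards. A simplified, hypothetical sketch of that decision (not the actual heap-pruning code):

    #include "postgres.h"
    #include "access/heapam.h"      /* HTSV_Result, HEAPTUPLE_* */
    #include "access/transam.h"     /* TransactionIdPrecedes() */

    static bool
    tuple_is_removable(HTSV_Result status, TransactionId tuple_xmax,
                       TransactionId oldest_xmin)
    {
        if (status == HEAPTUPLE_DEAD)
            return true;

        /*
         * maybe_needed can move backwards while VACUUM runs (e.g. when a
         * standby with hot_standby_feedback reconnects), so a tuple can
         * look HEAPTUPLE_RECENTLY_DEAD to GlobalVisState even though its
         * xmax precedes the OldestXmin this VACUUM committed to.  Treat
         * such tuples as dead so they are pruned rather than frozen.
         */
        if (status == HEAPTUPLE_RECENTLY_DEAD &&
            TransactionIdPrecedes(tuple_xmax, oldest_xmin))
            return true;

        return false;
    }
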
diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out
index 476266e3f4b..750efc042d8 100644
--- a/src/test/regress/expected/alter_table.out
+++ b/src/test/regress/expected/alter_table.out
@@ -4745,6 +4745,13 @@ alter table attbl alter column p1 set data type bigint;
alter table atref alter column c1 set data type bigint;
drop table attbl, atref;
/* End test case for bug #17409 */
+/* Test case for bug #18970 */
+create table attbl(a int);
+create table atref(b attbl check ((b).a is not null));
+alter table attbl alter column a type numeric; -- someday this should work
+ERROR: cannot alter table "attbl" because column "atref.b" uses its row type
+drop table attbl, atref;
+/* End test case for bug #18970 */
-- Test that ALTER TABLE rewrite preserves a clustered index
-- for normal indexes and indexes on constraints.
create table alttype_cluster (a int);
diff --git a/src/test/regress/expected/constraints.out b/src/test/regress/expected/constraints.out
index ad6aaab7385..b5592617d97 100644
--- a/src/test/regress/expected/constraints.out
+++ b/src/test/regress/expected/constraints.out
@@ -1659,6 +1659,8 @@ EXECUTE get_nnconstraint_info('{constr_parent3, constr_child3}');
constr_parent3 | constr_parent3_a_not_null | t | t | 0
(2 rows)
+COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_parent2 IS 'this constraint is invalid';
+COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_child2 IS 'this constraint is valid';
DEALLOCATE get_nnconstraint_info;
-- end NOT NULL NOT VALID
-- Comments
diff --git a/src/test/regress/expected/create_table_like.out b/src/test/regress/expected/create_table_like.out
index bf34289e984..29a779c2e90 100644
--- a/src/test/regress/expected/create_table_like.out
+++ b/src/test/regress/expected/create_table_like.out
@@ -332,9 +332,10 @@ COMMENT ON CONSTRAINT ctlt1_a_check ON ctlt1 IS 't1_a_check';
COMMENT ON INDEX ctlt1_pkey IS 'index pkey';
COMMENT ON INDEX ctlt1_b_key IS 'index b_key';
ALTER TABLE ctlt1 ALTER COLUMN a SET STORAGE MAIN;
-CREATE TABLE ctlt2 (c text);
+CREATE TABLE ctlt2 (c text NOT NULL);
ALTER TABLE ctlt2 ALTER COLUMN c SET STORAGE EXTERNAL;
COMMENT ON COLUMN ctlt2.c IS 'C';
+COMMENT ON CONSTRAINT ctlt2_c_not_null ON ctlt2 IS 't2_c_not_null';
CREATE TABLE ctlt3 (a text CHECK (length(a) < 5), c text CHECK (length(c) < 7));
ALTER TABLE ctlt3 ALTER COLUMN c SET STORAGE EXTERNAL;
ALTER TABLE ctlt3 ALTER COLUMN a SET STORAGE MAIN;
@@ -351,9 +352,10 @@ CREATE TABLE ctlt12_storage (LIKE ctlt1 INCLUDING STORAGE, LIKE ctlt2 INCLUDING
--------+------+-----------+----------+---------+----------+--------------+-------------
a | text | | not null | | main | |
b | text | | | | extended | |
- c | text | | | | external | |
+ c | text | | not null | | external | |
Not-null constraints:
"ctlt1_a_not_null" NOT NULL "a"
+ "ctlt2_c_not_null" NOT NULL "c"
CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDING COMMENTS);
\d+ ctlt12_comments
@@ -362,9 +364,16 @@ CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDIN
--------+------+-----------+----------+---------+----------+--------------+-------------
a | text | | not null | | extended | | A
b | text | | | | extended | | B
- c | text | | | | extended | | C
+ c | text | | not null | | extended | | C
Not-null constraints:
"ctlt1_a_not_null" NOT NULL "a"
+ "ctlt2_c_not_null" NOT NULL "c"
+
+SELECT conname, description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt12_comments'::regclass;
+ conname | description
+------------------+---------------
+ ctlt2_c_not_null | t2_c_not_null
+(1 row)
CREATE TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (ctlt1);
NOTICE: merging column "a" with inherited definition
@@ -529,7 +538,9 @@ NOTICE: drop cascades to table inhe
-- LIKE must respect NO INHERIT property of constraints
CREATE TABLE noinh_con_copy (a int CHECK (a > 0) NO INHERIT, b int not null,
c int not null no inherit);
-CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS);
+COMMENT ON CONSTRAINT noinh_con_copy_b_not_null ON noinh_con_copy IS 'not null b';
+COMMENT ON CONSTRAINT noinh_con_copy_c_not_null ON noinh_con_copy IS 'not null c no inherit';
+CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS INCLUDING COMMENTS);
\d+ noinh_con_copy1
Table "public.noinh_con_copy1"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
@@ -543,6 +554,17 @@ Not-null constraints:
"noinh_con_copy_b_not_null" NOT NULL "b"
"noinh_con_copy_c_not_null" NOT NULL "c" NO INHERIT
+SELECT conname, description
+FROM pg_description, pg_constraint c
+WHERE classoid = 'pg_constraint'::regclass
+AND objoid = c.oid AND c.conrelid = 'noinh_con_copy1'::regclass
+ORDER BY conname COLLATE "C";
+ conname | description
+---------------------------+-----------------------
+ noinh_con_copy_b_not_null | not null b
+ noinh_con_copy_c_not_null | not null c no inherit
+(2 rows)
+
-- fail, as partitioned tables don't allow NO INHERIT constraints
CREATE TABLE noinh_con_copy1_parted (LIKE noinh_con_copy INCLUDING ALL)
PARTITION BY LIST (a);
diff --git a/src/test/regress/expected/foreign_key.out b/src/test/regress/expected/foreign_key.out
index 4f3f280a439..6a8f3959345 100644
--- a/src/test/regress/expected/foreign_key.out
+++ b/src/test/regress/expected/foreign_key.out
@@ -1895,29 +1895,76 @@ WHERE conrelid::regclass::text like 'fk_partitioned_fk%' ORDER BY oid::regclass:
(5 rows)
DROP TABLE fk_partitioned_fk, fk_notpartitioned_pk;
--- NOT VALID foreign key on a non-partitioned table referencing a partitioned table
+-- NOT VALID and NOT ENFORCED foreign key on a non-partitioned table
+-- referencing a partitioned table
CREATE TABLE fk_partitioned_pk (a int, b int, PRIMARY KEY (a, b)) PARTITION BY RANGE (a, b);
CREATE TABLE fk_partitioned_pk_1 PARTITION OF fk_partitioned_pk FOR VALUES FROM (0,0) TO (1000,1000);
+CREATE TABLE fk_partitioned_pk_2 PARTITION OF fk_partitioned_pk FOR VALUES FROM (1000,1000) TO (2000,2000);
CREATE TABLE fk_notpartitioned_fk (b int, a int);
-ALTER TABLE fk_notpartitioned_fk ADD FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
--- Constraint will be invalid.
-SELECT conname, convalidated FROM pg_constraint
+INSERT INTO fk_partitioned_pk VALUES(100,100), (1000,1000);
+INSERT INTO fk_notpartitioned_fk VALUES(100,100), (1000,1000);
+ALTER TABLE fk_notpartitioned_fk ADD CONSTRAINT fk_notpartitioned_fk_a_b_fkey
+ FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
+ALTER TABLE fk_notpartitioned_fk ADD CONSTRAINT fk_notpartitioned_fk_a_b_fkey2
+ FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT ENFORCED;
+-- All constraints will be invalid, and _fkey2 constraints will not be enforced.
+SELECT conname, conenforced, convalidated FROM pg_constraint
WHERE conrelid = 'fk_notpartitioned_fk'::regclass ORDER BY oid::regclass::text;
- conname | convalidated
----------------------------------+--------------
- fk_notpartitioned_fk_a_b_fkey | f
- fk_notpartitioned_fk_a_b_fkey_1 | f
-(2 rows)
+ conname | conenforced | convalidated
+----------------------------------+-------------+--------------
+ fk_notpartitioned_fk_a_b_fkey | t | f
+ fk_notpartitioned_fk_a_b_fkey_1 | t | f
+ fk_notpartitioned_fk_a_b_fkey_2 | t | f
+ fk_notpartitioned_fk_a_b_fkey2 | f | f
+ fk_notpartitioned_fk_a_b_fkey2_1 | f | f
+ fk_notpartitioned_fk_a_b_fkey2_2 | f | f
+(6 rows)
ALTER TABLE fk_notpartitioned_fk VALIDATE CONSTRAINT fk_notpartitioned_fk_a_b_fkey;
--- All constraints are now valid.
-SELECT conname, convalidated FROM pg_constraint
+ALTER TABLE fk_notpartitioned_fk ALTER CONSTRAINT fk_notpartitioned_fk_a_b_fkey2 ENFORCED;
+-- All constraints are now valid and enforced.
+SELECT conname, conenforced, convalidated FROM pg_constraint
WHERE conrelid = 'fk_notpartitioned_fk'::regclass ORDER BY oid::regclass::text;
- conname | convalidated
----------------------------------+--------------
- fk_notpartitioned_fk_a_b_fkey | t
- fk_notpartitioned_fk_a_b_fkey_1 | t
-(2 rows)
+ conname | conenforced | convalidated
+----------------------------------+-------------+--------------
+ fk_notpartitioned_fk_a_b_fkey | t | t
+ fk_notpartitioned_fk_a_b_fkey_1 | t | t
+ fk_notpartitioned_fk_a_b_fkey_2 | t | t
+ fk_notpartitioned_fk_a_b_fkey2 | t | t
+ fk_notpartitioned_fk_a_b_fkey2_1 | t | t
+ fk_notpartitioned_fk_a_b_fkey2_2 | t | t
+(6 rows)
+
+-- test a self-referential FK
+ALTER TABLE fk_partitioned_pk ADD CONSTRAINT selffk FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
+CREATE TABLE fk_partitioned_pk_3 PARTITION OF fk_partitioned_pk FOR VALUES FROM (2000,2000) TO (3000,3000)
+ PARTITION BY RANGE (a);
+CREATE TABLE fk_partitioned_pk_3_1 PARTITION OF fk_partitioned_pk_3 FOR VALUES FROM (2000) TO (2100);
+SELECT conname, conenforced, convalidated FROM pg_constraint
+WHERE conrelid = 'fk_partitioned_pk'::regclass AND contype = 'f'
+ORDER BY oid::regclass::text;
+ conname | conenforced | convalidated
+------------+-------------+--------------
+ selffk | t | f
+ selffk_1 | t | f
+ selffk_2 | t | f
+ selffk_3 | t | f
+ selffk_3_1 | t | f
+(5 rows)
+
+ALTER TABLE fk_partitioned_pk_2 VALIDATE CONSTRAINT selffk;
+ALTER TABLE fk_partitioned_pk VALIDATE CONSTRAINT selffk;
+SELECT conname, conenforced, convalidated FROM pg_constraint
+WHERE conrelid = 'fk_partitioned_pk'::regclass AND contype = 'f'
+ORDER BY oid::regclass::text;
+ conname | conenforced | convalidated
+------------+-------------+--------------
+ selffk | t | t
+ selffk_1 | t | t
+ selffk_2 | t | t
+ selffk_3 | t | t
+ selffk_3_1 | t | t
+(5 rows)
DROP TABLE fk_notpartitioned_fk, fk_partitioned_pk;
-- Test some other exotic foreign key features: MATCH SIMPLE, ON UPDATE/DELETE
diff --git a/src/test/regress/expected/generated_virtual.out b/src/test/regress/expected/generated_virtual.out
index 6300e7c1d96..df704b5166f 100644
--- a/src/test/regress/expected/generated_virtual.out
+++ b/src/test/regress/expected/generated_virtual.out
@@ -553,15 +553,11 @@ CREATE TABLE gtest4 (
a int,
b double_int GENERATED ALWAYS AS ((a * 2, a * 3)) VIRTUAL
);
-INSERT INTO gtest4 VALUES (1), (6);
-SELECT * FROM gtest4;
- a | b
----+---------
- 1 | (2,3)
- 6 | (12,18)
-(2 rows)
-
-DROP TABLE gtest4;
+ERROR: virtual generated column "b" cannot have a user-defined type
+DETAIL: Virtual generated columns that make use of user-defined types are not yet supported.
+--INSERT INTO gtest4 VALUES (1), (6);
+--SELECT * FROM gtest4;
+--DROP TABLE gtest4;
DROP TYPE double_int;
-- using tableoid is allowed
CREATE TABLE gtest_tableoid (
@@ -604,9 +600,13 @@ INSERT INTO gtest11 VALUES (1, 10), (2, 20);
GRANT SELECT (a, c) ON gtest11 TO regress_user11;
CREATE FUNCTION gf1(a int) RETURNS int AS $$ SELECT a * 3 $$ IMMUTABLE LANGUAGE SQL;
REVOKE ALL ON FUNCTION gf1(int) FROM PUBLIC;
-CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL);
-INSERT INTO gtest12 VALUES (1, 10), (2, 20);
-GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11;
+CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL); -- fails, user-defined function
+ERROR: generation expression uses user-defined function
+LINE 1: ...nt PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VI...
+ ^
+DETAIL: Virtual generated columns that make use of user-defined functions are not yet supported.
+--INSERT INTO gtest12 VALUES (1, 10), (2, 20);
+--GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11;
SET ROLE regress_user11;
SELECT a, b FROM gtest11; -- not allowed
ERROR: permission denied for table gtest11
@@ -619,15 +619,12 @@ SELECT a, c FROM gtest11; -- allowed
SELECT gf1(10); -- not allowed
ERROR: permission denied for function gf1
-INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function)
-SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed
-ERROR: permission denied for function gf1
+--INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function)
+--SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed
RESET ROLE;
-DROP FUNCTION gf1(int); -- fail
-ERROR: cannot drop function gf1(integer) because other objects depend on it
-DETAIL: column c of table gtest12 depends on function gf1(integer)
-HINT: Use DROP ... CASCADE to drop the dependent objects too.
-DROP TABLE gtest11, gtest12;
+--DROP FUNCTION gf1(int); -- fail
+DROP TABLE gtest11;
+--DROP TABLE gtest12;
DROP FUNCTION gf1(int);
DROP USER regress_user11;
-- check constraints
@@ -637,10 +634,10 @@ INSERT INTO gtest20 (a) VALUES (30); -- violates constraint
ERROR: new row for relation "gtest20" violates check constraint "gtest20_b_check"
DETAIL: Failing row contains (30, virtual).
ALTER TABLE gtest20 ALTER COLUMN b SET EXPRESSION AS (a * 100); -- violates constraint (currently not supported)
-ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables with check constraints
+ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables with check constraints
DETAIL: Column "b" of relation "gtest20" is a virtual generated column.
ALTER TABLE gtest20 ALTER COLUMN b SET EXPRESSION AS (a * 3); -- ok (currently not supported)
-ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables with check constraints
+ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables with check constraints
DETAIL: Column "b" of relation "gtest20" is a virtual generated column.
CREATE TABLE gtest20a (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) VIRTUAL);
INSERT INTO gtest20a (a) VALUES (10);
@@ -800,11 +797,23 @@ CREATE TABLE gtest24r (a int PRIMARY KEY, b gtestdomain1range GENERATED ALWAYS A
ERROR: virtual generated column "b" cannot have a domain type
--INSERT INTO gtest24r (a) VALUES (4); -- ok
--INSERT INTO gtest24r (a) VALUES (6); -- error
+CREATE TABLE gtest24at (a int PRIMARY KEY);
+ALTER TABLE gtest24at ADD COLUMN b gtestdomain1 GENERATED ALWAYS AS (a * 2) VIRTUAL; -- error
+ERROR: virtual generated column "b" cannot have a domain type
+CREATE TABLE gtest24ata (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) VIRTUAL);
+ALTER TABLE gtest24ata ALTER COLUMN b TYPE gtestdomain1; -- error
+ERROR: virtual generated column "b" cannot have a domain type
CREATE DOMAIN gtestdomainnn AS int CHECK (VALUE IS NOT NULL);
CREATE TABLE gtest24nn (a int, b gtestdomainnn GENERATED ALWAYS AS (a * 2) VIRTUAL);
ERROR: virtual generated column "b" cannot have a domain type
--INSERT INTO gtest24nn (a) VALUES (4); -- ok
--INSERT INTO gtest24nn (a) VALUES (NULL); -- error
+-- using user-defined type not yet supported
+CREATE TABLE gtest24xxx (a gtestdomain1, b gtestdomain1, c int GENERATED ALWAYS AS (greatest(a, b)) VIRTUAL); -- error
+ERROR: generation expression uses user-defined type
+LINE 1: ...main1, b gtestdomain1, c int GENERATED ALWAYS AS (greatest(a...
+ ^
+DETAIL: Virtual generated columns that make use of user-defined types are not yet supported.
-- typed tables (currently not supported)
CREATE TYPE gtest_type AS (f1 integer, f2 text, f3 bigint);
CREATE TABLE gtest28 OF gtest_type (f1 WITH OPTIONS GENERATED ALWAYS AS (f2 *2) VIRTUAL);
@@ -1470,7 +1479,8 @@ create table gtest32 (
a int primary key,
b int generated always as (a * 2),
c int generated always as (10 + 10),
- d int generated always as (coalesce(a, 100))
+ d int generated always as (coalesce(a, 100)),
+ e int
);
insert into gtest32 values (1), (2);
analyze gtest32;
@@ -1554,41 +1564,44 @@ select t2.* from gtest32 t1 left join gtest32 t2 on false;
QUERY PLAN
------------------------------------------------------
Nested Loop Left Join
- Output: a, (a * 2), (20), (COALESCE(a, 100))
+ Output: a, (a * 2), (20), (COALESCE(a, 100)), e
Join Filter: false
-> Seq Scan on generated_virtual_tests.gtest32 t1
- Output: t1.a, t1.b, t1.c, t1.d
+ Output: t1.a, t1.b, t1.c, t1.d, t1.e
-> Result
- Output: a, 20, COALESCE(a, 100)
+ Output: a, e, 20, COALESCE(a, 100)
One-Time Filter: false
(8 rows)
select t2.* from gtest32 t1 left join gtest32 t2 on false;
- a | b | c | d
----+---+---+---
- | | |
- | | |
+ a | b | c | d | e
+---+---+---+---+---
+ | | | |
+ | | | |
(2 rows)
explain (verbose, costs off)
-select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20;
+select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20;
QUERY PLAN
-----------------------------------------------------
HashAggregate
- Output: a, ((a * 2)), (20), (COALESCE(a, 100))
+ Output: a, ((a * 2)), (20), (COALESCE(a, 100)), e
Hash Key: t.a
Hash Key: (t.a * 2)
Hash Key: 20
Hash Key: COALESCE(t.a, 100)
+ Hash Key: t.e
Filter: ((20) = 20)
-> Seq Scan on generated_virtual_tests.gtest32 t
- Output: a, (a * 2), 20, COALESCE(a, 100)
-(9 rows)
+ Output: a, (a * 2), 20, COALESCE(a, 100), e
+(10 rows)
-select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20;
- a | b | c | d
----+---+----+---
- | | 20 |
+select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20;
+ a | b | c | d | e
+---+---+----+---+---
+ | | 20 | |
(1 row)
+-- Ensure that virtual generated columns in an ALTER COLUMN TYPE USING expression are expanded
+alter table gtest32 alter column e type bigint using b;
drop table gtest32;
diff --git a/src/test/regress/expected/horology.out b/src/test/regress/expected/horology.out
index b90bfcd794f..5ae93d8e8a5 100644
--- a/src/test/regress/expected/horology.out
+++ b/src/test/regress/expected/horology.out
@@ -467,6 +467,15 @@ SELECT timestamp with time zone 'Y2001M12D27H04MM05S06.789-08';
ERROR: invalid input syntax for type timestamp with time zone: "Y2001M12D27H04MM05S06.789-08"
LINE 1: SELECT timestamp with time zone 'Y2001M12D27H04MM05S06.789-0...
^
+-- More examples we used to accept and should not
+SELECT timestamp with time zone 'J2452271 T X03456-08';
+ERROR: invalid input syntax for type timestamp with time zone: "J2452271 T X03456-08"
+LINE 1: SELECT timestamp with time zone 'J2452271 T X03456-08';
+ ^
+SELECT timestamp with time zone 'J2452271 T X03456.001e6-08';
+ERROR: invalid input syntax for type timestamp with time zone: "J2452271 T X03456.001e6-08"
+LINE 1: SELECT timestamp with time zone 'J2452271 T X03456.001e6-08'...
+ ^
-- conflicting fields should throw errors
SELECT date '1995-08-06 epoch';
ERROR: invalid input syntax for type date: "1995-08-06 epoch"
diff --git a/src/test/regress/expected/inherit.out b/src/test/regress/expected/inherit.out
index f9b0c415cfd..78dead65325 100644
--- a/src/test/regress/expected/inherit.out
+++ b/src/test/regress/expected/inherit.out
@@ -2281,7 +2281,7 @@ Inherits: pp1,
create table cc3 (a2 int not null no inherit) inherits (cc1);
NOTICE: moving and merging column "a2" with inherited definition
DETAIL: User-specified column moved to the position of the inherited column.
-ERROR: cannot define not-null constraint on column "a2" with NO INHERIT
+ERROR: cannot define not-null constraint with NO INHERIT on column "a2"
DETAIL: The column has an inherited not-null constraint.
-- change NO INHERIT status of inherited constraint: no dice, it's inherited
alter table cc2 add not null a2 no inherit;
@@ -2530,7 +2530,7 @@ ERROR: conflicting NO INHERIT declaration for not-null constraint on column "a"
CREATE TABLE inh_nn1 (a int not null);
CREATE TABLE inh_nn2 (a int not null no inherit) INHERITS (inh_nn1);
NOTICE: merging column "a" with inherited definition
-ERROR: cannot define not-null constraint on column "a" with NO INHERIT
+ERROR: cannot define not-null constraint with NO INHERIT on column "a"
DETAIL: The column has an inherited not-null constraint.
CREATE TABLE inh_nn3 (a int not null, b int, not null a no inherit);
ERROR: conflicting NO INHERIT declaration for not-null constraint on column "a"
diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out
index f35a0b18c37..390aabfb34b 100644
--- a/src/test/regress/expected/join.out
+++ b/src/test/regress/expected/join.out
@@ -3946,6 +3946,59 @@ where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
(1 row)
-- variant that isn't quite a star-schema case
+explain (verbose, costs off)
+select ss1.d1 from
+ tenk1 as t1
+ inner join tenk1 as t2
+ on t1.tenthous = t2.ten
+ inner join
+ int8_tbl as i8
+ left join int4_tbl as i4
+ inner join (select 64::information_schema.cardinal_number as d1
+ from tenk1 t3,
+ lateral (select abs(t3.unique1) + random()) ss0(x)
+ where t3.fivethous < 0) as ss1
+ on i4.f1 = ss1.d1
+ on i8.q1 = i4.f1
+ on t1.tenthous = ss1.d1
+where t1.unique1 < i4.f1;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
+ Nested Loop
+ Output: (64)::information_schema.cardinal_number
+ Join Filter: (t1.tenthous = ((64)::information_schema.cardinal_number)::integer)
+ -> Seq Scan on public.tenk1 t3
+ Output: t3.unique1, t3.unique2, t3.two, t3.four, t3.ten, t3.twenty, t3.hundred, t3.thousand, t3.twothousand, t3.fivethous, t3.tenthous, t3.odd, t3.even, t3.stringu1, t3.stringu2, t3.string4
+ Filter: (t3.fivethous < 0)
+ -> Nested Loop
+ Output: t1.tenthous, t2.ten
+ -> Nested Loop
+ Output: t1.tenthous, t2.ten, i4.f1
+ Join Filter: (t1.unique1 < i4.f1)
+ -> Hash Join
+ Output: t1.tenthous, t1.unique1, t2.ten
+ Hash Cond: (t2.ten = t1.tenthous)
+ -> Seq Scan on public.tenk1 t2
+ Output: t2.unique1, t2.unique2, t2.two, t2.four, t2.ten, t2.twenty, t2.hundred, t2.thousand, t2.twothousand, t2.fivethous, t2.tenthous, t2.odd, t2.even, t2.stringu1, t2.stringu2, t2.string4
+ -> Hash
+ Output: t1.tenthous, t1.unique1
+ -> Nested Loop
+ Output: t1.tenthous, t1.unique1
+ -> Subquery Scan on ss0
+ Output: ss0.x, (64)::information_schema.cardinal_number
+ -> Result
+ Output: ((abs(t3.unique1))::double precision + random())
+ -> Index Scan using tenk1_thous_tenthous on public.tenk1 t1
+ Output: t1.unique1, t1.unique2, t1.two, t1.four, t1.ten, t1.twenty, t1.hundred, t1.thousand, t1.twothousand, t1.fivethous, t1.tenthous, t1.odd, t1.even, t1.stringu1, t1.stringu2, t1.string4
+ Index Cond: (t1.tenthous = (((64)::information_schema.cardinal_number))::integer)
+ -> Seq Scan on public.int4_tbl i4
+ Output: i4.f1
+ Filter: (i4.f1 = ((64)::information_schema.cardinal_number)::integer)
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ Filter: (i8.q1 = ((64)::information_schema.cardinal_number)::integer)
+(33 rows)
+
select ss1.d1 from
tenk1 as t1
inner join tenk1 as t2
@@ -4035,6 +4088,195 @@ select * from
1 | 2 | 2
(1 row)
+-- This example demonstrates the folly of our old "have_dangerous_phv" logic
+begin;
+set local from_collapse_limit to 2;
+explain (verbose, costs off)
+select * from int8_tbl t1
+ left join
+ (select coalesce(t2.q1 + x, 0) from int8_tbl t2,
+ lateral (select t3.q1 as x from int8_tbl t3,
+ lateral (select t2.q1, t3.q1 offset 0) s))
+ on true;
+ QUERY PLAN
+------------------------------------------------------------------
+ Nested Loop Left Join
+ Output: t1.q1, t1.q2, (COALESCE((t2.q1 + t3.q1), '0'::bigint))
+ -> Seq Scan on public.int8_tbl t1
+ Output: t1.q1, t1.q2
+ -> Materialize
+ Output: (COALESCE((t2.q1 + t3.q1), '0'::bigint))
+ -> Nested Loop
+ Output: COALESCE((t2.q1 + t3.q1), '0'::bigint)
+ -> Seq Scan on public.int8_tbl t2
+ Output: t2.q1, t2.q2
+ -> Nested Loop
+ Output: t3.q1
+ -> Seq Scan on public.int8_tbl t3
+ Output: t3.q1, t3.q2
+ -> Result
+ Output: NULL::bigint, NULL::bigint
+(16 rows)
+
+rollback;
+-- ... not that the initial replacement didn't have some bugs too
+begin;
+create temp table t(i int primary key);
+explain (verbose, costs off)
+select * from t t1
+ left join (select 1 as x, * from t t2(i2)) t2ss on t1.i = t2ss.i2
+ left join t t3(i3) on false
+ left join t t4(i4) on t4.i4 > t2ss.x;
+ QUERY PLAN
+----------------------------------------------------------
+ Nested Loop Left Join
+ Output: t1.i, (1), t2.i2, i3, t4.i4
+ -> Nested Loop Left Join
+ Output: t1.i, t2.i2, (1), i3
+ Join Filter: false
+ -> Hash Left Join
+ Output: t1.i, t2.i2, (1)
+ Inner Unique: true
+ Hash Cond: (t1.i = t2.i2)
+ -> Seq Scan on pg_temp.t t1
+ Output: t1.i
+ -> Hash
+ Output: t2.i2, (1)
+ -> Seq Scan on pg_temp.t t2
+ Output: t2.i2, 1
+ -> Result
+ Output: i3
+ One-Time Filter: false
+ -> Memoize
+ Output: t4.i4
+ Cache Key: (1)
+ Cache Mode: binary
+ -> Index Only Scan using t_pkey on pg_temp.t t4
+ Output: t4.i4
+ Index Cond: (t4.i4 > (1))
+(25 rows)
+
+explain (verbose, costs off)
+select * from
+ (select k from
+ (select i, coalesce(i, j) as k from
+ (select i from t union all select 0)
+ join (select 1 as j limit 1) on i = j)
+ right join (select 2 as x) on true
+ join (select 3 as y) on i is not null
+ ),
+ lateral (select k as kl limit 1);
+ QUERY PLAN
+-------------------------------------------------------------------
+ Nested Loop
+ Output: COALESCE(t.i, (1)), ((COALESCE(t.i, (1))))
+ -> Limit
+ Output: 1
+ -> Result
+ Output: 1
+ -> Nested Loop
+ Output: t.i, ((COALESCE(t.i, (1))))
+ -> Result
+ Output: t.i, COALESCE(t.i, (1))
+ -> Append
+ -> Index Only Scan using t_pkey on pg_temp.t
+ Output: t.i
+ Index Cond: (t.i = (1))
+ -> Result
+ Output: 0
+ One-Time Filter: ((1) = 0)
+ -> Limit
+ Output: ((COALESCE(t.i, (1))))
+ -> Result
+ Output: (COALESCE(t.i, (1)))
+(21 rows)
+
+rollback;
+-- PHVs containing SubLinks are quite tricky to get right
+explain (verbose, costs off)
+select *
+from int8_tbl i8
+ inner join
+ (select (select true) as x
+ from int4_tbl i4, lateral (select i4.f1 as y limit 1) ss1
+ where i4.f1 = 0) ss2 on true
+ right join (select false as z) ss3 on true,
+ lateral (select i8.q2 as q2l where x limit 1) ss4
+where i8.q2 = 123;
+ QUERY PLAN
+----------------------------------------------------------------
+ Nested Loop
+ Output: i8.q1, i8.q2, (InitPlan 1).col1, false, (i8.q2)
+ InitPlan 1
+ -> Result
+ Output: true
+ InitPlan 2
+ -> Result
+ Output: true
+ -> Seq Scan on public.int4_tbl i4
+ Output: i4.f1
+ Filter: (i4.f1 = 0)
+ -> Nested Loop
+ Output: i8.q1, i8.q2, (i8.q2)
+ -> Subquery Scan on ss1
+ Output: ss1.y, (InitPlan 1).col1
+ -> Limit
+ Output: NULL::integer
+ -> Result
+ Output: NULL::integer
+ -> Nested Loop
+ Output: i8.q1, i8.q2, (i8.q2)
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ Filter: (i8.q2 = 123)
+ -> Limit
+ Output: (i8.q2)
+ -> Result
+ Output: i8.q2
+ One-Time Filter: ((InitPlan 1).col1)
+(29 rows)
+
+explain (verbose, costs off)
+select *
+from int8_tbl i8
+ inner join
+ (select (select true) as x
+ from int4_tbl i4, lateral (select 1 as y limit 1) ss1
+ where i4.f1 = 0) ss2 on true
+ right join (select false as z) ss3 on true,
+ lateral (select i8.q2 as q2l where x limit 1) ss4
+where i8.q2 = 123;
+ QUERY PLAN
+----------------------------------------------------------------
+ Nested Loop
+ Output: i8.q1, i8.q2, (InitPlan 1).col1, false, (i8.q2)
+ InitPlan 1
+ -> Result
+ Output: true
+ InitPlan 2
+ -> Result
+ Output: true
+ -> Limit
+ Output: NULL::integer
+ -> Result
+ Output: NULL::integer
+ -> Nested Loop
+ Output: i8.q1, i8.q2, (i8.q2)
+ -> Seq Scan on public.int4_tbl i4
+ Output: i4.f1, (InitPlan 1).col1
+ Filter: (i4.f1 = 0)
+ -> Nested Loop
+ Output: i8.q1, i8.q2, (i8.q2)
+ -> Seq Scan on public.int8_tbl i8
+ Output: i8.q1, i8.q2
+ Filter: (i8.q2 = 123)
+ -> Limit
+ Output: (i8.q2)
+ -> Result
+ Output: i8.q2
+ One-Time Filter: ((InitPlan 1).col1)
+(27 rows)
+
-- Test proper handling of appendrel PHVs during useless-RTE removal
explain (costs off)
select * from
diff --git a/src/test/regress/expected/limit.out b/src/test/regress/expected/limit.out
index f4267c002d7..e3bcc680653 100644
--- a/src/test/regress/expected/limit.out
+++ b/src/test/regress/expected/limit.out
@@ -647,7 +647,7 @@ View definition:
WHERE thousand < 995
ORDER BY thousand
OFFSET 10
- FETCH FIRST 5 ROWS WITH TIES;
+ FETCH FIRST (5) ROWS WITH TIES;
CREATE VIEW limit_thousand_v_2 AS SELECT thousand FROM onek WHERE thousand < 995
ORDER BY thousand OFFSET 10 FETCH FIRST 5 ROWS ONLY;
@@ -679,10 +679,10 @@ View definition:
FROM onek
WHERE thousand < 995
ORDER BY thousand
- FETCH FIRST (NULL::integer + 1) ROWS WITH TIES;
+ FETCH FIRST ((NULL::integer + 1)) ROWS WITH TIES;
CREATE VIEW limit_thousand_v_4 AS SELECT thousand FROM onek WHERE thousand < 995
- ORDER BY thousand FETCH FIRST NULL ROWS ONLY;
+ ORDER BY thousand FETCH FIRST (5::bigint) ROWS WITH TIES;
\d+ limit_thousand_v_4
View "public.limit_thousand_v_4"
Column | Type | Collation | Nullable | Default | Storage | Description
@@ -693,6 +693,20 @@ View definition:
FROM onek
WHERE thousand < 995
ORDER BY thousand
+ FETCH FIRST (5::bigint) ROWS WITH TIES;
+
+CREATE VIEW limit_thousand_v_5 AS SELECT thousand FROM onek WHERE thousand < 995
+ ORDER BY thousand FETCH FIRST NULL ROWS ONLY;
+\d+ limit_thousand_v_5
+ View "public.limit_thousand_v_5"
+ Column | Type | Collation | Nullable | Default | Storage | Description
+----------+---------+-----------+----------+---------+---------+-------------
+ thousand | integer | | | | plain |
+View definition:
+ SELECT thousand
+ FROM onek
+ WHERE thousand < 995
+ ORDER BY thousand
LIMIT ALL;
-- leave these views
diff --git a/src/test/regress/expected/matview.out b/src/test/regress/expected/matview.out
index 54939ecc6b0..c56c9fa3a25 100644
--- a/src/test/regress/expected/matview.out
+++ b/src/test/regress/expected/matview.out
@@ -587,7 +587,7 @@ CREATE MATERIALIZED VIEW drop_idx_matview AS
NOTICE: index "mvtest_drop_idx" does not exist, skipping
CREATE UNIQUE INDEX mvtest_drop_idx ON drop_idx_matview (i);
REFRESH MATERIALIZED VIEW CONCURRENTLY drop_idx_matview;
-ERROR: could not find suitable unique index on materialized view
+ERROR: could not find suitable unique index on materialized view "drop_idx_matview"
DROP MATERIALIZED VIEW drop_idx_matview; -- clean up
RESET search_path;
-- make sure that create WITH NO DATA works via SPI
diff --git a/src/test/regress/expected/merge.out b/src/test/regress/expected/merge.out
index bcd29668297..cf2219df754 100644
--- a/src/test/regress/expected/merge.out
+++ b/src/test/regress/expected/merge.out
@@ -2702,6 +2702,76 @@ SELECT * FROM new_measurement ORDER BY city_id, logdate;
1 | 01-17-2007 | |
(2 rows)
+-- MERGE into inheritance root table
+DROP TRIGGER insert_measurement_trigger ON measurement;
+ALTER TABLE measurement ADD CONSTRAINT mcheck CHECK (city_id = 0) NO INHERIT;
+EXPLAIN (COSTS OFF)
+MERGE INTO measurement m
+ USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, 25, 100);
+ QUERY PLAN
+--------------------------------------------------------------------------
+ Merge on measurement m
+ Merge on measurement_y2007m01 m_1
+ -> Nested Loop Left Join
+ -> Result
+ -> Seq Scan on measurement_y2007m01 m_1
+ Filter: ((city_id = 1) AND (logdate = '01-17-2007'::date))
+(6 rows)
+
+BEGIN;
+MERGE INTO measurement m
+ USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, 25, 100);
+SELECT * FROM ONLY measurement ORDER BY city_id, logdate;
+ city_id | logdate | peaktemp | unitsales
+---------+------------+----------+-----------
+ 0 | 07-21-2005 | 25 | 35
+ 0 | 01-17-2007 | 25 | 100
+(2 rows)
+
+ROLLBACK;
+ALTER TABLE measurement ENABLE ROW LEVEL SECURITY;
+ALTER TABLE measurement FORCE ROW LEVEL SECURITY;
+CREATE POLICY measurement_p ON measurement USING (peaktemp IS NOT NULL);
+MERGE INTO measurement m
+ USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, NULL, 100); -- should fail
+ERROR: new row violates row-level security policy for table "measurement"
+MERGE INTO measurement m
+ USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, 25, 100); -- ok
+SELECT * FROM ONLY measurement ORDER BY city_id, logdate;
+ city_id | logdate | peaktemp | unitsales
+---------+------------+----------+-----------
+ 0 | 07-21-2005 | 25 | 35
+ 0 | 01-17-2007 | 25 | 100
+(2 rows)
+
+MERGE INTO measurement m
+ USING (VALUES (1, '01-18-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, 25, 200)
+RETURNING merge_action(), m.*;
+ merge_action | city_id | logdate | peaktemp | unitsales
+--------------+---------+------------+----------+-----------
+ INSERT | 0 | 01-18-2007 | 25 | 200
+(1 row)
+
DROP TABLE measurement, new_measurement CASCADE;
NOTICE: drop cascades to 3 other objects
DETAIL: drop cascades to table measurement_y2006m02
diff --git a/src/test/regress/expected/misc_functions.out b/src/test/regress/expected/misc_functions.out
index cc517ed5e90..c3b2b9d8603 100644
--- a/src/test/regress/expected/misc_functions.out
+++ b/src/test/regress/expected/misc_functions.out
@@ -890,17 +890,17 @@ SELECT pg_column_toast_chunk_id(a) IS NULL,
DROP TABLE test_chunk_id;
DROP FUNCTION explain_mask_costs(text, bool, bool, bool, bool);
--- test stratnum support functions
-SELECT gist_stratnum_common(7);
- gist_stratnum_common
-----------------------
- 3
+-- test stratnum translation support functions
+SELECT gist_translate_cmptype_common(7);
+ gist_translate_cmptype_common
+-------------------------------
+ 3
(1 row)
-SELECT gist_stratnum_common(3);
- gist_stratnum_common
-----------------------
- 18
+SELECT gist_translate_cmptype_common(3);
+ gist_translate_cmptype_common
+-------------------------------
+ 18
(1 row)
-- relpath tests
diff --git a/src/test/regress/expected/partition_join.out b/src/test/regress/expected/partition_join.out
index 6101c8c7cf1..d5368186caa 100644
--- a/src/test/regress/expected/partition_join.out
+++ b/src/test/regress/expected/partition_join.out
@@ -5260,6 +5260,24 @@ SELECT x.id, y.id FROM fract_t x LEFT JOIN fract_t y USING (id) ORDER BY x.id DE
Index Cond: (id = x_2.id)
(11 rows)
+EXPLAIN (COSTS OFF) -- Should use NestLoop with parameterised inner scan
+SELECT x.id, y.id FROM fract_t x LEFT JOIN fract_t y USING (id)
+ORDER BY x.id DESC LIMIT 2;
+ QUERY PLAN
+--------------------------------------------------------------------------------
+ Limit
+ -> Merge Append
+ Sort Key: x.id DESC
+ -> Nested Loop Left Join
+ -> Index Only Scan Backward using fract_t0_pkey on fract_t0 x_1
+ -> Index Only Scan using fract_t0_pkey on fract_t0 y_1
+ Index Cond: (id = x_1.id)
+ -> Nested Loop Left Join
+ -> Index Only Scan Backward using fract_t1_pkey on fract_t1 x_2
+ -> Index Only Scan using fract_t1_pkey on fract_t1 y_2
+ Index Cond: (id = x_2.id)
+(11 rows)
+
--
-- Test Append's fractional paths
--
diff --git a/src/test/regress/expected/partition_prune.out b/src/test/regress/expected/partition_prune.out
index 0bf35260b46..d1966cd7d82 100644
--- a/src/test/regress/expected/partition_prune.out
+++ b/src/test/regress/expected/partition_prune.out
@@ -4553,16 +4553,18 @@ create view part_abc_view as select * from part_abc where b <> 'a' with check op
prepare update_part_abc_view as update part_abc_view set b = $2 where a = $1 returning *;
-- Only the unpruned partition should be shown in the list of relations to be
-- updated
-explain (costs off) execute update_part_abc_view (1, 'd');
- QUERY PLAN
--------------------------------------------------------
- Update on part_abc
- Update on part_abc_1
+explain (verbose, costs off) execute update_part_abc_view (1, 'd');
+ QUERY PLAN
+-----------------------------------------------------------------------------
+ Update on public.part_abc
+ Output: part_abc_1.a, part_abc_1.b, part_abc_1.c
+ Update on public.part_abc_1
-> Append
Subplans Removed: 1
- -> Seq Scan on part_abc_1
- Filter: ((b <> 'a'::text) AND (a = $1))
-(6 rows)
+ -> Seq Scan on public.part_abc_1
+ Output: $2, part_abc_1.tableoid, part_abc_1.ctid
+ Filter: ((part_abc_1.b <> 'a'::text) AND (part_abc_1.a = $1))
+(8 rows)
execute update_part_abc_view (1, 'd');
a | b | c
@@ -4570,28 +4572,31 @@ execute update_part_abc_view (1, 'd');
1 | d | t
(1 row)
-explain (costs off) execute update_part_abc_view (2, 'a');
- QUERY PLAN
--------------------------------------------------------
- Update on part_abc
- Update on part_abc_2 part_abc_1
+explain (verbose, costs off) execute update_part_abc_view (2, 'a');
+ QUERY PLAN
+-----------------------------------------------------------------------------
+ Update on public.part_abc
+ Output: part_abc_1.a, part_abc_1.b, part_abc_1.c
+ Update on public.part_abc_2
-> Append
Subplans Removed: 1
- -> Seq Scan on part_abc_2 part_abc_1
- Filter: ((b <> 'a'::text) AND (a = $1))
-(6 rows)
+ -> Seq Scan on public.part_abc_2
+ Output: $2, part_abc_2.tableoid, part_abc_2.ctid
+ Filter: ((part_abc_2.b <> 'a'::text) AND (part_abc_2.a = $1))
+(8 rows)
execute update_part_abc_view (2, 'a');
ERROR: new row violates check option for view "part_abc_view"
DETAIL: Failing row contains (2, a, t).
-- All pruned.
-explain (costs off) execute update_part_abc_view (3, 'a');
- QUERY PLAN
------------------------------
- Update on part_abc
+explain (verbose, costs off) execute update_part_abc_view (3, 'a');
+ QUERY PLAN
+----------------------------------------------------
+ Update on public.part_abc
+ Output: part_abc_1.a, part_abc_1.b, part_abc_1.c
-> Append
Subplans Removed: 2
-(3 rows)
+(4 rows)
execute update_part_abc_view (3, 'a');
a | b | c
diff --git a/src/test/regress/expected/psql.out b/src/test/regress/expected/psql.out
index cf48ae6d0c2..236eba2540e 100644
--- a/src/test/regress/expected/psql.out
+++ b/src/test/regress/expected/psql.out
@@ -160,12 +160,12 @@ LINE 1: SELECT $1, $2
foo4 | bar4
(1 row)
--- \close (extended query protocol)
-\close
-\close: missing required argument
-\close ''
-\close stmt2
-\close stmt2
+-- \close_prepared (extended query protocol)
+\close_prepared
+\close_prepared: missing required argument
+\close_prepared ''
+\close_prepared stmt2
+\close_prepared stmt2
SELECT name, statement FROM pg_prepared_statements ORDER BY name;
name | statement
-------+----------------
@@ -4666,7 +4666,7 @@ bar 'bar' "bar"
\C arg1
\c arg1 arg2 arg3 arg4
\cd arg1
- \close stmt1
+ \close_prepared stmt1
\conninfo
\copy arg1 arg2 arg3 arg4 arg5 arg6
\copyright
diff --git a/src/test/regress/expected/psql_pipeline.out b/src/test/regress/expected/psql_pipeline.out
index a30dec088b9..a0816fb10b6 100644
--- a/src/test/regress/expected/psql_pipeline.out
+++ b/src/test/regress/expected/psql_pipeline.out
@@ -228,192 +228,6 @@ BEGIN \bind \sendpipeline
INSERT INTO psql_pipeline VALUES ($1) \bind 1 \sendpipeline
COMMIT \bind \sendpipeline
\endpipeline
--- COPY FROM STDIN
--- with \sendpipeline and \bind
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\endpipeline
- ?column?
-----------
- val1
-(1 row)
-
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\endpipeline
- ?column?
-----------
- val1
-(1 row)
-
--- COPY FROM STDIN with \flushrequest + \getresults
--- with \sendpipeline and \bind
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\flushrequest
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-message type 0x5a arrived from server while idle
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\flushrequest
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-message type 0x5a arrived from server while idle
-\endpipeline
--- COPY FROM STDIN with \syncpipeline + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\syncpipeline
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\syncpipeline
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-\endpipeline
--- COPY TO STDOUT
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\endpipeline
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\endpipeline
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
--- COPY TO STDOUT with \flushrequest + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\flushrequest
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\flushrequest
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
-\endpipeline
--- COPY TO STDOUT with \syncpipeline + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\syncpipeline
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\syncpipeline
-\getresults
- ?column?
-----------
- val1
-(1 row)
-
-1 \N
-2 test2
-20 test2
-3 test3
-30 test3
-4 test4
-40 test4
-\endpipeline
-- Use \parse and \bind_named
\startpipeline
SELECT $1 \parse ''
@@ -740,7 +554,7 @@ SELECT COUNT(*) FROM psql_pipeline \bind \sendpipeline
count
-------
- 7
+ 1
(1 row)
-- After an error, pipeline is aborted and requires \syncpipeline to be
@@ -750,7 +564,7 @@ SELECT $1 \bind \sendpipeline
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
\flushrequest
\getresults
ERROR: bind message supplies 0 parameters, but prepared statement "" requires 1
@@ -758,7 +572,7 @@ ERROR: bind message supplies 0 parameters, but prepared statement "" requires 1
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
-- Sync allows pipeline to recover.
\syncpipeline
\getresults
@@ -766,7 +580,7 @@ Pipeline aborted, command did not run
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
\flushrequest
\getresults
?column?
diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out
index 4de96c04f9d..3a2eacd793f 100644
--- a/src/test/regress/expected/publication.out
+++ b/src/test/regress/expected/publication.out
@@ -34,7 +34,8 @@ ERROR: conflicting or redundant options
LINE 1: ...pub_xxx WITH (publish_generated_columns = stored, publish_ge...
^
CREATE PUBLICATION testpub_xxx WITH (publish_generated_columns = foo);
-ERROR: publish_generated_columns requires a "none" or "stored" value
+ERROR: invalid value for publication parameter "publish_generated_columns": "foo"
+DETAIL: Valid values are "none" and "stored".
\dRp
List of publications
Name | Owner | All tables | Inserts | Updates | Deletes | Truncates | Generated columns | Via root
@@ -524,16 +525,22 @@ Tables from schemas:
"testpub_rf_schema2"
-- fail - virtual generated column uses user-defined function
+-- (Actually, this already fails at CREATE TABLE rather than at CREATE
+-- PUBLICATION, but let's keep the test in case the former gets
+-- relaxed sometime.)
CREATE TABLE testpub_rf_tbl6 (id int PRIMARY KEY, x int, y int GENERATED ALWAYS AS (x * testpub_rf_func2()) VIRTUAL);
+ERROR: generation expression uses user-defined function
+LINE 1: ...RIMARY KEY, x int, y int GENERATED ALWAYS AS (x * testpub_rf...
+ ^
+DETAIL: Virtual generated columns that make use of user-defined functions are not yet supported.
CREATE PUBLICATION testpub7 FOR TABLE testpub_rf_tbl6 WHERE (y > 100);
-ERROR: invalid publication WHERE expression
-DETAIL: User-defined or built-in mutable functions are not allowed.
+ERROR: relation "testpub_rf_tbl6" does not exist
-- test that SET EXPRESSION is rejected, because it could affect a row filter
SET client_min_messages = 'ERROR';
CREATE TABLE testpub_rf_tbl7 (id int PRIMARY KEY, x int, y int GENERATED ALWAYS AS (x * 111) VIRTUAL);
CREATE PUBLICATION testpub8 FOR TABLE testpub_rf_tbl7 WHERE (y > 100);
ALTER TABLE testpub_rf_tbl7 ALTER COLUMN y SET EXPRESSION AS (x * testpub_rf_func2());
-ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns on tables that are part of a publication
+ERROR: ALTER TABLE / SET EXPRESSION is not supported for virtual generated columns in tables that are part of a publication
DETAIL: Column "y" of relation "testpub_rf_tbl7" is a virtual generated column.
RESET client_min_messages;
DROP TABLE testpub_rf_tbl1;
@@ -541,7 +548,7 @@ DROP TABLE testpub_rf_tbl2;
DROP TABLE testpub_rf_tbl3;
DROP TABLE testpub_rf_tbl4;
DROP TABLE testpub_rf_tbl5;
-DROP TABLE testpub_rf_tbl6;
+--DROP TABLE testpub_rf_tbl6;
DROP TABLE testpub_rf_schema1.testpub_rf_tbl5;
DROP TABLE testpub_rf_schema2.testpub_rf_tbl6;
DROP SCHEMA testpub_rf_schema1;
diff --git a/src/test/regress/expected/strings.out b/src/test/regress/expected/strings.out
index 174f0a68331..788844abd20 100644
--- a/src/test/regress/expected/strings.out
+++ b/src/test/regress/expected/strings.out
@@ -614,6 +614,73 @@ SELECT 'abcdefg' SIMILAR TO '_bcd%' ESCAPE NULL AS null;
SELECT 'abcdefg' SIMILAR TO '_bcd#%' ESCAPE '##' AS error;
ERROR: invalid escape string
HINT: Escape string must be empty or one character.
+-- Characters that should be left alone in character classes when a
+-- SIMILAR TO regexp pattern is converted to POSIX style.
+-- Underscore "_"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '_[_[:alpha:]_]_';
+ QUERY PLAN
+------------------------------------------------
+ Seq Scan on text_tbl
+ Filter: (f1 ~ '^(?:.[_[:alpha:]_].)$'::text)
+(2 rows)
+
+-- Percentage "%"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '%[%[:alnum:]%]%';
+ QUERY PLAN
+--------------------------------------------------
+ Seq Scan on text_tbl
+ Filter: (f1 ~ '^(?:.*[%[:alnum:]%].*)$'::text)
+(2 rows)
+
+-- Dot "."
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '.[.[:alnum:].].';
+ QUERY PLAN
+--------------------------------------------------
+ Seq Scan on text_tbl
+ Filter: (f1 ~ '^(?:\.[.[:alnum:].]\.)$'::text)
+(2 rows)
+
+-- Dollar "$"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '$[$[:alnum:]$]$';
+ QUERY PLAN
+--------------------------------------------------
+ Seq Scan on text_tbl
+ Filter: (f1 ~ '^(?:\$[$[:alnum:]$]\$)$'::text)
+(2 rows)
+
+-- Opening parenthesis "("
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '()[([:alnum:](]()';
+ QUERY PLAN
+------------------------------------------------------
+ Seq Scan on text_tbl
+ Filter: (f1 ~ '^(?:(?:)[([:alnum:](](?:))$'::text)
+(2 rows)
+
+-- Caret "^"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '^[^[:alnum:]^[^^][[^^]][\^][[\^]]\^]^';
+ QUERY PLAN
+------------------------------------------------------------------------
+ Seq Scan on text_tbl
+ Filter: (f1 ~ '^(?:\^[^[:alnum:]^[^^][[^^]][\^][[\^]]\^]\^)$'::text)
+(2 rows)
+
+-- Closing square bracket "]" at the beginning of character class
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '[]%][^]%][^%]%';
+ QUERY PLAN
+------------------------------------------------
+ Seq Scan on text_tbl
+ Filter: (f1 ~ '^(?:[]%][^]%][^%].*)$'::text)
+(2 rows)
+
+-- Closing square bracket is effective after two carets at the beginning
+-- of a character class.
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '[^^]^';
+ QUERY PLAN
+---------------------------------------
+ Seq Scan on text_tbl
+ Filter: (f1 ~ '^(?:[^^]\^)$'::text)
+(2 rows)
+
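+-- (The converted POSIX pattern can also be inspected directly, e.g. with
+-- SELECT similar_to_escape('_[_[:alpha:]_]_');)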
-- Test backslash escapes in regexp_replace's replacement string
SELECT regexp_replace('1112223333', E'(\\d{3})(\\d{3})(\\d{4})', E'(\\1) \\2-\\3');
regexp_replace
diff --git a/src/test/regress/expected/sysviews.out b/src/test/regress/expected/sysviews.out
index ae17d028ed3..83228cfca29 100644
--- a/src/test/regress/expected/sysviews.out
+++ b/src/test/regress/expected/sysviews.out
@@ -232,22 +232,3 @@ select * from pg_timezone_abbrevs where abbrev = 'LMT';
LMT | @ 7 hours 52 mins 58 secs ago | f
(1 row)
-DO $$
-DECLARE
- bg_writer_pid int;
- r RECORD;
-BEGIN
- SELECT pid from pg_stat_activity where backend_type='background writer'
- INTO bg_writer_pid;
-
- select type, name, ident
- from pg_get_process_memory_contexts(bg_writer_pid, false, 20)
- where path = '{1}' into r;
- RAISE NOTICE '%', r;
- select type, name, ident
- from pg_get_process_memory_contexts(pg_backend_pid(), false, 20)
- where path = '{1}' into r;
- RAISE NOTICE '%', r;
-END $$;
-NOTICE: (AllocSet,TopMemoryContext,)
-NOTICE: (AllocSet,TopMemoryContext,)
diff --git a/src/test/regress/expected/triggers.out b/src/test/regress/expected/triggers.out
index f245d7f1549..2bf0e77d61e 100644
--- a/src/test/regress/expected/triggers.out
+++ b/src/test/regress/expected/triggers.out
@@ -3535,8 +3535,8 @@ drop table parent, child;
drop function f();
-- Test who runs deferred trigger functions
-- setup
-create role regress_groot;
-create role regress_outis;
+create role regress_caller;
+create role regress_fn_owner;
create function whoami() returns trigger language plpgsql
as $$
begin
@@ -3544,7 +3544,7 @@ begin
return null;
end;
$$;
-alter function whoami() owner to regress_outis;
+alter function whoami() owner to regress_fn_owner;
create table defer_trig (id integer);
grant insert on defer_trig to public;
create constraint trigger whoami after insert on defer_trig
@@ -3553,23 +3553,23 @@ create constraint trigger whoami after insert on defer_trig
execute function whoami();
-- deferred triggers must run as the user that queued the trigger
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (1);
reset role;
-set role regress_outis;
+set role regress_fn_owner;
insert into defer_trig values (2);
reset role;
commit;
-NOTICE: I am regress_groot
-NOTICE: I am regress_outis
+NOTICE: I am regress_caller
+NOTICE: I am regress_fn_owner
-- security definer functions override the user who queued the trigger
alter function whoami() security definer;
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (3);
reset role;
commit;
-NOTICE: I am regress_outis
+NOTICE: I am regress_fn_owner
alter function whoami() security invoker;
-- make sure the current user is restored after error
create or replace function whoami() returns trigger language plpgsql
@@ -3581,11 +3581,11 @@ begin
end;
$$;
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (4);
reset role;
commit; -- error expected
-NOTICE: I am regress_groot
+NOTICE: I am regress_caller
ERROR: division by zero
CONTEXT: SQL statement "SELECT 1 / 0"
PL/pgSQL function whoami() line 4 at PERFORM
@@ -3598,5 +3598,5 @@ select current_user = session_user;
-- clean up
drop table defer_trig;
drop function whoami();
-drop role regress_outis;
-drop role regress_groot;
+drop role regress_fn_owner;
+drop role regress_caller;
diff --git a/src/test/regress/expected/without_overlaps.out b/src/test/regress/expected/without_overlaps.out
index ea607bed0a4..f3144bdc39c 100644
--- a/src/test/regress/expected/without_overlaps.out
+++ b/src/test/regress/expected/without_overlaps.out
@@ -1426,7 +1426,7 @@ CREATE TABLE temporal_fk_rng2rng (
CONSTRAINT temporal_fk_rng2rng_fk FOREIGN KEY (parent_id, valid_at)
REFERENCES temporal_rng (id, valid_at)
);
-ERROR: foreign key must use PERIOD when referencing a primary using WITHOUT OVERLAPS
+ERROR: foreign key must use PERIOD when referencing a primary key using WITHOUT OVERLAPS
-- (parent_id, valid_at) REFERENCES (id, PERIOD valid_at)
-- FOREIGN KEY part should specify PERIOD
CREATE TABLE temporal_fk_rng2rng (
@@ -1900,7 +1900,7 @@ CREATE TABLE temporal_fk_mltrng2mltrng (
CONSTRAINT temporal_fk_mltrng2mltrng_fk FOREIGN KEY (parent_id, valid_at)
REFERENCES temporal_mltrng (id, valid_at)
);
-ERROR: foreign key must use PERIOD when referencing a primary using WITHOUT OVERLAPS
+ERROR: foreign key must use PERIOD when referencing a primary key using WITHOUT OVERLAPS
-- (parent_id, valid_at) REFERENCES (id, PERIOD valid_at)
-- FOREIGN KEY part should specify PERIOD
CREATE TABLE temporal_fk_mltrng2mltrng (
diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql
index 5ce9d1e429f..41cff198e18 100644
--- a/src/test/regress/sql/alter_table.sql
+++ b/src/test/regress/sql/alter_table.sql
@@ -3069,6 +3069,15 @@ drop table attbl, atref;
/* End test case for bug #17409 */
+/* Test case for bug #18970 */
+
+create table attbl(a int);
+create table atref(b attbl check ((b).a is not null));
+alter table attbl alter column a type numeric; -- someday this should work
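+-- (currently this errors: cannot alter table "attbl" because column
+-- "atref.b" uses its row type)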
+drop table attbl, atref;
+
+/* End test case for bug #18970 */
+
-- Test that ALTER TABLE rewrite preserves a clustered index
-- for normal indexes and indexes on constraints.
create table alttype_cluster (a int);
diff --git a/src/test/regress/sql/constraints.sql b/src/test/regress/sql/constraints.sql
index 337baab7ced..12668f0e0ce 100644
--- a/src/test/regress/sql/constraints.sql
+++ b/src/test/regress/sql/constraints.sql
@@ -997,6 +997,9 @@ create table constr_parent3 (a int not null);
create table constr_child3 () inherits (constr_parent2, constr_parent3);
EXECUTE get_nnconstraint_info('{constr_parent3, constr_child3}');
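+-- exercise COMMENT ON for a NOT VALID not-null constraint and for the
+-- validated copy inherited by the child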
+COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_parent2 IS 'this constraint is invalid';
+COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_child2 IS 'this constraint is valid';
+
DEALLOCATE get_nnconstraint_info;
-- end NOT NULL NOT VALID
diff --git a/src/test/regress/sql/create_table_like.sql b/src/test/regress/sql/create_table_like.sql
index 6e21722aaeb..bf8702116a7 100644
--- a/src/test/regress/sql/create_table_like.sql
+++ b/src/test/regress/sql/create_table_like.sql
@@ -143,9 +143,10 @@ COMMENT ON INDEX ctlt1_pkey IS 'index pkey';
COMMENT ON INDEX ctlt1_b_key IS 'index b_key';
ALTER TABLE ctlt1 ALTER COLUMN a SET STORAGE MAIN;
-CREATE TABLE ctlt2 (c text);
+CREATE TABLE ctlt2 (c text NOT NULL);
ALTER TABLE ctlt2 ALTER COLUMN c SET STORAGE EXTERNAL;
COMMENT ON COLUMN ctlt2.c IS 'C';
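+-- (this constraint comment should be copied by LIKE ... INCLUDING COMMENTS
+-- below)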
+COMMENT ON CONSTRAINT ctlt2_c_not_null ON ctlt2 IS 't2_c_not_null';
CREATE TABLE ctlt3 (a text CHECK (length(a) < 5), c text CHECK (length(c) < 7));
ALTER TABLE ctlt3 ALTER COLUMN c SET STORAGE EXTERNAL;
@@ -162,6 +163,7 @@ CREATE TABLE ctlt12_storage (LIKE ctlt1 INCLUDING STORAGE, LIKE ctlt2 INCLUDING
\d+ ctlt12_storage
CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDING COMMENTS);
\d+ ctlt12_comments
+SELECT conname, description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt12_comments'::regclass;
CREATE TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (ctlt1);
\d+ ctlt1_inh
SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt1_inh'::regclass;
@@ -197,9 +199,19 @@ DROP TABLE ctlt1, ctlt2, ctlt3, ctlt4, ctlt12_storage, ctlt12_comments, ctlt1_in
-- LIKE must respect NO INHERIT property of constraints
CREATE TABLE noinh_con_copy (a int CHECK (a > 0) NO INHERIT, b int not null,
c int not null no inherit);
-CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS);
+
+COMMENT ON CONSTRAINT noinh_con_copy_b_not_null ON noinh_con_copy IS 'not null b';
+COMMENT ON CONSTRAINT noinh_con_copy_c_not_null ON noinh_con_copy IS 'not null c no inherit';
+
+CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS INCLUDING COMMENTS);
\d+ noinh_con_copy1
+SELECT conname, description
+FROM pg_description, pg_constraint c
+WHERE classoid = 'pg_constraint'::regclass
+AND objoid = c.oid AND c.conrelid = 'noinh_con_copy1'::regclass
+ORDER BY conname COLLATE "C";
+
-- fail, as partitioned tables don't allow NO INHERIT constraints
CREATE TABLE noinh_con_copy1_parted (LIKE noinh_con_copy INCLUDING ALL)
PARTITION BY LIST (a);
diff --git a/src/test/regress/sql/foreign_key.sql b/src/test/regress/sql/foreign_key.sql
index 8159e363022..cfcecb4e911 100644
--- a/src/test/regress/sql/foreign_key.sql
+++ b/src/test/regress/sql/foreign_key.sql
@@ -1389,22 +1389,44 @@ WHERE conrelid::regclass::text like 'fk_partitioned_fk%' ORDER BY oid::regclass:
DROP TABLE fk_partitioned_fk, fk_notpartitioned_pk;
--- NOT VALID foreign key on a non-partitioned table referencing a partitioned table
+-- NOT VALID and NOT ENFORCED foreign key on a non-partitioned table
+-- referencing a partitioned table
CREATE TABLE fk_partitioned_pk (a int, b int, PRIMARY KEY (a, b)) PARTITION BY RANGE (a, b);
CREATE TABLE fk_partitioned_pk_1 PARTITION OF fk_partitioned_pk FOR VALUES FROM (0,0) TO (1000,1000);
+CREATE TABLE fk_partitioned_pk_2 PARTITION OF fk_partitioned_pk FOR VALUES FROM (1000,1000) TO (2000,2000);
CREATE TABLE fk_notpartitioned_fk (b int, a int);
-ALTER TABLE fk_notpartitioned_fk ADD FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
-
--- Constraint will be invalid.
-SELECT conname, convalidated FROM pg_constraint
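+-- Seed rows that satisfy the FK so that VALIDATE CONSTRAINT below succeeds.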
+INSERT INTO fk_partitioned_pk VALUES(100,100), (1000,1000);
+INSERT INTO fk_notpartitioned_fk VALUES(100,100), (1000,1000);
+ALTER TABLE fk_notpartitioned_fk ADD CONSTRAINT fk_notpartitioned_fk_a_b_fkey
+ FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
+ALTER TABLE fk_notpartitioned_fk ADD CONSTRAINT fk_notpartitioned_fk_a_b_fkey2
+ FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT ENFORCED;
+
+-- All constraints will be invalid, and _fkey2 constraints will not be enforced.
+SELECT conname, conenforced, convalidated FROM pg_constraint
WHERE conrelid = 'fk_notpartitioned_fk'::regclass ORDER BY oid::regclass::text;
ALTER TABLE fk_notpartitioned_fk VALIDATE CONSTRAINT fk_notpartitioned_fk_a_b_fkey;
+ALTER TABLE fk_notpartitioned_fk ALTER CONSTRAINT fk_notpartitioned_fk_a_b_fkey2 ENFORCED;
--- All constraints are now valid.
-SELECT conname, convalidated FROM pg_constraint
+-- All constraints are now valid and enforced.
+SELECT conname, conenforced, convalidated FROM pg_constraint
WHERE conrelid = 'fk_notpartitioned_fk'::regclass ORDER BY oid::regclass::text;
+-- test a self-referential FK
+ALTER TABLE fk_partitioned_pk ADD CONSTRAINT selffk FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID;
+CREATE TABLE fk_partitioned_pk_3 PARTITION OF fk_partitioned_pk FOR VALUES FROM (2000,2000) TO (3000,3000)
+ PARTITION BY RANGE (a);
+CREATE TABLE fk_partitioned_pk_3_1 PARTITION OF fk_partitioned_pk_3 FOR VALUES FROM (2000) TO (2100);
+SELECT conname, conenforced, convalidated FROM pg_constraint
+WHERE conrelid = 'fk_partitioned_pk'::regclass AND contype = 'f'
+ORDER BY oid::regclass::text;
+ALTER TABLE fk_partitioned_pk_2 VALIDATE CONSTRAINT selffk;
+ALTER TABLE fk_partitioned_pk VALIDATE CONSTRAINT selffk;
+SELECT conname, conenforced, convalidated FROM pg_constraint
+WHERE conrelid = 'fk_partitioned_pk'::regclass AND contype = 'f'
+ORDER BY oid::regclass::text;
+
DROP TABLE fk_notpartitioned_fk, fk_partitioned_pk;
-- Test some other exotic foreign key features: MATCH SIMPLE, ON UPDATE/DELETE
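[Editor's note] For context on what the new test pins down: a NOT ENFORCED foreign key is recorded with conenforced = false and is not checked at all until it is altered to ENFORCED, which (per the expected output above, "valid and enforced") also validates the existing rows. A minimal sketch, using hypothetical demo_* tables rather than the regression objects:

CREATE TABLE demo_pk (id int PRIMARY KEY);
CREATE TABLE demo_fk (ref int);
INSERT INTO demo_pk VALUES (1);
ALTER TABLE demo_fk ADD CONSTRAINT demo_fk_ref_fkey
  FOREIGN KEY (ref) REFERENCES demo_pk NOT ENFORCED;
INSERT INTO demo_fk VALUES (1);  -- accepted without consulting demo_pk
ALTER TABLE demo_fk ALTER CONSTRAINT demo_fk_ref_fkey ENFORCED;
-- existing rows are validated here; the constraint is now enforced and valid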
diff --git a/src/test/regress/sql/generated_virtual.sql b/src/test/regress/sql/generated_virtual.sql
index b4eedeee2fb..6fa986515b9 100644
--- a/src/test/regress/sql/generated_virtual.sql
+++ b/src/test/regress/sql/generated_virtual.sql
@@ -253,10 +253,10 @@ CREATE TABLE gtest4 (
a int,
b double_int GENERATED ALWAYS AS ((a * 2, a * 3)) VIRTUAL
);
-INSERT INTO gtest4 VALUES (1), (6);
-SELECT * FROM gtest4;
+--INSERT INTO gtest4 VALUES (1), (6);
+--SELECT * FROM gtest4;
-DROP TABLE gtest4;
+--DROP TABLE gtest4;
DROP TYPE double_int;
-- using tableoid is allowed
@@ -290,20 +290,21 @@ GRANT SELECT (a, c) ON gtest11 TO regress_user11;
CREATE FUNCTION gf1(a int) RETURNS int AS $$ SELECT a * 3 $$ IMMUTABLE LANGUAGE SQL;
REVOKE ALL ON FUNCTION gf1(int) FROM PUBLIC;
-CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL);
-INSERT INTO gtest12 VALUES (1, 10), (2, 20);
-GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11;
+CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL); -- fails, user-defined function
+--INSERT INTO gtest12 VALUES (1, 10), (2, 20);
+--GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11;
SET ROLE regress_user11;
SELECT a, b FROM gtest11; -- not allowed
SELECT a, c FROM gtest11; -- allowed
SELECT gf1(10); -- not allowed
-INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function)
-SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed
+--INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function)
+--SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed
RESET ROLE;
-DROP FUNCTION gf1(int); -- fail
-DROP TABLE gtest11, gtest12;
+--DROP FUNCTION gf1(int); -- fail
+DROP TABLE gtest11;
+--DROP TABLE gtest12;
DROP FUNCTION gf1(int);
DROP USER regress_user11;
@@ -453,11 +454,19 @@ CREATE TABLE gtest24r (a int PRIMARY KEY, b gtestdomain1range GENERATED ALWAYS A
--INSERT INTO gtest24r (a) VALUES (4); -- ok
--INSERT INTO gtest24r (a) VALUES (6); -- error
+CREATE TABLE gtest24at (a int PRIMARY KEY);
+ALTER TABLE gtest24at ADD COLUMN b gtestdomain1 GENERATED ALWAYS AS (a * 2) VIRTUAL; -- error
+CREATE TABLE gtest24ata (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) VIRTUAL);
+ALTER TABLE gtest24ata ALTER COLUMN b TYPE gtestdomain1; -- error
+
CREATE DOMAIN gtestdomainnn AS int CHECK (VALUE IS NOT NULL);
CREATE TABLE gtest24nn (a int, b gtestdomainnn GENERATED ALWAYS AS (a * 2) VIRTUAL);
--INSERT INTO gtest24nn (a) VALUES (4); -- ok
--INSERT INTO gtest24nn (a) VALUES (NULL); -- error
+-- using user-defined type not yet supported
+CREATE TABLE gtest24xxx (a gtestdomain1, b gtestdomain1, c int GENERATED ALWAYS AS (greatest(a, b)) VIRTUAL); -- error
+
-- typed tables (currently not supported)
CREATE TYPE gtest_type AS (f1 integer, f2 text, f3 bigint);
CREATE TABLE gtest28 OF gtest_type (f1 WITH OPTIONS GENERATED ALWAYS AS (f2 *2) VIRTUAL);
@@ -788,7 +797,8 @@ create table gtest32 (
a int primary key,
b int generated always as (a * 2),
c int generated always as (10 + 10),
- d int generated always as (coalesce(a, 100))
+ d int generated always as (coalesce(a, 100)),
+ e int
);
insert into gtest32 values (1), (2);
@@ -829,7 +839,10 @@ select t2.* from gtest32 t1 left join gtest32 t2 on false;
select t2.* from gtest32 t1 left join gtest32 t2 on false;
explain (verbose, costs off)
-select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20;
-select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20;
+select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20;
+select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20;
+
+-- Ensure that virtual generated columns in the ALTER COLUMN TYPE USING expression are expanded
+alter table gtest32 alter column e type bigint using b;
drop table gtest32;
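[Editor's note] For readers unfamiliar with the feature under test: a VIRTUAL generated column stores nothing and is computed when read, which is why the ALTER COLUMN TYPE ... USING case above must expand the generated expression rather than reference a stored value. A minimal sketch on a hypothetical table:

CREATE TABLE demo_virt (a int, b int GENERATED ALWAYS AS (a * 2) VIRTUAL);
INSERT INTO demo_virt (a) VALUES (3);
SELECT a, b FROM demo_virt;  -- b is computed on read: (3, 6)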
diff --git a/src/test/regress/sql/horology.sql b/src/test/regress/sql/horology.sql
index 1310b432773..8978249a5dc 100644
--- a/src/test/regress/sql/horology.sql
+++ b/src/test/regress/sql/horology.sql
@@ -102,6 +102,10 @@ SELECT date 'J J 1520447';
SELECT timestamp with time zone 'Y2001M12D27H04M05S06.789+08';
SELECT timestamp with time zone 'Y2001M12D27H04MM05S06.789-08';
+-- More examples we used to accept and should not
+SELECT timestamp with time zone 'J2452271 T X03456-08';
+SELECT timestamp with time zone 'J2452271 T X03456.001e6-08';
+
-- conflicting fields should throw errors
SELECT date '1995-08-06 epoch';
SELECT date '1995-08-06 infinity';
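[Editor's note] The newly rejected inputs above bolt nonsense time fields onto the ISO 'T' separator; plain Julian-day input remains valid, and J2452271 is the same day the surrounding tests spell out as Y2001M12D27:

SELECT date 'J2452271';  -- 2001-12-27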
diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql
index cc5128add4d..f6e7070db65 100644
--- a/src/test/regress/sql/join.sql
+++ b/src/test/regress/sql/join.sql
@@ -1277,6 +1277,23 @@ where t1.unique2 < 42 and t1.stringu1 > t2.stringu2;
-- variant that isn't quite a star-schema case
+explain (verbose, costs off)
+select ss1.d1 from
+ tenk1 as t1
+ inner join tenk1 as t2
+ on t1.tenthous = t2.ten
+ inner join
+ int8_tbl as i8
+ left join int4_tbl as i4
+ inner join (select 64::information_schema.cardinal_number as d1
+ from tenk1 t3,
+ lateral (select abs(t3.unique1) + random()) ss0(x)
+ where t3.fivethous < 0) as ss1
+ on i4.f1 = ss1.d1
+ on i8.q1 = i4.f1
+ on t1.tenthous = ss1.d1
+where t1.unique1 < i4.f1;
+
select ss1.d1 from
tenk1 as t1
inner join tenk1 as t2
@@ -1332,6 +1349,64 @@ select * from
(select 1 as x) ss1 left join (select 2 as y) ss2 on (true),
lateral (select ss2.y as z limit 1) ss3;
+-- This example demonstrates the folly of our old "have_dangerous_phv" logic
+begin;
+set local from_collapse_limit to 2;
+explain (verbose, costs off)
+select * from int8_tbl t1
+ left join
+ (select coalesce(t2.q1 + x, 0) from int8_tbl t2,
+ lateral (select t3.q1 as x from int8_tbl t3,
+ lateral (select t2.q1, t3.q1 offset 0) s))
+ on true;
+rollback;
+
+-- ... not that the initial replacement didn't have some bugs too
+begin;
+create temp table t(i int primary key);
+
+explain (verbose, costs off)
+select * from t t1
+ left join (select 1 as x, * from t t2(i2)) t2ss on t1.i = t2ss.i2
+ left join t t3(i3) on false
+ left join t t4(i4) on t4.i4 > t2ss.x;
+
+explain (verbose, costs off)
+select * from
+ (select k from
+ (select i, coalesce(i, j) as k from
+ (select i from t union all select 0)
+ join (select 1 as j limit 1) on i = j)
+ right join (select 2 as x) on true
+ join (select 3 as y) on i is not null
+ ),
+ lateral (select k as kl limit 1);
+
+rollback;
+
+-- PHVs containing SubLinks are quite tricky to get right
+explain (verbose, costs off)
+select *
+from int8_tbl i8
+ inner join
+ (select (select true) as x
+ from int4_tbl i4, lateral (select i4.f1 as y limit 1) ss1
+ where i4.f1 = 0) ss2 on true
+ right join (select false as z) ss3 on true,
+ lateral (select i8.q2 as q2l where x limit 1) ss4
+where i8.q2 = 123;
+
+explain (verbose, costs off)
+select *
+from int8_tbl i8
+ inner join
+ (select (select true) as x
+ from int4_tbl i4, lateral (select 1 as y limit 1) ss1
+ where i4.f1 = 0) ss2 on true
+ right join (select false as z) ss3 on true,
+ lateral (select i8.q2 as q2l where x limit 1) ss4
+where i8.q2 = 123;
+
-- Test proper handling of appendrel PHVs during useless-RTE removal
explain (costs off)
select * from
diff --git a/src/test/regress/sql/limit.sql b/src/test/regress/sql/limit.sql
index 6f0cda98701..603910fe6d1 100644
--- a/src/test/regress/sql/limit.sql
+++ b/src/test/regress/sql/limit.sql
@@ -196,6 +196,9 @@ CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995
ORDER BY thousand FETCH FIRST (NULL+1) ROWS WITH TIES;
\d+ limit_thousand_v_3
CREATE VIEW limit_thousand_v_4 AS SELECT thousand FROM onek WHERE thousand < 995
- ORDER BY thousand FETCH FIRST NULL ROWS ONLY;
+ ORDER BY thousand FETCH FIRST (5::bigint) ROWS WITH TIES;
\d+ limit_thousand_v_4
+CREATE VIEW limit_thousand_v_5 AS SELECT thousand FROM onek WHERE thousand < 995
+ ORDER BY thousand FETCH FIRST NULL ROWS ONLY;
+\d+ limit_thousand_v_5
-- leave these views
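[Editor's note] On the semantics being exercised: FETCH FIRST n ROWS WITH TIES requires ORDER BY and additionally returns any rows that tie with the last row of the ordered prefix. A minimal illustration, independent of the regression tables:

SELECT x FROM (VALUES (1), (2), (2), (3)) AS v(x)
ORDER BY x FETCH FIRST 2 ROWS WITH TIES;  -- returns 1, 2, 2: the tie is included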
diff --git a/src/test/regress/sql/merge.sql b/src/test/regress/sql/merge.sql
index f7a19c0e7dd..2660b19f238 100644
--- a/src/test/regress/sql/merge.sql
+++ b/src/test/regress/sql/merge.sql
@@ -1722,6 +1722,55 @@ WHEN MATCHED THEN DELETE;
SELECT * FROM new_measurement ORDER BY city_id, logdate;
+-- MERGE into inheritance root table
+DROP TRIGGER insert_measurement_trigger ON measurement;
+ALTER TABLE measurement ADD CONSTRAINT mcheck CHECK (city_id = 0) NO INHERIT;
+
+EXPLAIN (COSTS OFF)
+MERGE INTO measurement m
+ USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, 25, 100);
+
+BEGIN;
+MERGE INTO measurement m
+ USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, 25, 100);
+SELECT * FROM ONLY measurement ORDER BY city_id, logdate;
+ROLLBACK;
+
+ALTER TABLE measurement ENABLE ROW LEVEL SECURITY;
+ALTER TABLE measurement FORCE ROW LEVEL SECURITY;
+CREATE POLICY measurement_p ON measurement USING (peaktemp IS NOT NULL);
+
+MERGE INTO measurement m
+ USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, NULL, 100); -- should fail
+
+MERGE INTO measurement m
+ USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, 25, 100); -- ok
+SELECT * FROM ONLY measurement ORDER BY city_id, logdate;
+
+MERGE INTO measurement m
+ USING (VALUES (1, '01-18-2007'::date)) nm(city_id, logdate) ON
+ (m.city_id = nm.city_id and m.logdate=nm.logdate)
+WHEN NOT MATCHED THEN INSERT
+ (city_id, logdate, peaktemp, unitsales)
+ VALUES (city_id - 1, logdate, 25, 200)
+RETURNING merge_action(), m.*;
+
DROP TABLE measurement, new_measurement CASCADE;
DROP FUNCTION measurement_insert_trigger();
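[Editor's note] The final statement above uses RETURNING merge_action(), which reports for each returned row which clause fired (INSERT, UPDATE, or DELETE). A minimal sketch on a hypothetical table:

CREATE TABLE demo_m (id int, val text);
MERGE INTO demo_m t
  USING (VALUES (1, 'x')) s(id, val) ON t.id = s.id
WHEN NOT MATCHED THEN INSERT VALUES (s.id, s.val)
RETURNING merge_action(), t.*;  -- merge_action() is 'INSERT' here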
diff --git a/src/test/regress/sql/misc_functions.sql b/src/test/regress/sql/misc_functions.sql
index 5f9c77512d1..23792c4132a 100644
--- a/src/test/regress/sql/misc_functions.sql
+++ b/src/test/regress/sql/misc_functions.sql
@@ -400,9 +400,9 @@ SELECT pg_column_toast_chunk_id(a) IS NULL,
DROP TABLE test_chunk_id;
DROP FUNCTION explain_mask_costs(text, bool, bool, bool, bool);
--- test stratnum support functions
-SELECT gist_stratnum_common(7);
-SELECT gist_stratnum_common(3);
+-- test stratnum translation support functions
+SELECT gist_translate_cmptype_common(7);
+SELECT gist_translate_cmptype_common(3);
-- relpath tests
diff --git a/src/test/regress/sql/partition_join.sql b/src/test/regress/sql/partition_join.sql
index b76c5451001..30f15ee9acb 100644
--- a/src/test/regress/sql/partition_join.sql
+++ b/src/test/regress/sql/partition_join.sql
@@ -1224,6 +1224,9 @@ SELECT x.id, y.id FROM fract_t x LEFT JOIN fract_t y USING (id) ORDER BY x.id AS
EXPLAIN (COSTS OFF)
SELECT x.id, y.id FROM fract_t x LEFT JOIN fract_t y USING (id) ORDER BY x.id DESC LIMIT 10;
+EXPLAIN (COSTS OFF) -- Should use NestLoop with parameterised inner scan
+SELECT x.id, y.id FROM fract_t x LEFT JOIN fract_t y USING (id)
+ORDER BY x.id DESC LIMIT 2;
--
-- Test Append's fractional paths
diff --git a/src/test/regress/sql/partition_prune.sql b/src/test/regress/sql/partition_prune.sql
index f6db9479f54..d93c0c03bab 100644
--- a/src/test/regress/sql/partition_prune.sql
+++ b/src/test/regress/sql/partition_prune.sql
@@ -1371,12 +1371,12 @@ create view part_abc_view as select * from part_abc where b <> 'a' with check op
prepare update_part_abc_view as update part_abc_view set b = $2 where a = $1 returning *;
-- Only the unpruned partition should be shown in the list of relations to be
-- updated
-explain (costs off) execute update_part_abc_view (1, 'd');
+explain (verbose, costs off) execute update_part_abc_view (1, 'd');
execute update_part_abc_view (1, 'd');
-explain (costs off) execute update_part_abc_view (2, 'a');
+explain (verbose, costs off) execute update_part_abc_view (2, 'a');
execute update_part_abc_view (2, 'a');
-- All pruned.
-explain (costs off) execute update_part_abc_view (3, 'a');
+explain (verbose, costs off) execute update_part_abc_view (3, 'a');
execute update_part_abc_view (3, 'a');
deallocate update_part_abc_view;
diff --git a/src/test/regress/sql/psql.sql b/src/test/regress/sql/psql.sql
index 1a8a83462f0..e2e31245439 100644
--- a/src/test/regress/sql/psql.sql
+++ b/src/test/regress/sql/psql.sql
@@ -68,11 +68,11 @@ SELECT $1, $2 \parse stmt3
-- Multiple \g calls mean multiple executions
\bind_named stmt2 'foo3' \g \bind_named stmt3 'foo4' 'bar4' \g
--- \close (extended query protocol)
-\close
-\close ''
-\close stmt2
-\close stmt2
+-- \close_prepared (extended query protocol)
+\close_prepared
+\close_prepared ''
+\close_prepared stmt2
+\close_prepared stmt2
SELECT name, statement FROM pg_prepared_statements ORDER BY name;
-- \bind (extended query protocol)
@@ -1035,7 +1035,7 @@ select \if false \\ (bogus \else \\ 42 \endif \\ forty_two;
\C arg1
\c arg1 arg2 arg3 arg4
\cd arg1
- \close stmt1
+ \close_prepared stmt1
\conninfo
\copy arg1 arg2 arg3 arg4 arg5 arg6
\copyright
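[Editor's note] \close_prepared is the renamed form of the old \close metacommand; it sends an extended-query-protocol Close message for a named prepared statement. A short usage sketch following the pattern of the tests above (mystmt is a hypothetical name):

SELECT $1 \parse mystmt
\bind_named mystmt 'foo' \g
\close_prepared mystmt
SELECT name FROM pg_prepared_statements;  -- mystmt is no longer listed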
diff --git a/src/test/regress/sql/psql_pipeline.sql b/src/test/regress/sql/psql_pipeline.sql
index 16e1e1e84cd..6788dceee2e 100644
--- a/src/test/regress/sql/psql_pipeline.sql
+++ b/src/test/regress/sql/psql_pipeline.sql
@@ -105,106 +105,6 @@ INSERT INTO psql_pipeline VALUES ($1) \bind 1 \sendpipeline
COMMIT \bind \sendpipeline
\endpipeline
--- COPY FROM STDIN
--- with \sendpipeline and \bind
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\endpipeline
-2 test2
-\.
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\endpipeline
-20 test2
-\.
-
--- COPY FROM STDIN with \flushrequest + \getresults
--- with \sendpipeline and \bind
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\flushrequest
-\getresults
-3 test3
-\.
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\flushrequest
-\getresults
-30 test3
-\.
-\endpipeline
-
--- COPY FROM STDIN with \syncpipeline + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-COPY psql_pipeline FROM STDIN \bind \sendpipeline
-\syncpipeline
-\getresults
-4 test4
-\.
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-COPY psql_pipeline FROM STDIN;
-\syncpipeline
-\getresults
-40 test4
-\.
-\endpipeline
-
--- COPY TO STDOUT
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\endpipeline
-
--- COPY TO STDOUT with \flushrequest + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\flushrequest
-\getresults
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\flushrequest
-\getresults
-\endpipeline
-
--- COPY TO STDOUT with \syncpipeline + \getresults
--- with \bind and \sendpipeline
-\startpipeline
-SELECT $1 \bind 'val1' \sendpipeline
-copy psql_pipeline TO STDOUT \bind \sendpipeline
-\syncpipeline
-\getresults
-\endpipeline
--- with semicolon
-\startpipeline
-SELECT 'val1';
-copy psql_pipeline TO STDOUT;
-\syncpipeline
-\getresults
-\endpipeline
-
-- Use \parse and \bind_named
\startpipeline
SELECT $1 \parse ''
@@ -406,21 +306,21 @@ SELECT $1 \bind \sendpipeline
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
\flushrequest
\getresults
-- Pipeline is aborted.
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
-- Sync allows pipeline to recover.
\syncpipeline
\getresults
SELECT $1 \bind 1 \sendpipeline
SELECT $1 \parse a
\bind_named a 1 \sendpipeline
-\close a
+\close_prepared a
\flushrequest
\getresults
\endpipeline
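[Editor's note] With the COPY cases removed above, a minimal sketch of the basic flow the file still exercises: queue extended-protocol commands between \startpipeline and \endpipeline, sending each with \sendpipeline:

\startpipeline
SELECT $1 \bind 'val1' \sendpipeline
SELECT $1 \bind 'val2' \sendpipeline
\endpipeline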
diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/sql/publication.sql
index 68001de4000..c9e309190df 100644
--- a/src/test/regress/sql/publication.sql
+++ b/src/test/regress/sql/publication.sql
@@ -262,6 +262,9 @@ ALTER PUBLICATION testpub6 SET TABLES IN SCHEMA testpub_rf_schema2, TABLE testpu
RESET client_min_messages;
\dRp+ testpub6
-- fail - virtual generated column uses user-defined function
+-- (Actually, this already fails at CREATE TABLE rather than at CREATE
+-- PUBLICATION, but let's keep the test in case the former gets
+-- relaxed sometime.)
CREATE TABLE testpub_rf_tbl6 (id int PRIMARY KEY, x int, y int GENERATED ALWAYS AS (x * testpub_rf_func2()) VIRTUAL);
CREATE PUBLICATION testpub7 FOR TABLE testpub_rf_tbl6 WHERE (y > 100);
-- test that SET EXPRESSION is rejected, because it could affect a row filter
@@ -276,7 +279,7 @@ DROP TABLE testpub_rf_tbl2;
DROP TABLE testpub_rf_tbl3;
DROP TABLE testpub_rf_tbl4;
DROP TABLE testpub_rf_tbl5;
-DROP TABLE testpub_rf_tbl6;
+--DROP TABLE testpub_rf_tbl6;
DROP TABLE testpub_rf_schema1.testpub_rf_tbl5;
DROP TABLE testpub_rf_schema2.testpub_rf_tbl6;
DROP SCHEMA testpub_rf_schema1;
diff --git a/src/test/regress/sql/strings.sql b/src/test/regress/sql/strings.sql
index f7b325baadf..2577a42987d 100644
--- a/src/test/regress/sql/strings.sql
+++ b/src/test/regress/sql/strings.sql
@@ -197,6 +197,26 @@ SELECT 'abcd\efg' SIMILAR TO '_bcd\%' ESCAPE '' AS true;
SELECT 'abcdefg' SIMILAR TO '_bcd%' ESCAPE NULL AS null;
SELECT 'abcdefg' SIMILAR TO '_bcd#%' ESCAPE '##' AS error;
+-- Characters that should be left alone in character classes when a
+-- SIMILAR TO regexp pattern is converted to POSIX style.
+-- Underscore "_"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '_[_[:alpha:]_]_';
+-- Percentage "%"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '%[%[:alnum:]%]%';
+-- Dot "."
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '.[.[:alnum:].].';
+-- Dollar "$"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '$[$[:alnum:]$]$';
+-- Opening parenthesis "("
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '()[([:alnum:](]()';
+-- Caret "^"
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '^[^[:alnum:]^[^^][[^^]][\^][[\^]]\^]^';
+-- Closing square bracket "]" at the beginning of character class
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '[]%][^]%][^%]%';
+-- Closing square bracket is still effective after two carets at the
+-- beginning of the character class.
+EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '[^^]^';
+
-- Test backslash escapes in regexp_replace's replacement string
SELECT regexp_replace('1112223333', E'(\\d{3})(\\d{3})(\\d{4})', E'(\\1) \\2-\\3');
SELECT regexp_replace('foobarrbazz', E'(.)\\1', E'X\\&Y', 'g');
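[Editor's note] The point of the bracket-expression tests above: inside [...] the SIMILAR TO metacharacters are literals, so the rewrite to a POSIX regexp must leave them untouched there. For instance:

SELECT 'a%b' SIMILAR TO '_[%]_' AS true;  -- % is a literal inside the class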
diff --git a/src/test/regress/sql/sysviews.sql b/src/test/regress/sql/sysviews.sql
index d0917b6868e..66179f026b3 100644
--- a/src/test/regress/sql/sysviews.sql
+++ b/src/test/regress/sql/sysviews.sql
@@ -101,21 +101,3 @@ select count(distinct utc_offset) >= 24 as ok from pg_timezone_abbrevs;
-- One specific case we can check without much fear of breakage
-- is the historical local-mean-time value used for America/Los_Angeles.
select * from pg_timezone_abbrevs where abbrev = 'LMT';
-
-DO $$
-DECLARE
- bg_writer_pid int;
- r RECORD;
-BEGIN
- SELECT pid from pg_stat_activity where backend_type='background writer'
- INTO bg_writer_pid;
-
- select type, name, ident
- from pg_get_process_memory_contexts(bg_writer_pid, false, 20)
- where path = '{1}' into r;
- RAISE NOTICE '%', r;
- select type, name, ident
- from pg_get_process_memory_contexts(pg_backend_pid(), false, 20)
- where path = '{1}' into r;
- RAISE NOTICE '%', r;
-END $$;
diff --git a/src/test/regress/sql/triggers.sql b/src/test/regress/sql/triggers.sql
index d3d242dd29b..9ffd318385f 100644
--- a/src/test/regress/sql/triggers.sql
+++ b/src/test/regress/sql/triggers.sql
@@ -2701,8 +2701,8 @@ drop function f();
-- Test who runs deferred trigger functions
-- setup
-create role regress_groot;
-create role regress_outis;
+create role regress_caller;
+create role regress_fn_owner;
create function whoami() returns trigger language plpgsql
as $$
begin
@@ -2710,7 +2710,7 @@ begin
return null;
end;
$$;
-alter function whoami() owner to regress_outis;
+alter function whoami() owner to regress_fn_owner;
create table defer_trig (id integer);
grant insert on defer_trig to public;
@@ -2721,10 +2721,10 @@ create constraint trigger whoami after insert on defer_trig
-- deferred triggers must run as the user that queued the trigger
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (1);
reset role;
-set role regress_outis;
+set role regress_fn_owner;
insert into defer_trig values (2);
reset role;
commit;
@@ -2732,7 +2732,7 @@ commit;
-- security definer functions override the user who queued the trigger
alter function whoami() security definer;
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (3);
reset role;
commit;
@@ -2749,7 +2749,7 @@ end;
$$;
begin;
-set role regress_groot;
+set role regress_caller;
insert into defer_trig values (4);
reset role;
commit; -- error expected
@@ -2758,5 +2758,5 @@ select current_user = session_user;
-- clean up
drop table defer_trig;
drop function whoami();
-drop role regress_outis;
-drop role regress_groot;
+drop role regress_fn_owner;
+drop role regress_caller;
diff --git a/src/test/ssl/t/SSL/Server.pm b/src/test/ssl/t/SSL/Server.pm
index 33975b28e8c..efbd0dafaf6 100644
--- a/src/test/ssl/t/SSL/Server.pm
+++ b/src/test/ssl/t/SSL/Server.pm
@@ -200,7 +200,7 @@ sub configure_test_server_for_ssl
$node->append_conf(
'postgresql.conf', <<EOF
fsync=off
-log_connections=on
+log_connections=all
log_hostname=on
listen_addresses='$serverhost'
log_statement=all
@@ -318,7 +318,8 @@ sub switch_server_cert
$node->append_conf('sslconfig.conf', "ssl=on");
$node->append_conf('sslconfig.conf', $backend->set_server_cert(\%params));
# use lists of ECDH curves and cipher suites for syntax testing
- $node->append_conf('sslconfig.conf', 'ssl_groups=X25519:prime256v1:secp521r1');
+ $node->append_conf('sslconfig.conf',
+ 'ssl_groups=X25519:prime256v1:secp521r1');
$node->append_conf('sslconfig.conf',
'ssl_tls13_ciphers=TLS_AES_256_GCM_SHA384:TLS_AES_128_GCM_SHA256');
diff --git a/src/test/subscription/t/007_ddl.pl b/src/test/subscription/t/007_ddl.pl
index 7d12bcbddb6..2a45fb13739 100644
--- a/src/test/subscription/t/007_ddl.pl
+++ b/src/test/subscription/t/007_ddl.pl
@@ -70,7 +70,8 @@ ok( $stderr =~
);
# Cleanup
-$node_publisher->safe_psql('postgres', qq[
+$node_publisher->safe_psql(
+ 'postgres', qq[
DROP PUBLICATION mypub;
SELECT pg_drop_replication_slot('mysub');
]);
@@ -86,32 +87,38 @@ sub test_swap
my ($table_name, $pubname, $appname) = @_;
# Confirms tuples can be replicated
- $node_publisher->safe_psql('postgres', "INSERT INTO $table_name VALUES (1);");
+ $node_publisher->safe_psql('postgres',
+ "INSERT INTO $table_name VALUES (1);");
$node_publisher->wait_for_catchup($appname);
my $result =
- $node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name");
- is($result, qq(1), 'check replication worked well before renaming a publication');
+ $node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name");
+ is($result, qq(1),
+ 'check replication worked well before renaming a publication');
# Swap the name of publications; $pubname <-> pub_empty
- $node_publisher->safe_psql('postgres', qq[
+ $node_publisher->safe_psql(
+ 'postgres', qq[
ALTER PUBLICATION $pubname RENAME TO tap_pub_tmp;
ALTER PUBLICATION pub_empty RENAME TO $pubname;
ALTER PUBLICATION tap_pub_tmp RENAME TO pub_empty;
]);
# Insert the data again
- $node_publisher->safe_psql('postgres', "INSERT INTO $table_name VALUES (2);");
+ $node_publisher->safe_psql('postgres',
+ "INSERT INTO $table_name VALUES (2);");
$node_publisher->wait_for_catchup($appname);
# Confirms the second tuple won't be replicated because $pubname does not
# contain relations anymore.
$result =
- $node_subscriber->safe_psql('postgres', "SELECT a FROM $table_name ORDER BY a");
+ $node_subscriber->safe_psql('postgres',
+ "SELECT a FROM $table_name ORDER BY a");
is($result, qq(1),
'check the tuple inserted after the RENAME was not replicated');
# Restore the name of publications because it can be called several times
- $node_publisher->safe_psql('postgres', qq[
+ $node_publisher->safe_psql(
+ 'postgres', qq[
ALTER PUBLICATION $pubname RENAME TO tap_pub_tmp;
ALTER PUBLICATION pub_empty RENAME TO $pubname;
ALTER PUBLICATION tap_pub_tmp RENAME TO pub_empty;
@@ -124,7 +131,8 @@ $node_publisher->safe_psql('postgres', $ddl);
$node_subscriber->safe_psql('postgres', $ddl);
# Create publications and a subscription
-$node_publisher->safe_psql('postgres', qq[
+$node_publisher->safe_psql(
+ 'postgres', qq[
CREATE PUBLICATION pub_empty;
CREATE PUBLICATION pub_for_tab FOR TABLE test1;
CREATE PUBLICATION pub_for_all_tables FOR ALL TABLES;
@@ -139,19 +147,20 @@ test_swap('test1', 'pub_for_tab', 'tap_sub');
# Switches a publication which includes all tables
$node_subscriber->safe_psql('postgres',
- "ALTER SUBSCRIPTION tap_sub SET PUBLICATION pub_for_all_tables;"
-);
+ "ALTER SUBSCRIPTION tap_sub SET PUBLICATION pub_for_all_tables;");
$node_subscriber->wait_for_subscription_sync($node_publisher, 'tap_sub');
# Confirms RENAME command works well for ALL TABLES publication
test_swap('test2', 'pub_for_all_tables', 'tap_sub');
# Cleanup
-$node_publisher->safe_psql('postgres', qq[
+$node_publisher->safe_psql(
+ 'postgres', qq[
DROP PUBLICATION pub_empty, pub_for_tab, pub_for_all_tables;
DROP TABLE test1, test2;
]);
-$node_subscriber->safe_psql('postgres', qq[
+$node_subscriber->safe_psql(
+ 'postgres', qq[
DROP SUBSCRIPTION tap_sub;
DROP TABLE test1, test2;
]);
diff --git a/src/test/subscription/t/013_partition.pl b/src/test/subscription/t/013_partition.pl
index 61b0cb4aa1a..4f78dd48815 100644
--- a/src/test/subscription/t/013_partition.pl
+++ b/src/test/subscription/t/013_partition.pl
@@ -51,8 +51,7 @@ $node_subscriber1->safe_psql('postgres',
);
# make a BRIN index to test aminsertcleanup logic in subscriber
$node_subscriber1->safe_psql('postgres',
- "CREATE INDEX tab1_c_brin_idx ON tab1 USING brin (c)"
-);
+ "CREATE INDEX tab1_c_brin_idx ON tab1 USING brin (c)");
$node_subscriber1->safe_psql('postgres',
"CREATE TABLE tab1_1 (b text, c text DEFAULT 'sub1_tab1', a int NOT NULL)"
);
diff --git a/src/test/subscription/t/021_twophase.pl b/src/test/subscription/t/021_twophase.pl
index 61c427aed21..b8e4242d1f1 100644
--- a/src/test/subscription/t/021_twophase.pl
+++ b/src/test/subscription/t/021_twophase.pl
@@ -373,7 +373,14 @@ $result =
$node_publisher->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
is($result, qq(6), 'publisher inserted data');
+# Wait for both subscribers to catch up
$node_publisher->wait_for_catchup($appname_copy);
+$node_publisher->wait_for_catchup($appname);
+
+# Make sure there are no prepared transactions on the subscriber
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_prepared_xacts;");
+is($result, qq(0), 'should be no prepared transactions on subscriber');
$result =
$node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_copy;");
diff --git a/src/test/subscription/t/024_add_drop_pub.pl b/src/test/subscription/t/024_add_drop_pub.pl
index e995d8b3839..b396abe5599 100644
--- a/src/test/subscription/t/024_add_drop_pub.pl
+++ b/src/test/subscription/t/024_add_drop_pub.pl
@@ -108,11 +108,12 @@ $node_publisher->poll_query_until('postgres',
my $offset = -s $node_publisher->logfile;
-$node_publisher->safe_psql('postgres',"INSERT INTO tab_3 values(1)");
+$node_publisher->safe_psql('postgres', "INSERT INTO tab_3 values(1)");
# Verify that a warning is logged.
$node_publisher->wait_for_log(
- qr/WARNING: ( [A-Z0-9]+:)? skipped loading publication: tap_pub_3/, $offset);
+ qr/WARNING: ( [A-Z0-9]+:)? skipped loading publication "tap_pub_3"/,
+ $offset);
$node_publisher->safe_psql('postgres',
"CREATE PUBLICATION tap_pub_3 FOR TABLE tab_3");
@@ -128,10 +129,11 @@ $node_publisher->wait_for_catchup('tap_sub');
# Verify that the insert operation gets replicated to subscriber after
# publication is created.
-$result = $node_subscriber->safe_psql('postgres',
- "SELECT * FROM tab_3");
-is($result, qq(1
-2), 'check that the incremental data is replicated after the publication is created');
+$result = $node_subscriber->safe_psql('postgres', "SELECT * FROM tab_3");
+is( $result, qq(1
+2),
+ 'check that the incremental data is replicated after the publication is created'
+);
# shutdown
$node_subscriber->stop('fast');
diff --git a/src/test/subscription/t/035_conflicts.pl b/src/test/subscription/t/035_conflicts.pl
index 2a7a8239a29..d78a6bac16a 100644
--- a/src/test/subscription/t/035_conflicts.pl
+++ b/src/test/subscription/t/035_conflicts.pl
@@ -26,7 +26,8 @@ $node_publisher->safe_psql('postgres',
"CREATE TABLE conf_tab (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);");
$node_publisher->safe_psql('postgres',
- "CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);");
+ "CREATE TABLE conf_tab_2 (a int PRIMARY KEY, b int UNIQUE, c int UNIQUE);"
+);
# Create same table on subscriber
$node_subscriber->safe_psql('postgres',
diff --git a/src/tools/RELEASE_CHANGES b/src/tools/RELEASE_CHANGES
index 94c5a0f3bfb..c0d75c213be 100644
--- a/src/tools/RELEASE_CHANGES
+++ b/src/tools/RELEASE_CHANGES
@@ -89,6 +89,9 @@ Starting a New Development Cycle
* Typically, we do pgindent and perltidy runs just before branching,
as well as before beta (complete steps from src/tools/pgindent/README)
+* It's also advisable to check that copyright years are up-to-date
+ (run src/tools/copyright.pl, commit any changes it finds)
+
* Create a branch in git for maintenance of the previous release
o on master branch, do:
git pull # be sure you have the latest "master"
diff --git a/src/tools/ci/pg_ci_base.conf b/src/tools/ci/pg_ci_base.conf
index d8faa9c26c1..695e0a0d6ec 100644
--- a/src/tools/ci/pg_ci_base.conf
+++ b/src/tools/ci/pg_ci_base.conf
@@ -8,7 +8,7 @@ max_prepared_transactions = 10
# Settings that make logs more useful
log_autovacuum_min_duration = 0
log_checkpoints = true
-log_connections = true
+log_connections = all
log_disconnections = true
-log_line_prefix = '%m [%p][%b] %q[%a][%v:%x] '
+log_line_prefix = '%m %b[%p] %q%a '
log_lock_waits = true
diff --git a/src/tools/git_changelog b/src/tools/git_changelog
index b8bd874f208..c25e399a87f 100755
--- a/src/tools/git_changelog
+++ b/src/tools/git_changelog
@@ -59,6 +59,7 @@ require IPC::Open2;
# (We could get this from "git branches", but not worth the trouble.)
# NB: master must be first!
my @BRANCHES = qw(master
+ REL_18_STABLE
REL_17_STABLE REL_16_STABLE REL_15_STABLE REL_14_STABLE REL_13_STABLE
REL_12_STABLE REL_11_STABLE REL_10_STABLE REL9_6_STABLE REL9_5_STABLE
REL9_4_STABLE REL9_3_STABLE REL9_2_STABLE REL9_1_STABLE REL9_0_STABLE
diff --git a/src/tools/pgflex b/src/tools/pgflex
index 3986b06874e..b8d9aa0086f 100755
--- a/src/tools/pgflex
+++ b/src/tools/pgflex
@@ -48,7 +48,7 @@ os.chdir(args.privatedir)
# contents. Set FLEX_TMP_DIR to the target private directory to avoid
# that. That environment variable isn't consulted on other platforms, so we
# don't even need to make this conditional.
-env = {'FLEX_TMP_DIR': args.privatedir}
+os.environ['FLEX_TMP_DIR'] = args.privatedir
# build flex invocation
command = [args.flex, '-o', args.output_file]
@@ -58,7 +58,7 @@ command += args.flex_flags
command += [args.input_file]
# create .c file from .l file
-sp = subprocess.run(command, env=env)
+sp = subprocess.run(command)
if sp.returncode != 0:
sys.exit(sp.returncode)
diff --git a/src/tools/pgindent/pgindent b/src/tools/pgindent/pgindent
index 54e138b598d..b7d71808924 100755
--- a/src/tools/pgindent/pgindent
+++ b/src/tools/pgindent/pgindent
@@ -73,11 +73,14 @@ if ($sourcedir)
# might make them so. For the moment we just hardwire a list of names
# to add and a list of names to exclude; eventually this may need to be
# easier to configure. Note that the typedefs need trailing newlines.
-my @additional = ("bool\n");
+my @additional = map { "$_\n" } qw(
+ bool regex_t regmatch_t regoff
+);
my %excluded = map { +"$_\n" => 1 } qw(
- ANY FD_SET U abs allocfunc boolean date digit ilist interval iterator other
- pointer printfunc reference string timestamp type wrap
+ FD_SET LookupSet boolean date duration
+ element_type inquiry iterator other
+ pointer reference rep string timestamp type wrap
);
# globals
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 9ea573fae21..32d6e718adc 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -6,6 +6,7 @@ ASN1_INTEGER
ASN1_OBJECT
ASN1_OCTET_STRING
ASN1_STRING
+ATAlterConstraint
AV
A_ArrayExpr
A_Const
@@ -47,7 +48,6 @@ AggSplit
AggState
AggStatePerAgg
AggStatePerGroup
-AggStatePerGroupData
AggStatePerHash
AggStatePerPhase
AggStatePerTrans
@@ -161,7 +161,6 @@ ArrayType
AsyncQueueControl
AsyncQueueEntry
AsyncRequest
-ATAlterConstraint
AttInMetadata
AttStatsSlot
AttoptCacheEntry
@@ -174,8 +173,8 @@ AttrNumber
AttributeOpts
AuthRequest
AuthToken
-AutoPrewarmSharedState
AutoPrewarmReadStreamData
+AutoPrewarmSharedState
AutoVacOpts
AutoVacuumShmemStruct
AutoVacuumWorkItem
@@ -222,7 +221,6 @@ BTScanInsertData
BTScanKeyPreproc
BTScanOpaque
BTScanOpaqueData
-BTScanPos
BTScanPosData
BTScanPosItem
BTShared
@@ -270,8 +268,8 @@ BitmapAndPath
BitmapAndState
BitmapHeapPath
BitmapHeapScan
-BitmapHeapScanInstrumentation
BitmapHeapScanDesc
+BitmapHeapScanInstrumentation
BitmapHeapScanState
BitmapIndexScan
BitmapIndexScanState
@@ -341,8 +339,8 @@ BufFile
Buffer
BufferAccessStrategy
BufferAccessStrategyType
-BufferCacheNumaRec
BufferCacheNumaContext
+BufferCacheNumaRec
BufferCachePagesContext
BufferCachePagesRec
BufferDesc
@@ -382,6 +380,9 @@ CTEMaterialize
CTESearchClause
CURL
CURLM
+CURLMcode
+CURLMsg
+CURLcode
CURLoption
CV
CachedExpression
@@ -628,6 +629,7 @@ DefElem
DefElemAction
DefaultACLInfo
DefineStmt
+DefnDumperPtr
DeleteStmt
DependencyGenerator
DependencyGeneratorData
@@ -677,9 +679,8 @@ DumpableObjectType
DumpableObjectWithAcl
DynamicFileList
DynamicZoneAbbrev
-EC_KEY
-ECDerivesKey
ECDerivesEntry
+ECDerivesKey
EDGE
ENGINE
EOM_flatten_into_method
@@ -761,10 +762,12 @@ ExpandedRange
ExpandedRecordFieldInfo
ExpandedRecordHeader
ExplainDirectModify_function
+ExplainExtensionOption
ExplainForeignModify_function
ExplainForeignScan_function
ExplainFormat
ExplainOneQuery_hook_type
+ExplainOptionHandler
ExplainSerializeOption
ExplainState
ExplainStmt
@@ -792,6 +795,7 @@ FDWCollateState
FD_SET
FILE
FILETIME
+FPI
FSMAddress
FSMPage
FSMPageData
@@ -806,7 +810,6 @@ FieldSelect
FieldStore
File
FileBackupMethod
-FileCopyMethod
FileFdwExecutionState
FileFdwPlanState
FileNameMap
@@ -1190,6 +1193,7 @@ HeapCheckContext
HeapCheckReadStreamData
HeapPageFreeze
HeapScanDesc
+HeapScanDescData
HeapTuple
HeapTupleData
HeapTupleFields
@@ -1249,6 +1253,7 @@ IndexClause
IndexClauseSet
IndexDeleteCounts
IndexDeletePrefetchState
+IndexDoCheckCallback
IndexElem
IndexFetchHeapData
IndexFetchTableData
@@ -1279,13 +1284,14 @@ InheritableSocket
InitSampleScan_function
InitializeDSMForeignScan_function
InitializeWorkerForeignScan_function
+InjIoErrorState
InjectionPointCacheEntry
InjectionPointCallback
InjectionPointCondition
InjectionPointConditionType
InjectionPointEntry
-InjectionPointsCtl
InjectionPointSharedState
+InjectionPointsCtl
InlineCodeBlock
InsertStmt
Instrumentation
@@ -1302,7 +1308,6 @@ IntoClause
InvalMessageArray
InvalidationInfo
InvalidationMsgsGroup
-IoMethod
IoMethodOps
IpcMemoryId
IpcMemoryKey
@@ -1492,8 +1497,7 @@ LLVMOrcResourceTrackerRef
LLVMOrcSymbolStringPoolRef
LLVMOrcThreadSafeContextRef
LLVMOrcThreadSafeModuleRef
-LLVMPassManagerBuilderRef
-LLVMPassManagerRef
+LLVMPassBuilderOptionsRef
LLVMTargetMachineRef
LLVMTargetRef
LLVMTypeRef
@@ -1563,6 +1567,7 @@ LoadStmt
LocalBufferLookupEnt
LocalPgBackendStatus
LocalTransactionId
+Location
LocationIndex
LocationLen
LockAcquireResult
@@ -1582,7 +1587,6 @@ LockTupleMode
LockViewRecurse_context
LockWaitPolicy
LockingClause
-LogConnectionOption
LogOpts
LogStmtLevel
LogicalDecodeBeginCB
@@ -1633,6 +1637,7 @@ LogicalSlotInfo
LogicalSlotInfoArr
LogicalTape
LogicalTapeSet
+LookupSet
LsnReadQueue
LsnReadQueueNextFun
LsnReadQueueNextStatus
@@ -1657,8 +1662,8 @@ ManyTestResourceKind
Material
MaterialPath
MaterialState
-MdfdVec
MdPathStr
+MdfdVec
Memoize
MemoizeEntry
MemoizeInstrumentation
@@ -1672,12 +1677,9 @@ MemoryContextCallback
MemoryContextCallbackFunction
MemoryContextCounters
MemoryContextData
+MemoryContextId
MemoryContextMethodID
MemoryContextMethods
-MemoryStatsBackendState
-MemoryStatsContextId
-MemoryStatsCtl
-MemoryStatsEntry
MemoryStatsPrintFunc
MergeAction
MergeActionState
@@ -1769,6 +1771,7 @@ NumericSortSupport
NumericSumAccum
NumericVar
OAuthValidatorCallbacks
+OAuthValidatorModuleInit
OM_uint32
OP
OSAPerGroupState
@@ -1838,7 +1841,6 @@ PGCALL2
PGCRYPTO_SHA_t
PGChecksummablePage
PGContextVisibility
-PGErrorVerbosity
PGEvent
PGEventConnDestroy
PGEventConnReset
@@ -1908,7 +1910,6 @@ PLpgSQL_exception
PLpgSQL_exception_block
PLpgSQL_execstate
PLpgSQL_expr
-PLpgSQL_func_hashkey
PLpgSQL_function
PLpgSQL_getdiag_kind
PLpgSQL_if_elsif
@@ -2159,10 +2160,10 @@ PermutationStepBlockerType
PgAioBackend
PgAioCtl
PgAioHandle
-PgAioHandleCallbackID
-PgAioHandleCallbackStage
PgAioHandleCallbackComplete
+PgAioHandleCallbackID
PgAioHandleCallbackReport
+PgAioHandleCallbackStage
PgAioHandleCallbacks
PgAioHandleCallbacksEntry
PgAioHandleFlags
@@ -2207,9 +2208,9 @@ PgStatShared_Common
PgStatShared_Database
PgStatShared_Function
PgStatShared_HashEntry
+PgStatShared_IO
PgStatShared_InjectionPoint
PgStatShared_InjectionPointFixed
-PgStatShared_IO
PgStatShared_Relation
PgStatShared_ReplSlot
PgStatShared_SLRU
@@ -2230,7 +2231,6 @@ PgStat_FunctionCallUsage
PgStat_FunctionCounts
PgStat_HashKey
PgStat_IO
-PgStat_Kind
PgStat_KindInfo
PgStat_LocalState
PgStat_PendingDroppedStatsItem
@@ -2358,12 +2358,12 @@ PushFilter
PushFilterOps
PushFunction
PyCFunction
-PyMappingMethods
PyMethodDef
PyModuleDef
PyObject
-PySequenceMethods
PyTypeObject
+PyType_Slot
+PyType_Spec
Py_ssize_t
QPRS_STATE
QTN2QTState
@@ -2477,6 +2477,7 @@ RelOptInfo
RelOptKind
RelPathStr
RelStatsInfo
+RelSyncCallbackFunction
RelToCheck
RelToCluster
RelabelType
@@ -2629,7 +2630,6 @@ SQLDropObject
SQLFunctionCache
SQLFunctionCachePtr
SQLFunctionHashEntry
-SQLFunctionLink
SQLFunctionParseInfo
SQLFunctionParseInfoPtr
SQLValueFunction
@@ -2641,6 +2641,7 @@ STARTUPINFO
STRLEN
SV
SYNCHRONIZATION_BARRIER
+SYSTEM_INFO
SampleScan
SampleScanGetSampleSize_function
SampleScanState
@@ -2728,6 +2729,7 @@ SharedIncrementalSortInfo
SharedIndexScanInstrumentation
SharedInvalCatalogMsg
SharedInvalCatcacheMsg
+SharedInvalRelSyncMsg
SharedInvalRelcacheMsg
SharedInvalRelmapMsg
SharedInvalSmgrMsg
@@ -2767,7 +2769,7 @@ SingleBoundSortItem
Size
SkipPages
SkipSupport
-SkipSupportData
+SkipSupportIncDec
SlabBlock
SlabContext
SlabSlot
@@ -2993,6 +2995,7 @@ TarMethodData
TarMethodFile
TargetEntry
TclExceptionNameMap
+Tcl_CmdInfo
Tcl_DString
Tcl_FileProc
Tcl_HashEntry
@@ -3000,6 +3003,7 @@ Tcl_HashTable
Tcl_Interp
Tcl_NotifierProcs
Tcl_Obj
+Tcl_Size
Tcl_Time
TempNamespaceStatus
TestDSMRegistryStruct
@@ -3145,6 +3149,7 @@ UnicodeNormalizationQC
Unique
UniquePath
UniquePathMethod
+UniqueRelInfo
UniqueState
UnlistenStmt
UnresolvedTup
@@ -3175,8 +3180,11 @@ VacuumRelation
VacuumStmt
ValidIOData
ValidateIndexState
-ValidatorModuleState
ValidatorModuleResult
+ValidatorModuleState
+ValidatorShutdownCB
+ValidatorStartupCB
+ValidatorValidateCB
ValuesScan
ValuesScanState
Var
@@ -3381,10 +3389,9 @@ _resultmap
_stringlist
access_vector_t
acquireLocksOnSubLinks_context
-add_nulling_relids_context
addFkConstraintSides
+add_nulling_relids_context
adjust_appendrel_attrs_context
-allocfunc
amadjustmembers_function
ambeginscan_function
ambuild_function
@@ -3396,6 +3403,7 @@ amcostestimate_function
amendscan_function
amestimateparallelscan_function
amgetbitmap_function
+amgettreeheight_function
amgettuple_function
aminitparallelscan_function
aminsert_function
@@ -3406,13 +3414,27 @@ amparallelrescan_function
amproperty_function
amrescan_function
amrestrpos_function
-amtranslate_strategy_function amtranslatestrategy;
-amtranslate_cmptype_function amtranslatecmptype;
+amtranslate_cmptype_function
+amtranslate_strategy_function
amvacuumcleanup_function
amvalidate_function
array_iter
array_unnest_fctx
assign_collations_context
+astreamer
+astreamer_archive_context
+astreamer_extractor
+astreamer_gzip_decompressor
+astreamer_gzip_writer
+astreamer_lz4_frame
+astreamer_member
+astreamer_ops
+astreamer_plain_writer
+astreamer_recovery_injector
+astreamer_tar_archiver
+astreamer_tar_parser
+astreamer_verify
+astreamer_zstd_frame
auth_password_hook_typ
autovac_table
av_relation
@@ -3439,20 +3461,6 @@ bbsink_shell
bbsink_state
bbsink_throttle
bbsink_zstd
-astreamer
-astreamer_archive_context
-astreamer_extractor
-astreamer_gzip_decompressor
-astreamer_gzip_writer
-astreamer_lz4_frame
-astreamer_member
-astreamer_ops
-astreamer_plain_writer
-astreamer_recovery_injector
-astreamer_tar_archiver
-astreamer_tar_parser
-astreamer_verify
-astreamer_zstd_frame
bgworker_main_type
bh_node_type
binaryheap
@@ -3492,6 +3500,13 @@ colormaprange
compare_context
config_handle
config_var_value
+conn_errorMessage_func
+conn_oauth_client_id_func
+conn_oauth_client_secret_func
+conn_oauth_discovery_uri_func
+conn_oauth_issuer_id_func
+conn_oauth_scope_func
+conn_sasl_state_func
contain_aggs_of_level_context
contain_placeholder_references_context
convert_testexpr_context
@@ -3508,6 +3523,9 @@ create_upper_paths_hook_type
createdb_failure_params
crosstab_HashEnt
crosstab_cat_desc
+curl_infotype
+curl_socket_t
+curl_version_info_data
datapagemap_iterator_t
datapagemap_t
dateKEY
@@ -3519,9 +3537,8 @@ deparse_columns
deparse_context
deparse_expr_cxt
deparse_namespace
-destructor
+derives_hash
dev_t
-digit
disassembledLeaf
dlist_head
dlist_iter
@@ -3559,18 +3576,23 @@ dsm_handle
dsm_op
dsm_segment
dsm_segment_detach_callback
+duration
eLogType
ean13
eary
ec_matches_callback_type
ec_member_foreign_arg
ec_member_matches_arg
+element_type
emit_log_hook_type
eval_const_expressions_context
exec_thread_arg
execution_state
exit_function
explain_get_index_name_hook_type
+explain_per_node_hook_type
+explain_per_plan_hook_type
+explain_validate_options_hook_type
f_smgr
fasthash_state
fd_set
@@ -3653,7 +3675,6 @@ gss_key_value_set_desc
gss_name_t
gtrgm_consistent_cache
gzFile
-hashfunc
hbaPort
heap_page_items_state
help_handler
@@ -3675,17 +3696,21 @@ init_function
inline_cte_walker_context
inline_error_callback_arg
ino_t
+inquiry
instr_time
int128
int16
int16KEY
+int16_t
int2vector
int32
int32KEY
int32_t
int64
int64KEY
+int64_t
int8
+int8_t
int8x16_t
internalPQconninfoOption
intptr_t
@@ -3717,6 +3742,7 @@ lclContext
lclTocEntry
leafSegmentInfo
leaf_item
+libpq_gettext_func
libpq_source
line_t
lineno_t
@@ -3773,6 +3799,7 @@ mxact
mxtruncinfo
needs_fmgr_hook_type
network_sortsupport_state
+nl_item
nodeitem
normal_rand_fctx
nsphash_hash
@@ -3790,6 +3817,7 @@ openssl_tls_init_hook_typ
ossl_EVP_cipher_func
other
output_type
+overexplain_options
pagetable_hash
pagetable_iterator
pairingheap
@@ -3809,7 +3837,6 @@ pg_atomic_flag
pg_atomic_uint32
pg_atomic_uint64
pg_be_sasl_mech
-pg_case_map
pg_category_range
pg_checksum_context
pg_checksum_raw_context
@@ -3833,7 +3860,6 @@ pg_funcptr_t
pg_gssinfo
pg_hmac_ctx
pg_hmac_errno
-pg_int64
pg_local_to_utf_combined
pg_locale_t
pg_mb_radix_tree
@@ -3902,7 +3928,8 @@ plperl_query_entry
plpgsql_CastExprHashEntry
plpgsql_CastHashEntry
plpgsql_CastHashKey
-plpgsql_HashEnt
+plpgsql_expr_walker_callback
+plpgsql_stmt_walker_callback
pltcl_call_state
pltcl_interp_desc
pltcl_proc_desc
@@ -3925,7 +3952,6 @@ printTextLineFormat
printTextLineWrap
printTextRule
printXheaderWidthType
-printfunc
priv_map
process_file_callback_t
process_sublinks_context
@@ -3965,12 +3991,9 @@ reduce_outer_joins_pass1_state
reduce_outer_joins_pass2_state
reference
regex_arc_t
-regex_t
regexp
regexp_matches_ctx
registered_buffer
-regmatch_t
-regoff_t
regproc
relopt_bool
relopt_enum
@@ -3989,6 +4012,7 @@ remoteConnHashEnt
remoteDep
remove_nulling_relids_context
rendezvousHashEntry
+rep
replace_rte_variables_callback
replace_rte_variables_context
report_error_fn
@@ -4007,6 +4031,7 @@ rt_node_class_test_elem
rt_radix_tree
saophash_hash
save_buffer
+save_locale_t
scram_state
scram_state_enum
script_error_callback_arg
@@ -4014,6 +4039,8 @@ security_class_t
sem_t
sepgsql_context_info_t
sequence_magic
+set_conn_altsock_func
+set_conn_oauth_token_func
set_join_pathlist_hook_type
set_rel_pathlist_hook_type
shared_ts_iter
@@ -4134,6 +4161,7 @@ uint32_t
uint32x4_t
uint64
uint64_t
+uint64x2_t
uint8
uint8_t
uint8x16_t
@@ -4143,7 +4171,6 @@ unicodeStyleColumnFormat
unicodeStyleFormat
unicodeStyleRowFormat
unicode_linestyle
-UniqueRelInfo
unit_conversion
unlogged_relation_entry
utf_local_conversion_func
@@ -4286,6 +4313,7 @@ xmlGenericErrorFunc
xmlNodePtr
xmlNodeSetPtr
xmlParserCtxtPtr
+xmlParserErrors
xmlParserInputPtr
xmlSaveCtxt
xmlSaveCtxtPtr
@@ -4306,6 +4334,3 @@ yyscan_t
z_stream
z_streamp
zic_t
-ExplainExtensionOption
-ExplainOptionHandler
-overexplain_options
diff --git a/src/tools/version_stamp.pl b/src/tools/version_stamp.pl
index c3509474d83..a9d2d0910f3 100755
--- a/src/tools/version_stamp.pl
+++ b/src/tools/version_stamp.pl
@@ -25,7 +25,7 @@ use warnings FATAL => 'all';
# Major version is hard-wired into the script. We update it when we branch
# a new development version.
-my $majorversion = 18;
+my $majorversion = 19;
# Validate argument and compute derived variables
my $minor = shift;