-rw-r--r--  .git-blame-ignore-revs | 3
-rw-r--r--  config/llvm.m4 | 4
-rw-r--r--  config/programs.m4 | 4
-rwxr-xr-x  configure | 87
-rw-r--r--  configure.ac | 67
-rw-r--r--  contrib/postgres_fdw/connection.c | 39
-rw-r--r--  contrib/postgres_fdw/postgres_fdw.c | 46
-rw-r--r--  contrib/postgres_fdw/postgres_fdw.h | 6
-rw-r--r--  doc/src/sgml/func.sgml | 7
-rw-r--r--  doc/src/sgml/pageinspect.sgml | 2
-rw-r--r--  meson.build | 5
-rw-r--r--  src/Makefile.global.in | 2
-rw-r--r--  src/backend/commands/explain.c | 21
-rw-r--r--  src/backend/jit/llvm/Makefile | 2
-rw-r--r--  src/backend/optimizer/path/costsize.c | 18
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 15
-rw-r--r--  src/backend/optimizer/util/pathnode.c | 11
-rw-r--r--  src/backend/replication/syncrep_scanner.l | 11
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 13
-rw-r--r--  src/backend/storage/ipc/procsignal.c | 6
-rw-r--r--  src/backend/utils/adt/tid.c | 2
-rw-r--r--  src/backend/utils/adt/xml.c | 74
-rw-r--r--  src/bin/initdb/Makefile | 2
-rw-r--r--  src/common/Makefile | 2
-rw-r--r--  src/include/nodes/pathnodes.h | 4
-rw-r--r--  src/include/nodes/plannodes.h | 10
-rw-r--r--  src/include/optimizer/pathnode.h | 2
-rw-r--r--  src/interfaces/ecpg/ecpglib/prepare.c | 9
-rw-r--r--  src/interfaces/libpq-oauth/Makefile | 2
-rw-r--r--  src/interfaces/libpq/Makefile | 2
-rw-r--r--  src/pl/plpython/Makefile | 2
-rw-r--r--  src/pl/tcl/Makefile | 2
-rw-r--r--  src/test/perl/PostgreSQL/Test/Cluster.pm | 1
-rw-r--r--  src/test/recovery/t/013_crash_restart.pl | 7
34 files changed, 286 insertions, 204 deletions
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index f8526d4d1a9..f83e2fc6586 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -14,6 +14,9 @@
#
# $ git log --pretty=format:"%H # %cd%n# %s" $PGINDENTGITHASH -1 --date=iso
+1d1612aec7688139e1a5506df1366b4b6a69605d # 2025-07-29 09:10:41 -0400
+# Run pgindent.
+
73873805fb3627cb23937c750fa83ffd8f16fc6c # 2025-07-25 16:36:44 -0400
# Run pgindent on the changes of the previous patch.
diff --git a/config/llvm.m4 b/config/llvm.m4
index fa4bedd9370..9d6fe8199e3 100644
--- a/config/llvm.m4
+++ b/config/llvm.m4
@@ -4,7 +4,7 @@
# -----------------
#
# Look for the LLVM installation, check that it's new enough, set the
-# corresponding LLVM_{CFLAGS,CXXFLAGS,BINPATH} and LDFLAGS
+# corresponding LLVM_{CFLAGS,CXXFLAGS,BINPATH,LIBS}
# variables. Also verify that CLANG is available, to transform C
# into bitcode.
#
@@ -55,7 +55,7 @@ AC_DEFUN([PGAC_LLVM_SUPPORT],
for pgac_option in `$LLVM_CONFIG --ldflags`; do
case $pgac_option in
- -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ -L*) LLVM_LIBS="$LLVM_LIBS $pgac_option";;
esac
done
diff --git a/config/programs.m4 b/config/programs.m4
index c73d9307ea8..e57fe4907b8 100644
--- a/config/programs.m4
+++ b/config/programs.m4
@@ -290,8 +290,8 @@ AC_DEFUN([PGAC_CHECK_LIBCURL],
pgac_save_LDFLAGS=$LDFLAGS
pgac_save_LIBS=$LIBS
- CPPFLAGS="$LIBCURL_CPPFLAGS $CPPFLAGS"
- LDFLAGS="$LIBCURL_LDFLAGS $LDFLAGS"
+ CPPFLAGS="$CPPFLAGS $LIBCURL_CPPFLAGS"
+ LDFLAGS="$LDFLAGS $LIBCURL_LDFLAGS"
AC_CHECK_HEADER(curl/curl.h, [],
[AC_MSG_ERROR([header file <curl/curl.h> is required for --with-libcurl])])
diff --git a/configure b/configure
index 6d7c22e153f..507a2437c33 100755
--- a/configure
+++ b/configure
@@ -5194,7 +5194,7 @@ fi
for pgac_option in `$LLVM_CONFIG --ldflags`; do
case $pgac_option in
- -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ -L*) LLVM_LIBS="$LLVM_LIBS $pgac_option";;
esac
done
@@ -9436,12 +9436,12 @@ fi
# Note the user could also set XML2_CFLAGS/XML2_LIBS directly
for pgac_option in $XML2_CFLAGS; do
case $pgac_option in
- -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";;
+ -I*|-D*) INCLUDES="$INCLUDES $pgac_option";;
esac
done
for pgac_option in $XML2_LIBS; do
case $pgac_option in
- -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ -L*) LIBDIRS="$LIBDIRS $pgac_option";;
esac
done
fi
@@ -9666,12 +9666,12 @@ fi
# note that -llz4 will be added by AC_CHECK_LIB below.
for pgac_option in $LZ4_CFLAGS; do
case $pgac_option in
- -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";;
+ -I*|-D*) INCLUDES="$INCLUDES $pgac_option";;
esac
done
for pgac_option in $LZ4_LIBS; do
case $pgac_option in
- -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ -L*) LIBDIRS="$LIBDIRS $pgac_option";;
esac
done
fi
@@ -9807,12 +9807,12 @@ fi
# note that -lzstd will be added by AC_CHECK_LIB below.
for pgac_option in $ZSTD_CFLAGS; do
case $pgac_option in
- -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";;
+ -I*|-D*) INCLUDES="$INCLUDES $pgac_option";;
esac
done
for pgac_option in $ZSTD_LIBS; do
case $pgac_option in
- -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ -L*) LIBDIRS="$LIBDIRS $pgac_option";;
esac
done
fi
@@ -12723,8 +12723,8 @@ if test "$with_libcurl" = yes ; then
pgac_save_LDFLAGS=$LDFLAGS
pgac_save_LIBS=$LIBS
- CPPFLAGS="$LIBCURL_CPPFLAGS $CPPFLAGS"
- LDFLAGS="$LIBCURL_LDFLAGS $LDFLAGS"
+ CPPFLAGS="$CPPFLAGS $LIBCURL_CPPFLAGS"
+ LDFLAGS="$LDFLAGS $LIBCURL_LDFLAGS"
ac_fn_c_check_header_mongrel "$LINENO" "curl/curl.h" "ac_cv_header_curl_curl_h" "$ac_includes_default"
if test "x$ac_cv_header_curl_curl_h" = xyes; then :
@@ -16658,7 +16658,7 @@ fi
if test "$with_icu" = yes; then
ac_save_CPPFLAGS=$CPPFLAGS
- CPPFLAGS="$ICU_CFLAGS $CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $ICU_CFLAGS"
# Verify we have ICU's header files
ac_fn_c_check_header_mongrel "$LINENO" "unicode/ucol.h" "ac_cv_header_unicode_ucol_h" "$ac_includes_default"
@@ -17565,7 +17565,7 @@ $as_echo "#define HAVE_GCC__ATOMIC_INT64_CAS 1" >>confdefs.h
fi
-# Check for x86 cpuid instruction
+# Check for __get_cpuid() and __cpuid()
{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __get_cpuid" >&5
$as_echo_n "checking for __get_cpuid... " >&6; }
if ${pgac_cv__get_cpuid+:} false; then :
@@ -17598,77 +17598,79 @@ if test x"$pgac_cv__get_cpuid" = x"yes"; then
$as_echo "#define HAVE__GET_CPUID 1" >>confdefs.h
-fi
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __get_cpuid_count" >&5
-$as_echo_n "checking for __get_cpuid_count... " >&6; }
-if ${pgac_cv__get_cpuid_count+:} false; then :
+else
+ # __cpuid()
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __cpuid" >&5
+$as_echo_n "checking for __cpuid... " >&6; }
+if ${pgac_cv__cpuid+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
-#include <cpuid.h>
+#include <intrin.h>
int
main ()
{
unsigned int exx[4] = {0, 0, 0, 0};
- __get_cpuid_count(7, 0, &exx[0], &exx[1], &exx[2], &exx[3]);
+ __cpuid(exx, 1);
;
return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
- pgac_cv__get_cpuid_count="yes"
+ pgac_cv__cpuid="yes"
else
- pgac_cv__get_cpuid_count="no"
+ pgac_cv__cpuid="no"
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__get_cpuid_count" >&5
-$as_echo "$pgac_cv__get_cpuid_count" >&6; }
-if test x"$pgac_cv__get_cpuid_count" = x"yes"; then
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__cpuid" >&5
+$as_echo "$pgac_cv__cpuid" >&6; }
+ if test x"$pgac_cv__cpuid" = x"yes"; then
-$as_echo "#define HAVE__GET_CPUID_COUNT 1" >>confdefs.h
+$as_echo "#define HAVE__CPUID 1" >>confdefs.h
+ fi
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __cpuid" >&5
-$as_echo_n "checking for __cpuid... " >&6; }
-if ${pgac_cv__cpuid+:} false; then :
+# Check for __get_cpuid_count() and __cpuidex() in a similar fashion.
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __get_cpuid_count" >&5
+$as_echo_n "checking for __get_cpuid_count... " >&6; }
+if ${pgac_cv__get_cpuid_count+:} false; then :
$as_echo_n "(cached) " >&6
else
cat confdefs.h - <<_ACEOF >conftest.$ac_ext
/* end confdefs.h. */
-#include <intrin.h>
+#include <cpuid.h>
int
main ()
{
unsigned int exx[4] = {0, 0, 0, 0};
- __get_cpuid(exx[0], 1);
+ __get_cpuid_count(7, 0, &exx[0], &exx[1], &exx[2], &exx[3]);
;
return 0;
}
_ACEOF
if ac_fn_c_try_link "$LINENO"; then :
- pgac_cv__cpuid="yes"
+ pgac_cv__get_cpuid_count="yes"
else
- pgac_cv__cpuid="no"
+ pgac_cv__get_cpuid_count="no"
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext conftest.$ac_ext
fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__cpuid" >&5
-$as_echo "$pgac_cv__cpuid" >&6; }
-if test x"$pgac_cv__cpuid" = x"yes"; then
-
-$as_echo "#define HAVE__CPUID 1" >>confdefs.h
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__get_cpuid_count" >&5
+$as_echo "$pgac_cv__get_cpuid_count" >&6; }
+if test x"$pgac_cv__get_cpuid_count" = x"yes"; then
-fi
+$as_echo "#define HAVE__GET_CPUID_COUNT 1" >>confdefs.h
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __cpuidex" >&5
+else
+ # __cpuidex()
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for __cpuidex" >&5
$as_echo_n "checking for __cpuidex... " >&6; }
if ${pgac_cv__cpuidex+:} false; then :
$as_echo_n "(cached) " >&6
@@ -17680,7 +17682,7 @@ int
main ()
{
unsigned int exx[4] = {0, 0, 0, 0};
- __get_cpuidex(exx[0], 7, 0);
+ __cpuidex(exx, 7, 0);
;
return 0;
@@ -17696,10 +17698,11 @@ rm -f core conftest.err conftest.$ac_objext \
fi
{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__cpuidex" >&5
$as_echo "$pgac_cv__cpuidex" >&6; }
-if test x"$pgac_cv__cpuidex" = x"yes"; then
+ if test x"$pgac_cv__cpuidex" = x"yes"; then
$as_echo "#define HAVE__CPUIDEX 1" >>confdefs.h
+ fi
fi
# Check for XSAVE intrinsics
@@ -18876,7 +18879,7 @@ Use --without-tcl to disable building PL/Tcl." "$LINENO" 5
fi
# now that we have TCL_INCLUDE_SPEC, we can check for <tcl.h>
ac_save_CPPFLAGS=$CPPFLAGS
- CPPFLAGS="$TCL_INCLUDE_SPEC $CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $TCL_INCLUDE_SPEC"
ac_fn_c_check_header_mongrel "$LINENO" "tcl.h" "ac_cv_header_tcl_h" "$ac_includes_default"
if test "x$ac_cv_header_tcl_h" = xyes; then :
@@ -18945,7 +18948,7 @@ fi
# check for <Python.h>
if test "$with_python" = yes; then
ac_save_CPPFLAGS=$CPPFLAGS
- CPPFLAGS="$python_includespec $CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $python_includespec"
ac_fn_c_check_header_mongrel "$LINENO" "Python.h" "ac_cv_header_Python_h" "$ac_includes_default"
if test "x$ac_cv_header_Python_h" = xyes; then :
diff --git a/configure.ac b/configure.ac
index c2877e36935..5f4548adc5c 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1103,12 +1103,12 @@ if test "$with_libxml" = yes ; then
# Note the user could also set XML2_CFLAGS/XML2_LIBS directly
for pgac_option in $XML2_CFLAGS; do
case $pgac_option in
- -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";;
+ -I*|-D*) INCLUDES="$INCLUDES $pgac_option";;
esac
done
for pgac_option in $XML2_LIBS; do
case $pgac_option in
- -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ -L*) LIBDIRS="$LIBDIRS $pgac_option";;
esac
done
fi
@@ -1152,12 +1152,12 @@ if test "$with_lz4" = yes; then
# note that -llz4 will be added by AC_CHECK_LIB below.
for pgac_option in $LZ4_CFLAGS; do
case $pgac_option in
- -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";;
+ -I*|-D*) INCLUDES="$INCLUDES $pgac_option";;
esac
done
for pgac_option in $LZ4_LIBS; do
case $pgac_option in
- -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ -L*) LIBDIRS="$LIBDIRS $pgac_option";;
esac
done
fi
@@ -1177,12 +1177,12 @@ if test "$with_zstd" = yes; then
# note that -lzstd will be added by AC_CHECK_LIB below.
for pgac_option in $ZSTD_CFLAGS; do
case $pgac_option in
- -I*|-D*) CPPFLAGS="$CPPFLAGS $pgac_option";;
+ -I*|-D*) INCLUDES="$INCLUDES $pgac_option";;
esac
done
for pgac_option in $ZSTD_LIBS; do
case $pgac_option in
- -L*) LDFLAGS="$LDFLAGS $pgac_option";;
+ -L*) LIBDIRS="$LIBDIRS $pgac_option";;
esac
done
fi
@@ -1944,7 +1944,7 @@ fi
if test "$with_icu" = yes; then
ac_save_CPPFLAGS=$CPPFLAGS
- CPPFLAGS="$ICU_CFLAGS $CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $ICU_CFLAGS"
# Verify we have ICU's header files
AC_CHECK_HEADER(unicode/ucol.h, [],
@@ -2044,7 +2044,7 @@ PGAC_HAVE_GCC__ATOMIC_INT32_CAS
PGAC_HAVE_GCC__ATOMIC_INT64_CAS
-# Check for x86 cpuid instruction
+# Check for __get_cpuid() and __cpuid()
AC_CACHE_CHECK([for __get_cpuid], [pgac_cv__get_cpuid],
[AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <cpuid.h>],
[[unsigned int exx[4] = {0, 0, 0, 0};
@@ -2054,8 +2054,21 @@ AC_CACHE_CHECK([for __get_cpuid], [pgac_cv__get_cpuid],
[pgac_cv__get_cpuid="no"])])
if test x"$pgac_cv__get_cpuid" = x"yes"; then
AC_DEFINE(HAVE__GET_CPUID, 1, [Define to 1 if you have __get_cpuid.])
+else
+ # __cpuid()
+ AC_CACHE_CHECK([for __cpuid], [pgac_cv__cpuid],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <intrin.h>],
+ [[unsigned int exx[4] = {0, 0, 0, 0};
+ __cpuid(exx, 1);
+ ]])],
+ [pgac_cv__cpuid="yes"],
+ [pgac_cv__cpuid="no"])])
+ if test x"$pgac_cv__cpuid" = x"yes"; then
+ AC_DEFINE(HAVE__CPUID, 1, [Define to 1 if you have __cpuid.])
+ fi
fi
+# Check for __get_cpuid_count() and __cpuidex() in a similar fashion.
AC_CACHE_CHECK([for __get_cpuid_count], [pgac_cv__get_cpuid_count],
[AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <cpuid.h>],
[[unsigned int exx[4] = {0, 0, 0, 0};
@@ -2065,28 +2078,18 @@ AC_CACHE_CHECK([for __get_cpuid_count], [pgac_cv__get_cpuid_count],
[pgac_cv__get_cpuid_count="no"])])
if test x"$pgac_cv__get_cpuid_count" = x"yes"; then
AC_DEFINE(HAVE__GET_CPUID_COUNT, 1, [Define to 1 if you have __get_cpuid_count.])
-fi
-
-AC_CACHE_CHECK([for __cpuid], [pgac_cv__cpuid],
-[AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <intrin.h>],
- [[unsigned int exx[4] = {0, 0, 0, 0};
- __get_cpuid(exx[0], 1);
- ]])],
- [pgac_cv__cpuid="yes"],
- [pgac_cv__cpuid="no"])])
-if test x"$pgac_cv__cpuid" = x"yes"; then
- AC_DEFINE(HAVE__CPUID, 1, [Define to 1 if you have __cpuid.])
-fi
-
-AC_CACHE_CHECK([for __cpuidex], [pgac_cv__cpuidex],
-[AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <intrin.h>],
- [[unsigned int exx[4] = {0, 0, 0, 0};
- __get_cpuidex(exx[0], 7, 0);
- ]])],
- [pgac_cv__cpuidex="yes"],
- [pgac_cv__cpuidex="no"])])
-if test x"$pgac_cv__cpuidex" = x"yes"; then
- AC_DEFINE(HAVE__CPUIDEX, 1, [Define to 1 if you have __cpuidex.])
+else
+ # __cpuidex()
+ AC_CACHE_CHECK([for __cpuidex], [pgac_cv__cpuidex],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <intrin.h>],
+ [[unsigned int exx[4] = {0, 0, 0, 0};
+ __cpuidex(exx, 7, 0);
+ ]])],
+ [pgac_cv__cpuidex="yes"],
+ [pgac_cv__cpuidex="no"])])
+ if test x"$pgac_cv__cpuidex" = x"yes"; then
+ AC_DEFINE(HAVE__CPUIDEX, 1, [Define to 1 if you have __cpuidex.])
+ fi
fi
# Check for XSAVE intrinsics
@@ -2344,7 +2347,7 @@ Use --without-tcl to disable building PL/Tcl.])
fi
# now that we have TCL_INCLUDE_SPEC, we can check for <tcl.h>
ac_save_CPPFLAGS=$CPPFLAGS
- CPPFLAGS="$TCL_INCLUDE_SPEC $CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $TCL_INCLUDE_SPEC"
AC_CHECK_HEADER(tcl.h, [], [AC_MSG_ERROR([header file <tcl.h> is required for Tcl])])
CPPFLAGS=$ac_save_CPPFLAGS
fi
@@ -2381,7 +2384,7 @@ fi
# check for <Python.h>
if test "$with_python" = yes; then
ac_save_CPPFLAGS=$CPPFLAGS
- CPPFLAGS="$python_includespec $CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $python_includespec"
AC_CHECK_HEADER(Python.h, [], [AC_MSG_ERROR([header file <Python.h> is required for Python])])
CPPFLAGS=$ac_save_CPPFLAGS
fi
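
The restructured checks above probe for GCC's __get_cpuid()/__get_cpuid_count() first and only fall back to the MSVC-style __cpuid()/__cpuidex() intrinsics when the GCC builtins are unavailable, so at most one HAVE_* macro of each pair ends up defined. A minimal sketch (not part of the patch; it assumes the HAVE_* macros come from pg_config.h and uses a hypothetical helper name) of how consuming code can branch on the result:

#include <string.h>

#if defined(HAVE__GET_CPUID)
#include <cpuid.h>
#elif defined(HAVE__CPUID)
#include <intrin.h>
#endif

/* Hypothetical helper: fetch CPUID leaf 1 via whichever interface configure found. */
static void
fetch_cpuid_leaf1(unsigned int *exx)
{
#if defined(HAVE__GET_CPUID)
	__get_cpuid(1, &exx[0], &exx[1], &exx[2], &exx[3]);
#elif defined(HAVE__CPUID)
	__cpuid(exx, 1);
#else
	memset(exx, 0, 4 * sizeof(unsigned int));	/* no CPUID support detected */
#endif
}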
diff --git a/contrib/postgres_fdw/connection.c b/contrib/postgres_fdw/connection.c
index a33843fcf85..e8148f2c5a2 100644
--- a/contrib/postgres_fdw/connection.c
+++ b/contrib/postgres_fdw/connection.c
@@ -142,6 +142,8 @@ static void do_sql_command_begin(PGconn *conn, const char *sql);
static void do_sql_command_end(PGconn *conn, const char *sql,
bool consume_input);
static void begin_remote_xact(ConnCacheEntry *entry);
+static void pgfdw_report_internal(int elevel, PGresult *res, PGconn *conn,
+ const char *sql);
static void pgfdw_xact_callback(XactEvent event, void *arg);
static void pgfdw_subxact_callback(SubXactEvent event,
SubTransactionId mySubid,
@@ -815,7 +817,7 @@ static void
do_sql_command_begin(PGconn *conn, const char *sql)
{
if (!PQsendQuery(conn, sql))
- pgfdw_report_error(ERROR, NULL, conn, sql);
+ pgfdw_report_error(NULL, conn, sql);
}
static void
@@ -830,10 +832,10 @@ do_sql_command_end(PGconn *conn, const char *sql, bool consume_input)
* would be large compared to the overhead of PQconsumeInput.)
*/
if (consume_input && !PQconsumeInput(conn))
- pgfdw_report_error(ERROR, NULL, conn, sql);
+ pgfdw_report_error(NULL, conn, sql);
res = pgfdw_get_result(conn);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pgfdw_report_error(ERROR, res, conn, sql);
+ pgfdw_report_error(res, conn, sql);
PQclear(res);
}
@@ -966,7 +968,10 @@ pgfdw_get_result(PGconn *conn)
/*
* Report an error we got from the remote server.
*
- * elevel: error level to use (typically ERROR, but might be less)
+ * Callers should use pgfdw_report_error() to throw an error, or use
+ * pgfdw_report() for lesser message levels. (We make this distinction
+ * so that pgfdw_report_error() can be marked noreturn.)
+ *
* res: PGresult containing the error (might be NULL)
* conn: connection we did the query on
* sql: NULL, or text of remote command we tried to execute
@@ -979,8 +984,22 @@ pgfdw_get_result(PGconn *conn)
* marked with have_error = true.
*/
void
-pgfdw_report_error(int elevel, PGresult *res, PGconn *conn,
- const char *sql)
+pgfdw_report_error(PGresult *res, PGconn *conn, const char *sql)
+{
+ pgfdw_report_internal(ERROR, res, conn, sql);
+ pg_unreachable();
+}
+
+void
+pgfdw_report(int elevel, PGresult *res, PGconn *conn, const char *sql)
+{
+ Assert(elevel < ERROR); /* use pgfdw_report_error for that */
+ pgfdw_report_internal(elevel, res, conn, sql);
+}
+
+static void
+pgfdw_report_internal(int elevel, PGresult *res, PGconn *conn,
+ const char *sql)
{
char *diag_sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE);
char *message_primary = PQresultErrorField(res, PG_DIAG_MESSAGE_PRIMARY);
@@ -1538,7 +1557,7 @@ pgfdw_exec_cleanup_query_begin(PGconn *conn, const char *query)
*/
if (!PQsendQuery(conn, query))
{
- pgfdw_report_error(WARNING, NULL, conn, query);
+ pgfdw_report(WARNING, NULL, conn, query);
return false;
}
@@ -1563,7 +1582,7 @@ pgfdw_exec_cleanup_query_end(PGconn *conn, const char *query,
*/
if (consume_input && !PQconsumeInput(conn))
{
- pgfdw_report_error(WARNING, NULL, conn, query);
+ pgfdw_report(WARNING, NULL, conn, query);
return false;
}
@@ -1575,7 +1594,7 @@ pgfdw_exec_cleanup_query_end(PGconn *conn, const char *query,
(errmsg("could not get query result due to timeout"),
errcontext("remote SQL command: %s", query)));
else
- pgfdw_report_error(WARNING, NULL, conn, query);
+ pgfdw_report(WARNING, NULL, conn, query);
return false;
}
@@ -1583,7 +1602,7 @@ pgfdw_exec_cleanup_query_end(PGconn *conn, const char *query,
/* Issue a warning if not successful. */
if (PQresultStatus(result) != PGRES_COMMAND_OK)
{
- pgfdw_report_error(WARNING, result, conn, query);
+ pgfdw_report(WARNING, result, conn, query);
return ignore_errors;
}
PQclear(result);
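
The connection.c hunks above split the old pgfdw_report_error(elevel, ...) into two entry points: pgfdw_report_error(), which always raises ERROR and can therefore be declared pg_noreturn, and pgfdw_report() for WARNING and lower levels. A condensed sketch of the resulting calling convention, reassembled from calls that appear in this patch:

	PGresult   *res;

	res = pgfdw_exec_query(conn, sql, NULL);
	if (PQresultStatus(res) != PGRES_TUPLES_OK)
		pgfdw_report_error(res, conn, sql);	/* throws ERROR; never returns */
	/* ... use res ... */
	PQclear(res);

	/* during abort cleanup, degrade the report to a warning and carry on */
	if (!PQsendQuery(conn, query))
	{
		pgfdw_report(WARNING, NULL, conn, query);
		return false;
	}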
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index 25b287be069..456b267f70b 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -1704,7 +1704,7 @@ postgresReScanForeignScan(ForeignScanState *node)
res = pgfdw_exec_query(fsstate->conn, sql, fsstate->conn_state);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pgfdw_report_error(ERROR, res, fsstate->conn, sql);
+ pgfdw_report_error(res, fsstate->conn, sql);
PQclear(res);
/* Now force a fresh FETCH. */
@@ -3614,7 +3614,7 @@ get_remote_estimate(const char *sql, PGconn *conn,
*/
res = pgfdw_exec_query(conn, sql, NULL);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- pgfdw_report_error(ERROR, res, conn, sql);
+ pgfdw_report_error(res, conn, sql);
/*
* Extract cost numbers for topmost plan node. Note we search for a left
@@ -3769,14 +3769,14 @@ create_cursor(ForeignScanState *node)
*/
if (!PQsendQueryParams(conn, buf.data, numParams,
NULL, values, NULL, NULL, 0))
- pgfdw_report_error(ERROR, NULL, conn, buf.data);
+ pgfdw_report_error(NULL, conn, buf.data);
/*
* Get the result, and check for success.
*/
res = pgfdw_get_result(conn);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pgfdw_report_error(ERROR, res, conn, fsstate->query);
+ pgfdw_report_error(res, conn, fsstate->query);
PQclear(res);
/* Mark the cursor as created, and show no tuples have been retrieved */
@@ -3823,7 +3823,7 @@ fetch_more_data(ForeignScanState *node)
res = pgfdw_get_result(conn);
/* On error, report the original query, not the FETCH. */
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- pgfdw_report_error(ERROR, res, conn, fsstate->query);
+ pgfdw_report_error(res, conn, fsstate->query);
/* Reset per-connection state */
fsstate->conn_state->pendingAreq = NULL;
@@ -3839,7 +3839,7 @@ fetch_more_data(ForeignScanState *node)
res = pgfdw_exec_query(conn, sql, fsstate->conn_state);
/* On error, report the original query, not the FETCH. */
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- pgfdw_report_error(ERROR, res, conn, fsstate->query);
+ pgfdw_report_error(res, conn, fsstate->query);
}
/* Convert the data into HeapTuples */
@@ -3944,7 +3944,7 @@ close_cursor(PGconn *conn, unsigned int cursor_number,
snprintf(sql, sizeof(sql), "CLOSE c%u", cursor_number);
res = pgfdw_exec_query(conn, sql, conn_state);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pgfdw_report_error(ERROR, res, conn, sql);
+ pgfdw_report_error(res, conn, sql);
PQclear(res);
}
@@ -4152,7 +4152,7 @@ execute_foreign_modify(EState *estate,
NULL,
NULL,
0))
- pgfdw_report_error(ERROR, NULL, fmstate->conn, fmstate->query);
+ pgfdw_report_error(NULL, fmstate->conn, fmstate->query);
/*
* Get the result, and check for success.
@@ -4160,7 +4160,7 @@ execute_foreign_modify(EState *estate,
res = pgfdw_get_result(fmstate->conn);
if (PQresultStatus(res) !=
(fmstate->has_returning ? PGRES_TUPLES_OK : PGRES_COMMAND_OK))
- pgfdw_report_error(ERROR, res, fmstate->conn, fmstate->query);
+ pgfdw_report_error(res, fmstate->conn, fmstate->query);
/* Check number of rows affected, and fetch RETURNING tuple if any */
if (fmstate->has_returning)
@@ -4219,14 +4219,14 @@ prepare_foreign_modify(PgFdwModifyState *fmstate)
fmstate->query,
0,
NULL))
- pgfdw_report_error(ERROR, NULL, fmstate->conn, fmstate->query);
+ pgfdw_report_error(NULL, fmstate->conn, fmstate->query);
/*
* Get the result, and check for success.
*/
res = pgfdw_get_result(fmstate->conn);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pgfdw_report_error(ERROR, res, fmstate->conn, fmstate->query);
+ pgfdw_report_error(res, fmstate->conn, fmstate->query);
PQclear(res);
/* This action shows that the prepare has been done. */
@@ -4373,7 +4373,7 @@ deallocate_query(PgFdwModifyState *fmstate)
snprintf(sql, sizeof(sql), "DEALLOCATE %s", fmstate->p_name);
res = pgfdw_exec_query(fmstate->conn, sql, fmstate->conn_state);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pgfdw_report_error(ERROR, res, fmstate->conn, sql);
+ pgfdw_report_error(res, fmstate->conn, sql);
PQclear(res);
pfree(fmstate->p_name);
fmstate->p_name = NULL;
@@ -4541,7 +4541,7 @@ execute_dml_stmt(ForeignScanState *node)
*/
if (!PQsendQueryParams(dmstate->conn, dmstate->query, numParams,
NULL, values, NULL, NULL, 0))
- pgfdw_report_error(ERROR, NULL, dmstate->conn, dmstate->query);
+ pgfdw_report_error(NULL, dmstate->conn, dmstate->query);
/*
* Get the result, and check for success.
@@ -4549,7 +4549,7 @@ execute_dml_stmt(ForeignScanState *node)
dmstate->result = pgfdw_get_result(dmstate->conn);
if (PQresultStatus(dmstate->result) !=
(dmstate->has_returning ? PGRES_TUPLES_OK : PGRES_COMMAND_OK))
- pgfdw_report_error(ERROR, dmstate->result, dmstate->conn,
+ pgfdw_report_error(dmstate->result, dmstate->conn,
dmstate->query);
/*
@@ -4923,7 +4923,7 @@ postgresAnalyzeForeignTable(Relation relation,
res = pgfdw_exec_query(conn, sql.data, NULL);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- pgfdw_report_error(ERROR, res, conn, sql.data);
+ pgfdw_report_error(res, conn, sql.data);
if (PQntuples(res) != 1 || PQnfields(res) != 1)
elog(ERROR, "unexpected result from deparseAnalyzeSizeSql query");
@@ -4972,7 +4972,7 @@ postgresGetAnalyzeInfoForForeignTable(Relation relation, bool *can_tablesample)
res = pgfdw_exec_query(conn, sql.data, NULL);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- pgfdw_report_error(ERROR, res, conn, sql.data);
+ pgfdw_report_error(res, conn, sql.data);
if (PQntuples(res) != 1 || PQnfields(res) != 2)
elog(ERROR, "unexpected result from deparseAnalyzeInfoSql query");
@@ -5018,7 +5018,7 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
int server_version_num;
PgFdwSamplingMethod method = ANALYZE_SAMPLE_AUTO; /* auto is default */
double sample_frac = -1.0;
- double reltuples;
+ double reltuples = -1.0;
unsigned int cursor_number;
StringInfoData sql;
PGresult *res;
@@ -5202,7 +5202,7 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
res = pgfdw_exec_query(conn, sql.data, NULL);
if (PQresultStatus(res) != PGRES_COMMAND_OK)
- pgfdw_report_error(ERROR, res, conn, sql.data);
+ pgfdw_report_error(res, conn, sql.data);
PQclear(res);
/*
@@ -5254,7 +5254,7 @@ postgresAcquireSampleRowsFunc(Relation relation, int elevel,
res = pgfdw_exec_query(conn, fetch_sql, NULL);
/* On error, report the original query, not the FETCH. */
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- pgfdw_report_error(ERROR, res, conn, sql.data);
+ pgfdw_report_error(res, conn, sql.data);
/* Process whatever we got. */
numrows = PQntuples(res);
@@ -5426,7 +5426,7 @@ postgresImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid)
res = pgfdw_exec_query(conn, buf.data, NULL);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- pgfdw_report_error(ERROR, res, conn, buf.data);
+ pgfdw_report_error(res, conn, buf.data);
if (PQntuples(res) != 1)
ereport(ERROR,
@@ -5540,7 +5540,7 @@ postgresImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid)
/* Fetch the data */
res = pgfdw_exec_query(conn, buf.data, NULL);
if (PQresultStatus(res) != PGRES_TUPLES_OK)
- pgfdw_report_error(ERROR, res, conn, buf.data);
+ pgfdw_report_error(res, conn, buf.data);
/* Process results */
numrows = PQntuples(res);
@@ -7312,7 +7312,7 @@ postgresForeignAsyncNotify(AsyncRequest *areq)
/* On error, report the original query, not the FETCH. */
if (!PQconsumeInput(fsstate->conn))
- pgfdw_report_error(ERROR, NULL, fsstate->conn, fsstate->query);
+ pgfdw_report_error(NULL, fsstate->conn, fsstate->query);
fetch_more_data(node);
@@ -7411,7 +7411,7 @@ fetch_more_data_begin(AsyncRequest *areq)
fsstate->fetch_size, fsstate->cursor_number);
if (!PQsendQuery(fsstate->conn, sql))
- pgfdw_report_error(ERROR, NULL, fsstate->conn, fsstate->query);
+ pgfdw_report_error(NULL, fsstate->conn, fsstate->query);
/* Remember that the request is in process */
fsstate->conn_state->pendingAreq = areq;
diff --git a/contrib/postgres_fdw/postgres_fdw.h b/contrib/postgres_fdw/postgres_fdw.h
index 38e1a885941..e69735298d7 100644
--- a/contrib/postgres_fdw/postgres_fdw.h
+++ b/contrib/postgres_fdw/postgres_fdw.h
@@ -166,8 +166,10 @@ extern void do_sql_command(PGconn *conn, const char *sql);
extern PGresult *pgfdw_get_result(PGconn *conn);
extern PGresult *pgfdw_exec_query(PGconn *conn, const char *query,
PgFdwConnState *state);
-extern void pgfdw_report_error(int elevel, PGresult *res, PGconn *conn,
- const char *sql);
+pg_noreturn extern void pgfdw_report_error(PGresult *res, PGconn *conn,
+ const char *sql);
+extern void pgfdw_report(int elevel, PGresult *res, PGconn *conn,
+ const char *sql);
/* in option.c */
extern int ExtractConnectionOptions(List *defelems,
diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml
index de5b5929ee0..74a16af04ad 100644
--- a/doc/src/sgml/func.sgml
+++ b/doc/src/sgml/func.sgml
@@ -3148,8 +3148,11 @@ SELECT NOT(ROW(table.*) IS NOT NULL) FROM TABLE; -- detect at least one null in
</para>
<para>
Converts the first letter of each word to upper case and the
- rest to lower case. Words are sequences of alphanumeric
- characters separated by non-alphanumeric characters.
+ rest to lower case. When using the <literal>libc</literal> locale
+ provider, words are sequences of alphanumeric characters separated
+ by non-alphanumeric characters; when using the ICU locale provider,
+ words are separated according to
+ <ulink url="https://www.unicode.org/reports/tr29/#Word_Boundaries">Unicode Standard Annex #29</ulink>.
</para>
<para>
<literal>initcap('hi THOMAS')</literal>
diff --git a/doc/src/sgml/pageinspect.sgml b/doc/src/sgml/pageinspect.sgml
index 12929333665..f5014787c78 100644
--- a/doc/src/sgml/pageinspect.sgml
+++ b/doc/src/sgml/pageinspect.sgml
@@ -741,7 +741,7 @@ test=# SELECT first_tid, nbytes, tids[0:5] AS some_tids
For example:
<screen>
test=# SELECT * FROM gist_page_opaque_info(get_raw_page('test_gist_idx', 2));
- lsn | nsn | rightlink | flags
+ lsn | nsn | rightlink | flags
------------+------------+-----------+--------
0/0B5FE088 | 0/00000000 | 1 | {leaf}
(1 row)
diff --git a/meson.build b/meson.build
index 5365aaf95e6..ca423dc8e12 100644
--- a/meson.build
+++ b/meson.build
@@ -1996,10 +1996,7 @@ if cc.links('''
cdata.set('HAVE__BUILTIN_OP_OVERFLOW', 1)
endif
-
-# XXX: The configure.ac check for __cpuid() is broken, we don't copy that
-# here. To prevent problems due to two detection methods working, stop
-# checking after one.
+# Check for __get_cpuid() and __cpuid().
if cc.links('''
#include <cpuid.h>
int main(int arg, char **argv)
diff --git a/src/Makefile.global.in b/src/Makefile.global.in
index 04952b533de..8b1b357beaa 100644
--- a/src/Makefile.global.in
+++ b/src/Makefile.global.in
@@ -254,7 +254,7 @@ CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
PG_SYSROOT = @PG_SYSROOT@
-override CPPFLAGS := $(ICU_CFLAGS) $(LIBNUMA_CFLAGS) $(LIBURING_CFLAGS) $(CPPFLAGS)
+override CPPFLAGS += $(ICU_CFLAGS) $(LIBNUMA_CFLAGS) $(LIBURING_CFLAGS)
ifdef PGXS
override CPPFLAGS := -I$(includedir_server) -I$(includedir_internal) $(CPPFLAGS)
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 7e2792ead71..8345bc0264b 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -3582,6 +3582,7 @@ static void
show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es)
{
Plan *plan = ((PlanState *) mstate)->plan;
+ Memoize *mplan = (Memoize *) plan;
ListCell *lc;
List *context;
StringInfoData keystr;
@@ -3602,7 +3603,7 @@ show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es)
plan,
ancestors);
- foreach(lc, ((Memoize *) plan)->param_exprs)
+ foreach(lc, mplan->param_exprs)
{
Node *expr = (Node *) lfirst(lc);
@@ -3618,6 +3619,24 @@ show_memoize_info(MemoizeState *mstate, List *ancestors, ExplainState *es)
pfree(keystr.data);
+ if (es->costs)
+ {
+ if (es->format == EXPLAIN_FORMAT_TEXT)
+ {
+ ExplainIndentText(es);
+ appendStringInfo(es->str, "Estimates: capacity=%u distinct keys=%.0f lookups=%.0f hit percent=%.2f%%\n",
+ mplan->est_entries, mplan->est_unique_keys,
+ mplan->est_calls, mplan->est_hit_ratio * 100.0);
+ }
+ else
+ {
+ ExplainPropertyUInteger("Estimated Capacity", NULL, mplan->est_entries, es);
+ ExplainPropertyFloat("Estimated Distinct Lookup Keys", NULL, mplan->est_unique_keys, 0, es);
+ ExplainPropertyFloat("Estimated Lookups", NULL, mplan->est_calls, 0, es);
+ ExplainPropertyFloat("Estimated Hit Percent", NULL, mplan->est_hit_ratio * 100.0, 2, es);
+ }
+ }
+
if (!es->analyze)
return;
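
With costs enabled, the new block above makes EXPLAIN report the planner's Memoize estimates. In TEXT format the extra line follows the format string in the hunk; with illustrative (made-up) numbers it would look roughly like:

	Estimates: capacity=1024 distinct keys=100 lookups=10000 hit percent=89.76%

In the structured formats the same values are emitted as the "Estimated Capacity", "Estimated Distinct Lookup Keys", "Estimated Lookups", and "Estimated Hit Percent" properties.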
diff --git a/src/backend/jit/llvm/Makefile b/src/backend/jit/llvm/Makefile
index e8c12060b93..68677ba42e1 100644
--- a/src/backend/jit/llvm/Makefile
+++ b/src/backend/jit/llvm/Makefile
@@ -31,7 +31,7 @@ endif
# All files in this directory use LLVM.
CFLAGS += $(LLVM_CFLAGS)
CXXFLAGS += $(LLVM_CXXFLAGS)
-override CPPFLAGS := $(LLVM_CPPFLAGS) $(CPPFLAGS)
+override CPPFLAGS += $(LLVM_CPPFLAGS)
SHLIB_LINK += $(LLVM_LIBS)
# Because this module includes C++ files, we need to use a C++
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 1f04a2c182c..344a3188317 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -2572,13 +2572,13 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
Cost input_startup_cost = mpath->subpath->startup_cost;
Cost input_total_cost = mpath->subpath->total_cost;
double tuples = mpath->subpath->rows;
- double calls = mpath->calls;
+ Cardinality est_calls = mpath->est_calls;
int width = mpath->subpath->pathtarget->width;
double hash_mem_bytes;
double est_entry_bytes;
- double est_cache_entries;
- double ndistinct;
+ Cardinality est_cache_entries;
+ Cardinality ndistinct;
double evict_ratio;
double hit_ratio;
Cost startup_cost;
@@ -2604,7 +2604,7 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
est_cache_entries = floor(hash_mem_bytes / est_entry_bytes);
/* estimate on the distinct number of parameter values */
- ndistinct = estimate_num_groups(root, mpath->param_exprs, calls, NULL,
+ ndistinct = estimate_num_groups(root, mpath->param_exprs, est_calls, NULL,
&estinfo);
/*
@@ -2616,7 +2616,10 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
* certainly mean a MemoizePath will never survive add_path().
*/
if ((estinfo.flags & SELFLAG_USED_DEFAULT) != 0)
- ndistinct = calls;
+ ndistinct = est_calls;
+
+ /* Remember the ndistinct estimate for EXPLAIN */
+ mpath->est_unique_keys = ndistinct;
/*
* Since we've already estimated the maximum number of entries we can
@@ -2644,9 +2647,12 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
* must look at how many scans are estimated in total for this node and
* how many of those scans we expect to get a cache hit.
*/
- hit_ratio = ((calls - ndistinct) / calls) *
+ hit_ratio = ((est_calls - ndistinct) / est_calls) *
(est_cache_entries / Max(ndistinct, est_cache_entries));
+ /* Remember the hit ratio estimate for EXPLAIN */
+ mpath->est_hit_ratio = hit_ratio;
+
Assert(hit_ratio >= 0 && hit_ratio <= 1.0);
/*
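
The hit-ratio computation above multiplies the fraction of lookups that repeat an earlier key by the fraction of distinct keys expected to fit in the cache. A worked example with illustrative numbers (assumes PostgreSQL's Cardinality typedef, which is a double, and the usual Max() macro from c.h):

	Cardinality est_calls = 10000;		/* expected rescans of the Memoize node */
	Cardinality ndistinct = 100;		/* estimated distinct parameter values */
	Cardinality est_cache_entries = 50;	/* entries that fit within hash_mem */
	double		hit_ratio;

	hit_ratio = ((est_calls - ndistinct) / est_calls) *
		(est_cache_entries / Max(ndistinct, est_cache_entries));
	/* = (9900 / 10000) * (50 / 100) = 0.99 * 0.5 = 0.495 */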
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index 8a9f1d7a943..bfefc7dbea1 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -284,7 +284,10 @@ static Material *make_material(Plan *lefttree);
static Memoize *make_memoize(Plan *lefttree, Oid *hashoperators,
Oid *collations, List *param_exprs,
bool singlerow, bool binary_mode,
- uint32 est_entries, Bitmapset *keyparamids);
+ uint32 est_entries, Bitmapset *keyparamids,
+ Cardinality est_calls,
+ Cardinality est_unique_keys,
+ double est_hit_ratio);
static WindowAgg *make_windowagg(List *tlist, WindowClause *wc,
int partNumCols, AttrNumber *partColIdx, Oid *partOperators, Oid *partCollations,
int ordNumCols, AttrNumber *ordColIdx, Oid *ordOperators, Oid *ordCollations,
@@ -1753,7 +1756,8 @@ create_memoize_plan(PlannerInfo *root, MemoizePath *best_path, int flags)
plan = make_memoize(subplan, operators, collations, param_exprs,
best_path->singlerow, best_path->binary_mode,
- best_path->est_entries, keyparamids);
+ best_path->est_entries, keyparamids, best_path->est_calls,
+ best_path->est_unique_keys, best_path->est_hit_ratio);
copy_generic_path_info(&plan->plan, (Path *) best_path);
@@ -6749,7 +6753,9 @@ materialize_finished_plan(Plan *subplan)
static Memoize *
make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations,
List *param_exprs, bool singlerow, bool binary_mode,
- uint32 est_entries, Bitmapset *keyparamids)
+ uint32 est_entries, Bitmapset *keyparamids,
+ Cardinality est_calls, Cardinality est_unique_keys,
+ double est_hit_ratio)
{
Memoize *node = makeNode(Memoize);
Plan *plan = &node->plan;
@@ -6767,6 +6773,9 @@ make_memoize(Plan *lefttree, Oid *hashoperators, Oid *collations,
node->binary_mode = binary_mode;
node->est_entries = est_entries;
node->keyparamids = keyparamids;
+ node->est_calls = est_calls;
+ node->est_unique_keys = est_unique_keys;
+ node->est_hit_ratio = est_hit_ratio;
return node;
}
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 9cc602788ea..a4c5867cdcb 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -1689,7 +1689,7 @@ create_material_path(RelOptInfo *rel, Path *subpath)
MemoizePath *
create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
List *param_exprs, List *hash_operators,
- bool singlerow, bool binary_mode, double calls)
+ bool singlerow, bool binary_mode, Cardinality est_calls)
{
MemoizePath *pathnode = makeNode(MemoizePath);
@@ -1710,7 +1710,6 @@ create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
pathnode->param_exprs = param_exprs;
pathnode->singlerow = singlerow;
pathnode->binary_mode = binary_mode;
- pathnode->calls = clamp_row_est(calls);
/*
* For now we set est_entries to 0. cost_memoize_rescan() does all the
@@ -1720,6 +1719,12 @@ create_memoize_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
*/
pathnode->est_entries = 0;
+ pathnode->est_calls = clamp_row_est(est_calls);
+
+ /* These will also be set later in cost_memoize_rescan() */
+ pathnode->est_unique_keys = 0.0;
+ pathnode->est_hit_ratio = 0.0;
+
/* we should not generate this path type when enable_memoize=false */
Assert(enable_memoize);
pathnode->path.disabled_nodes = subpath->disabled_nodes;
@@ -4259,7 +4264,7 @@ reparameterize_path(PlannerInfo *root, Path *path,
mpath->hash_operators,
mpath->singlerow,
mpath->binary_mode,
- mpath->calls);
+ mpath->est_calls);
}
default:
break;
diff --git a/src/backend/replication/syncrep_scanner.l b/src/backend/replication/syncrep_scanner.l
index 7dec1f869c7..02004d621e7 100644
--- a/src/backend/replication/syncrep_scanner.l
+++ b/src/backend/replication/syncrep_scanner.l
@@ -157,17 +157,16 @@ syncrep_yyerror(SyncRepConfigData **syncrep_parse_result_p, char **syncrep_parse
{
struct yyguts_t *yyg = (struct yyguts_t *) yyscanner; /* needed for yytext
* macro */
- char *syncrep_parse_error_msg = *syncrep_parse_error_msg_p;
/* report only the first error in a parse operation */
- if (syncrep_parse_error_msg)
+ if (*syncrep_parse_error_msg_p)
return;
if (yytext[0])
- syncrep_parse_error_msg = psprintf("%s at or near \"%s\"",
- message, yytext);
+ *syncrep_parse_error_msg_p = psprintf("%s at or near \"%s\"",
+ message, yytext);
else
- syncrep_parse_error_msg = psprintf("%s at end of input",
- message);
+ *syncrep_parse_error_msg_p = psprintf("%s at end of input",
+ message);
}
void
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 6afdd28dba6..67431208e7f 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -2743,12 +2743,10 @@ ExtendBufferedRelShared(BufferManagerRelation bmr,
* because mdread doesn't complain about reads beyond EOF (when
* zero_damaged_pages is ON) and so a previous attempt to read a block
* beyond EOF could have left a "valid" zero-filled buffer.
- * Unfortunately, we have also seen this case occurring because of
- * buggy Linux kernels that sometimes return an lseek(SEEK_END) result
- * that doesn't account for a recent write. In that situation, the
- * pre-existing buffer would contain valid data that we don't want to
- * overwrite. Since the legitimate cases should always have left a
- * zero-filled buffer, complain if not PageIsNew.
+ *
+ * This has also been observed when a relation was overwritten by an
+ * external process. Since the legitimate cases should always have
+ * left a zero-filled buffer, complain if not PageIsNew.
*/
if (existing_id >= 0)
{
@@ -2778,8 +2776,7 @@ ExtendBufferedRelShared(BufferManagerRelation bmr,
ereport(ERROR,
(errmsg("unexpected data beyond EOF in block %u of relation %s",
existing_hdr->tag.blockNum,
- relpath(bmr.smgr->smgr_rlocator, fork).str),
- errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
+ relpath(bmr.smgr->smgr_rlocator, fork).str)));
/*
* We *must* do smgr[zero]extend before succeeding, else the page
diff --git a/src/backend/storage/ipc/procsignal.c b/src/backend/storage/ipc/procsignal.c
index a9bb540b55a..087821311cc 100644
--- a/src/backend/storage/ipc/procsignal.c
+++ b/src/backend/storage/ipc/procsignal.c
@@ -728,7 +728,11 @@ procsignal_sigusr1_handler(SIGNAL_ARGS)
void
SendCancelRequest(int backendPID, const uint8 *cancel_key, int cancel_key_len)
{
- Assert(backendPID != 0);
+ if (backendPID == 0)
+ {
+ ereport(LOG, (errmsg("invalid cancel request with PID 0")));
+ return;
+ }
/*
* See if we have a matching backend. Reading the pss_pid and
diff --git a/src/backend/utils/adt/tid.c b/src/backend/utils/adt/tid.c
index 1b0df111717..39dab3e42df 100644
--- a/src/backend/utils/adt/tid.c
+++ b/src/backend/utils/adt/tid.c
@@ -84,7 +84,7 @@ tidin(PG_FUNCTION_ARGS)
/*
* Cope with possibility that unsigned long is wider than BlockNumber, in
* which case strtoul will not raise an error for some values that are out
- * of the range of BlockNumber. (See similar code in oidin().)
+ * of the range of BlockNumber. (See similar code in uint32in_subr().)
*/
#if SIZEOF_LONG > 4
if (cvt != (unsigned long) blockNumber &&
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index f7b731825fc..182e8f75db7 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -1769,7 +1769,7 @@ xml_doctype_in_content(const xmlChar *str)
* xmloption_arg, but a DOCTYPE node in the input can force DOCUMENT mode).
*
* If parsed_nodes isn't NULL and we parse in CONTENT mode, the list
- * of parsed nodes from the xmlParseInNodeContext call will be returned
+ * of parsed nodes from the xmlParseBalancedChunkMemory call will be returned
* to *parsed_nodes. (It is caller's responsibility to free that.)
*
* Errors normally result in ereport(ERROR), but if escontext is an
@@ -1795,6 +1795,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
PgXmlErrorContext *xmlerrcxt;
volatile xmlParserCtxtPtr ctxt = NULL;
volatile xmlDocPtr doc = NULL;
+ volatile int save_keep_blanks = -1;
/*
* This step looks annoyingly redundant, but we must do it to have a
@@ -1822,7 +1823,6 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
PG_TRY();
{
bool parse_as_document = false;
- int options;
int res_code;
size_t count = 0;
xmlChar *version = NULL;
@@ -1853,18 +1853,6 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
parse_as_document = true;
}
- /*
- * Select parse options.
- *
- * Note that here we try to apply DTD defaults (XML_PARSE_DTDATTR)
- * according to SQL/XML:2008 GR 10.16.7.d: 'Default values defined by
- * internal DTD are applied'. As for external DTDs, we try to support
- * them too (see SQL/XML:2008 GR 10.16.7.e), but that doesn't really
- * happen because xmlPgEntityLoader prevents it.
- */
- options = XML_PARSE_NOENT | XML_PARSE_DTDATTR
- | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS);
-
/* initialize output parameters */
if (parsed_xmloptiontype != NULL)
*parsed_xmloptiontype = parse_as_document ? XMLOPTION_DOCUMENT :
@@ -1874,11 +1862,26 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
if (parse_as_document)
{
+ int options;
+
+ /* set up parser context used by xmlCtxtReadDoc */
ctxt = xmlNewParserCtxt();
if (ctxt == NULL || xmlerrcxt->err_occurred)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
"could not allocate parser context");
+ /*
+ * Select parse options.
+ *
+ * Note that here we try to apply DTD defaults (XML_PARSE_DTDATTR)
+ * according to SQL/XML:2008 GR 10.16.7.d: 'Default values defined
+ * by internal DTD are applied'. As for external DTDs, we try to
+ * support them too (see SQL/XML:2008 GR 10.16.7.e), but that
+ * doesn't really happen because xmlPgEntityLoader prevents it.
+ */
+ options = XML_PARSE_NOENT | XML_PARSE_DTDATTR
+ | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS);
+
doc = xmlCtxtReadDoc(ctxt, utf8string,
NULL, /* no URL */
"UTF-8",
@@ -1900,10 +1903,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
}
else
{
- xmlNodePtr root;
- xmlNodePtr oldroot PG_USED_FOR_ASSERTS_ONLY;
-
- /* set up document with empty root node to be the context node */
+ /* set up document that xmlParseBalancedChunkMemory will add to */
doc = xmlNewDoc(version);
if (doc == NULL || xmlerrcxt->err_occurred)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
@@ -1916,43 +1916,22 @@ xml_parse(text *data, XmlOptionType xmloption_arg,
"could not allocate XML document");
doc->standalone = standalone;
- root = xmlNewNode(NULL, (const xmlChar *) "content-root");
- if (root == NULL || xmlerrcxt->err_occurred)
- xml_ereport(xmlerrcxt, ERROR, ERRCODE_OUT_OF_MEMORY,
- "could not allocate xml node");
-
- /*
- * This attaches root to doc, so we need not free it separately;
- * and there can't yet be any old root to free.
- */
- oldroot = xmlDocSetRootElement(doc, root);
- Assert(oldroot == NULL);
+ /* set parse options --- have to do this the ugly way */
+ save_keep_blanks = xmlKeepBlanksDefault(preserve_whitespace ? 1 : 0);
/* allow empty content */
if (*(utf8string + count))
{
- xmlNodePtr node_list = NULL;
- xmlParserErrors res;
-
- res = xmlParseInNodeContext(root,
- (char *) utf8string + count,
- strlen((char *) utf8string + count),
- options,
- &node_list);
-
- if (res != XML_ERR_OK || xmlerrcxt->err_occurred)
+ res_code = xmlParseBalancedChunkMemory(doc, NULL, NULL, 0,
+ utf8string + count,
+ parsed_nodes);
+ if (res_code != 0 || xmlerrcxt->err_occurred)
{
- xmlFreeNodeList(node_list);
xml_errsave(escontext, xmlerrcxt,
ERRCODE_INVALID_XML_CONTENT,
"invalid XML content");
goto fail;
}
-
- if (parsed_nodes != NULL)
- *parsed_nodes = node_list;
- else
- xmlFreeNodeList(node_list);
}
}
@@ -1961,6 +1940,8 @@ fail:
}
PG_CATCH();
{
+ if (save_keep_blanks != -1)
+ xmlKeepBlanksDefault(save_keep_blanks);
if (doc != NULL)
xmlFreeDoc(doc);
if (ctxt != NULL)
@@ -1972,6 +1953,9 @@ fail:
}
PG_END_TRY();
+ if (save_keep_blanks != -1)
+ xmlKeepBlanksDefault(save_keep_blanks);
+
if (ctxt != NULL)
xmlFreeParserCtxt(ctxt);
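
In CONTENT mode the code above no longer builds a dummy "content-root" node for xmlParseInNodeContext; it parses directly into the new document with xmlParseBalancedChunkMemory, so whitespace stripping has to go through libxml2's process-global xmlKeepBlanksDefault() setting, which the patch saves and restores on both the normal and the error path. A stripped-down sketch of that pattern (function and variable names are illustrative, not from the patch):

#include <stdbool.h>
#include <libxml/parser.h>

/* Parse a well-balanced content chunk into doc, honoring preserve_whitespace. */
static int
parse_content_chunk(xmlDocPtr doc, const xmlChar *content,
					xmlNodePtr *parsed_nodes, bool preserve_whitespace)
{
	int			save_keep_blanks;
	int			res_code;

	/* set the global whitespace option, remembering the previous value */
	save_keep_blanks = xmlKeepBlanksDefault(preserve_whitespace ? 1 : 0);

	res_code = xmlParseBalancedChunkMemory(doc,
										   NULL, NULL, 0,	/* default SAX, no user data, depth 0 */
										   content,
										   parsed_nodes);	/* list to receive parsed nodes, may be NULL */

	/* always restore the previous global setting */
	xmlKeepBlanksDefault(save_keep_blanks);

	return res_code;	/* 0 on success, nonzero means invalid XML content */
}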
diff --git a/src/bin/initdb/Makefile b/src/bin/initdb/Makefile
index 997e0a013e9..c0470efda92 100644
--- a/src/bin/initdb/Makefile
+++ b/src/bin/initdb/Makefile
@@ -20,7 +20,7 @@ include $(top_builddir)/src/Makefile.global
# from libpq, else we have risks of version skew if we run with a libpq
# shared library from a different PG version. Define
# USE_PRIVATE_ENCODING_FUNCS to ensure that that happens.
-override CPPFLAGS := -DUSE_PRIVATE_ENCODING_FUNCS -I$(libpq_srcdir) -I$(top_srcdir)/src/timezone $(ICU_CFLAGS) $(CPPFLAGS)
+override CPPFLAGS := -DUSE_PRIVATE_ENCODING_FUNCS -I$(libpq_srcdir) -I$(top_srcdir)/src/timezone $(CPPFLAGS) $(ICU_CFLAGS)
# We need libpq only because fe_utils does.
LDFLAGS_INTERNAL += -L$(top_builddir)/src/fe_utils -lpgfeutils $(libpq_pgport) $(ICU_LIBS)
diff --git a/src/common/Makefile b/src/common/Makefile
index 1e2b91c83c4..2c720caa509 100644
--- a/src/common/Makefile
+++ b/src/common/Makefile
@@ -163,7 +163,7 @@ libpgcommon_shlib.a: $(OBJS_SHLIB)
# The JSON API normally exits on out-of-memory; disable that behavior for shared
# library builds. This requires libpq's pqexpbuffer.h.
jsonapi_shlib.o: override CPPFLAGS += -DJSONAPI_USE_PQEXPBUFFER
-jsonapi_shlib.o: override CPPFLAGS += -I$(libpq_srcdir)
+jsonapi_shlib.o: override CPPFLAGS := -I$(libpq_srcdir) $(CPPFLAGS)
# Because this uses its own compilation rule, it doesn't use the
# dependency tracking logic from Makefile.global. To make sure that
diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h
index e5dd15098f6..ad2726f026f 100644
--- a/src/include/nodes/pathnodes.h
+++ b/src/include/nodes/pathnodes.h
@@ -2133,10 +2133,12 @@ typedef struct MemoizePath
* complete after caching the first record. */
bool binary_mode; /* true when cache key should be compared bit
* by bit, false when using hash equality ops */
- Cardinality calls; /* expected number of rescans */
uint32 est_entries; /* The maximum number of entries that the
* planner expects will fit in the cache, or 0
* if unknown */
+ Cardinality est_calls; /* expected number of rescans */
+ Cardinality est_unique_keys; /* estimated unique keys, for EXPLAIN */
+ double est_hit_ratio; /* estimated cache hit ratio, for EXPLAIN */
} MemoizePath;
/*
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 46e2e09ea35..6d8e1e99db3 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -1073,6 +1073,16 @@ typedef struct Memoize
/* paramids from param_exprs */
Bitmapset *keyparamids;
+
+ /* Estimated number of rescans, for EXPLAIN */
+ Cardinality est_calls;
+
+ /* Estimated number of distinct lookup keys, for EXPLAIN */
+ Cardinality est_unique_keys;
+
+ /* Estimated cache hit ratio, for EXPLAIN */
+ double est_hit_ratio;
+
} Memoize;
/* ----------------
diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h
index 60dcdb77e41..58936e963cb 100644
--- a/src/include/optimizer/pathnode.h
+++ b/src/include/optimizer/pathnode.h
@@ -90,7 +90,7 @@ extern MemoizePath *create_memoize_path(PlannerInfo *root,
List *hash_operators,
bool singlerow,
bool binary_mode,
- double calls);
+ Cardinality est_calls);
extern UniquePath *create_unique_path(PlannerInfo *root, RelOptInfo *rel,
Path *subpath, SpecialJoinInfo *sjinfo);
extern GatherPath *create_gather_path(PlannerInfo *root,
diff --git a/src/interfaces/ecpg/ecpglib/prepare.c b/src/interfaces/ecpg/ecpglib/prepare.c
index dd6fd1fe7f4..06f0135813b 100644
--- a/src/interfaces/ecpg/ecpglib/prepare.c
+++ b/src/interfaces/ecpg/ecpglib/prepare.c
@@ -603,7 +603,10 @@ ecpg_auto_prepare(int lineno, const char *connection_name, const int compat, cha
prep = ecpg_find_prepared_statement(stmtID, con, NULL);
/* This prepared name doesn't exist on this connection. */
if (!prep && !prepare_common(lineno, con, stmtID, query))
+ {
+ ecpg_free(*name);
return false;
+ }
}
else
@@ -619,11 +622,17 @@ ecpg_auto_prepare(int lineno, const char *connection_name, const int compat, cha
return false;
if (!ECPGprepare(lineno, connection_name, 0, stmtID, query))
+ {
+ ecpg_free(*name);
return false;
+ }
entNo = AddStmtToCache(lineno, stmtID, connection_name, compat, query);
if (entNo < 0)
+ {
+ ecpg_free(*name);
return false;
+ }
}
/* increase usage counter */
diff --git a/src/interfaces/libpq-oauth/Makefile b/src/interfaces/libpq-oauth/Makefile
index 270fc0cf2d9..682f17413b3 100644
--- a/src/interfaces/libpq-oauth/Makefile
+++ b/src/interfaces/libpq-oauth/Makefile
@@ -24,7 +24,7 @@ NAME = pq-oauth-$(MAJORVERSION)
override shlib := lib$(NAME)$(DLSUFFIX)
override stlib := libpq-oauth.a
-override CPPFLAGS := -I$(libpq_srcdir) -I$(top_builddir)/src/port $(LIBCURL_CPPFLAGS) $(CPPFLAGS)
+override CPPFLAGS := -I$(libpq_srcdir) -I$(top_builddir)/src/port $(CPPFLAGS) $(LIBCURL_CPPFLAGS)
OBJS = \
$(WIN32RES)
diff --git a/src/interfaces/libpq/Makefile b/src/interfaces/libpq/Makefile
index 47d67811509..da6650066d4 100644
--- a/src/interfaces/libpq/Makefile
+++ b/src/interfaces/libpq/Makefile
@@ -24,7 +24,7 @@ NAME= pq
SO_MAJOR_VERSION= 5
SO_MINOR_VERSION= $(MAJORVERSION)
-override CPPFLAGS := -I$(srcdir) $(CPPFLAGS) -I$(top_builddir)/src/port -I$(top_srcdir)/src/port
+override CPPFLAGS := -I$(srcdir) -I$(top_builddir)/src/port -I$(top_srcdir)/src/port $(CPPFLAGS)
ifneq ($(PORTNAME), win32)
override CFLAGS += $(PTHREAD_CFLAGS)
endif
diff --git a/src/pl/plpython/Makefile b/src/pl/plpython/Makefile
index f959083a0bd..25f295c3709 100644
--- a/src/pl/plpython/Makefile
+++ b/src/pl/plpython/Makefile
@@ -11,7 +11,7 @@ ifeq ($(PORTNAME), win32)
override python_libspec =
endif
-override CPPFLAGS := -I. -I$(srcdir) $(python_includespec) $(CPPFLAGS)
+override CPPFLAGS := -I. -I$(srcdir) $(CPPFLAGS) $(python_includespec)
rpathdir = $(python_libdir)
diff --git a/src/pl/tcl/Makefile b/src/pl/tcl/Makefile
index ea52a2efc22..dd57f7d694c 100644
--- a/src/pl/tcl/Makefile
+++ b/src/pl/tcl/Makefile
@@ -11,7 +11,7 @@ top_builddir = ../../..
include $(top_builddir)/src/Makefile.global
-override CPPFLAGS := -I. -I$(srcdir) $(TCL_INCLUDE_SPEC) $(CPPFLAGS)
+override CPPFLAGS := -I. -I$(srcdir) $(CPPFLAGS) $(TCL_INCLUDE_SPEC)
# On Windows, we don't link directly with the Tcl library; see below
ifneq ($(PORTNAME), win32)
diff --git a/src/test/perl/PostgreSQL/Test/Cluster.pm b/src/test/perl/PostgreSQL/Test/Cluster.pm
index 61f68e0cc2e..35413f14019 100644
--- a/src/test/perl/PostgreSQL/Test/Cluster.pm
+++ b/src/test/perl/PostgreSQL/Test/Cluster.pm
@@ -304,6 +304,7 @@ sub is_alive
my $ret = PostgreSQL::Test::Utils::system_log(
'pg_isready',
+ '--timeout' => $PostgreSQL::Test::Utils::timeout_default,
'--host' => $self->host,
'--port' => $self->port);
diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl
index debfa635c36..4c5af018ee4 100644
--- a/src/test/recovery/t/013_crash_restart.pl
+++ b/src/test/recovery/t/013_crash_restart.pl
@@ -228,6 +228,13 @@ is( $node->safe_psql(
'before-orderly-restart',
'can still write after crash restart');
+# Confirm that the logical replication launcher, a background worker
+# without the never-restart flag, has also restarted successfully.
+is($node->poll_query_until('postgres',
+ "SELECT count(*) = 1 FROM pg_stat_activity WHERE backend_type = 'logical replication launcher'"),
+ '1',
+ 'logical replication launcher restarted after crash');
+
# Just to be sure, check that an orderly restart now still works
$node->restart();