Diffstat (limited to 'src/backend')
-rw-r--r--  src/backend/access/gin/ginbulk.c             |  2
-rw-r--r--  src/backend/access/heap/vacuumlazy.c         |  2
-rw-r--r--  src/backend/access/table/tableamapi.c        |  4
-rw-r--r--  src/backend/access/transam/commit_ts.c       |  4
-rw-r--r--  src/backend/access/transam/multixact.c       |  4
-rw-r--r--  src/backend/access/transam/rmgr.c            |  4
-rw-r--r--  src/backend/access/transam/twophase.c        |  6
-rw-r--r--  src/backend/access/transam/xlog.c            | 26
-rw-r--r--  src/backend/access/transam/xlogarchive.c     |  2
-rw-r--r--  src/backend/access/transam/xlogfuncs.c       |  4
-rw-r--r--  src/backend/access/transam/xlogprefetcher.c  |  2
-rw-r--r--  src/backend/access/transam/xlogrecovery.c    | 12
-rw-r--r--  src/backend/commands/publicationcmds.c       |  4
-rw-r--r--  src/backend/commands/vacuum.c                |  2
-rw-r--r--  src/backend/commands/variable.c              |  8
-rw-r--r--  src/backend/libpq/be-secure-openssl.c        |  4
-rw-r--r--  src/backend/libpq/hba.c                      |  2
-rw-r--r--  src/backend/libpq/pqcomm.c                   |  2
-rw-r--r--  src/backend/parser/scan.l                    |  2
-rw-r--r--  src/backend/port/sysv_sema.c                 |  2
-rw-r--r--  src/backend/port/sysv_shmem.c                |  8
-rw-r--r--  src/backend/port/win32_shmem.c               |  2
-rw-r--r--  src/backend/postmaster/bgworker.c            |  2
-rw-r--r--  src/backend/postmaster/checkpointer.c        |  2
-rw-r--r--  src/backend/postmaster/pgarch.c              | 10
-rw-r--r--  src/backend/postmaster/postmaster.c          | 10
-rw-r--r--  src/backend/replication/logical/decode.c     |  2
-rw-r--r--  src/backend/replication/logical/launcher.c   |  4
-rw-r--r--  src/backend/replication/logical/logical.c    |  4
-rw-r--r--  src/backend/replication/logical/origin.c     |  8
-rw-r--r--  src/backend/replication/slot.c               | 20
-rw-r--r--  src/backend/replication/syncrep.c            |  2
-rw-r--r--  src/backend/storage/buffer/localbuf.c        |  2
-rw-r--r--  src/backend/storage/file/fd.c                |  8
-rw-r--r--  src/backend/storage/lmgr/lock.c              | 12
-rw-r--r--  src/backend/storage/lmgr/predicate.c         | 12
-rw-r--r--  src/backend/storage/lmgr/proc.c              |  2
-rw-r--r--  src/backend/tcop/postgres.c                  | 12
-rw-r--r--  src/backend/utils/adt/pg_locale.c            |  4
-rw-r--r--  src/backend/utils/adt/varlena.c              |  2
-rw-r--r--  src/backend/utils/fmgr/dfmgr.c               |  4
-rw-r--r--  src/backend/utils/misc/guc.c                 |  2
-rw-r--r--  src/backend/utils/misc/guc_tables.c          | 24
43 files changed, 128 insertions, 128 deletions
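Every hunk in this diff applies one convention: configuration parameter (GUC) names embedded in user-facing messages are wrapped in double quotes, and where the name is passed as a separate argument, the quotes go around the "%s" placeholder so translators keep a single reusable string. A minimal sketch of the resulting ereport() shape, modeled on the lock.c hunks further down (illustrative only, not code added by this commit):

    /* Illustrative sketch: GUC name quoted and passed as a separate "%s" argument. */
    ereport(ERROR,
            (errcode(ERRCODE_OUT_OF_MEMORY),
             errmsg("out of shared memory"),
             errhint("You might need to increase \"%s\".",
                     "max_locks_per_transaction")));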
diff --git a/src/backend/access/gin/ginbulk.c b/src/backend/access/gin/ginbulk.c
index a522801c2f7..7f89cd5e826 100644
--- a/src/backend/access/gin/ginbulk.c
+++ b/src/backend/access/gin/ginbulk.c
@@ -42,7 +42,7 @@ ginCombineData(RBTNode *existing, const RBTNode *newdata, void *arg)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("posting list is too long"),
- errhint("Reduce maintenance_work_mem.")));
+ errhint("Reduce \"maintenance_work_mem\".")));
accum->allocatedMemory -= GetMemoryChunkSpace(eo->list);
eo->maxcount *= 2;
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c
index 84cc983b6e6..8145ea8fc3f 100644
--- a/src/backend/access/heap/vacuumlazy.c
+++ b/src/backend/access/heap/vacuumlazy.c
@@ -2327,7 +2327,7 @@ lazy_check_wraparound_failsafe(LVRelState *vacrel)
vacrel->dbname, vacrel->relnamespace, vacrel->relname,
vacrel->num_index_scans),
errdetail("The table's relfrozenxid or relminmxid is too far in the past."),
- errhint("Consider increasing configuration parameter maintenance_work_mem or autovacuum_work_mem.\n"
+ errhint("Consider increasing configuration parameter \"maintenance_work_mem\" or \"autovacuum_work_mem\".\n"
"You might also need to consider other ways for VACUUM to keep up with the allocation of transaction IDs.")));
/* Stop applying cost limits from this point on */
diff --git a/src/backend/access/table/tableamapi.c b/src/backend/access/table/tableamapi.c
index ce637a5a5d9..e9b598256fb 100644
--- a/src/backend/access/table/tableamapi.c
+++ b/src/backend/access/table/tableamapi.c
@@ -106,14 +106,14 @@ check_default_table_access_method(char **newval, void **extra, GucSource source)
{
if (**newval == '\0')
{
- GUC_check_errdetail("%s cannot be empty.",
+ GUC_check_errdetail("\"%s\" cannot be empty.",
"default_table_access_method");
return false;
}
if (strlen(*newval) >= NAMEDATALEN)
{
- GUC_check_errdetail("%s is too long (maximum %d characters).",
+ GUC_check_errdetail("\"%s\" is too long (maximum %d characters).",
"default_table_access_method", NAMEDATALEN - 1);
return false;
}
diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c
index f2214946874..77e1899d7ad 100644
--- a/src/backend/access/transam/commit_ts.c
+++ b/src/backend/access/transam/commit_ts.c
@@ -384,9 +384,9 @@ error_commit_ts_disabled(void)
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not get commit timestamp data"),
RecoveryInProgress() ?
- errhint("Make sure the configuration parameter %s is set on the primary server.",
+ errhint("Make sure the configuration parameter \"%s\" is set on the primary server.",
"track_commit_timestamp") :
- errhint("Make sure the configuration parameter %s is set.",
+ errhint("Make sure the configuration parameter \"%s\" is set.",
"track_commit_timestamp")));
}
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index 380c866d714..54c916e0347 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -1151,7 +1151,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
MultiXactState->offsetStopLimit - nextOffset - 1,
nmembers,
MultiXactState->offsetStopLimit - nextOffset - 1),
- errhint("Execute a database-wide VACUUM in database with OID %u with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings.",
+ errhint("Execute a database-wide VACUUM in database with OID %u with reduced \"vacuum_multixact_freeze_min_age\" and \"vacuum_multixact_freeze_table_age\" settings.",
MultiXactState->oldestMultiXactDB)));
}
@@ -1187,7 +1187,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
MultiXactState->offsetStopLimit - nextOffset + nmembers,
MultiXactState->oldestMultiXactDB,
MultiXactState->offsetStopLimit - nextOffset + nmembers),
- errhint("Execute a database-wide VACUUM in that database with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings.")));
+ errhint("Execute a database-wide VACUUM in that database with reduced \"vacuum_multixact_freeze_min_age\" and \"vacuum_multixact_freeze_table_age\" settings.")));
ExtendMultiXactMember(nextOffset, nmembers);
diff --git a/src/backend/access/transam/rmgr.c b/src/backend/access/transam/rmgr.c
index 3e2f1d4a237..1b7499726eb 100644
--- a/src/backend/access/transam/rmgr.c
+++ b/src/backend/access/transam/rmgr.c
@@ -91,7 +91,7 @@ void
RmgrNotFound(RmgrId rmid)
{
ereport(ERROR, (errmsg("resource manager with ID %d not registered", rmid),
- errhint("Include the extension module that implements this resource manager in shared_preload_libraries.")));
+ errhint("Include the extension module that implements this resource manager in \"shared_preload_libraries\".")));
}
/*
@@ -118,7 +118,7 @@ RegisterCustomRmgr(RmgrId rmid, const RmgrData *rmgr)
if (!process_shared_preload_libraries_in_progress)
ereport(ERROR,
(errmsg("failed to register custom resource manager \"%s\" with ID %d", rmgr->rm_name, rmid),
- errdetail("Custom resource manager must be registered while initializing modules in shared_preload_libraries.")));
+ errdetail("Custom resource manager must be registered while initializing modules in \"shared_preload_libraries\".")));
if (RmgrTable[rmid].rm_name != NULL)
ereport(ERROR,
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 8090ac9fc19..bf451d42ffb 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -373,7 +373,7 @@ MarkAsPreparing(TransactionId xid, const char *gid,
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("prepared transactions are disabled"),
- errhint("Set max_prepared_transactions to a nonzero value.")));
+ errhint("Set \"max_prepared_transactions\" to a nonzero value.")));
/* on first call, register the exit hook */
if (!twophaseExitRegistered)
@@ -402,7 +402,7 @@ MarkAsPreparing(TransactionId xid, const char *gid,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("maximum number of prepared transactions reached"),
- errhint("Increase max_prepared_transactions (currently %d).",
+ errhint("Increase \"max_prepared_transactions\" (currently %d).",
max_prepared_xacts)));
gxact = TwoPhaseState->freeGXacts;
TwoPhaseState->freeGXacts = gxact->next;
@@ -2539,7 +2539,7 @@ PrepareRedoAdd(char *buf, XLogRecPtr start_lsn,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("maximum number of prepared transactions reached"),
- errhint("Increase max_prepared_transactions (currently %d).",
+ errhint("Increase \"max_prepared_transactions\" (currently %d).",
max_prepared_xacts)));
gxact = TwoPhaseState->freeGXacts;
TwoPhaseState->freeGXacts = gxact->next;
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index c3fd9c1eaed..330e058c5f2 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -4501,11 +4501,11 @@ ReadControlFile(void)
/* check and update variables dependent on wal_segment_size */
if (ConvertToXSegs(min_wal_size_mb, wal_segment_size) < 2)
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("min_wal_size must be at least twice wal_segment_size")));
+ errmsg("\"min_wal_size\" must be at least twice \"wal_segment_size\"")));
if (ConvertToXSegs(max_wal_size_mb, wal_segment_size) < 2)
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("max_wal_size must be at least twice wal_segment_size")));
+ errmsg("\"max_wal_size\" must be at least twice \"wal_segment_size\"")));
UsableBytesInSegment =
(wal_segment_size / XLOG_BLCKSZ * UsableBytesInPage) -
@@ -5351,9 +5351,9 @@ CheckRequiredParameterValues(void)
{
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("WAL was generated with wal_level=minimal, cannot continue recovering"),
- errdetail("This happens if you temporarily set wal_level=minimal on the server."),
- errhint("Use a backup taken after setting wal_level to higher than minimal.")));
+ errmsg("WAL was generated with \"wal_level=minimal\", cannot continue recovering"),
+ errdetail("This happens if you temporarily set \"wal_level=minimal\" on the server."),
+ errhint("Use a backup taken after setting \"wal_level\" to higher than \"minimal\".")));
}
/*
@@ -8549,7 +8549,7 @@ get_sync_bit(int method)
#endif
default:
/* can't happen (unless we are out of sync with option array) */
- elog(ERROR, "unrecognized wal_sync_method: %d", method);
+ elog(ERROR, "unrecognized \"wal_sync_method\": %d", method);
return 0; /* silence warning */
}
}
@@ -8647,7 +8647,7 @@ issue_xlog_fsync(int fd, XLogSegNo segno, TimeLineID tli)
default:
ereport(PANIC,
errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg_internal("unrecognized wal_sync_method: %d", wal_sync_method));
+ errmsg_internal("unrecognized \"wal_sync_method\": %d", wal_sync_method));
break;
}
@@ -8725,7 +8725,7 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces,
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL level not sufficient for making an online backup"),
- errhint("wal_level must be set to \"replica\" or \"logical\" at server start.")));
+ errhint("\"wal_level\" must be set to \"replica\" or \"logical\" at server start.")));
if (strlen(backupidstr) > MAXPGPATH)
ereport(ERROR,
@@ -8851,11 +8851,11 @@ do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces,
if (!checkpointfpw || state->startpoint <= recptr)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("WAL generated with full_page_writes=off was replayed "
+ errmsg("WAL generated with \"full_page_writes=off\" was replayed "
"since last restartpoint"),
errhint("This means that the backup being taken on the standby "
"is corrupt and should not be used. "
- "Enable full_page_writes and run CHECKPOINT on the primary, "
+ "Enable \"full_page_writes\" and run CHECKPOINT on the primary, "
"and then try an online backup again.")));
/*
@@ -9147,11 +9147,11 @@ do_pg_backup_stop(BackupState *state, bool waitforarchive)
if (state->startpoint <= recptr)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("WAL generated with full_page_writes=off was replayed "
+ errmsg("WAL generated with \"full_page_writes=off\" was replayed "
"during online backup"),
errhint("This means that the backup being taken on the standby "
"is corrupt and should not be used. "
- "Enable full_page_writes and run CHECKPOINT on the primary, "
+ "Enable \"full_page_writes\" and run CHECKPOINT on the primary, "
"and then try an online backup again.")));
@@ -9279,7 +9279,7 @@ do_pg_backup_stop(BackupState *state, bool waitforarchive)
ereport(WARNING,
(errmsg("still waiting for all required WAL segments to be archived (%d seconds elapsed)",
waits),
- errhint("Check that your archive_command is executing properly. "
+ errhint("Check that your \"archive_command\" is executing properly. "
"You can safely cancel this backup, "
"but the database backup will not be usable without all the WAL segments.")));
}
diff --git a/src/backend/access/transam/xlogarchive.c b/src/backend/access/transam/xlogarchive.c
index caa1f03d934..81999b48200 100644
--- a/src/backend/access/transam/xlogarchive.c
+++ b/src/backend/access/transam/xlogarchive.c
@@ -233,7 +233,7 @@ RestoreArchivedFile(char *path, const char *xlogfname,
ereport(elevel,
(errcode_for_file_access(),
errmsg("could not stat file \"%s\": %m", xlogpath),
- errdetail("restore_command returned a zero exit status, but stat() failed.")));
+ errdetail("\"restore_command\" returned a zero exit status, but stat() failed.")));
}
}
diff --git a/src/backend/access/transam/xlogfuncs.c b/src/backend/access/transam/xlogfuncs.c
index 92bdb17ed52..4e46baaebdf 100644
--- a/src/backend/access/transam/xlogfuncs.c
+++ b/src/backend/access/transam/xlogfuncs.c
@@ -212,7 +212,7 @@ pg_log_standby_snapshot(PG_FUNCTION_ARGS)
if (!XLogStandbyInfoActive())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("pg_log_standby_snapshot() can only be used if wal_level >= replica")));
+ errmsg("pg_log_standby_snapshot() can only be used if \"wal_level\" >= \"replica\"")));
recptr = LogStandbySnapshot();
@@ -245,7 +245,7 @@ pg_create_restore_point(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("WAL level not sufficient for creating a restore point"),
- errhint("wal_level must be set to \"replica\" or \"logical\" at server start.")));
+ errhint("\"wal_level\" must be set to \"replica\" or \"logical\" at server start.")));
restore_name_str = text_to_cstring(restore_name);
diff --git a/src/backend/access/transam/xlogprefetcher.c b/src/backend/access/transam/xlogprefetcher.c
index fc80c37e554..84023d61baf 100644
--- a/src/backend/access/transam/xlogprefetcher.c
+++ b/src/backend/access/transam/xlogprefetcher.c
@@ -1085,7 +1085,7 @@ check_recovery_prefetch(int *new_value, void **extra, GucSource source)
#ifndef USE_PREFETCH
if (*new_value == RECOVERY_PREFETCH_ON)
{
- GUC_check_errdetail("recovery_prefetch is not supported on platforms that lack posix_fadvise().");
+ GUC_check_errdetail("\"recovery_prefetch\" is not supported on platforms that lack posix_fadvise().");
return false;
}
#endif
diff --git a/src/backend/access/transam/xlogrecovery.c b/src/backend/access/transam/xlogrecovery.c
index 29c5bec0847..b45b8331720 100644
--- a/src/backend/access/transam/xlogrecovery.c
+++ b/src/backend/access/transam/xlogrecovery.c
@@ -1119,7 +1119,7 @@ validateRecoveryParameters(void)
if ((PrimaryConnInfo == NULL || strcmp(PrimaryConnInfo, "") == 0) &&
(recoveryRestoreCommand == NULL || strcmp(recoveryRestoreCommand, "") == 0))
ereport(WARNING,
- (errmsg("specified neither primary_conninfo nor restore_command"),
+ (errmsg("specified neither \"primary_conninfo\" nor \"restore_command\""),
errhint("The database server will regularly poll the pg_wal subdirectory to check for files placed there.")));
}
else
@@ -1128,7 +1128,7 @@ validateRecoveryParameters(void)
strcmp(recoveryRestoreCommand, "") == 0)
ereport(FATAL,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("must specify restore_command when standby mode is not enabled")));
+ errmsg("must specify \"restore_command\" when standby mode is not enabled")));
}
/*
@@ -2162,7 +2162,7 @@ CheckTablespaceDirectory(void)
errmsg("unexpected directory entry \"%s\" found in %s",
de->d_name, "pg_tblspc/"),
errdetail("All directory entries in pg_tblspc/ should be symbolic links."),
- errhint("Remove those directories, or set allow_in_place_tablespaces to ON transiently to let recovery complete.")));
+ errhint("Remove those directories, or set \"allow_in_place_tablespaces\" to ON transiently to let recovery complete.")));
}
}
@@ -4771,7 +4771,7 @@ error_multiple_recovery_targets(void)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("multiple recovery targets specified"),
- errdetail("At most one of recovery_target, recovery_target_lsn, recovery_target_name, recovery_target_time, recovery_target_xid may be set.")));
+ errdetail("At most one of \"recovery_target\", \"recovery_target_lsn\", \"recovery_target_name\", \"recovery_target_time\", \"recovery_target_xid\" may be set.")));
}
/*
@@ -4855,7 +4855,7 @@ check_recovery_target_name(char **newval, void **extra, GucSource source)
/* Use the value of newval directly */
if (strlen(*newval) >= MAXFNAMELEN)
{
- GUC_check_errdetail("%s is too long (maximum %d characters).",
+ GUC_check_errdetail("\"%s\" is too long (maximum %d characters).",
"recovery_target_name", MAXFNAMELEN - 1);
return false;
}
@@ -4979,7 +4979,7 @@ check_recovery_target_timeline(char **newval, void **extra, GucSource source)
strtoul(*newval, NULL, 0);
if (errno == EINVAL || errno == ERANGE)
{
- GUC_check_errdetail("recovery_target_timeline is not a valid number.");
+ GUC_check_errdetail("\"recovery_target_timeline\" is not a valid number.");
return false;
}
}
diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c
index 9bcc22fdd7e..6ea709988ee 100644
--- a/src/backend/commands/publicationcmds.c
+++ b/src/backend/commands/publicationcmds.c
@@ -858,8 +858,8 @@ CreatePublication(ParseState *pstate, CreatePublicationStmt *stmt)
if (wal_level != WAL_LEVEL_LOGICAL)
ereport(WARNING,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("wal_level is insufficient to publish logical changes"),
- errhint("Set wal_level to \"logical\" before creating subscriptions.")));
+ errmsg("\"wal_level\" is insufficient to publish logical changes"),
+ errhint("Set \"wal_level\" to \"logical\" before creating subscriptions.")));
return myself;
}
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 521ee74586a..48f8eab2022 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -131,7 +131,7 @@ check_vacuum_buffer_usage_limit(int *newval, void **extra,
return true;
/* Value does not fall within any allowable range */
- GUC_check_errdetail("vacuum_buffer_usage_limit must be 0 or between %d kB and %d kB",
+ GUC_check_errdetail("\"vacuum_buffer_usage_limit\" must be 0 or between %d kB and %d kB",
MIN_BAS_VAC_RING_SIZE_KB, MAX_BAS_VAC_RING_SIZE_KB);
return false;
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index 01151ca2b5a..9345131711e 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -717,7 +717,7 @@ check_client_encoding(char **newval, void **extra, GucSource source)
else
{
/* Provide a useful complaint */
- GUC_check_errdetail("Cannot change client_encoding now.");
+ GUC_check_errdetail("Cannot change \"client_encoding\" now.");
}
return false;
}
@@ -778,7 +778,7 @@ assign_client_encoding(const char *newval, void *extra)
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot change client_encoding during a parallel operation")));
+ errmsg("cannot change \"client_encoding\" during a parallel operation")));
}
/* We do not expect an error if PrepareClientEncoding succeeded */
@@ -1202,7 +1202,7 @@ check_effective_io_concurrency(int *newval, void **extra, GucSource source)
#ifndef USE_PREFETCH
if (*newval != 0)
{
- GUC_check_errdetail("effective_io_concurrency must be set to 0 on platforms that lack posix_fadvise().");
+ GUC_check_errdetail("\"effective_io_concurrency\" must be set to 0 on platforms that lack posix_fadvise().");
return false;
}
#endif /* USE_PREFETCH */
@@ -1215,7 +1215,7 @@ check_maintenance_io_concurrency(int *newval, void **extra, GucSource source)
#ifndef USE_PREFETCH
if (*newval != 0)
{
- GUC_check_errdetail("maintenance_io_concurrency must be set to 0 on platforms that lack posix_fadvise().");
+ GUC_check_errdetail("\"maintenance_io_concurrency\" must be set to 0 on platforms that lack posix_fadvise().");
return false;
}
#endif /* USE_PREFETCH */
diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c
index 60cf68aac4a..0caad6bed3d 100644
--- a/src/backend/libpq/be-secure-openssl.c
+++ b/src/backend/libpq/be-secure-openssl.c
@@ -201,7 +201,7 @@ be_tls_init(bool isServerStart)
{
ereport(isServerStart ? FATAL : LOG,
/*- translator: first %s is a GUC option name, second %s is its value */
- (errmsg("%s setting \"%s\" not supported by this build",
+ (errmsg("\"%s\" setting \"%s\" not supported by this build",
"ssl_min_protocol_version",
GetConfigOption("ssl_min_protocol_version",
false, false))));
@@ -251,7 +251,7 @@ be_tls_init(bool isServerStart)
{
ereport(isServerStart ? FATAL : LOG,
(errmsg("could not set SSL protocol version range"),
- errdetail("%s cannot be higher than %s",
+ errdetail("\"%s\" cannot be higher than \"%s\"",
"ssl_min_protocol_version",
"ssl_max_protocol_version")));
goto error;
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
index d506c3c0b75..18271def2e8 100644
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -1378,7 +1378,7 @@ parse_hba_line(TokenizedAuthLine *tok_line, int elevel)
ereport(elevel,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("hostssl record cannot match because SSL is disabled"),
- errhint("Set ssl = on in postgresql.conf."),
+ errhint("Set \"ssl = on\" in postgresql.conf."),
errcontext("line %d of configuration file \"%s\"",
line_num, file_name)));
*err_msg = "hostssl record cannot match because SSL is disabled";
diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c
index 2cee49a2085..daa0696146d 100644
--- a/src/backend/libpq/pqcomm.c
+++ b/src/backend/libpq/pqcomm.c
@@ -731,7 +731,7 @@ Setup_AF_UNIX(const char *sock_path)
if (Unix_socket_group[0] != '\0')
{
#ifdef WIN32
- elog(WARNING, "configuration item unix_socket_group is not supported on this platform");
+ elog(WARNING, "configuration item \"unix_socket_group\" is not supported on this platform");
#else
char *endptr;
unsigned long val;
diff --git a/src/backend/parser/scan.l b/src/backend/parser/scan.l
index b499975e9c4..9b33fb8d722 100644
--- a/src/backend/parser/scan.l
+++ b/src/backend/parser/scan.l
@@ -565,7 +565,7 @@ other .
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("unsafe use of string constant with Unicode escapes"),
- errdetail("String constants with Unicode escapes cannot be used when standard_conforming_strings is off."),
+ errdetail("String constants with Unicode escapes cannot be used when \"standard_conforming_strings\" is off."),
lexer_errposition()));
BEGIN(xus);
startlit();
diff --git a/src/backend/port/sysv_sema.c b/src/backend/port/sysv_sema.c
index 647045e8c53..1454f96b5f3 100644
--- a/src/backend/port/sysv_sema.c
+++ b/src/backend/port/sysv_sema.c
@@ -127,7 +127,7 @@ InternalIpcSemaphoreCreate(IpcSemaphoreKey semKey, int numSems)
"semaphore sets (SEMMNI), or the system wide maximum number of "
"semaphores (SEMMNS), would be exceeded. You need to raise the "
"respective kernel parameter. Alternatively, reduce PostgreSQL's "
- "consumption of semaphores by reducing its max_connections parameter.\n"
+ "consumption of semaphores by reducing its \"max_connections\" parameter.\n"
"The PostgreSQL documentation contains more information about "
"configuring your system for PostgreSQL.") : 0));
}
diff --git a/src/backend/port/sysv_shmem.c b/src/backend/port/sysv_shmem.c
index 1a6d8fa0fbc..362a37d3b3a 100644
--- a/src/backend/port/sysv_shmem.c
+++ b/src/backend/port/sysv_shmem.c
@@ -581,7 +581,7 @@ check_huge_page_size(int *newval, void **extra, GucSource source)
/* Recent enough Linux only, for now. See GetHugePageSize(). */
if (*newval != 0)
{
- GUC_check_errdetail("huge_page_size must be 0 on this platform.");
+ GUC_check_errdetail("\"huge_page_size\" must be 0 on this platform.");
return false;
}
#endif
@@ -658,8 +658,8 @@ CreateAnonymousSegment(Size *size)
"for a shared memory segment exceeded available memory, "
"swap space, or huge pages. To reduce the request size "
"(currently %zu bytes), reduce PostgreSQL's shared "
- "memory usage, perhaps by reducing shared_buffers or "
- "max_connections.",
+ "memory usage, perhaps by reducing \"shared_buffers\" or "
+ "\"max_connections\".",
allocsize) : 0));
}
@@ -729,7 +729,7 @@ PGSharedMemoryCreate(Size size,
if (huge_pages == HUGE_PAGES_ON && shared_memory_type != SHMEM_TYPE_MMAP)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("huge pages not supported with the current shared_memory_type setting")));
+ errmsg("huge pages not supported with the current \"shared_memory_type\" setting")));
/* Room for a header? */
Assert(size > MAXALIGN(sizeof(PGShmemHeader)));
diff --git a/src/backend/port/win32_shmem.c b/src/backend/port/win32_shmem.c
index 90bed0146dd..3bcce9d3b63 100644
--- a/src/backend/port/win32_shmem.c
+++ b/src/backend/port/win32_shmem.c
@@ -643,7 +643,7 @@ check_huge_page_size(int *newval, void **extra, GucSource source)
{
if (*newval != 0)
{
- GUC_check_errdetail("huge_page_size must be 0 on this platform.");
+ GUC_check_errdetail("\"huge_page_size\" must be 0 on this platform.");
return false;
}
return true;
diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c
index cf64a4beb20..97f9f28424a 100644
--- a/src/backend/postmaster/bgworker.c
+++ b/src/backend/postmaster/bgworker.c
@@ -885,7 +885,7 @@ RegisterBackgroundWorker(BackgroundWorker *worker)
return;
ereport(LOG,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("background worker \"%s\": must be registered in shared_preload_libraries",
+ errmsg("background worker \"%s\": must be registered in \"shared_preload_libraries\"",
worker->bgw_name)));
return;
}
diff --git a/src/backend/postmaster/checkpointer.c b/src/backend/postmaster/checkpointer.c
index 8ef600ae72a..3c68a9904db 100644
--- a/src/backend/postmaster/checkpointer.c
+++ b/src/backend/postmaster/checkpointer.c
@@ -442,7 +442,7 @@ CheckpointerMain(char *startup_data, size_t startup_data_len)
"checkpoints are occurring too frequently (%d seconds apart)",
elapsed_secs,
elapsed_secs),
- errhint("Consider increasing the configuration parameter max_wal_size.")));
+ errhint("Consider increasing the configuration parameter \"%s\".", "max_wal_size")));
/*
* Initialize checkpointer-private variables used during
diff --git a/src/backend/postmaster/pgarch.c b/src/backend/postmaster/pgarch.c
index d82bcc2cfd5..3fc8fe7d105 100644
--- a/src/backend/postmaster/pgarch.c
+++ b/src/backend/postmaster/pgarch.c
@@ -425,7 +425,7 @@ pgarch_ArchiverCopyLoop(void)
!ArchiveCallbacks->check_configured_cb(archive_module_state))
{
ereport(WARNING,
- (errmsg("archive_mode enabled, yet archiving is not configured"),
+ (errmsg("\"archive_mode\" enabled, yet archiving is not configured"),
arch_module_check_errdetail_string ?
errdetail_internal("%s", arch_module_check_errdetail_string) : 0));
return;
@@ -876,8 +876,8 @@ HandlePgArchInterrupts(void)
if (XLogArchiveLibrary[0] != '\0' && XLogArchiveCommand[0] != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("both archive_command and archive_library set"),
- errdetail("Only one of archive_command, archive_library may be set.")));
+ errmsg("both \"archive_command\" and \"archive_library\" set"),
+ errdetail("Only one of \"archive_command\", \"archive_library\" may be set.")));
archiveLibChanged = strcmp(XLogArchiveLibrary, archiveLib) != 0;
pfree(archiveLib);
@@ -915,8 +915,8 @@ LoadArchiveLibrary(void)
if (XLogArchiveLibrary[0] != '\0' && XLogArchiveCommand[0] != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("both archive_command and archive_library set"),
- errdetail("Only one of archive_command, archive_library may be set.")));
+ errmsg("both \"archive_command\" and \"archive_library\" set"),
+ errdetail("Only one of \"archive_command\", \"archive_library\" may be set.")));
/*
* If shell archiving is enabled, use our special initialization function.
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 7f3170a8f06..bf0241aed0c 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -822,7 +822,7 @@ PostmasterMain(int argc, char *argv[])
*/
if (SuperuserReservedConnections + ReservedConnections >= MaxConnections)
{
- write_stderr("%s: superuser_reserved_connections (%d) plus reserved_connections (%d) must be less than max_connections (%d)\n",
+ write_stderr("%s: \"superuser_reserved_connections\" (%d) plus \"reserved_connections\" (%d) must be less than \"max_connections\" (%d)\n",
progname,
SuperuserReservedConnections, ReservedConnections,
MaxConnections);
@@ -830,13 +830,13 @@ PostmasterMain(int argc, char *argv[])
}
if (XLogArchiveMode > ARCHIVE_MODE_OFF && wal_level == WAL_LEVEL_MINIMAL)
ereport(ERROR,
- (errmsg("WAL archival cannot be enabled when wal_level is \"minimal\"")));
+ (errmsg("WAL archival cannot be enabled when \"wal_level\" is \"minimal\"")));
if (max_wal_senders > 0 && wal_level == WAL_LEVEL_MINIMAL)
ereport(ERROR,
- (errmsg("WAL streaming (max_wal_senders > 0) requires wal_level \"replica\" or \"logical\"")));
+ (errmsg("WAL streaming (\"max_wal_senders\" > 0) requires \"wal_level\" to be \"replica\" or \"logical\"")));
if (summarize_wal && wal_level == WAL_LEVEL_MINIMAL)
ereport(ERROR,
- (errmsg("WAL cannot be summarized when wal_level is \"minimal\"")));
+ (errmsg("WAL cannot be summarized when \"wal_level\" is \"minimal\"")));
/*
* Other one-time internal sanity checks can go here, if they are fast.
@@ -3359,7 +3359,7 @@ PostmasterStateMachine(void)
if (!restart_after_crash)
{
ereport(LOG,
- (errmsg("shutting down because restart_after_crash is off")));
+ (errmsg("shutting down because \"restart_after_crash\" is off")));
ExitPostmaster(1);
}
}
diff --git a/src/backend/replication/logical/decode.c b/src/backend/replication/logical/decode.c
index 7a86f8481db..8ec5adfd909 100644
--- a/src/backend/replication/logical/decode.c
+++ b/src/backend/replication/logical/decode.c
@@ -174,7 +174,7 @@ xlog_decode(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
Assert(RecoveryInProgress());
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("logical decoding on standby requires wal_level >= logical on the primary")));
+ errmsg("logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary")));
}
break;
}
diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c
index 66070e9131c..27c3a91fb75 100644
--- a/src/backend/replication/logical/launcher.c
+++ b/src/backend/replication/logical/launcher.c
@@ -425,7 +425,7 @@ retry:
ereport(WARNING,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("out of logical replication worker slots"),
- errhint("You might need to increase %s.", "max_logical_replication_workers")));
+ errhint("You might need to increase \"%s\".", "max_logical_replication_workers")));
return false;
}
@@ -511,7 +511,7 @@ retry:
ereport(WARNING,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("out of background worker slots"),
- errhint("You might need to increase %s.", "max_worker_processes")));
+ errhint("You might need to increase \"%s\".", "max_worker_processes")));
return false;
}
diff --git a/src/backend/replication/logical/logical.c b/src/backend/replication/logical/logical.c
index 97a4d99c4e7..99f31849bb1 100644
--- a/src/backend/replication/logical/logical.c
+++ b/src/backend/replication/logical/logical.c
@@ -118,7 +118,7 @@ CheckLogicalDecodingRequirements(void)
if (wal_level < WAL_LEVEL_LOGICAL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("logical decoding requires wal_level >= logical")));
+ errmsg("logical decoding requires \"wal_level\" >= \"logical\"")));
if (MyDatabaseId == InvalidOid)
ereport(ERROR,
@@ -138,7 +138,7 @@ CheckLogicalDecodingRequirements(void)
if (GetActiveWalLevelOnStandby() < WAL_LEVEL_LOGICAL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("logical decoding on standby requires wal_level >= logical on the primary")));
+ errmsg("logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary")));
}
}
diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c
index a529da983ae..419e4814f05 100644
--- a/src/backend/replication/logical/origin.c
+++ b/src/backend/replication/logical/origin.c
@@ -187,7 +187,7 @@ replorigin_check_prerequisites(bool check_slots, bool recoveryOK)
if (check_slots && max_replication_slots == 0)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot query or manipulate replication origin when max_replication_slots = 0")));
+ errmsg("cannot query or manipulate replication origin when \"max_replication_slots\" is 0")));
if (!recoveryOK && RecoveryInProgress())
ereport(ERROR,
@@ -795,7 +795,7 @@ StartupReplicationOrigin(void)
if (last_state == max_replication_slots)
ereport(PANIC,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
- errmsg("could not find free replication state, increase max_replication_slots")));
+ errmsg("could not find free replication state, increase \"max_replication_slots\"")));
/* copy data to shared memory */
replication_states[last_state].roident = disk_state.roident;
@@ -954,7 +954,7 @@ replorigin_advance(RepOriginId node,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("could not find free replication state slot for replication origin with ID %d",
node),
- errhint("Increase max_replication_slots and try again.")));
+ errhint("Increase \"max_replication_slots\" and try again.")));
if (replication_state == NULL)
{
@@ -1155,7 +1155,7 @@ replorigin_session_setup(RepOriginId node, int acquired_by)
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("could not find free replication state slot for replication origin with ID %d",
node),
- errhint("Increase max_replication_slots and try again.")));
+ errhint("Increase \"max_replication_slots\" and try again.")));
else if (session_replication_state == NULL)
{
/* initialize new slot */
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
index aa4ea387da0..0e54ea5bb9a 100644
--- a/src/backend/replication/slot.c
+++ b/src/backend/replication/slot.c
@@ -378,7 +378,7 @@ ReplicationSlotCreate(const char *name, bool db_specific,
ereport(ERROR,
(errcode(ERRCODE_CONFIGURATION_LIMIT_EXCEEDED),
errmsg("all replication slots are in use"),
- errhint("Free one or increase max_replication_slots.")));
+ errhint("Free one or increase \"max_replication_slots\".")));
/*
* Since this slot is not in use, nobody should be looking at any part of
@@ -1369,12 +1369,12 @@ CheckSlotRequirements(void)
if (max_replication_slots == 0)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("replication slots can only be used if max_replication_slots > 0")));
+ errmsg("replication slots can only be used if \"max_replication_slots\" > 0")));
if (wal_level < WAL_LEVEL_REPLICA)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("replication slots can only be used if wal_level >= replica")));
+ errmsg("replication slots can only be used if \"wal_level\" >= \"replica\"")));
}
/*
@@ -1508,7 +1508,7 @@ ReportSlotInvalidation(ReplicationSlotInvalidationCause cause,
break;
case RS_INVAL_WAL_LEVEL:
- appendStringInfoString(&err_detail, _("Logical decoding on standby requires wal_level >= logical on the primary server."));
+ appendStringInfoString(&err_detail, _("Logical decoding on standby requires \"wal_level\" >= \"logical\" on the primary server."));
break;
case RS_INVAL_NONE:
pg_unreachable();
@@ -1521,7 +1521,7 @@ ReportSlotInvalidation(ReplicationSlotInvalidationCause cause,
errmsg("invalidating obsolete replication slot \"%s\"",
NameStr(slotname)),
errdetail_internal("%s", err_detail.data),
- hint ? errhint("You might need to increase %s.", "max_slot_wal_keep_size") : 0);
+ hint ? errhint("You might need to increase \"%s\".", "max_slot_wal_keep_size") : 0);
pfree(err_detail.data);
}
@@ -2332,15 +2332,15 @@ RestoreSlotFromDisk(const char *name)
if (cp.slotdata.database != InvalidOid && wal_level < WAL_LEVEL_LOGICAL)
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("logical replication slot \"%s\" exists, but wal_level < logical",
+ errmsg("logical replication slot \"%s\" exists, but \"wal_level\" < \"logical\"",
NameStr(cp.slotdata.name)),
- errhint("Change wal_level to be logical or higher.")));
+ errhint("Change \"wal_level\" to be \"logical\" or higher.")));
else if (wal_level < WAL_LEVEL_REPLICA)
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("physical replication slot \"%s\" exists, but wal_level < replica",
+ errmsg("physical replication slot \"%s\" exists, but \"wal_level\" < \"replica\"",
NameStr(cp.slotdata.name)),
- errhint("Change wal_level to be replica or higher.")));
+ errhint("Change \"wal_level\" to be \"replica\" or higher.")));
/* nothing can be active yet, don't lock anything */
for (i = 0; i < max_replication_slots; i++)
@@ -2383,7 +2383,7 @@ RestoreSlotFromDisk(const char *name)
if (!restored)
ereport(FATAL,
(errmsg("too many replication slots active before shutdown"),
- errhint("Increase max_replication_slots and try again.")));
+ errhint("Increase \"max_replication_slots\" and try again.")));
}
/*
diff --git a/src/backend/replication/syncrep.c b/src/backend/replication/syncrep.c
index 77917b848a4..fa5988c824e 100644
--- a/src/backend/replication/syncrep.c
+++ b/src/backend/replication/syncrep.c
@@ -1010,7 +1010,7 @@ check_synchronous_standby_names(char **newval, void **extra, GucSource source)
if (syncrep_parse_error_msg)
GUC_check_errdetail("%s", syncrep_parse_error_msg);
else
- GUC_check_errdetail("synchronous_standby_names parser failed");
+ GUC_check_errdetail("\"synchronous_standby_names\" parser failed");
return false;
}
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 985a2c7049c..8da7dd6c98a 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -709,7 +709,7 @@ check_temp_buffers(int *newval, void **extra, GucSource source)
*/
if (source != PGC_S_TEST && NLocBuffer && NLocBuffer != *newval)
{
- GUC_check_errdetail("temp_buffers cannot be changed after any temporary tables have been accessed in the session.");
+ GUC_check_errdetail("\"temp_buffers\" cannot be changed after any temporary tables have been accessed in the session.");
return false;
}
return true;
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 8c8e81f899b..a7c05b0a6fd 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -3947,7 +3947,7 @@ check_debug_io_direct(char **newval, void **extra, GucSource source)
#if PG_O_DIRECT == 0
if (strcmp(*newval, "") != 0)
{
- GUC_check_errdetail("debug_io_direct is not supported on this platform.");
+ GUC_check_errdetail("\"debug_io_direct\" is not supported on this platform.");
result = false;
}
flags = 0;
@@ -3961,7 +3961,7 @@ check_debug_io_direct(char **newval, void **extra, GucSource source)
if (!SplitGUCList(rawstring, ',', &elemlist))
{
- GUC_check_errdetail("Invalid list syntax in parameter %s",
+ GUC_check_errdetail("Invalid list syntax in parameter \"%s\"",
"debug_io_direct");
pfree(rawstring);
list_free(elemlist);
@@ -3994,14 +3994,14 @@ check_debug_io_direct(char **newval, void **extra, GucSource source)
#if XLOG_BLCKSZ < PG_IO_ALIGN_SIZE
if (result && (flags & (IO_DIRECT_WAL | IO_DIRECT_WAL_INIT)))
{
- GUC_check_errdetail("debug_io_direct is not supported for WAL because XLOG_BLCKSZ is too small");
+ GUC_check_errdetail("\"debug_io_direct\" is not supported for WAL because XLOG_BLCKSZ is too small");
result = false;
}
#endif
#if BLCKSZ < PG_IO_ALIGN_SIZE
if (result && (flags & IO_DIRECT_DATA))
{
- GUC_check_errdetail("debug_io_direct is not supported for data because BLCKSZ is too small");
+ GUC_check_errdetail("\"debug_io_direct\" is not supported for data because BLCKSZ is too small");
result = false;
}
#endif
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 5154353c844..9e4ddf72258 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -960,7 +960,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
else
return LOCKACQUIRE_NOT_AVAIL;
}
@@ -998,7 +998,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
else
return LOCKACQUIRE_NOT_AVAIL;
}
@@ -2801,7 +2801,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
}
GrantLock(proclock->tag.myLock, proclock, lockmode);
FAST_PATH_CLEAR_LOCKMODE(MyProc, f, lockmode);
@@ -4186,7 +4186,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
}
/*
@@ -4251,7 +4251,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
}
/*
@@ -4601,7 +4601,7 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_locks_per_transaction")));
}
GrantLock(proclock->tag.myLock, proclock, ExclusiveLock);
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
index d5bbfbd4c6f..93841654db3 100644
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -651,7 +651,7 @@ SetRWConflict(SERIALIZABLEXACT *reader, SERIALIZABLEXACT *writer)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("not enough elements in RWConflictPool to record a read/write conflict"),
- errhint("You might need to run fewer transactions at a time or increase max_connections.")));
+ errhint("You might need to run fewer transactions at a time or increase \"max_connections\".")));
conflict = dlist_head_element(RWConflictData, outLink, &RWConflictPool->availableList);
dlist_delete(&conflict->outLink);
@@ -676,7 +676,7 @@ SetPossibleUnsafeConflict(SERIALIZABLEXACT *roXact,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("not enough elements in RWConflictPool to record a potential read/write conflict"),
- errhint("You might need to run fewer transactions at a time or increase max_connections.")));
+ errhint("You might need to run fewer transactions at a time or increase \"max_connections\".")));
conflict = dlist_head_element(RWConflictData, outLink, &RWConflictPool->availableList);
dlist_delete(&conflict->outLink);
@@ -1678,7 +1678,7 @@ GetSerializableTransactionSnapshot(Snapshot snapshot)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot use serializable mode in a hot standby"),
- errdetail("default_transaction_isolation is set to \"serializable\"."),
+ errdetail("\"default_transaction_isolation\" is set to \"serializable\"."),
errhint("You can use \"SET default_transaction_isolation = 'repeatable read'\" to change the default.")));
/*
@@ -2461,7 +2461,7 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_pred_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction")));
if (!found)
dlist_init(&target->predicateLocks);
@@ -2476,7 +2476,7 @@ CreatePredicateLock(const PREDICATELOCKTARGETTAG *targettag,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_pred_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction")));
if (!found)
{
@@ -3873,7 +3873,7 @@ ReleaseOneSerializableXact(SERIALIZABLEXACT *sxact, bool partial,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You might need to increase %s.", "max_pred_locks_per_transaction")));
+ errhint("You might need to increase \"%s\".", "max_pred_locks_per_transaction")));
if (found)
{
Assert(predlock->commitSeqNo != 0);
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index e4f256c63c7..a2900b6014a 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -345,7 +345,7 @@ InitProcess(void)
if (AmWalSenderProcess())
ereport(FATAL,
(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
- errmsg("number of requested standby connections exceeds max_wal_senders (currently %d)",
+ errmsg("number of requested standby connections exceeds \"max_wal_senders\" (currently %d)",
max_wal_senders)));
ereport(FATAL,
(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 2dff28afcef..45a3794b8e3 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -3535,7 +3535,7 @@ check_stack_depth(void)
ereport(ERROR,
(errcode(ERRCODE_STATEMENT_TOO_COMPLEX),
errmsg("stack depth limit exceeded"),
- errhint("Increase the configuration parameter max_stack_depth (currently %dkB), "
+ errhint("Increase the configuration parameter \"max_stack_depth\" (currently %dkB), "
"after ensuring the platform's stack depth limit is adequate.",
max_stack_depth)));
}
@@ -3582,7 +3582,7 @@ check_max_stack_depth(int *newval, void **extra, GucSource source)
if (stack_rlimit > 0 && newval_bytes > stack_rlimit - STACK_DEPTH_SLOP)
{
- GUC_check_errdetail("max_stack_depth must not exceed %ldkB.",
+ GUC_check_errdetail("\"max_stack_depth\" must not exceed %ldkB.",
(stack_rlimit - STACK_DEPTH_SLOP) / 1024L);
GUC_check_errhint("Increase the platform's stack depth limit via \"ulimit -s\" or local equivalent.");
return false;
@@ -3607,7 +3607,7 @@ check_client_connection_check_interval(int *newval, void **extra, GucSource sour
{
if (!WaitEventSetCanReportClosed() && *newval != 0)
{
- GUC_check_errdetail("client_connection_check_interval must be set to 0 on this platform.");
+ GUC_check_errdetail("\"client_connection_check_interval\" must be set to 0 on this platform.");
return false;
}
return true;
@@ -3643,9 +3643,9 @@ check_log_stats(bool *newval, void **extra, GucSource source)
if (*newval &&
(log_parser_stats || log_planner_stats || log_executor_stats))
{
- GUC_check_errdetail("Cannot enable log_statement_stats when "
- "log_parser_stats, log_planner_stats, "
- "or log_executor_stats is true.");
+ GUC_check_errdetail("Cannot enable \"log_statement_stats\" when "
+ "\"log_parser_stats\", \"log_planner_stats\", "
+ "or \"log_executor_stats\" is true.");
return false;
}
return true;
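The postgres.c hunks above apply the same quoting inside GUC check hooks, which report problems through GUC_check_errdetail()/GUC_check_errhint() rather than ereport(). A hedged sketch of that hook shape, modeled on check_client_connection_check_interval() above; the hook and parameter names ("check_some_setting", "some_setting") are hypothetical and assume the usual GUC hook declarations:

    /* Hypothetical check hook: rejects nonzero values and quotes the GUC name. */
    static bool
    check_some_setting(int *newval, void **extra, GucSource source)
    {
        if (*newval != 0)
        {
            GUC_check_errdetail("\"some_setting\" must be set to 0 on this platform.");
            return false;
        }
        return true;
    }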
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 8d95b5d42ab..7e5bb2b703a 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -3000,7 +3000,7 @@ icu_validate_locale(const char *loc_str)
ereport(elevel,
(errmsg("could not get language from ICU locale \"%s\": %s",
loc_str, u_errorName(status)),
- errhint("To disable ICU locale validation, set the parameter %s to \"%s\".",
+ errhint("To disable ICU locale validation, set the parameter \"%s\" to \"%s\".",
"icu_validation_level", "disabled")));
return;
}
@@ -3029,7 +3029,7 @@ icu_validate_locale(const char *loc_str)
ereport(elevel,
(errmsg("ICU locale \"%s\" has unknown language \"%s\"",
loc_str, lang),
- errhint("To disable ICU locale validation, set the parameter %s to \"%s\".",
+ errhint("To disable ICU locale validation, set the parameter \"%s\" to \"%s\".",
"icu_validation_level", "disabled")));
/* check that it can be opened */
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index dccd130c911..d2e2e9bbba0 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -456,7 +456,7 @@ byteaout(PG_FUNCTION_ARGS)
}
else
{
- elog(ERROR, "unrecognized bytea_output setting: %d",
+ elog(ERROR, "unrecognized \"bytea_output\" setting: %d",
bytea_output);
rp = result = NULL; /* keep compiler quiet */
}
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index eafa0128ef0..092004dcf3b 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -538,7 +538,7 @@ find_in_dynamic_libpath(const char *basename)
if (piece == p)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("zero-length component in parameter dynamic_library_path")));
+ errmsg("zero-length component in parameter \"dynamic_library_path\"")));
if (piece == NULL)
len = strlen(p);
@@ -557,7 +557,7 @@ find_in_dynamic_libpath(const char *basename)
if (!is_absolute_path(mangled))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("component in parameter dynamic_library_path is not an absolute path")));
+ errmsg("component in parameter \"dynamic_library_path\" is not an absolute path")));
full = palloc(strlen(mangled) + 1 + baselen + 1);
sprintf(full, "%s/%s", mangled, basename);
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 3fb68039986..547cecde240 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -1879,7 +1879,7 @@ SelectConfigFiles(const char *userDoption, const char *progname)
else
{
write_stderr("%s does not know where to find the database system data.\n"
- "This can be specified as data_directory in \"%s\", "
+ "This can be specified as \"data_directory\" in \"%s\", "
"or by the -D invocation option, or by the "
"PGDATA environment variable.\n",
progname, ConfigFileName);
diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c
index ea2b0577bc6..85c8d54d4fc 100644
--- a/src/backend/utils/misc/guc_tables.c
+++ b/src/backend/utils/misc/guc_tables.c
@@ -1066,7 +1066,7 @@ struct config_bool ConfigureNamesBool[] =
},
{
{"ssl_passphrase_command_supports_reload", PGC_SIGHUP, CONN_AUTH_SSL,
- gettext_noop("Controls whether ssl_passphrase_command is called during server reload."),
+ gettext_noop("Controls whether \"ssl_passphrase_command\" is called during server reload."),
NULL
},
&ssl_passphrase_command_supports_reload,
@@ -1114,7 +1114,7 @@ struct config_bool ConfigureNamesBool[] =
gettext_noop("Continues processing past damaged page headers."),
gettext_noop("Detection of a damaged page header normally causes PostgreSQL to "
"report an error, aborting the current transaction. Setting "
- "zero_damaged_pages to true causes the system to instead report a "
+ "\"zero_damaged_page\" to true causes the system to instead report a "
"warning, zero out the damaged page, and continue processing. This "
"behavior will destroy data, namely all the rows on the damaged page."),
GUC_NOT_IN_SAMPLE
@@ -1129,7 +1129,7 @@ struct config_bool ConfigureNamesBool[] =
gettext_noop("Detection of WAL records having references to "
"invalid pages during recovery causes PostgreSQL to "
"raise a PANIC-level error, aborting the recovery. "
- "Setting ignore_invalid_pages to true causes "
+ "Setting \"ignore_invalid_pages\" to true causes "
"the system to ignore invalid page references "
"in WAL records (but still report a warning), "
"and continue recovery. This behavior may cause "
@@ -2713,7 +2713,7 @@ struct config_int ConfigureNamesInt[] =
{"max_locks_per_transaction", PGC_POSTMASTER, LOCK_MANAGEMENT,
gettext_noop("Sets the maximum number of locks per transaction."),
gettext_noop("The shared lock table is sized on the assumption that at most "
- "max_locks_per_transaction objects per server process or prepared "
+ "\"max_locks_per_transaction\" objects per server process or prepared "
"transaction will need to be locked at any one time.")
},
&max_locks_per_xact,
@@ -2725,7 +2725,7 @@ struct config_int ConfigureNamesInt[] =
{"max_pred_locks_per_transaction", PGC_POSTMASTER, LOCK_MANAGEMENT,
gettext_noop("Sets the maximum number of predicate locks per transaction."),
gettext_noop("The shared predicate lock table is sized on the assumption that "
- "at most max_pred_locks_per_transaction objects per server process "
+ "at most \"max_pred_locks_per_transaction\" objects per server process "
"or prepared transaction will need to be locked at any one time.")
},
&max_predicate_locks_per_xact,
@@ -2976,7 +2976,7 @@ struct config_int ConfigureNamesInt[] =
{
{"commit_siblings", PGC_USERSET, WAL_SETTINGS,
gettext_noop("Sets the minimum number of concurrent open transactions "
- "required before performing commit_delay."),
+ "required before performing \"commit_delay\"."),
NULL
},
&CommitSiblings,
@@ -3108,7 +3108,7 @@ struct config_int ConfigureNamesInt[] =
{"maintenance_io_concurrency",
PGC_USERSET,
RESOURCES_ASYNCHRONOUS,
- gettext_noop("A variant of effective_io_concurrency that is used for maintenance work."),
+ gettext_noop("A variant of \"effective_io_concurrency\" that is used for maintenance work."),
NULL,
GUC_EXPLAIN
},
@@ -3815,7 +3815,7 @@ struct config_real ConfigureNamesReal[] =
{
{"hash_mem_multiplier", PGC_USERSET, RESOURCES_MEM,
- gettext_noop("Multiple of work_mem to use for hash tables."),
+ gettext_noop("Multiple of \"work_mem\" to use for hash tables."),
NULL,
GUC_EXPLAIN
},
@@ -3909,7 +3909,7 @@ struct config_real ConfigureNamesReal[] =
{
{"log_statement_sample_rate", PGC_SUSET, LOGGING_WHEN,
- gettext_noop("Fraction of statements exceeding log_min_duration_sample to be logged."),
+ gettext_noop("Fraction of statements exceeding \"log_min_duration_sample\" to be logged."),
gettext_noop("Use a value between 0.0 (never log) and 1.0 (always log).")
},
&log_statement_sample_rate,
@@ -3940,7 +3940,7 @@ struct config_string ConfigureNamesString[] =
{
{"archive_command", PGC_SIGHUP, WAL_ARCHIVING,
gettext_noop("Sets the shell command that will be called to archive a WAL file."),
- gettext_noop("This is used only if archive_library is not set.")
+ gettext_noop("This is used only if \"archive_library\" is not set.")
},
&XLogArchiveCommand,
"",
@@ -3950,7 +3950,7 @@ struct config_string ConfigureNamesString[] =
{
{"archive_library", PGC_SIGHUP, WAL_ARCHIVING,
gettext_noop("Sets the library that will be called to archive a WAL file."),
- gettext_noop("An empty string indicates that archive_command should be used.")
+ gettext_noop("An empty string indicates that \"archive_command\" should be used.")
},
&XLogArchiveLibrary,
"",
@@ -4895,7 +4895,7 @@ struct config_enum ConfigureNamesEnum[] =
{
{"archive_mode", PGC_POSTMASTER, WAL_ARCHIVING,
- gettext_noop("Allows archiving of WAL files using archive_command."),
+ gettext_noop("Allows archiving of WAL files using \"archive_command\"."),
NULL
},
&XLogArchiveMode,