-rw-r--r--  src/backend/access/brin/brin_pageops.c  20
-rw-r--r--  src/backend/catalog/index.c               2
-rw-r--r--  src/backend/catalog/namespace.c           2
-rw-r--r--  src/backend/catalog/pg_aggregate.c        2
-rw-r--r--  src/backend/commands/vacuumlazy.c        10
-rw-r--r--  src/backend/commands/variable.c           2
-rw-r--r--  src/backend/storage/page/bufpage.c        2
-rw-r--r--  src/backend/utils/adt/json.c              4
-rw-r--r--  src/backend/utils/adt/jsonb.c             6
-rw-r--r--  src/backend/utils/misc/pg_rusage.c        4
10 files changed, 24 insertions, 30 deletions
diff --git a/src/backend/access/brin/brin_pageops.c b/src/backend/access/brin/brin_pageops.c
index 4878f8b7768..987e6005cb3 100644
--- a/src/backend/access/brin/brin_pageops.c
+++ b/src/backend/access/brin/brin_pageops.c
@@ -73,10 +73,8 @@ brin_doupdate(Relation idxrel, BlockNumber pagesPerRange,
{
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
- (unsigned long) newsz,
- (unsigned long) BrinMaxItemSize,
- RelationGetRelationName(idxrel))));
+ errmsg("index row size %zu exceeds maximum %zu for index \"%s\"",
+ newsz, BrinMaxItemSize, RelationGetRelationName(idxrel))));
return false; /* keep compiler quiet */
}
@@ -359,10 +357,8 @@ brin_doinsert(Relation idxrel, BlockNumber pagesPerRange,
{
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
- (unsigned long) itemsz,
- (unsigned long) BrinMaxItemSize,
- RelationGetRelationName(idxrel))));
+ errmsg("index row size %zu exceeds maximum %zu for index \"%s\"",
+ itemsz, BrinMaxItemSize, RelationGetRelationName(idxrel))));
return InvalidOffsetNumber; /* keep compiler quiet */
}
@@ -669,7 +665,7 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz,
BlockNumber oldblk;
BlockNumber newblk;
Page page;
- int freespace;
+ Size freespace;
/* callers must have checked */
Assert(itemsz <= BrinMaxItemSize);
@@ -825,10 +821,8 @@ brin_getinsertbuffer(Relation irel, Buffer oldbuf, Size itemsz,
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
- (unsigned long) itemsz,
- (unsigned long) freespace,
- RelationGetRelationName(irel))));
+ errmsg("index row size %zu exceeds maximum %zu for index \"%s\"",
+ itemsz, freespace, RelationGetRelationName(irel))));
return InvalidBuffer; /* keep compiler quiet */
}
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index b0b43cf02d8..6aae1ab6fda 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -3453,7 +3453,7 @@ reindex_index(Oid indexId, bool skip_constraint_checks, char persistence,
ereport(INFO,
(errmsg("index \"%s\" was reindexed",
get_rel_name(indexId)),
- errdetail("%s.",
+ errdetail_internal("%s",
pg_rusage_show(&ru0))));
/* Close rels, but keep locks */
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 8fd4c3136bc..8092381ae5c 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -3685,7 +3685,7 @@ InitTempTableNamespace(void)
if (IsParallelWorker())
ereport(ERROR,
(errcode(ERRCODE_READ_ONLY_SQL_TRANSACTION),
- errmsg("cannot create temporary tables in parallel mode")));
+ errmsg("cannot create temporary tables during a parallel operation")));
snprintf(namespaceName, sizeof(namespaceName), "pg_temp_%d", MyBackendId);
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index 959d3845df2..ada6e6171b7 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -433,7 +433,7 @@ AggregateCreate(const char *aggName,
if (aggTransType == INTERNALOID && func_strict(combinefn))
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("combine function with \"%s\" transition type must not be declared STRICT",
+ errmsg("combine function with transition type %s must not be declared STRICT",
format_type_be(aggTransType))));
}
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index e1e39dfb4ee..0506a9340c3 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -1342,8 +1342,7 @@ lazy_scan_heap(Relation onerel, int options, LVRelStats *vacrelstats,
"%u pages are entirely empty.\n",
empty_pages),
empty_pages);
- appendStringInfo(&buf, _("%s."),
- pg_rusage_show(&ru0));
+ appendStringInfo(&buf, "%s.", pg_rusage_show(&ru0));
ereport(elevel,
(errmsg("\"%s\": found %.0f removable, %.0f nonremovable row versions in %u out of %u pages",
@@ -1418,8 +1417,7 @@ lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats)
(errmsg("\"%s\": removed %d row versions in %d pages",
RelationGetRelationName(onerel),
tupindex, npages),
- errdetail("%s.",
- pg_rusage_show(&ru0))));
+ errdetail_internal("%s", pg_rusage_show(&ru0))));
}
/*
@@ -1607,7 +1605,7 @@ lazy_vacuum_index(Relation indrel,
(errmsg("scanned index \"%s\" to remove %d row versions",
RelationGetRelationName(indrel),
vacrelstats->num_dead_tuples),
- errdetail("%s.", pg_rusage_show(&ru0))));
+ errdetail_internal("%s", pg_rusage_show(&ru0))));
}
/*
@@ -1817,7 +1815,7 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
(errmsg("\"%s\": truncated %u to %u pages",
RelationGetRelationName(onerel),
old_rel_pages, new_rel_pages),
- errdetail("%s.",
+ errdetail_internal("%s",
pg_rusage_show(&ru0))));
old_rel_pages = new_rel_pages;
} while (new_rel_pages > vacrelstats->nonempty_pages &&
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index defafa54b29..1f72d7bee95 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -776,7 +776,7 @@ assign_client_encoding(const char *newval, void *extra)
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot change client_encoding in a parallel worker")));
+ errmsg("cannot change client_encoding during a parallel operation")));
}
/* We do not expect an error if PrepareClientEncoding succeeded */
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index f2a07f21116..11607827d8d 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -889,7 +889,7 @@ PageIndexMultiDelete(Page page, OffsetNumber *itemnos, int nitems)
offset != MAXALIGN(offset))
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("corrupted item pointer: offset = %u, size = %u",
+ errmsg("corrupted item pointer: offset = %u, length = %u",
offset, (unsigned int) size)));
if (nextitm < nitems && offnum == itemnos[nextitm])
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index c73044b806d..2b6a8391d8b 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -2003,7 +2003,7 @@ json_object_agg_transfn(PG_FUNCTION_ARGS)
if (arg_type == InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("could not determine data type for argument 1")));
+ errmsg("could not determine data type for argument %d", 1)));
json_categorize_type(arg_type, &state->key_category,
&state->key_output_func);
@@ -2013,7 +2013,7 @@ json_object_agg_transfn(PG_FUNCTION_ARGS)
if (arg_type == InvalidOid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("could not determine data type for argument 2")));
+ errmsg("could not determine data type for argument %d", 2)));
json_categorize_type(arg_type, &state->val_category,
&state->val_output_func);
diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c
index 987cfd18625..d560e4edbc2 100644
--- a/src/backend/utils/adt/jsonb.c
+++ b/src/backend/utils/adt/jsonb.c
@@ -1211,7 +1211,7 @@ jsonb_build_object(PG_FUNCTION_ARGS)
if (val_type == InvalidOid || val_type == UNKNOWNOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("argument %d: could not determine data type", i + 1)));
+ errmsg("could not determine data type for argument %d", i + 1)));
add_jsonb(arg, false, &result, val_type, true);
@@ -1234,7 +1234,7 @@ jsonb_build_object(PG_FUNCTION_ARGS)
if (val_type == InvalidOid || val_type == UNKNOWNOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("argument %d: could not determine data type", i + 2)));
+ errmsg("could not determine data type for argument %d", i + 2)));
add_jsonb(arg, PG_ARGISNULL(i + 1), &result, val_type, false);
}
@@ -1294,7 +1294,7 @@ jsonb_build_array(PG_FUNCTION_ARGS)
if (val_type == InvalidOid || val_type == UNKNOWNOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("argument %d: could not determine data type", i + 1)));
+ errmsg("could not determine data type for argument %d", i + 1)));
add_jsonb(arg, PG_ARGISNULL(i), &result, val_type, false);
}
diff --git a/src/backend/utils/misc/pg_rusage.c b/src/backend/utils/misc/pg_rusage.c
index 8781a383c08..c21b9501517 100644
--- a/src/backend/utils/misc/pg_rusage.c
+++ b/src/backend/utils/misc/pg_rusage.c
@@ -61,7 +61,9 @@ pg_rusage_show(const PGRUsage *ru0)
}
snprintf(result, sizeof(result),
- "CPU %d.%02ds/%d.%02du sec elapsed %d.%02d sec",
+ /*---
+ * translator: %d.%02ds is system CPU time, %d.%02du is user CPU time */
+ _("CPU %d.%02ds/%d.%02du sec elapsed %d.%02d sec"),
(int) (ru1.ru.ru_stime.tv_sec - ru0->ru.ru_stime.tv_sec),
(int) (ru1.ru.ru_stime.tv_usec - ru0->ru.ru_stime.tv_usec) / 10000,
(int) (ru1.ru.ru_utime.tv_sec - ru0->ru.ru_utime.tv_sec),