author    | Tom Lane <tgl@sss.pgh.pa.us> | 2017-06-21 15:35:54 -0400
committer | Tom Lane <tgl@sss.pgh.pa.us> | 2017-06-21 15:35:54 -0400
commit    | 382ceffdf7f620d8f2d50e451b4167d291ae2348 (patch)
tree      | f558251492f2c6f86e3566f7a82f9d00509122c2 /src/backend/utils
parent    | c7b8998ebbf310a156aa38022555a24d98fdbfb4 (diff)
download  | postgresql-382ceffdf7f620d8f2d50e451b4167d291ae2348.tar.gz postgresql-382ceffdf7f620d8f2d50e451b4167d291ae2348.zip
Phase 3 of pgindent updates.
Don't move parenthesized lines to the left, even if that means they
flow past the right margin.
By default, BSD indent lines up statement continuation lines that are
within parentheses so that they start just to the right of the preceding
left parenthesis. However, traditionally, if that resulted in the
continuation line extending to the right of the desired right margin,
then indent would push it left just far enough to not overrun the margin,
if it could do so without making the continuation line start to the left of
the current statement indent. That makes for a weird mix of indentations
unless one has been completely rigid about never violating the 80-column
limit.
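For example, the first hunk in the diff below (src/backend/utils/adt/acl.c, in aclparse()) shows what the old behavior produced when the paren-aligned position would have overrun the margin: the errmsg() line was pulled back to the left of its opening parenthesis. Column positions here are approximate, since this export collapsed the original whitespace:

    ereport(ERROR,
            (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
         errmsg("invalid mode character: must be one of \"%s\"",
                ACL_ALL_RIGHTS_STR)));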
This behavior has been pretty universally panned by Postgres developers.
Hence, disable it with indent's new -lpl switch, so that parenthesized
lines are always lined up with the preceding left paren.
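With -lpl, the same call is re-indented so that errmsg() and its continuation stay just to the right of the parenthesis opened on the errcode() line, even though those lines now run past column 80 (again, columns approximate):

    ereport(ERROR,
            (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
             errmsg("invalid mode character: must be one of \"%s\"",
                    ACL_ALL_RIGHTS_STR)));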
This patch is much less interesting than the first round of indent
changes, but also bulkier, so I thought it best to separate the effects.
Discussion: https://postgr.es/m/E1dAmxK-0006EE-1r@gemulon.postgresql.org
Discussion: https://postgr.es/m/30527.1495162840@sss.pgh.pa.us
Diffstat (limited to 'src/backend/utils')
82 files changed, 678 insertions, 678 deletions
diff (excerpt)

The hunks in this excerpt were flattened by the page export: the line breaks and indentation whitespace of the original unified diff were collapsed, so the hunk layout cannot be reproduced faithfully here. Every hunk is a whitespace-only re-indentation that moves parenthesized continuation lines — most commonly errmsg(), errdetail(), and errhint() arguments inside ereport() calls, and wrapped function-call argument lists — so that they line up with the preceding left parenthesis; no code is added or removed.

Files covered by this excerpt, all under src/backend/utils/adt/:

acl.c, amutils.c, array_selfuncs.c, array_typanalyze.c, array_userfuncs.c, arrayfuncs.c, date.c, datetime.c, dbsize.c, domains.c, encode.c, enum.c, float.c, formatting.c, genfile.c, geo_ops.c, inet_cidr_ntop.c, json.c, jsonb.c, jsonb_util.c, jsonfuncs.c, levenshtein.c, like.c, like_match.c, lockfuncs.c, mac.c, mac8.c, misc.c

The excerpt breaks off partway through the misc.c hunks; per the diffstat above, the full diff (limited to src/backend/utils) touches 82 files.
*/ elog(ERROR, - "missing newline character in \"%s\"", LOG_METAINFO_DATAFILE); + "missing newline character in \"%s\"", LOG_METAINFO_DATAFILE); break; } *nlpos = '\0'; diff --git a/src/backend/utils/adt/nabstime.c b/src/backend/utils/adt/nabstime.c index e3a858c7c7b..2c5948052d3 100644 --- a/src/backend/utils/adt/nabstime.c +++ b/src/backend/utils/adt/nabstime.c @@ -159,7 +159,7 @@ tm2abstime(struct pg_tm *tm, int tz) tm->tm_mday < 1 || tm->tm_mday > 31 || tm->tm_hour < 0 || tm->tm_hour > HOURS_PER_DAY || /* test for > 24:00:00 */ - (tm->tm_hour == HOURS_PER_DAY && (tm->tm_min > 0 || tm->tm_sec > 0)) || + (tm->tm_hour == HOURS_PER_DAY && (tm->tm_min > 0 || tm->tm_sec > 0)) || tm->tm_min < 0 || tm->tm_min > MINS_PER_HOUR - 1 || tm->tm_sec < 0 || tm->tm_sec > SECS_PER_MINUTE) return INVALID_ABSTIME; @@ -479,7 +479,7 @@ abstime_timestamp(PG_FUNCTION_ARGS) case INVALID_ABSTIME: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot convert abstime \"invalid\" to timestamp"))); + errmsg("cannot convert abstime \"invalid\" to timestamp"))); TIMESTAMP_NOBEGIN(result); break; @@ -552,7 +552,7 @@ abstime_timestamptz(PG_FUNCTION_ARGS) case INVALID_ABSTIME: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot convert abstime \"invalid\" to timestamp"))); + errmsg("cannot convert abstime \"invalid\" to timestamp"))); TIMESTAMP_NOBEGIN(result); break; @@ -741,12 +741,12 @@ tintervalout(PG_FUNCTION_ARGS) else { p = DatumGetCString(DirectFunctionCall1(abstimeout, - AbsoluteTimeGetDatum(tinterval->data[0]))); + AbsoluteTimeGetDatum(tinterval->data[0]))); strcat(i_str, p); pfree(p); strcat(i_str, "\" \""); p = DatumGetCString(DirectFunctionCall1(abstimeout, - AbsoluteTimeGetDatum(tinterval->data[1]))); + AbsoluteTimeGetDatum(tinterval->data[1]))); strcat(i_str, p); pfree(p); } @@ -849,7 +849,7 @@ reltime_interval(PG_FUNCTION_ARGS) case INVALID_RELTIME: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot convert reltime \"invalid\" to interval"))); + errmsg("cannot convert reltime \"invalid\" to interval"))); result->time = 0; result->day = 0; result->month = 0; @@ -958,10 +958,10 @@ intinterval(PG_FUNCTION_ARGS) { if (DatumGetBool(DirectFunctionCall2(abstimege, AbsoluteTimeGetDatum(t), - AbsoluteTimeGetDatum(tinterval->data[0]))) && + AbsoluteTimeGetDatum(tinterval->data[0]))) && DatumGetBool(DirectFunctionCall2(abstimele, AbsoluteTimeGetDatum(t), - AbsoluteTimeGetDatum(tinterval->data[1])))) + AbsoluteTimeGetDatum(tinterval->data[1])))) PG_RETURN_BOOL(true); } PG_RETURN_BOOL(false); @@ -1108,7 +1108,7 @@ tintervalsame(PG_FUNCTION_ARGS) if (DatumGetBool(DirectFunctionCall2(abstimeeq, AbsoluteTimeGetDatum(i1->data[0]), - AbsoluteTimeGetDatum(i2->data[0]))) && + AbsoluteTimeGetDatum(i2->data[0]))) && DatumGetBool(DirectFunctionCall2(abstimeeq, AbsoluteTimeGetDatum(i1->data[1]), AbsoluteTimeGetDatum(i2->data[1])))) @@ -1353,7 +1353,7 @@ tintervalct(PG_FUNCTION_ARGS) PG_RETURN_BOOL(false); if (DatumGetBool(DirectFunctionCall2(abstimele, AbsoluteTimeGetDatum(i1->data[0]), - AbsoluteTimeGetDatum(i2->data[0]))) && + AbsoluteTimeGetDatum(i2->data[0]))) && DatumGetBool(DirectFunctionCall2(abstimege, AbsoluteTimeGetDatum(i1->data[1]), AbsoluteTimeGetDatum(i2->data[1])))) @@ -1374,7 +1374,7 @@ tintervalov(PG_FUNCTION_ARGS) PG_RETURN_BOOL(false); if (DatumGetBool(DirectFunctionCall2(abstimelt, AbsoluteTimeGetDatum(i1->data[1]), - AbsoluteTimeGetDatum(i2->data[0]))) || + AbsoluteTimeGetDatum(i2->data[0]))) || DatumGetBool(DirectFunctionCall2(abstimegt, 
AbsoluteTimeGetDatum(i1->data[0]), AbsoluteTimeGetDatum(i2->data[1])))) diff --git a/src/backend/utils/adt/network_selfuncs.c b/src/backend/utils/adt/network_selfuncs.c index 1d29ecd5217..c9927fff9be 100644 --- a/src/backend/utils/adt/network_selfuncs.c +++ b/src/backend/utils/adt/network_selfuncs.c @@ -287,7 +287,7 @@ networkjoinsel_inner(Oid operator, mcv1_exists = get_attstatsslot(&mcv1_slot, vardata1->statsTuple, STATISTIC_KIND_MCV, InvalidOid, - ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS); + ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS); hist1_exists = get_attstatsslot(&hist1_slot, vardata1->statsTuple, STATISTIC_KIND_HISTOGRAM, InvalidOid, ATTSTATSSLOT_VALUES); @@ -309,7 +309,7 @@ networkjoinsel_inner(Oid operator, mcv2_exists = get_attstatsslot(&mcv2_slot, vardata2->statsTuple, STATISTIC_KIND_MCV, InvalidOid, - ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS); + ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS); hist2_exists = get_attstatsslot(&hist2_slot, vardata2->statsTuple, STATISTIC_KIND_HISTOGRAM, InvalidOid, ATTSTATSSLOT_VALUES); @@ -360,7 +360,7 @@ networkjoinsel_inner(Oid operator, selec += (1.0 - nullfrac1 - sumcommon1) * (1.0 - nullfrac2 - sumcommon2) * inet_hist_inclusion_join_sel(hist1_slot.values, hist1_slot.nvalues, - hist2_slot.values, hist2_slot.nvalues, + hist2_slot.values, hist2_slot.nvalues, opr_codenum); /* @@ -417,7 +417,7 @@ networkjoinsel_semi(Oid operator, mcv1_exists = get_attstatsslot(&mcv1_slot, vardata1->statsTuple, STATISTIC_KIND_MCV, InvalidOid, - ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS); + ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS); hist1_exists = get_attstatsslot(&hist1_slot, vardata1->statsTuple, STATISTIC_KIND_HISTOGRAM, InvalidOid, ATTSTATSSLOT_VALUES); @@ -439,7 +439,7 @@ networkjoinsel_semi(Oid operator, mcv2_exists = get_attstatsslot(&mcv2_slot, vardata2->statsTuple, STATISTIC_KIND_MCV, InvalidOid, - ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS); + ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS); hist2_exists = get_attstatsslot(&hist2_slot, vardata2->statsTuple, STATISTIC_KIND_HISTOGRAM, InvalidOid, ATTSTATSSLOT_VALUES); diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index 1a182a0725c..1d0ca1b7943 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -1026,8 +1026,8 @@ numerictypmodin(PG_FUNCTION_ARGS) if (tl[1] < 0 || tl[1] > tl[0]) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("NUMERIC scale %d must be between 0 and precision %d", - tl[1], tl[0]))); + errmsg("NUMERIC scale %d must be between 0 and precision %d", + tl[1], tl[0]))); typmod = ((tl[0] << 16) | tl[1]) + VARHDRSZ; } else if (n == 1) @@ -1497,7 +1497,7 @@ width_bucket_numeric(PG_FUNCTION_ARGS) NUMERIC_IS_NAN(bound2)) ereport(ERROR, (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), - errmsg("operand, lower bound, and upper bound cannot be NaN"))); + errmsg("operand, lower bound, and upper bound cannot be NaN"))); init_var(&result_var); init_var(&count_var); @@ -1509,8 +1509,8 @@ width_bucket_numeric(PG_FUNCTION_ARGS) { case 0: ereport(ERROR, - (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), - errmsg("lower bound cannot equal upper bound"))); + (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION), + errmsg("lower bound cannot equal upper bound"))); /* bound1 < bound2 */ case -1: @@ -1770,7 +1770,7 @@ numeric_abbrev_abort(int memtupcount, SortSupport ssup) if (trace_sort) elog(LOG, "numeric_abbrev: aborting abbreviation at cardinality %f" - " below threshold %f after " INT64_FORMAT " 
values (%d rows)", + " below threshold %f after " INT64_FORMAT " values (%d rows)", abbr_card, nss->input_count / 10000.0 + 0.5, nss->input_count, memtupcount); #endif @@ -8361,7 +8361,7 @@ cmp_abs(NumericVar *var1, NumericVar *var2) */ static int cmp_abs_common(const NumericDigit *var1digits, int var1ndigits, int var1weight, - const NumericDigit *var2digits, int var2ndigits, int var2weight) + const NumericDigit *var2digits, int var2ndigits, int var2weight) { int i1 = 0; int i2 = 0; diff --git a/src/backend/utils/adt/numutils.c b/src/backend/utils/adt/numutils.c index 4ea2892c983..07682723b78 100644 --- a/src/backend/utils/adt/numutils.c +++ b/src/backend/utils/adt/numutils.c @@ -86,7 +86,7 @@ pg_atoi(const char *s, int size, int c) if (errno == ERANGE || l < SCHAR_MIN || l > SCHAR_MAX) ereport(ERROR, (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("value \"%s\" is out of range for 8-bit integer", s))); + errmsg("value \"%s\" is out of range for 8-bit integer", s))); break; default: elog(ERROR, "unsupported result size: %d", size); diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c index 7554fbb22c3..ad3fe74bbd6 100644 --- a/src/backend/utils/adt/pg_locale.c +++ b/src/backend/utils/adt/pg_locale.c @@ -1358,8 +1358,8 @@ pg_newlocale_from_collation(Oid collid) collator = ucol_open(collcollate, &status); if (U_FAILURE(status)) ereport(ERROR, - (errmsg("could not open collator for locale \"%s\": %s", - collcollate, u_errorName(status)))); + (errmsg("could not open collator for locale \"%s\": %s", + collcollate, u_errorName(status)))); result->info.icu.locale = strdup(collcollate); result->info.icu.ucol = collator; @@ -1368,7 +1368,7 @@ pg_newlocale_from_collation(Oid collid) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("ICU is not supported in this build"), \ - errhint("You need to rebuild PostgreSQL using --with-icu."))); + errhint("You need to rebuild PostgreSQL using --with-icu."))); #endif /* not USE_ICU */ } @@ -1398,13 +1398,13 @@ pg_newlocale_from_collation(Oid collid) (errmsg("collation \"%s\" has version mismatch", NameStr(collform->collname)), errdetail("The collation in the database was created using version %s, " - "but the operating system provides version %s.", + "but the operating system provides version %s.", collversionstr, actual_versionstr), errhint("Rebuild all objects affected by this collation and run " "ALTER COLLATION %s REFRESH VERSION, " - "or build PostgreSQL with the right library version.", + "or build PostgreSQL with the right library version.", quote_qualified_identifier(get_namespace_name(collform->collnamespace), - NameStr(collform->collname))))); + NameStr(collform->collname))))); } ReleaseSysCache(tp); @@ -1480,8 +1480,8 @@ init_icu_converter(void) conv = ucnv_open(icu_encoding_name, &status); if (U_FAILURE(status)) ereport(ERROR, - (errmsg("could not open ICU converter for encoding \"%s\": %s", - icu_encoding_name, u_errorName(status)))); + (errmsg("could not open ICU converter for encoding \"%s\": %s", + icu_encoding_name, u_errorName(status)))); icu_converter = conv; } diff --git a/src/backend/utils/adt/pg_upgrade_support.c b/src/backend/utils/adt/pg_upgrade_support.c index 4b340055f0f..c0b04746287 100644 --- a/src/backend/utils/adt/pg_upgrade_support.c +++ b/src/backend/utils/adt/pg_upgrade_support.c @@ -172,7 +172,7 @@ binary_upgrade_create_empty_extension(PG_FUNCTION_ARGS) InsertExtensionTuple(text_to_cstring(extName), GetUserId(), - get_namespace_oid(text_to_cstring(schemaName), false), + 
get_namespace_oid(text_to_cstring(schemaName), false), relocatable, text_to_cstring(extVersion), extConfig, diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c index e0cae1ba1e8..20ce48b2d82 100644 --- a/src/backend/utils/adt/pgstatfuncs.c +++ b/src/backend/utils/adt/pgstatfuncs.c @@ -784,7 +784,7 @@ pg_stat_get_activity(PG_FUNCTION_ARGS) { clean_ipv6_addr(beentry->st_clientaddr.addr.ss_family, remote_host); values[12] = DirectFunctionCall1(inet_in, - CStringGetDatum(remote_host)); + CStringGetDatum(remote_host)); if (beentry->st_clienthostname && beentry->st_clienthostname[0]) values[13] = CStringGetTextDatum(beentry->st_clienthostname); @@ -1859,5 +1859,5 @@ pg_stat_get_archiver(PG_FUNCTION_ARGS) /* Returns the record as Datum */ PG_RETURN_DATUM(HeapTupleGetDatum( - heap_form_tuple(tupdesc, values, nulls))); + heap_form_tuple(tupdesc, values, nulls))); } diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c index 94a77c9b6a3..09a4f14a179 100644 --- a/src/backend/utils/adt/rangetypes.c +++ b/src/backend/utils/adt/rangetypes.c @@ -331,13 +331,13 @@ get_range_io_data(FunctionCallInfo fcinfo, Oid rngtypid, IOFuncSelector func) if (func == IOFunc_receive) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), - errmsg("no binary input function available for type %s", - format_type_be(cache->typcache->rngelemtype->type_id)))); + errmsg("no binary input function available for type %s", + format_type_be(cache->typcache->rngelemtype->type_id)))); else ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), - errmsg("no binary output function available for type %s", - format_type_be(cache->typcache->rngelemtype->type_id)))); + errmsg("no binary output function available for type %s", + format_type_be(cache->typcache->rngelemtype->type_id)))); } fmgr_info_cxt(cache->typiofunc, &cache->proc, fcinfo->flinfo->fn_mcxt); @@ -402,7 +402,7 @@ range_constructor3(PG_FUNCTION_ARGS) if (PG_ARGISNULL(2)) ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), - errmsg("range constructor flags argument must not be null"))); + errmsg("range constructor flags argument must not be null"))); flags = range_parse_flags(text_to_cstring(PG_GETARG_TEXT_PP(2))); @@ -989,7 +989,7 @@ range_minus(PG_FUNCTION_ARGS) if (cmp_l1l2 < 0 && cmp_u1u2 > 0) ereport(ERROR, (errcode(ERRCODE_DATA_EXCEPTION), - errmsg("result of range difference would not be contiguous"))); + errmsg("result of range difference would not be contiguous"))); if (cmp_l1u2 > 0 || cmp_u1l2 < 0) PG_RETURN_RANGE(r1); @@ -1914,7 +1914,7 @@ range_parse_flags(const char *flags_str) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("invalid range bound flags"), - errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\"."))); + errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\"."))); switch (flags_str[0]) { @@ -1927,7 +1927,7 @@ range_parse_flags(const char *flags_str) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("invalid range bound flags"), - errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\"."))); + errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\"."))); } switch (flags_str[1]) @@ -1941,7 +1941,7 @@ range_parse_flags(const char *flags_str) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("invalid range bound flags"), - errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\"."))); + errhint("Valid values are \"[]\", \"[)\", \"(]\", and \"()\"."))); } return flags; diff --git a/src/backend/utils/adt/rangetypes_selfuncs.c 
b/src/backend/utils/adt/rangetypes_selfuncs.c index e803f72924a..ed13c27fcb8 100644 --- a/src/backend/utils/adt/rangetypes_selfuncs.c +++ b/src/backend/utils/adt/rangetypes_selfuncs.c @@ -47,15 +47,15 @@ static float8 get_distance(TypeCacheEntry *typcache, RangeBound *bound1, static int length_hist_bsearch(Datum *length_hist_values, int length_hist_nvalues, double value, bool equal); static double calc_length_hist_frac(Datum *length_hist_values, - int length_hist_nvalues, double length1, double length2, bool equal); + int length_hist_nvalues, double length1, double length2, bool equal); static double calc_hist_selectivity_contained(TypeCacheEntry *typcache, RangeBound *lower, RangeBound *upper, RangeBound *hist_lower, int hist_nvalues, - Datum *length_hist_values, int length_hist_nvalues); + Datum *length_hist_values, int length_hist_nvalues); static double calc_hist_selectivity_contains(TypeCacheEntry *typcache, RangeBound *lower, RangeBound *upper, RangeBound *hist_lower, int hist_nvalues, - Datum *length_hist_values, int length_hist_nvalues); + Datum *length_hist_values, int length_hist_nvalues); /* * Returns a default selectivity estimate for given operator, when we don't @@ -535,7 +535,7 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata, case OID_RANGE_CONTAINS_OP: hist_selec = calc_hist_selectivity_contains(typcache, &const_lower, - &const_upper, hist_lower, nhist, + &const_upper, hist_lower, nhist, lslot.values, lslot.nvalues); break; @@ -554,14 +554,14 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata, { hist_selec = 1.0 - calc_hist_selectivity_scalar(typcache, &const_lower, - hist_lower, nhist, false); + hist_lower, nhist, false); } else { hist_selec = calc_hist_selectivity_contained(typcache, &const_lower, - &const_upper, hist_lower, nhist, - lslot.values, lslot.nvalues); + &const_upper, hist_lower, nhist, + lslot.values, lslot.nvalues); } break; @@ -599,7 +599,7 @@ calc_hist_selectivity_scalar(TypeCacheEntry *typcache, RangeBound *constbound, /* Adjust using linear interpolation within the bin */ if (index >= 0 && index < hist_nvalues - 1) selec += get_position(typcache, constbound, &hist[index], - &hist[index + 1]) / (Selectivity) (hist_nvalues - 1); + &hist[index + 1]) / (Selectivity) (hist_nvalues - 1); return selec; } @@ -694,7 +694,7 @@ get_position(TypeCacheEntry *typcache, RangeBound *value, RangeBound *hist1, /* Calculate relative position using subdiff function. 
*/ bin_width = DatumGetFloat8(FunctionCall2Coll( - &typcache->rng_subdiff_finfo, + &typcache->rng_subdiff_finfo, typcache->rng_collation, hist2->val, hist1->val)); @@ -702,7 +702,7 @@ get_position(TypeCacheEntry *typcache, RangeBound *value, RangeBound *hist1, return 0.5; /* zero width bin */ position = DatumGetFloat8(FunctionCall2Coll( - &typcache->rng_subdiff_finfo, + &typcache->rng_subdiff_finfo, typcache->rng_collation, value->val, hist1->val)) @@ -998,7 +998,7 @@ static double calc_hist_selectivity_contained(TypeCacheEntry *typcache, RangeBound *lower, RangeBound *upper, RangeBound *hist_lower, int hist_nvalues, - Datum *length_hist_values, int length_hist_nvalues) + Datum *length_hist_values, int length_hist_nvalues) { int i, upper_index; @@ -1108,7 +1108,7 @@ static double calc_hist_selectivity_contains(TypeCacheEntry *typcache, RangeBound *lower, RangeBound *upper, RangeBound *hist_lower, int hist_nvalues, - Datum *length_hist_values, int length_hist_nvalues) + Datum *length_hist_values, int length_hist_nvalues) { int i, lower_index; diff --git a/src/backend/utils/adt/rangetypes_spgist.c b/src/backend/utils/adt/rangetypes_spgist.c index a887e55b926..e82c4e1a9e1 100644 --- a/src/backend/utils/adt/rangetypes_spgist.c +++ b/src/backend/utils/adt/rangetypes_spgist.c @@ -213,7 +213,7 @@ spg_range_quad_picksplit(PG_FUNCTION_ARGS) *upperBounds; typcache = range_get_typcache(fcinfo, - RangeTypeGetOid(DatumGetRangeType(in->datums[0]))); + RangeTypeGetOid(DatumGetRangeType(in->datums[0]))); /* Allocate memory for bounds */ lowerBounds = palloc(sizeof(RangeBound) * in->nTuples); @@ -347,7 +347,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS) */ if (strategy != RANGESTRAT_CONTAINS_ELEM) empty = RangeIsEmpty( - DatumGetRangeType(in->scankeys[i].sk_argument)); + DatumGetRangeType(in->scankeys[i].sk_argument)); else empty = false; @@ -417,7 +417,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS) /* This node has a centroid. Fetch it. */ centroid = DatumGetRangeType(in->prefixDatum); typcache = range_get_typcache(fcinfo, - RangeTypeGetOid(DatumGetRangeType(centroid))); + RangeTypeGetOid(DatumGetRangeType(centroid))); range_deserialize(typcache, centroid, ¢roidLower, ¢roidUpper, ¢roidEmpty); @@ -574,7 +574,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS) */ cmp = adjacent_inner_consistent(typcache, &lower, ¢roidUpper, - prevCentroid ? &prevUpper : NULL); + prevCentroid ? &prevUpper : NULL); if (cmp > 0) which1 = (1 << 1) | (1 << 4); else if (cmp < 0) @@ -590,7 +590,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS) */ cmp = adjacent_inner_consistent(typcache, &upper, ¢roidLower, - prevCentroid ? &prevLower : NULL); + prevCentroid ? 
&prevLower : NULL); if (cmp > 0) which2 = (1 << 1) | (1 << 2); else if (cmp < 0) @@ -973,7 +973,7 @@ spg_range_quad_leaf_consistent(PG_FUNCTION_ARGS) break; case RANGESTRAT_CONTAINED_BY: res = range_contained_by_internal(typcache, leafRange, - DatumGetRangeType(keyDatum)); + DatumGetRangeType(keyDatum)); break; case RANGESTRAT_CONTAINS_ELEM: res = range_contains_elem_internal(typcache, leafRange, diff --git a/src/backend/utils/adt/rangetypes_typanalyze.c b/src/backend/utils/adt/rangetypes_typanalyze.c index 879540fc1ae..324bbe48e5f 100644 --- a/src/backend/utils/adt/rangetypes_typanalyze.c +++ b/src/backend/utils/adt/rangetypes_typanalyze.c @@ -33,7 +33,7 @@ static int float8_qsort_cmp(const void *a1, const void *a2); static int range_bound_qsort_cmp(const void *a1, const void *a2, void *arg); static void compute_range_stats(VacAttrStats *stats, - AnalyzeAttrFetchFunc fetchfunc, int samplerows, double totalrows); + AnalyzeAttrFetchFunc fetchfunc, int samplerows, double totalrows); /* * range_typanalyze -- typanalyze function for range columns @@ -165,9 +165,9 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, * and lower bound values. */ length = DatumGetFloat8(FunctionCall2Coll( - &typcache->rng_subdiff_finfo, - typcache->rng_collation, - upper.val, lower.val)); + &typcache->rng_subdiff_finfo, + typcache->rng_collation, + upper.val, lower.val)); } else { @@ -246,7 +246,7 @@ compute_range_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc, for (i = 0; i < num_hist; i++) { bound_hist_values[i] = PointerGetDatum(range_serialize( - typcache, &lowers[pos], &uppers[pos], false)); + typcache, &lowers[pos], &uppers[pos], false)); pos += delta; posfrac += deltafrac; if (posfrac >= (num_hist - 1)) diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c index 3a1647bc521..139bb583b1c 100644 --- a/src/backend/utils/adt/regexp.c +++ b/src/backend/utils/adt/regexp.c @@ -696,7 +696,7 @@ similar_escape(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE), errmsg("invalid escape string"), - errhint("Escape string must be empty or one character."))); + errhint("Escape string must be empty or one character."))); } } @@ -1028,7 +1028,7 @@ setup_regexp_matches(text *orig_str, text *pattern, pg_re_flags *re_flags, { array_len *= 2; matchctx->match_locs = (int *) repalloc(matchctx->match_locs, - sizeof(int) * array_len); + sizeof(int) * array_len); } /* save this match's locations */ @@ -1118,7 +1118,7 @@ build_regexp_match_result(regexp_matches_ctx *matchctx) else { elems[i] = DirectFunctionCall3(text_substr, - PointerGetDatum(matchctx->orig_str), + PointerGetDatum(matchctx->orig_str), Int32GetDatum(so + 1), Int32GetDatum(eo - so)); nulls[i] = false; @@ -1216,7 +1216,7 @@ regexp_split_to_array(PG_FUNCTION_ARGS) if (re_flags.glob) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("regexp_split_to_array does not support the global option"))); + errmsg("regexp_split_to_array does not support the global option"))); /* But we find all the matches anyway */ re_flags.glob = true; diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c index ba879e81cd4..6fe81fab7e4 100644 --- a/src/backend/utils/adt/regproc.c +++ b/src/backend/utils/adt/regproc.c @@ -76,7 +76,7 @@ regprocin(PG_FUNCTION_ARGS) strspn(pro_name_or_oid, "0123456789") == strlen(pro_name_or_oid)) { result = DatumGetObjectId(DirectFunctionCall1(oidin, - CStringGetDatum(pro_name_or_oid))); + CStringGetDatum(pro_name_or_oid))); 
PG_RETURN_OID(result); } @@ -247,7 +247,7 @@ regprocedurein(PG_FUNCTION_ARGS) strspn(pro_name_or_oid, "0123456789") == strlen(pro_name_or_oid)) { result = DatumGetObjectId(DirectFunctionCall1(oidin, - CStringGetDatum(pro_name_or_oid))); + CStringGetDatum(pro_name_or_oid))); PG_RETURN_OID(result); } @@ -497,7 +497,7 @@ regoperin(PG_FUNCTION_ARGS) strspn(opr_name_or_oid, "0123456789") == strlen(opr_name_or_oid)) { result = DatumGetObjectId(DirectFunctionCall1(oidin, - CStringGetDatum(opr_name_or_oid))); + CStringGetDatum(opr_name_or_oid))); PG_RETURN_OID(result); } @@ -670,7 +670,7 @@ regoperatorin(PG_FUNCTION_ARGS) strspn(opr_name_or_oid, "0123456789") == strlen(opr_name_or_oid)) { result = DatumGetObjectId(DirectFunctionCall1(oidin, - CStringGetDatum(opr_name_or_oid))); + CStringGetDatum(opr_name_or_oid))); PG_RETURN_OID(result); } @@ -916,7 +916,7 @@ regclassin(PG_FUNCTION_ARGS) strspn(class_name_or_oid, "0123456789") == strlen(class_name_or_oid)) { result = DatumGetObjectId(DirectFunctionCall1(oidin, - CStringGetDatum(class_name_or_oid))); + CStringGetDatum(class_name_or_oid))); PG_RETURN_OID(result); } @@ -1074,7 +1074,7 @@ regtypein(PG_FUNCTION_ARGS) strspn(typ_name_or_oid, "0123456789") == strlen(typ_name_or_oid)) { result = DatumGetObjectId(DirectFunctionCall1(oidin, - CStringGetDatum(typ_name_or_oid))); + CStringGetDatum(typ_name_or_oid))); PG_RETURN_OID(result); } @@ -1210,7 +1210,7 @@ regconfigin(PG_FUNCTION_ARGS) strspn(cfg_name_or_oid, "0123456789") == strlen(cfg_name_or_oid)) { result = DatumGetObjectId(DirectFunctionCall1(oidin, - CStringGetDatum(cfg_name_or_oid))); + CStringGetDatum(cfg_name_or_oid))); PG_RETURN_OID(result); } @@ -1321,7 +1321,7 @@ regdictionaryin(PG_FUNCTION_ARGS) strspn(dict_name_or_oid, "0123456789") == strlen(dict_name_or_oid)) { result = DatumGetObjectId(DirectFunctionCall1(oidin, - CStringGetDatum(dict_name_or_oid))); + CStringGetDatum(dict_name_or_oid))); PG_RETURN_OID(result); } @@ -1432,7 +1432,7 @@ regrolein(PG_FUNCTION_ARGS) strspn(role_name_or_oid, "0123456789") == strlen(role_name_or_oid)) { result = DatumGetObjectId(DirectFunctionCall1(oidin, - CStringGetDatum(role_name_or_oid))); + CStringGetDatum(role_name_or_oid))); PG_RETURN_OID(result); } @@ -1557,7 +1557,7 @@ regnamespacein(PG_FUNCTION_ARGS) strspn(nsp_name_or_oid, "0123456789") == strlen(nsp_name_or_oid)) { result = DatumGetObjectId(DirectFunctionCall1(oidin, - CStringGetDatum(nsp_name_or_oid))); + CStringGetDatum(nsp_name_or_oid))); PG_RETURN_OID(result); } diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c index 1c02f143ba0..c2891e6fa1e 100644 --- a/src/backend/utils/adt/ri_triggers.c +++ b/src/backend/utils/adt/ri_triggers.c @@ -2718,7 +2718,7 @@ ri_CheckTrigger(FunctionCallInfo fcinfo, const char *funcname, int tgkind) !TRIGGER_FIRED_FOR_ROW(trigdata->tg_event)) ereport(ERROR, (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), - errmsg("function \"%s\" must be fired AFTER ROW", funcname))); + errmsg("function \"%s\" must be fired AFTER ROW", funcname))); switch (tgkind) { @@ -2761,8 +2761,8 @@ ri_FetchConstraintInfo(Trigger *trigger, Relation trig_rel, bool rel_is_pk) if (!OidIsValid(constraintOid)) ereport(ERROR, (errcode(ERRCODE_INVALID_OBJECT_DEFINITION), - errmsg("no pg_constraint entry for trigger \"%s\" on table \"%s\"", - trigger->tgname, RelationGetRelationName(trig_rel)), + errmsg("no pg_constraint entry for trigger \"%s\" on table \"%s\"", + trigger->tgname, RelationGetRelationName(trig_rel)), errhint("Remove this referential integrity 
trigger and its mates, then do ALTER TABLE ADD CONSTRAINT."))); /* Find or create a hashtable entry for the constraint */ @@ -2834,7 +2834,7 @@ ri_LoadConstraintInfo(Oid constraintOid) /* And extract data */ Assert(riinfo->constraint_id == constraintOid); riinfo->oidHashValue = GetSysCacheHashValue1(CONSTROID, - ObjectIdGetDatum(constraintOid)); + ObjectIdGetDatum(constraintOid)); memcpy(&riinfo->conname, &conForm->conname, sizeof(NameData)); riinfo->pk_relid = conForm->confrelid; riinfo->fk_relid = conForm->conrelid; @@ -3156,7 +3156,7 @@ ri_PerformCheck(const RI_ConstraintInfo *riinfo, /* XXX wouldn't it be clearer to do this part at the caller? */ if (qkey->constr_queryno != RI_PLAN_CHECK_LOOKUPPK_FROM_PK && expect_OK == SPI_OK_SELECT && - (SPI_processed == 0) == (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK)) + (SPI_processed == 0) == (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK)) ri_ReportViolation(riinfo, pk_rel, fk_rel, new_tuple ? new_tuple : old_tuple, @@ -3327,9 +3327,9 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo, NameStr(riinfo->conname), RelationGetRelationName(fk_rel)), has_perm ? - errdetail("Key (%s)=(%s) is still referenced from table \"%s\".", - key_names.data, key_values.data, - RelationGetRelationName(fk_rel)) : + errdetail("Key (%s)=(%s) is still referenced from table \"%s\".", + key_names.data, key_values.data, + RelationGetRelationName(fk_rel)) : errdetail("Key is still referenced from table \"%s\".", RelationGetRelationName(fk_rel)), errtableconstraint(fk_rel, NameStr(riinfo->conname)))); diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c index 08b37b270e2..44acd13c6b1 100644 --- a/src/backend/utils/adt/rowtypes.c +++ b/src/backend/utils/adt/rowtypes.c @@ -100,7 +100,7 @@ record_in(PG_FUNCTION_ARGS) if (tupType == RECORDOID && tupTypmod < 0) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("input of anonymous composite types is not implemented"))); + errmsg("input of anonymous composite types is not implemented"))); /* * This comes from the composite type's pg_type.oid and stores system oids @@ -476,7 +476,7 @@ record_recv(PG_FUNCTION_ARGS) if (tupType == RECORDOID && tupTypmod < 0) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("input of anonymous composite types is not implemented"))); + errmsg("input of anonymous composite types is not implemented"))); tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod); ncolumns = tupdesc->natts; @@ -924,8 +924,8 @@ record_cmp(FunctionCallInfo fcinfo) if (!OidIsValid(typentry->cmp_proc_finfo.fn_oid)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), - errmsg("could not identify a comparison function for type %s", - format_type_be(typentry->type_id)))); + errmsg("could not identify a comparison function for type %s", + format_type_be(typentry->type_id)))); my_extra->columns[j].typentry = typentry; } @@ -1164,8 +1164,8 @@ record_eq(PG_FUNCTION_ARGS) if (!OidIsValid(typentry->eq_opr_finfo.fn_oid)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), - errmsg("could not identify an equality operator for type %s", - format_type_be(typentry->type_id)))); + errmsg("could not identify an equality operator for type %s", + format_type_be(typentry->type_id)))); my_extra->columns[j].typentry = typentry; } diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index b8ad47a20c8..18d9e27d1ed 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -925,7 +925,7 @@ pg_get_triggerdef_worker(Oid trigid, 
bool pretty) { if (OidIsValid(trigrec->tgconstrrelid)) appendStringInfo(&buf, "FROM %s ", - generate_relation_name(trigrec->tgconstrrelid, NIL)); + generate_relation_name(trigrec->tgconstrrelid, NIL)); if (!trigrec->tgdeferrable) appendStringInfoString(&buf, "NOT "); appendStringInfoString(&buf, "DEFERRABLE INITIALLY "); @@ -1309,7 +1309,7 @@ pg_get_indexdef_worker(Oid indexrelid, int colno, { /* Need parens if it's not a bare function call */ if (indexkey && IsA(indexkey, FuncExpr) && - ((FuncExpr *) indexkey)->funcformat == COERCE_EXPLICIT_CALL) + ((FuncExpr *) indexkey)->funcformat == COERCE_EXPLICIT_CALL) appendStringInfoString(&buf, str); else appendStringInfo(&buf, "(%s)", str); @@ -1477,7 +1477,7 @@ pg_get_statisticsobj_worker(Oid statextid, bool missing_ok) nsp = get_namespace_name(statextrec->stxnamespace); appendStringInfo(&buf, "CREATE STATISTICS %s", quote_qualified_identifier(nsp, - NameStr(statextrec->stxname))); + NameStr(statextrec->stxname))); /* * Decode the stxkind column so that we know which stats types to print. @@ -1631,7 +1631,7 @@ pg_get_partkeydef_worker(Oid relid, int prettyFlags, char *exprsString; exprsDatum = SysCacheGetAttr(PARTRELID, tuple, - Anum_pg_partitioned_table_partexprs, &isnull); + Anum_pg_partitioned_table_partexprs, &isnull); Assert(!isnull); exprsString = TextDatumGetCString(exprsDatum); partexprs = (List *) stringToNode(exprsString); @@ -2023,7 +2023,7 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, tblspc = get_rel_tablespace(indexId); if (OidIsValid(tblspc)) appendStringInfo(&buf, " USING INDEX TABLESPACE %s", - quote_identifier(get_tablespace_name(tblspc))); + quote_identifier(get_tablespace_name(tblspc))); } break; @@ -2458,7 +2458,7 @@ pg_get_functiondef(PG_FUNCTION_ARGS) print_function_trftypes(&buf, proctup); appendStringInfo(&buf, "\n LANGUAGE %s\n", - quote_identifier(get_language_name(proc->prolang, false))); + quote_identifier(get_language_name(proc->prolang, false))); /* Emit some miscellaneous options on one line */ oldlen = buf.len; @@ -5088,19 +5088,19 @@ get_select_query_def(Query *query, deparse_context *context, break; case LCS_FORKEYSHARE: appendContextKeyword(context, " FOR KEY SHARE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); + -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); break; case LCS_FORSHARE: appendContextKeyword(context, " FOR SHARE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); + -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); break; case LCS_FORNOKEYUPDATE: appendContextKeyword(context, " FOR NO KEY UPDATE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); + -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); break; case LCS_FORUPDATE: appendContextKeyword(context, " FOR UPDATE", - -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); + -PRETTYINDENT_STD, PRETTYINDENT_STD, 0); break; } @@ -5956,8 +5956,8 @@ get_insert_query_def(Query *query, deparse_context *context) * tle->resname, since resname will fail to track RENAME. 
*/ appendStringInfoString(buf, - quote_identifier(get_relid_attribute_name(rte->relid, - tle->resno))); + quote_identifier(get_relid_attribute_name(rte->relid, + tle->resno))); /* * Print any indirection needed (subfields or subscripts), and strip @@ -6237,7 +6237,7 @@ get_update_query_targetlist_def(Query *query, List *targetList, cur_ma_sublink = (SubLink *) lfirst(next_ma_cell); next_ma_cell = lnext(next_ma_cell); remaining_ma_columns = count_nonjunk_tlist_entries( - ((Query *) cur_ma_sublink->subselect)->targetList); + ((Query *) cur_ma_sublink->subselect)->targetList); Assert(((Param *) expr)->paramid == ((cur_ma_sublink->subLinkId << 16) | 1)); appendStringInfoChar(buf, '('); @@ -6249,8 +6249,8 @@ get_update_query_targetlist_def(Query *query, List *targetList, * tle->resname, since resname will fail to track RENAME. */ appendStringInfoString(buf, - quote_identifier(get_relid_attribute_name(rte->relid, - tle->resno))); + quote_identifier(get_relid_attribute_name(rte->relid, + tle->resno))); /* * Print any indirection needed (subfields or subscripts), and strip @@ -7733,7 +7733,7 @@ get_rule_expr(Node *node, deparse_context *context, appendStringInfo(buf, " %s %s (", generate_operator_name(expr->opno, exprType(arg1), - get_base_element_type(exprType(arg2))), + get_base_element_type(exprType(arg2))), expr->useOr ? "ANY" : "ALL"); get_rule_expr_paren(arg2, context, true, node); @@ -7753,7 +7753,7 @@ get_rule_expr(Node *node, deparse_context *context, ((SubLink *) arg2)->subLinkType == EXPR_SUBLINK) appendStringInfo(buf, "::%s", format_type_with_typemod(exprType(arg2), - exprTypmod(arg2))); + exprTypmod(arg2))); appendStringInfoChar(buf, ')'); if (!PRETTY_PAREN(context)) appendStringInfoChar(buf, ')'); @@ -8111,7 +8111,7 @@ get_rule_expr(Node *node, deparse_context *context, */ if (arrayexpr->elements == NIL) appendStringInfo(buf, "::%s", - format_type_with_typemod(arrayexpr->array_typeid, -1)); + format_type_with_typemod(arrayexpr->array_typeid, -1)); } break; @@ -8173,7 +8173,7 @@ get_rule_expr(Node *node, deparse_context *context, appendStringInfoChar(buf, ')'); if (rowexpr->row_format == COERCE_EXPLICIT_CAST) appendStringInfo(buf, "::%s", - format_type_with_typemod(rowexpr->row_typeid, -1)); + format_type_with_typemod(rowexpr->row_typeid, -1)); } break; @@ -8206,9 +8206,9 @@ get_rule_expr(Node *node, deparse_context *context, * be perfect. 
*/ appendStringInfo(buf, ") %s ROW(", - generate_operator_name(linitial_oid(rcexpr->opnos), - exprType(linitial(rcexpr->largs)), - exprType(linitial(rcexpr->rargs)))); + generate_operator_name(linitial_oid(rcexpr->opnos), + exprType(linitial(rcexpr->largs)), + exprType(linitial(rcexpr->rargs)))); sep = ""; foreach(arg, rcexpr->rargs) { @@ -8406,7 +8406,7 @@ get_rule_expr(Node *node, deparse_context *context, Assert(!con->constisnull); if (DatumGetBool(con->constvalue)) appendStringInfoString(buf, - " PRESERVE WHITESPACE"); + " PRESERVE WHITESPACE"); else appendStringInfoString(buf, " STRIP WHITESPACE"); @@ -8434,15 +8434,15 @@ get_rule_expr(Node *node, deparse_context *context, { case XML_STANDALONE_YES: appendStringInfoString(buf, - ", STANDALONE YES"); + ", STANDALONE YES"); break; case XML_STANDALONE_NO: appendStringInfoString(buf, - ", STANDALONE NO"); + ", STANDALONE NO"); break; case XML_STANDALONE_NO_VALUE: appendStringInfoString(buf, - ", STANDALONE NO VALUE"); + ", STANDALONE NO VALUE"); break; default: break; @@ -8628,7 +8628,7 @@ get_rule_expr(Node *node, deparse_context *context, if (iexpr->infercollid) appendStringInfo(buf, " COLLATE %s", - generate_collation_name(iexpr->infercollid)); + generate_collation_name(iexpr->infercollid)); /* Add the operator class name, if not default */ if (iexpr->inferopclass) @@ -9400,8 +9400,8 @@ get_sublink_expr(SubLink *sublink, deparse_context *context) get_rule_expr(linitial(opexpr->args), context, true); if (!opname) opname = generate_operator_name(opexpr->opno, - exprType(linitial(opexpr->args)), - exprType(lsecond(opexpr->args))); + exprType(linitial(opexpr->args)), + exprType(lsecond(opexpr->args))); sep = ", "; } appendStringInfoChar(buf, ')'); @@ -9415,7 +9415,7 @@ get_sublink_expr(SubLink *sublink, deparse_context *context) get_rule_expr((Node *) rcexpr->largs, context, true); opname = generate_operator_name(linitial_oid(rcexpr->opnos), exprType(linitial(rcexpr->largs)), - exprType(linitial(rcexpr->rargs))); + exprType(linitial(rcexpr->rargs))); appendStringInfoChar(buf, ')'); } else @@ -10216,7 +10216,7 @@ processIndirection(Node *node, deparse_context *context) */ Assert(list_length(fstore->fieldnums) == 1); fieldname = get_relid_attribute_name(typrelid, - linitial_int(fstore->fieldnums)); + linitial_int(fstore->fieldnums)); appendStringInfo(buf, ".%s", quote_identifier(fieldname)); /* diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index 63a25db5d39..ce4c5b50783 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -171,7 +171,7 @@ static double eqjoinsel_semi(Oid operator, VariableStatData *vardata1, VariableStatData *vardata2, RelOptInfo *inner_rel); static bool estimate_multivariate_ndistinct(PlannerInfo *root, - RelOptInfo *rel, List **varinfos, double *ndistinct); + RelOptInfo *rel, List **varinfos, double *ndistinct); static bool convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue, Datum lobound, Datum hibound, Oid boundstypid, double *scaledlobound, double *scaledhibound); @@ -334,7 +334,7 @@ var_eq_const(VariableStatData *vardata, Oid operator, } else if (HeapTupleIsValid(vardata->statsTuple) && statistic_proc_security_check(vardata, - (opfuncoid = get_opcode(operator)))) + (opfuncoid = get_opcode(operator)))) { AttStatsSlot sslot; bool match = false; @@ -360,12 +360,12 @@ var_eq_const(VariableStatData *vardata, Oid operator, /* be careful to apply operator right way 'round */ if (varonleft) match = DatumGetBool(FunctionCall2Coll(&eqproc, - 
DEFAULT_COLLATION_OID, + DEFAULT_COLLATION_OID, sslot.values[i], constval)); else match = DatumGetBool(FunctionCall2Coll(&eqproc, - DEFAULT_COLLATION_OID, + DEFAULT_COLLATION_OID, constval, sslot.values[i])); if (match) @@ -848,7 +848,7 @@ ineq_histogram_selectivity(PlannerInfo *root, vardata, sslot.staop, NULL, - &sslot.values[probe]); + &sslot.values[probe]); ltcmp = DatumGetBool(FunctionCall2Coll(opproc, DEFAULT_COLLATION_OID, @@ -1268,7 +1268,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate) break; case BYTEAOID: prefixstr = DatumGetCString(DirectFunctionCall1(byteaout, - prefix->constvalue)); + prefix->constvalue)); break; default: elog(ERROR, "unrecognized consttype: %u", @@ -1933,17 +1933,17 @@ scalararraysel(PlannerInfo *root, s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc, clause->inputcollid, PointerGetDatum(root), - ObjectIdGetDatum(operator), + ObjectIdGetDatum(operator), PointerGetDatum(args), Int16GetDatum(jointype), - PointerGetDatum(sjinfo))); + PointerGetDatum(sjinfo))); else s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc, clause->inputcollid, PointerGetDatum(root), - ObjectIdGetDatum(operator), + ObjectIdGetDatum(operator), PointerGetDatum(args), - Int32GetDatum(varRelid))); + Int32GetDatum(varRelid))); if (useOr) { @@ -2000,17 +2000,17 @@ scalararraysel(PlannerInfo *root, s2 = DatumGetFloat8(FunctionCall5Coll(&oprselproc, clause->inputcollid, PointerGetDatum(root), - ObjectIdGetDatum(operator), + ObjectIdGetDatum(operator), PointerGetDatum(args), Int16GetDatum(jointype), - PointerGetDatum(sjinfo))); + PointerGetDatum(sjinfo))); else s2 = DatumGetFloat8(FunctionCall4Coll(&oprselproc, clause->inputcollid, PointerGetDatum(root), - ObjectIdGetDatum(operator), + ObjectIdGetDatum(operator), PointerGetDatum(args), - Int32GetDatum(varRelid))); + Int32GetDatum(varRelid))); if (useOr) { @@ -2295,7 +2295,7 @@ eqjoinsel_inner(Oid operator, if (statistic_proc_security_check(vardata1, opfuncoid)) have_mcvs1 = get_attstatsslot(&sslot1, vardata1->statsTuple, STATISTIC_KIND_MCV, InvalidOid, - ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS); + ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS); } if (HeapTupleIsValid(vardata2->statsTuple)) @@ -2305,7 +2305,7 @@ eqjoinsel_inner(Oid operator, if (statistic_proc_security_check(vardata2, opfuncoid)) have_mcvs2 = get_attstatsslot(&sslot2, vardata2->statsTuple, STATISTIC_KIND_MCV, InvalidOid, - ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS); + ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS); } if (have_mcvs1 && have_mcvs2) @@ -2545,7 +2545,7 @@ eqjoinsel_semi(Oid operator, if (statistic_proc_security_check(vardata1, opfuncoid)) have_mcvs1 = get_attstatsslot(&sslot1, vardata1->statsTuple, STATISTIC_KIND_MCV, InvalidOid, - ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS); + ATTSTATSSLOT_VALUES | ATTSTATSSLOT_NUMBERS); } if (HeapTupleIsValid(vardata2->statsTuple) && @@ -4612,7 +4612,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid, if (varRelid == 0 || bms_is_member(varRelid, varnos)) { onerel = find_base_rel(root, - (varRelid ? varRelid : bms_singleton_member(varnos))); + (varRelid ? 
varRelid : bms_singleton_member(varnos))); vardata->rel = onerel; node = basenode; /* strip any relabeling */ } @@ -4714,7 +4714,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid, { vardata->statsTuple = SearchSysCache3(STATRELATTINH, - ObjectIdGetDatum(index->indexoid), + ObjectIdGetDatum(index->indexoid), Int16GetDatum(pos + 1), BoolGetDatum(false)); vardata->freefunc = ReleaseSysCache; @@ -4735,7 +4735,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid, */ vardata->acl_ok = (pg_class_aclcheck(rte->relid, GetUserId(), - ACL_SELECT) == ACLCHECK_OK); + ACL_SELECT) == ACLCHECK_OK); } else { @@ -5545,7 +5545,7 @@ like_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation, if (typeid == BYTEAOID) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("case insensitive matching not supported on type bytea"))); + errmsg("case insensitive matching not supported on type bytea"))); /* If case-insensitive, we need locale info */ if (lc_ctype_is_c(collation)) @@ -5601,7 +5601,7 @@ like_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation, /* Stop if case-varying character (it's sort of a wildcard) */ if (case_insensitive && - pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c)) + pattern_char_isalpha(patt[pos], is_multibyte, locale, locale_is_c)) break; match[match_pos++] = patt[pos]; @@ -5647,7 +5647,7 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive, Oid collation, if (typeid == BYTEAOID) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("regular-expression matching not supported on type bytea"))); + errmsg("regular-expression matching not supported on type bytea"))); /* Use the regexp machinery to extract the prefix, if any */ prefix = regexp_fixed_prefix(DatumGetTextPP(patt_const->constvalue), @@ -7827,7 +7827,7 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count, */ if (HeapTupleIsValid(vardata.statsTuple) && !vardata.freefunc) elog(ERROR, - "no function provided to release variable stats with"); + "no function provided to release variable stats with"); } else { @@ -7850,7 +7850,7 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count, attnum = qinfo->indexcol + 1; if (get_index_stats_hook && - (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata)) + (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata)) { /* * The hook took control of acquiring a stats tuple. If it @@ -7863,7 +7863,7 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count, else { vardata.statsTuple = SearchSysCache3(STATRELATTINH, - ObjectIdGetDatum(index->indexoid), + ObjectIdGetDatum(index->indexoid), Int16GetDatum(attnum), BoolGetDatum(false)); vardata.freefunc = ReleaseSysCache; diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c index d477e235507..6fa126d295b 100644 --- a/src/backend/utils/adt/timestamp.c +++ b/src/backend/utils/adt/timestamp.c @@ -110,9 +110,9 @@ anytimestamp_typmod_check(bool istz, int32 typmod) { ereport(WARNING, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("TIMESTAMP(%d)%s precision reduced to maximum allowed, %d", - typmod, (istz ? " WITH TIME ZONE" : ""), - MAX_TIMESTAMP_PRECISION))); + errmsg("TIMESTAMP(%d)%s precision reduced to maximum allowed, %d", + typmod, (istz ? 
" WITH TIME ZONE" : ""), + MAX_TIMESTAMP_PRECISION))); typmod = MAX_TIMESTAMP_PRECISION; } @@ -191,7 +191,7 @@ timestamp_in(PG_FUNCTION_ARGS) case DTK_INVALID: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("date/time value \"%s\" is no longer supported", str))); + errmsg("date/time value \"%s\" is no longer supported", str))); TIMESTAMP_NOEND(result); break; @@ -359,8 +359,8 @@ AdjustTimestampForTypmod(Timestamp *time, int32 typmod) if (typmod < 0 || typmod > MAX_TIMESTAMP_PRECISION) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("timestamp(%d) precision must be between %d and %d", - typmod, 0, MAX_TIMESTAMP_PRECISION))); + errmsg("timestamp(%d) precision must be between %d and %d", + typmod, 0, MAX_TIMESTAMP_PRECISION))); if (*time >= INT64CONST(0)) { @@ -431,7 +431,7 @@ timestamptz_in(PG_FUNCTION_ARGS) case DTK_INVALID: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("date/time value \"%s\" is no longer supported", str))); + errmsg("date/time value \"%s\" is no longer supported", str))); TIMESTAMP_NOEND(result); break; @@ -496,7 +496,7 @@ parse_sane_timezone(struct pg_tm *tm, text *zone) if (rt == DTERR_TZDISP_OVERFLOW) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("numeric time zone \"%s\" out of range", tzname))); + errmsg("numeric time zone \"%s\" out of range", tzname))); else if (rt != DTERR_BAD_FORMAT) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -938,7 +938,7 @@ interval_in(PG_FUNCTION_ARGS) case DTK_INVALID: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("date/time value \"%s\" is no longer supported", str))); + errmsg("date/time value \"%s\" is no longer supported", str))); break; default: @@ -1087,8 +1087,8 @@ intervaltypmodin(PG_FUNCTION_ARGS) { ereport(WARNING, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("INTERVAL(%d) precision reduced to maximum allowed, %d", - tl[1], MAX_INTERVAL_PRECISION))); + errmsg("INTERVAL(%d) precision reduced to maximum allowed, %d", + tl[1], MAX_INTERVAL_PRECISION))); typmod = INTERVAL_TYPMOD(MAX_INTERVAL_PRECISION, tl[0]); } else @@ -1459,8 +1459,8 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod) if (precision < 0 || precision > MAX_INTERVAL_PRECISION) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("interval(%d) precision must be between %d and %d", - precision, 0, MAX_INTERVAL_PRECISION))); + errmsg("interval(%d) precision must be between %d and %d", + precision, 0, MAX_INTERVAL_PRECISION))); if (interval->time >= INT64CONST(0)) { @@ -1694,7 +1694,7 @@ timestamptz_to_time_t(TimestampTz t) pg_time_t result; result = (pg_time_t) (t / USECS_PER_SEC + - ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY)); + ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY)); return result; } @@ -2625,7 +2625,7 @@ timestamp_mi(PG_FUNCTION_ARGS) *---------- */ result = DatumGetIntervalP(DirectFunctionCall1(interval_justify_hours, - IntervalPGetDatum(result))); + IntervalPGetDatum(result))); PG_RETURN_INTERVAL_P(result); } @@ -3161,7 +3161,7 @@ interval_mul(PG_FUNCTION_ARGS) month_remainder_days = (orig_month * factor - result->month) * DAYS_PER_MONTH; month_remainder_days = TSROUND(month_remainder_days); sec_remainder = (orig_day * factor - result->day + - month_remainder_days - (int) month_remainder_days) * SECS_PER_DAY; + month_remainder_days - (int) month_remainder_days) * SECS_PER_DAY; sec_remainder = TSROUND(sec_remainder); /* @@ -3224,7 +3224,7 @@ interval_div(PG_FUNCTION_ARGS) month_remainder_days 
= (orig_month / factor - result->month) * DAYS_PER_MONTH; month_remainder_days = TSROUND(month_remainder_days); sec_remainder = (orig_day / factor - result->day + - month_remainder_days - (int) month_remainder_days) * SECS_PER_DAY; + month_remainder_days - (int) month_remainder_days) * SECS_PER_DAY; sec_remainder = TSROUND(sec_remainder); if (Abs(sec_remainder) >= SECS_PER_DAY) { @@ -3272,7 +3272,7 @@ interval_accum(PG_FUNCTION_ARGS) newsum = DatumGetIntervalP(DirectFunctionCall2(interval_pl, IntervalPGetDatum(&sumX), - IntervalPGetDatum(newval))); + IntervalPGetDatum(newval))); N.time += 1; transdatums[0] = IntervalPGetDatum(newsum); @@ -3356,7 +3356,7 @@ interval_accum_inv(PG_FUNCTION_ARGS) newsum = DatumGetIntervalP(DirectFunctionCall2(interval_mi, IntervalPGetDatum(&sumX), - IntervalPGetDatum(newval))); + IntervalPGetDatum(newval))); N.time -= 1; transdatums[0] = IntervalPGetDatum(newsum); @@ -3911,8 +3911,8 @@ timestamptz_trunc(PG_FUNCTION_ARGS) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("timestamp with time zone units \"%s\" not recognized", - lowunits))); + errmsg("timestamp with time zone units \"%s\" not recognized", + lowunits))); result = 0; } @@ -3985,7 +3985,7 @@ interval_trunc(PG_FUNCTION_ARGS) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("interval units \"%s\" not supported " - "because months usually have fractional weeks", + "because months usually have fractional weeks", lowunits))); else ereport(ERROR, @@ -4206,8 +4206,8 @@ NonFiniteTimestampTzPart(int type, int unit, char *lowunits, if (isTz) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("timestamp with time zone units \"%s\" not recognized", - lowunits))); + errmsg("timestamp with time zone units \"%s\" not recognized", + lowunits))); else ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), @@ -4252,8 +4252,8 @@ NonFiniteTimestampTzPart(int type, int unit, char *lowunits, if (isTz) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("timestamp with time zone units \"%s\" not supported", - lowunits))); + errmsg("timestamp with time zone units \"%s\" not supported", + lowunits))); else ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -4391,7 +4391,7 @@ timestamp_part(PG_FUNCTION_ARGS) case DTK_JULIAN: result = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday); result += ((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) + - tm->tm_sec + (fsec / 1000000.0)) / (double) SECS_PER_DAY; + tm->tm_sec + (fsec / 1000000.0)) / (double) SECS_PER_DAY; break; case DTK_ISOYEAR: @@ -4595,7 +4595,7 @@ timestamptz_part(PG_FUNCTION_ARGS) case DTK_JULIAN: result = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday); result += ((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) + - tm->tm_sec + (fsec / 1000000.0)) / (double) SECS_PER_DAY; + tm->tm_sec + (fsec / 1000000.0)) / (double) SECS_PER_DAY; break; case DTK_ISOYEAR: @@ -4625,8 +4625,8 @@ timestamptz_part(PG_FUNCTION_ARGS) default: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("timestamp with time zone units \"%s\" not supported", - lowunits))); + errmsg("timestamp with time zone units \"%s\" not supported", + lowunits))); result = 0; } @@ -4647,8 +4647,8 @@ timestamptz_part(PG_FUNCTION_ARGS) default: ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("timestamp with time zone units \"%s\" not supported", - lowunits))); + errmsg("timestamp with time zone units \"%s\" not supported", + lowunits))); result = 0; } } @@ -4656,8 +4656,8 @@ 
timestamptz_part(PG_FUNCTION_ARGS) { ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("timestamp with time zone units \"%s\" not recognized", - lowunits))); + errmsg("timestamp with time zone units \"%s\" not recognized", + lowunits))); result = 0; } @@ -4916,9 +4916,9 @@ timestamp_izone(PG_FUNCTION_ARGS) if (zone->month != 0 || zone->day != 0) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("interval time zone \"%s\" must not include months or days", - DatumGetCString(DirectFunctionCall1(interval_out, - PointerGetDatum(zone)))))); + errmsg("interval time zone \"%s\" must not include months or days", + DatumGetCString(DirectFunctionCall1(interval_out, + PointerGetDatum(zone)))))); tz = zone->time / USECS_PER_SEC; @@ -5113,9 +5113,9 @@ timestamptz_izone(PG_FUNCTION_ARGS) if (zone->month != 0 || zone->day != 0) ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("interval time zone \"%s\" must not include months or days", - DatumGetCString(DirectFunctionCall1(interval_out, - PointerGetDatum(zone)))))); + errmsg("interval time zone \"%s\" must not include months or days", + DatumGetCString(DirectFunctionCall1(interval_out, + PointerGetDatum(zone)))))); tz = -(zone->time / USECS_PER_SEC); @@ -5196,9 +5196,9 @@ generate_series_timestamp(PG_FUNCTION_ARGS) { /* increment current in preparation for next iteration */ fctx->current = DatumGetTimestamp( - DirectFunctionCall2(timestamp_pl_interval, - TimestampGetDatum(fctx->current), - PointerGetDatum(&fctx->step))); + DirectFunctionCall2(timestamp_pl_interval, + TimestampGetDatum(fctx->current), + PointerGetDatum(&fctx->step))); /* do when there is more left to send */ SRF_RETURN_NEXT(funcctx, TimestampGetDatum(result)); @@ -5277,9 +5277,9 @@ generate_series_timestamptz(PG_FUNCTION_ARGS) { /* increment current in preparation for next iteration */ fctx->current = DatumGetTimestampTz( - DirectFunctionCall2(timestamptz_pl_interval, - TimestampTzGetDatum(fctx->current), - PointerGetDatum(&fctx->step))); + DirectFunctionCall2(timestamptz_pl_interval, + TimestampTzGetDatum(fctx->current), + PointerGetDatum(&fctx->step))); /* do when there is more left to send */ SRF_RETURN_NEXT(funcctx, TimestampTzGetDatum(result)); diff --git a/src/backend/utils/adt/tsquery.c b/src/backend/utils/adt/tsquery.c index 53e36afbe8e..ee047bd8d57 100644 --- a/src/backend/utils/adt/tsquery.c +++ b/src/backend/utils/adt/tsquery.c @@ -458,7 +458,7 @@ cleanOpStack(TSQueryParserState state, { /* NOT is right associative unlike to others */ if ((op != OP_NOT && opPriority > OP_PRIORITY(stack[*lenstack - 1].op)) || - (op == OP_NOT && opPriority >= OP_PRIORITY(stack[*lenstack - 1].op))) + (op == OP_NOT && opPriority >= OP_PRIORITY(stack[*lenstack - 1].op))) break; (*lenstack)--; diff --git a/src/backend/utils/adt/tsvector_op.c b/src/backend/utils/adt/tsvector_op.c index 66c5255f8b4..2d7407c29cb 100644 --- a/src/backend/utils/adt/tsvector_op.c +++ b/src/backend/utils/adt/tsvector_op.c @@ -674,7 +674,7 @@ tsvector_unnest(PG_FUNCTION_ARGS) Datum values[3]; values[0] = PointerGetDatum( - cstring_to_text_with_len(data + arrin[i].pos, arrin[i].len) + cstring_to_text_with_len(data + arrin[i].pos, arrin[i].len) ); if (arrin[i].haspos) @@ -697,14 +697,14 @@ tsvector_unnest(PG_FUNCTION_ARGS) positions[j] = Int16GetDatum(WEP_GETPOS(posv->pos[j])); weight = 'D' - WEP_GETWEIGHT(posv->pos[j]); weights[j] = PointerGetDatum( - cstring_to_text_with_len(&weight, 1) + cstring_to_text_with_len(&weight, 1) ); } values[1] = PointerGetDatum( - 
construct_array(positions, posv->npos, INT2OID, 2, true, 's')); + construct_array(positions, posv->npos, INT2OID, 2, true, 's')); values[2] = PointerGetDatum( - construct_array(weights, posv->npos, TEXTOID, -1, false, 'i')); + construct_array(weights, posv->npos, TEXTOID, -1, false, 'i')); } else { @@ -738,7 +738,7 @@ tsvector_to_array(PG_FUNCTION_ARGS) for (i = 0; i < tsin->size; i++) { elements[i] = PointerGetDatum( - cstring_to_text_with_len(STRPTR(tsin) + arrin[i].pos, arrin[i].len) + cstring_to_text_with_len(STRPTR(tsin) + arrin[i].pos, arrin[i].len) ); } diff --git a/src/backend/utils/adt/uuid.c b/src/backend/utils/adt/uuid.c index eaf2f8064d4..5f15c8e6196 100644 --- a/src/backend/utils/adt/uuid.c +++ b/src/backend/utils/adt/uuid.c @@ -340,7 +340,7 @@ uuid_abbrev_abort(int memtupcount, SortSupport ssup) if (trace_sort) elog(LOG, "uuid_abbrev: aborting abbreviation at cardinality %f" - " below threshold %f after " INT64_FORMAT " values (%d rows)", + " below threshold %f after " INT64_FORMAT " values (%d rows)", abbr_card, uss->input_count / 2000.0 + 0.5, uss->input_count, memtupcount); #endif diff --git a/src/backend/utils/adt/varbit.c b/src/backend/utils/adt/varbit.c index 41238dd763f..0cf1c6f6d60 100644 --- a/src/backend/utils/adt/varbit.c +++ b/src/backend/utils/adt/varbit.c @@ -161,8 +161,8 @@ bit_in(PG_FUNCTION_ARGS) if (slen > VARBITMAXLEN / 4) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("bit string length exceeds the maximum allowed (%d)", - VARBITMAXLEN))); + errmsg("bit string length exceeds the maximum allowed (%d)", + VARBITMAXLEN))); bitlen = slen * 4; } @@ -473,8 +473,8 @@ varbit_in(PG_FUNCTION_ARGS) if (slen > VARBITMAXLEN / 4) ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), - errmsg("bit string length exceeds the maximum allowed (%d)", - VARBITMAXLEN))); + errmsg("bit string length exceeds the maximum allowed (%d)", + VARBITMAXLEN))); bitlen = slen * 4; } diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c index 6f2f9f66339..cbc62b00be2 100644 --- a/src/backend/utils/adt/varchar.c +++ b/src/backend/utils/adt/varchar.c @@ -467,8 +467,8 @@ varchar_input(const char *s, size_t len, int32 atttypmod) if (s[j] != ' ') ereport(ERROR, (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), - errmsg("value too long for type character varying(%d)", - (int) maxlen))); + errmsg("value too long for type character varying(%d)", + (int) maxlen))); } len = mbmaxlen; @@ -620,8 +620,8 @@ varchar(PG_FUNCTION_ARGS) if (s_data[i] != ' ') ereport(ERROR, (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION), - errmsg("value too long for type character varying(%d)", - maxlen))); + errmsg("value too long for type character varying(%d)", + maxlen))); } PG_RETURN_VARCHAR_P((VarChar *) cstring_to_text_with_len(s_data, diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index be53f7d60d2..2407394b068 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -2139,7 +2139,7 @@ varstrfastcmp_locale(Datum x, Datum y, SortSupport ssup) &status); if (U_FAILURE(status)) ereport(ERROR, - (errmsg("collation failed: %s", u_errorName(status)))); + (errmsg("collation failed: %s", u_errorName(status)))); } else #endif @@ -2350,7 +2350,7 @@ varstr_abbrev_convert(Datum original, SortSupport ssup) &iter, state, (uint8_t *) sss->buf2, - Min(sizeof(Datum), sss->buflen2), + Min(sizeof(Datum), sss->buflen2), &status); if (U_FAILURE(status)) ereport(ERROR, @@ -2359,7 +2359,7 @@ varstr_abbrev_convert(Datum original, 
SortSupport ssup) else bsize = ucol_getSortKey(sss->locale->info.icu.ucol, uchar, ulen, - (uint8_t *) sss->buf2, sss->buflen2); + (uint8_t *) sss->buf2, sss->buflen2); } else #endif @@ -4244,7 +4244,7 @@ text_to_array_internal(PG_FUNCTION_ARGS) /* XXX: this hardcodes assumptions about the text type */ PG_RETURN_ARRAYTYPE_P(construct_md_array(elems, nulls, 1, dims, lbs, - TEXTOID, -1, false, 'i')); + TEXTOID, -1, false, 'i')); } start_posn = 1; @@ -5317,7 +5317,7 @@ text_format_parse_format(const char *start_ptr, const char *end_ptr, if (*cp != '$') ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("width argument position must be ended by \"$\""))); + errmsg("width argument position must be ended by \"$\""))); /* The number was width argument position */ *widthpos = n; /* Explicit 0 for argument index is immediately refused */ @@ -5362,7 +5362,7 @@ text_format_string_conversion(StringInfo buf, char conversion, else if (conversion == 'I') ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), - errmsg("null values cannot be formatted as an SQL identifier"))); + errmsg("null values cannot be formatted as an SQL identifier"))); return; } diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c index 0ed679eea69..323614c183d 100644 --- a/src/backend/utils/adt/xml.c +++ b/src/backend/utils/adt/xml.c @@ -157,7 +157,7 @@ static StringInfo query_to_xml_internal(const char *query, char *tablename, const char *xmlschema, bool nulls, bool tableforest, const char *targetns, bool top_level); static const char *map_sql_table_to_xmlschema(TupleDesc tupdesc, Oid relid, - bool nulls, bool tableforest, const char *targetns); + bool nulls, bool tableforest, const char *targetns); static const char *map_sql_schema_to_xmlschema_types(Oid nspid, List *relid_list, bool nulls, bool tableforest, const char *targetns); @@ -798,7 +798,7 @@ xmlpi(char *target, text *arg, bool arg_is_null, bool *result_is_null) ereport(ERROR, (errcode(ERRCODE_INVALID_XML_PROCESSING_INSTRUCTION), errmsg("invalid XML processing instruction"), - errdetail("XML processing instruction cannot contain \"?>\"."))); + errdetail("XML processing instruction cannot contain \"?>\"."))); appendStringInfoChar(&buf, ' '); appendStringInfoString(&buf, string + strspn(string, " ")); @@ -1439,7 +1439,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg, bool preserve_whitespace, NULL, "UTF-8", XML_PARSE_NOENT | XML_PARSE_DTDATTR - | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS)); + | (preserve_whitespace ? 0 : XML_PARSE_NOBLANKS)); if (doc == NULL || xmlerrcxt->err_occurred) xml_ereport(xmlerrcxt, ERROR, ERRCODE_INVALID_XML_DOCUMENT, "invalid XML document"); @@ -1455,7 +1455,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg, bool preserve_whitespace, &count, &version, NULL, &standalone); if (res_code != 0) xml_ereport_by_code(ERROR, ERRCODE_INVALID_XML_CONTENT, - "invalid XML content: invalid XML declaration", + "invalid XML content: invalid XML declaration", res_code); doc = xmlNewDoc(version); @@ -1467,7 +1467,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg, bool preserve_whitespace, if (*(utf8string + count)) { res_code = xmlParseBalancedChunkMemory(doc, NULL, NULL, 0, - utf8string + count, NULL); + utf8string + count, NULL); if (res_code != 0 || xmlerrcxt->err_occurred) xml_ereport(xmlerrcxt, ERROR, ERRCODE_INVALID_XML_CONTENT, "invalid XML content"); @@ -1623,7 +1623,7 @@ xml_errorHandler(void *data, xmlErrorPtr error) xmlParserInputPtr input = (ctxt != NULL) ? 
ctxt->input : NULL; xmlNodePtr node = error->node; const xmlChar *name = (node != NULL && - node->type == XML_ELEMENT_NODE) ? node->name : NULL; + node->type == XML_ELEMENT_NODE) ? node->name : NULL; int domain = error->domain; int level = error->level; StringInfo errorBuf; @@ -2171,10 +2171,10 @@ map_sql_value_to_xml_value(Datum value, Oid type, bool xml_escape_strings) if (xmlbinary == XMLBINARY_BASE64) xmlTextWriterWriteBase64(writer, VARDATA_ANY(bstr), - 0, VARSIZE_ANY_EXHDR(bstr)); + 0, VARSIZE_ANY_EXHDR(bstr)); else xmlTextWriterWriteBinHex(writer, VARDATA_ANY(bstr), - 0, VARSIZE_ANY_EXHDR(bstr)); + 0, VARSIZE_ANY_EXHDR(bstr)); /* we MUST do this now to flush data out to the buffer */ xmlFreeTextWriter(writer); @@ -2385,8 +2385,8 @@ database_get_xml_visible_tables(void) CppAsString2(RELKIND_RELATION) "," CppAsString2(RELKIND_MATVIEW) "," CppAsString2(RELKIND_VIEW) ")" - " AND pg_catalog.has_table_privilege(pg_class.oid, 'SELECT')" - " AND relnamespace IN (" XML_VISIBLE_SCHEMAS ");"); + " AND pg_catalog.has_table_privilege(pg_class.oid, 'SELECT')" + " AND relnamespace IN (" XML_VISIBLE_SCHEMAS ");"); } @@ -2405,7 +2405,7 @@ table_to_xml_internal(Oid relid, initStringInfo(&query); appendStringInfo(&query, "SELECT * FROM %s", DatumGetCString(DirectFunctionCall1(regclassout, - ObjectIdGetDatum(relid)))); + ObjectIdGetDatum(relid)))); return query_to_xml_internal(query.data, get_rel_name(relid), xmlschema, nulls, tableforest, targetns, top_level); @@ -2421,8 +2421,8 @@ table_to_xml(PG_FUNCTION_ARGS) const char *targetns = text_to_cstring(PG_GETARG_TEXT_PP(3)); PG_RETURN_XML_P(stringinfo_to_xmltype(table_to_xml_internal(relid, NULL, - nulls, tableforest, - targetns, true))); + nulls, tableforest, + targetns, true))); } @@ -2435,8 +2435,8 @@ query_to_xml(PG_FUNCTION_ARGS) const char *targetns = text_to_cstring(PG_GETARG_TEXT_PP(3)); PG_RETURN_XML_P(stringinfo_to_xmltype(query_to_xml_internal(query, NULL, - NULL, nulls, tableforest, - targetns, true))); + NULL, nulls, tableforest, + targetns, true))); } @@ -2640,7 +2640,7 @@ cursor_to_xmlschema(PG_FUNCTION_ARGS) xmlschema = _SPI_strdup(map_sql_table_to_xmlschema(portal->tupDesc, InvalidOid, nulls, - tableforest, targetns)); + tableforest, targetns)); SPI_finish(); PG_RETURN_XML_P(cstring_to_xmltype(xmlschema)); @@ -2663,8 +2663,8 @@ table_to_xml_and_xmlschema(PG_FUNCTION_ARGS) heap_close(rel, NoLock); PG_RETURN_XML_P(stringinfo_to_xmltype(table_to_xml_internal(relid, - xmlschema, nulls, tableforest, - targetns, true))); + xmlschema, nulls, tableforest, + targetns, true))); } @@ -2689,13 +2689,13 @@ query_to_xml_and_xmlschema(PG_FUNCTION_ARGS) elog(ERROR, "SPI_cursor_open(\"%s\") failed", query); xmlschema = _SPI_strdup(map_sql_table_to_xmlschema(portal->tupDesc, - InvalidOid, nulls, tableforest, targetns)); + InvalidOid, nulls, tableforest, targetns)); SPI_cursor_close(portal); SPI_finish(); PG_RETURN_XML_P(stringinfo_to_xmltype(query_to_xml_internal(query, NULL, - xmlschema, nulls, tableforest, - targetns, true))); + xmlschema, nulls, tableforest, + targetns, true))); } @@ -2762,7 +2762,7 @@ schema_to_xml(PG_FUNCTION_ARGS) nspid = LookupExplicitNamespace(schemaname, false); PG_RETURN_XML_P(stringinfo_to_xmltype(schema_to_xml_internal(nspid, NULL, - nulls, tableforest, targetns, true))); + nulls, tableforest, targetns, true))); } @@ -2827,8 +2827,8 @@ schema_to_xmlschema_internal(const char *schemaname, bool nulls, map_sql_typecoll_to_xmlschema_types(tupdesc_list)); appendStringInfoString(result, - map_sql_schema_to_xmlschema_types(nspid, 
relid_list, - nulls, tableforest, targetns)); + map_sql_schema_to_xmlschema_types(nspid, relid_list, + nulls, tableforest, targetns)); xsd_schema_element_end(result); @@ -2847,7 +2847,7 @@ schema_to_xmlschema(PG_FUNCTION_ARGS) const char *targetns = text_to_cstring(PG_GETARG_TEXT_PP(3)); PG_RETURN_XML_P(stringinfo_to_xmltype(schema_to_xmlschema_internal(NameStr(*name), - nulls, tableforest, targetns))); + nulls, tableforest, targetns))); } @@ -2869,8 +2869,8 @@ schema_to_xml_and_xmlschema(PG_FUNCTION_ARGS) tableforest, targetns); PG_RETURN_XML_P(stringinfo_to_xmltype(schema_to_xml_internal(nspid, - xmlschema->data, nulls, - tableforest, targetns, true))); + xmlschema->data, nulls, + tableforest, targetns, true))); } @@ -2930,7 +2930,7 @@ database_to_xml(PG_FUNCTION_ARGS) const char *targetns = text_to_cstring(PG_GETARG_TEXT_PP(2)); PG_RETURN_XML_P(stringinfo_to_xmltype(database_to_xml_internal(NULL, nulls, - tableforest, targetns))); + tableforest, targetns))); } @@ -2985,7 +2985,7 @@ database_to_xmlschema(PG_FUNCTION_ARGS) const char *targetns = text_to_cstring(PG_GETARG_TEXT_PP(2)); PG_RETURN_XML_P(stringinfo_to_xmltype(database_to_xmlschema_internal(nulls, - tableforest, targetns))); + tableforest, targetns))); } @@ -3000,7 +3000,7 @@ database_to_xml_and_xmlschema(PG_FUNCTION_ARGS) xmlschema = database_to_xmlschema_internal(nulls, tableforest, targetns); PG_RETURN_XML_P(stringinfo_to_xmltype(database_to_xml_internal(xmlschema->data, - nulls, tableforest, targetns))); + nulls, tableforest, targetns))); } @@ -3065,14 +3065,14 @@ map_sql_table_to_xmlschema(TupleDesc tupdesc, Oid relid, bool nulls, true, false); tabletypename = map_multipart_sql_identifier_to_xml_name("TableType", - get_database_name(MyDatabaseId), - get_namespace_name(reltuple->relnamespace), - NameStr(reltuple->relname)); + get_database_name(MyDatabaseId), + get_namespace_name(reltuple->relnamespace), + NameStr(reltuple->relname)); rowtypename = map_multipart_sql_identifier_to_xml_name("RowType", - get_database_name(MyDatabaseId), - get_namespace_name(reltuple->relnamespace), - NameStr(reltuple->relname)); + get_database_name(MyDatabaseId), + get_namespace_name(reltuple->relnamespace), + NameStr(reltuple->relname)); ReleaseSysCache(tuple); } @@ -3090,7 +3090,7 @@ map_sql_table_to_xmlschema(TupleDesc tupdesc, Oid relid, bool nulls, xsd_schema_element_start(&result, targetns); appendStringInfoString(&result, - map_sql_typecoll_to_xmlschema_types(list_make1(tupdesc))); + map_sql_typecoll_to_xmlschema_types(list_make1(tupdesc))); appendStringInfo(&result, "<xsd:complexType name=\"%s\">\n" @@ -3102,10 +3102,10 @@ map_sql_table_to_xmlschema(TupleDesc tupdesc, Oid relid, bool nulls, if (tupdesc->attrs[i]->attisdropped) continue; appendStringInfo(&result, - " <xsd:element name=\"%s\" type=\"%s\"%s></xsd:element>\n", - map_sql_identifier_to_xml_name(NameStr(tupdesc->attrs[i]->attname), - true, false), - map_sql_type_to_xml_name(tupdesc->attrs[i]->atttypid, -1), + " <xsd:element name=\"%s\" type=\"%s\"%s></xsd:element>\n", + map_sql_identifier_to_xml_name(NameStr(tupdesc->attrs[i]->attname), + true, false), + map_sql_type_to_xml_name(tupdesc->attrs[i]->atttypid, -1), nulls ? " nillable=\"true\"" : " minOccurs=\"0\""); } @@ -3180,9 +3180,9 @@ map_sql_schema_to_xmlschema_types(Oid nspid, List *relid_list, bool nulls, char *relname = get_rel_name(relid); char *xmltn = map_sql_identifier_to_xml_name(relname, true, false); char *tabletypename = map_multipart_sql_identifier_to_xml_name(tableforest ? 
"RowType" : "TableType", - dbname, - nspname, - relname); + dbname, + nspname, + relname); if (!tableforest) appendStringInfo(&result, @@ -3247,9 +3247,9 @@ map_sql_catalog_to_xmlschema_types(List *nspid_list, bool nulls, char *nspname = get_namespace_name(nspid); char *xmlsn = map_sql_identifier_to_xml_name(nspname, true, false); char *schematypename = map_multipart_sql_identifier_to_xml_name("SchemaType", - dbname, - nspname, - NULL); + dbname, + nspname, + NULL); appendStringInfo(&result, " <xsd:element name=\"%s\" type=\"%s\"/>\n", @@ -3361,9 +3361,9 @@ map_sql_type_to_xml_name(Oid typeoid, int typmod) appendStringInfoString(&result, map_multipart_sql_identifier_to_xml_name((typtuple->typtype == TYPTYPE_DOMAIN) ? "Domain" : "UDT", - get_database_name(MyDatabaseId), - get_namespace_name(typtuple->typnamespace), - NameStr(typtuple->typname))); + get_database_name(MyDatabaseId), + get_namespace_name(typtuple->typnamespace), + NameStr(typtuple->typname))); ReleaseSysCache(tuple); } @@ -3471,15 +3471,15 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod) appendStringInfo(&result, " <xsd:restriction base=\"xsd:%s\">\n" " </xsd:restriction>\n", - xmlbinary == XMLBINARY_BASE64 ? "base64Binary" : "hexBinary"); + xmlbinary == XMLBINARY_BASE64 ? "base64Binary" : "hexBinary"); break; case NUMERICOID: if (typmod != -1) appendStringInfo(&result, - " <xsd:restriction base=\"xsd:decimal\">\n" + " <xsd:restriction base=\"xsd:decimal\">\n" " <xsd:totalDigits value=\"%d\"/>\n" - " <xsd:fractionDigits value=\"%d\"/>\n" + " <xsd:fractionDigits value=\"%d\"/>\n" " </xsd:restriction>\n", ((typmod - VARHDRSZ) >> 16) & 0xffff, (typmod - VARHDRSZ) & 0xffff); @@ -3506,16 +3506,16 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod) case INT8OID: appendStringInfo(&result, " <xsd:restriction base=\"xsd:long\">\n" - " <xsd:maxInclusive value=\"" INT64_FORMAT "\"/>\n" - " <xsd:minInclusive value=\"" INT64_FORMAT "\"/>\n" + " <xsd:maxInclusive value=\"" INT64_FORMAT "\"/>\n" + " <xsd:minInclusive value=\"" INT64_FORMAT "\"/>\n" " </xsd:restriction>\n", - (((uint64) 1) << (sizeof(int64) * 8 - 1)) - 1, + (((uint64) 1) << (sizeof(int64) * 8 - 1)) - 1, (((uint64) 1) << (sizeof(int64) * 8 - 1))); break; case FLOAT4OID: appendStringInfoString(&result, - " <xsd:restriction base=\"xsd:float\"></xsd:restriction>\n"); + " <xsd:restriction base=\"xsd:float\"></xsd:restriction>\n"); break; case FLOAT8OID: @@ -3535,19 +3535,19 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod) if (typmod == -1) appendStringInfo(&result, - " <xsd:restriction base=\"xsd:time\">\n" + " <xsd:restriction base=\"xsd:time\">\n" " <xsd:pattern value=\"\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}(.\\p{Nd}+)?%s\"/>\n" " </xsd:restriction>\n", tz); else if (typmod == 0) appendStringInfo(&result, - " <xsd:restriction base=\"xsd:time\">\n" + " <xsd:restriction base=\"xsd:time\">\n" " <xsd:pattern value=\"\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}%s\"/>\n" " </xsd:restriction>\n", tz); else appendStringInfo(&result, - " <xsd:restriction base=\"xsd:time\">\n" + " <xsd:restriction base=\"xsd:time\">\n" " <xsd:pattern value=\"\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}.\\p{Nd}{%d}%s\"/>\n" - " </xsd:restriction>\n", typmod - VARHDRSZ, tz); + " </xsd:restriction>\n", typmod - VARHDRSZ, tz); break; } @@ -3558,25 +3558,25 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod) if (typmod == -1) appendStringInfo(&result, - " <xsd:restriction base=\"xsd:dateTime\">\n" + " <xsd:restriction base=\"xsd:dateTime\">\n" " <xsd:pattern 
value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}T\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}(.\\p{Nd}+)?%s\"/>\n" " </xsd:restriction>\n", tz); else if (typmod == 0) appendStringInfo(&result, - " <xsd:restriction base=\"xsd:dateTime\">\n" + " <xsd:restriction base=\"xsd:dateTime\">\n" " <xsd:pattern value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}T\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}%s\"/>\n" " </xsd:restriction>\n", tz); else appendStringInfo(&result, - " <xsd:restriction base=\"xsd:dateTime\">\n" + " <xsd:restriction base=\"xsd:dateTime\">\n" " <xsd:pattern value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}T\\p{Nd}{2}:\\p{Nd}{2}:\\p{Nd}{2}.\\p{Nd}{%d}%s\"/>\n" - " </xsd:restriction>\n", typmod - VARHDRSZ, tz); + " </xsd:restriction>\n", typmod - VARHDRSZ, tz); break; } case DATEOID: appendStringInfoString(&result, - " <xsd:restriction base=\"xsd:date\">\n" + " <xsd:restriction base=\"xsd:date\">\n" " <xsd:pattern value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}\"/>\n" " </xsd:restriction>\n"); break; @@ -3591,7 +3591,7 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod) appendStringInfo(&result, " <xsd:restriction base=\"%s\"/>\n", - map_sql_type_to_xml_name(base_typeoid, base_typmod)); + map_sql_type_to_xml_name(base_typeoid, base_typmod)); } break; } @@ -3650,7 +3650,7 @@ SPI_sql_row_to_xmlelement(uint64 rownum, StringInfo result, char *tablename, appendStringInfo(result, " <%s>%s</%s>\n", colname, map_sql_value_to_xml_value(colval, - SPI_gettypeid(SPI_tuptable->tupdesc, i), true), + SPI_gettypeid(SPI_tuptable->tupdesc, i), true), colname); } @@ -3772,7 +3772,7 @@ xml_xpathobjtoxmlarray(xmlXPathObjectPtr xpathobj, for (i = 0; i < result; i++) { datum = PointerGetDatum(xml_xmlnodetoxmltype(xpathobj->nodesetval->nodeTab[i], - xmlerrcxt)); + xmlerrcxt)); (void) accumArrayResult(astate, datum, false, XMLOID, CurrentMemoryContext); } @@ -3936,7 +3936,7 @@ xpath_internal(text *xpath_expr_text, xmltype *data, ArrayType *namespaces, ns_names_uris_nulls[i * 2 + 1]) ereport(ERROR, (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), - errmsg("neither namespace name nor URI may be null"))); + errmsg("neither namespace name nor URI may be null"))); ns_name = TextDatumGetCString(ns_names_uris[i * 2]); ns_uri = TextDatumGetCString(ns_names_uris[i * 2 + 1]); if (xmlXPathRegisterNs(xpathctx, @@ -4495,7 +4495,7 @@ XmlTableGetValue(TableFuncScanState *state, int colnum, xmlChar *str; str = xmlNodeListGetString(xtCxt->doc, - xpathobj->nodesetval->nodeTab[0]->xmlChildrenNode, + xpathobj->nodesetval->nodeTab[0]->xmlChildrenNode, 1); if (str != NULL) @@ -4546,8 +4546,8 @@ XmlTableGetValue(TableFuncScanState *state, int colnum, for (i = 0; i < count; i++) { appendStringInfoText(&str, - xml_xmlnodetoxmltype(xpathobj->nodesetval->nodeTab[i], - xtCxt->xmlerrcxt)); + xml_xmlnodetoxmltype(xpathobj->nodesetval->nodeTab[i], + xtCxt->xmlerrcxt)); } cstr = str.data; } diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c index 5652c3abe07..d0e54b85352 100644 --- a/src/backend/utils/cache/inval.c +++ b/src/backend/utils/cache/inval.c @@ -236,7 +236,7 @@ AddInvalidationMessage(InvalidationChunk **listHdr, chunk = (InvalidationChunk *) MemoryContextAlloc(CurTransactionContext, offsetof(InvalidationChunk, msgs) + - FIRSTCHUNKSIZE * sizeof(SharedInvalidationMessage)); + FIRSTCHUNKSIZE * sizeof(SharedInvalidationMessage)); chunk->nitems = 0; chunk->maxitems = FIRSTCHUNKSIZE; chunk->next = *listHdr; @@ -464,7 +464,7 @@ ProcessInvalidationMessages(InvalidationListHeader *hdr, */ static void ProcessInvalidationMessagesMulti(InvalidationListHeader *hdr, 
- void (*func) (const SharedInvalidationMessage *msgs, int n)) + void (*func) (const SharedInvalidationMessage *msgs, int n)) { ProcessMessageListMulti(hdr->cclist, func(msgs, n)); ProcessMessageListMulti(hdr->rclist, func(msgs, n)); @@ -778,7 +778,7 @@ MakeSharedInvalidMessagesArray(const SharedInvalidationMessage *msgs, int n) * We're so close to EOXact that we now we're going to lose it anyhow. */ SharedInvalidMessagesArray = palloc(maxSharedInvalidMessagesArray - * sizeof(SharedInvalidationMessage)); + * sizeof(SharedInvalidationMessage)); } if ((numSharedInvalidMessagesArray + n) > maxSharedInvalidMessagesArray) @@ -788,7 +788,7 @@ MakeSharedInvalidMessagesArray(const SharedInvalidationMessage *msgs, int n) SharedInvalidMessagesArray = repalloc(SharedInvalidMessagesArray, maxSharedInvalidMessagesArray - * sizeof(SharedInvalidationMessage)); + * sizeof(SharedInvalidationMessage)); } /* diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index dfe5592a25a..ad8a82f1e3f 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -1180,7 +1180,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams, { /* otherwise, it should be a sibling of the plansource */ MemoryContextSetParent(plan->context, - MemoryContextGetParent(plansource->context)); + MemoryContextGetParent(plansource->context)); } /* Update generic_cost whenever we make a new generic plan */ plansource->generic_cost = cached_plan_cost(plan, false); @@ -1520,7 +1520,7 @@ AcquireExecutorLocks(List *stmt_list, bool acquire) * acquire a non-conflicting lock. */ if (list_member_int(plannedstmt->resultRelations, rt_index) || - list_member_int(plannedstmt->nonleafResultRelations, rt_index)) + list_member_int(plannedstmt->nonleafResultRelations, rt_index)) lockmode = RowExclusiveLock; else if ((rc = get_plan_rowmark(plannedstmt->rowMarks, rt_index)) != NULL && RowMarkRequiresRowShareLock(rc->markType)) diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 93bcac1e776..43238dd8cbd 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -627,7 +627,7 @@ RelationBuildTupleDesc(Relation relation) constr->num_check = relation->rd_rel->relchecks; constr->check = (ConstrCheck *) MemoryContextAllocZero(CacheMemoryContext, - constr->num_check * sizeof(ConstrCheck)); + constr->num_check * sizeof(ConstrCheck)); CheckConstraintFetch(relation); } else @@ -1446,7 +1446,7 @@ RelationInitPhysicalAddr(Relation relation) Form_pg_class physrel; phys_tuple = ScanPgRelation(RelationGetRelid(relation), - RelationGetRelid(relation) != ClassOidIndexId, + RelationGetRelid(relation) != ClassOidIndexId, true); if (!HeapTupleIsValid(phys_tuple)) elog(ERROR, "could not find pg_class entry for %u", @@ -2889,7 +2889,7 @@ RememberToFreeTupleDescAtEOX(TupleDesc td) Assert(EOXactTupleDescArrayLen > 0); EOXactTupleDescArray = (TupleDesc *) repalloc(EOXactTupleDescArray, - newlen * sizeof(TupleDesc)); + newlen * sizeof(TupleDesc)); EOXactTupleDescArrayLen = newlen; } @@ -3782,7 +3782,7 @@ RelationCacheInitializePhase3(void) Form_pg_class relp; htup = SearchSysCache1(RELOID, - ObjectIdGetDatum(RelationGetRelid(relation))); + ObjectIdGetDatum(RelationGetRelid(relation))); if (!HeapTupleIsValid(htup)) elog(FATAL, "cache lookup failed for relation %u", RelationGetRelid(relation)); @@ -4045,7 +4045,7 @@ AttrDefaultFetch(Relation relation) continue; if (attrdef[i].adbin != NULL) elog(WARNING, "multiple attrdef records 
found for attr %s of rel %s", - NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname), + NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname), RelationGetRelationName(relation)); else found++; @@ -4055,7 +4055,7 @@ AttrDefaultFetch(Relation relation) adrel->rd_att, &isnull); if (isnull) elog(WARNING, "null adbin for attr %s of rel %s", - NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname), + NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname), RelationGetRelationName(relation)); else { @@ -4947,19 +4947,19 @@ restart: if (attrnum != 0) { indexattrs = bms_add_member(indexattrs, - attrnum - FirstLowInvalidHeapAttributeNumber); + attrnum - FirstLowInvalidHeapAttributeNumber); if (isKey) uindexattrs = bms_add_member(uindexattrs, - attrnum - FirstLowInvalidHeapAttributeNumber); + attrnum - FirstLowInvalidHeapAttributeNumber); if (isPK) pkindexattrs = bms_add_member(pkindexattrs, - attrnum - FirstLowInvalidHeapAttributeNumber); + attrnum - FirstLowInvalidHeapAttributeNumber); if (isIDKey) idindexattrs = bms_add_member(idindexattrs, - attrnum - FirstLowInvalidHeapAttributeNumber); + attrnum - FirstLowInvalidHeapAttributeNumber); } } @@ -5790,7 +5790,7 @@ write_relcache_init_file(bool shared) (errcode_for_file_access(), errmsg("could not create relation-cache initialization file \"%s\": %m", tempfilename), - errdetail("Continuing anyway, but there's something wrong."))); + errdetail("Continuing anyway, but there's something wrong."))); return; } diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c index 6836c601e0e..f5394dc43d4 100644 --- a/src/backend/utils/cache/relmapper.c +++ b/src/backend/utils/cache/relmapper.c @@ -684,8 +684,8 @@ load_relmap_file(bool shared) if (!EQ_CRC32C(crc, map->crc)) ereport(FATAL, - (errmsg("relation mapping file \"%s\" contains incorrect checksum", - mapfilename))); + (errmsg("relation mapping file \"%s\" contains incorrect checksum", + mapfilename))); } /* diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c index 88e4ffb66d9..da5c8ea345b 100644 --- a/src/backend/utils/cache/ts_cache.c +++ b/src/backend/utils/cache/ts_cache.c @@ -334,7 +334,7 @@ lookup_ts_dictionary_cache(Oid dictId) entry->dictData = DatumGetPointer(OidFunctionCall1(template->tmplinit, - PointerGetDatum(dictoptions))); + PointerGetDatum(dictoptions))); MemoryContextSwitchTo(oldcontext); } diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c index 234c8e3aa96..918db0a8f2d 100644 --- a/src/backend/utils/error/elog.c +++ b/src/backend/utils/error/elog.c @@ -1876,7 +1876,7 @@ DebugFileOpen(void) 0666)) < 0) ereport(FATAL, (errcode_for_file_access(), - errmsg("could not open file \"%s\": %m", OutputFileName))); + errmsg("could not open file \"%s\": %m", OutputFileName))); istty = isatty(fd); close(fd); @@ -2079,7 +2079,7 @@ write_eventlog(int level, const char *line, int len) if (evtHandle == INVALID_HANDLE_VALUE) { evtHandle = RegisterEventSource(NULL, - event_source ? event_source : DEFAULT_EVENT_SOURCE); + event_source ? 
event_source : DEFAULT_EVENT_SOURCE); if (evtHandle == NULL) { evtHandle = INVALID_HANDLE_VALUE; diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c index 0783145a16e..1239f95ddcd 100644 --- a/src/backend/utils/fmgr/dfmgr.c +++ b/src/backend/utils/fmgr/dfmgr.c @@ -65,7 +65,7 @@ char *Dynamic_library_path; static void *internal_load_library(const char *libname); static void incompatible_module_error(const char *libname, - const Pg_magic_struct *module_magic_data) pg_attribute_noreturn(); + const Pg_magic_struct *module_magic_data) pg_attribute_noreturn(); static void internal_unload_library(const char *libname); static bool file_exists(const char *name); static char *expand_dynamic_library_name(const char *name); @@ -268,9 +268,9 @@ internal_load_library(const char *libname) free((char *) file_scanner); /* complain */ ereport(ERROR, - (errmsg("incompatible library \"%s\": missing magic block", - libname), - errhint("Extension libraries are required to use the PG_MODULE_MAGIC macro."))); + (errmsg("incompatible library \"%s\": missing magic block", + libname), + errhint("Extension libraries are required to use the PG_MODULE_MAGIC macro."))); } /* @@ -362,7 +362,7 @@ incompatible_module_error(const char *libname, if (details.len) appendStringInfoChar(&details, '\n'); appendStringInfo(&details, - _("Server has FLOAT4PASSBYVAL = %s, library has %s."), + _("Server has FLOAT4PASSBYVAL = %s, library has %s."), magic_data.float4byval ? "true" : "false", module_magic_data->float4byval ? "true" : "false"); } @@ -371,14 +371,14 @@ incompatible_module_error(const char *libname, if (details.len) appendStringInfoChar(&details, '\n'); appendStringInfo(&details, - _("Server has FLOAT8PASSBYVAL = %s, library has %s."), + _("Server has FLOAT8PASSBYVAL = %s, library has %s."), magic_data.float8byval ? "true" : "false", module_magic_data->float8byval ? 
"true" : "false"); } if (details.len == 0) appendStringInfoString(&details, - _("Magic block has unexpected length or padding difference.")); + _("Magic block has unexpected length or padding difference.")); ereport(ERROR, (errmsg("incompatible library \"%s\": magic block mismatch", diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c index fe268e7d44d..a7b07827e01 100644 --- a/src/backend/utils/fmgr/fmgr.c +++ b/src/backend/utils/fmgr/fmgr.c @@ -396,8 +396,8 @@ fetch_finfo_record(void *filehandle, const char *funcname) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), - errmsg("could not find function information for function \"%s\"", - funcname), + errmsg("could not find function information for function \"%s\"", + funcname), errhint("SQL-callable functions need an accompanying PG_FUNCTION_INFO_V1(funcname)."))); return NULL; /* silence compiler */ } @@ -631,7 +631,7 @@ fmgr_security_definer(PG_FUNCTION_ARGS) if (OidIsValid(fcache->userid)) SetUserIdAndSecContext(fcache->userid, - save_sec_context | SECURITY_LOCAL_USERID_CHANGE); + save_sec_context | SECURITY_LOCAL_USERID_CHANGE); if (fcache->proconfig) { diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c index 6324220426b..be47d411c5b 100644 --- a/src/backend/utils/fmgr/funcapi.c +++ b/src/backend/utils/fmgr/funcapi.c @@ -1369,7 +1369,7 @@ TypeGetTupleDesc(Oid typeoid, List *colaliases) if (list_length(colaliases) != 1) ereport(ERROR, (errcode(ERRCODE_DATATYPE_MISMATCH), - errmsg("number of aliases does not match number of columns"))); + errmsg("number of aliases does not match number of columns"))); /* OK, get the column alias */ attname = strVal(linitial(colaliases)); diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c index 111d63df2b6..ebd55da997e 100644 --- a/src/backend/utils/hash/dynahash.c +++ b/src/backend/utils/hash/dynahash.c @@ -733,7 +733,7 @@ hash_estimate_size(long num_entries, Size entrysize) size = add_size(size, mul_size(nDirEntries, sizeof(HASHSEGMENT))); /* segments */ size = add_size(size, mul_size(nSegments, - MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET)))); + MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET)))); /* elements --- allocated in groups of choose_nelem_alloc() entries */ elementAllocCnt = choose_nelem_alloc(entrysize); nElementAllocs = (num_entries - 1) / elementAllocCnt + 1; diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c index 1c75e6357ec..49a6afafe7f 100644 --- a/src/backend/utils/init/miscinit.c +++ b/src/backend/utils/init/miscinit.c @@ -987,7 +987,7 @@ CreateLockFile(const char *filename, bool amPostmaster, errmsg("could not remove old lock file \"%s\": %m", filename), errhint("The file seems accidentally left over, but " - "it could not be removed. Please remove the file " + "it could not be removed. 
Please remove the file " "by hand and try again."))); } @@ -1303,8 +1303,8 @@ RecheckDataDirLockFile(void) /* non-fatal, at least for now */ ereport(LOG, (errcode_for_file_access(), - errmsg("could not open file \"%s\": %m; continuing anyway", - DIRECTORY_LOCK_FILE))); + errmsg("could not open file \"%s\": %m; continuing anyway", + DIRECTORY_LOCK_FILE))); return true; } } diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c index 56b4a5b8c3e..eb6960d93fa 100644 --- a/src/backend/utils/init/postinit.c +++ b/src/backend/utils/init/postinit.c @@ -322,8 +322,8 @@ CheckMyDatabase(const char *name, bool am_superuser) if (!dbform->datallowconn) ereport(FATAL, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), - errmsg("database \"%s\" is not currently accepting connections", - name))); + errmsg("database \"%s\" is not currently accepting connections", + name))); /* * Check privilege to connect to the database. (The am_superuser test @@ -375,17 +375,17 @@ CheckMyDatabase(const char *name, bool am_superuser) if (pg_perm_setlocale(LC_COLLATE, collate) == NULL) ereport(FATAL, - (errmsg("database locale is incompatible with operating system"), - errdetail("The database was initialized with LC_COLLATE \"%s\", " - " which is not recognized by setlocale().", collate), - errhint("Recreate the database with another locale or install the missing locale."))); + (errmsg("database locale is incompatible with operating system"), + errdetail("The database was initialized with LC_COLLATE \"%s\", " + " which is not recognized by setlocale().", collate), + errhint("Recreate the database with another locale or install the missing locale."))); if (pg_perm_setlocale(LC_CTYPE, ctype) == NULL) ereport(FATAL, - (errmsg("database locale is incompatible with operating system"), - errdetail("The database was initialized with LC_CTYPE \"%s\", " - " which is not recognized by setlocale().", ctype), - errhint("Recreate the database with another locale or install the missing locale."))); + (errmsg("database locale is incompatible with operating system"), + errdetail("The database was initialized with LC_CTYPE \"%s\", " + " which is not recognized by setlocale().", ctype), + errhint("Recreate the database with another locale or install the missing locale."))); /* Make the locale settings visible as GUC variables, too */ SetConfigOption("lc_collate", collate, PGC_INTERNAL, PGC_S_OVERRIDE); @@ -757,7 +757,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username, else ereport(FATAL, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to connect during database shutdown"))); + errmsg("must be superuser to connect during database shutdown"))); } /* @@ -767,7 +767,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username, { ereport(FATAL, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to connect in binary upgrade mode"))); + errmsg("must be superuser to connect in binary upgrade mode"))); } /* @@ -949,7 +949,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username, ereport(FATAL, (errcode(ERRCODE_UNDEFINED_DATABASE), errmsg("database \"%s\" does not exist", dbname), - errdetail("It seems to have just been dropped or renamed."))); + errdetail("It seems to have just been dropped or renamed."))); } /* @@ -967,8 +967,8 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username, (errcode(ERRCODE_UNDEFINED_DATABASE), errmsg("database \"%s\" does not exist", dbname), - errdetail("The database subdirectory \"%s\" 
is missing.", - fullpath))); + errdetail("The database subdirectory \"%s\" is missing.", + fullpath))); else ereport(FATAL, (errcode_for_file_access(), diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_euc2004/utf8_and_euc2004.c b/src/backend/utils/mb/conversion_procs/utf8_and_euc2004/utf8_and_euc2004.c index 1e6f3b3f19c..b21167997ca 100644 --- a/src/backend/utils/mb/conversion_procs/utf8_and_euc2004/utf8_and_euc2004.c +++ b/src/backend/utils/mb/conversion_procs/utf8_and_euc2004/utf8_and_euc2004.c @@ -43,7 +43,7 @@ euc_jis_2004_to_utf8(PG_FUNCTION_ARGS) LocalToUtf(src, len, dest, &euc_jis_2004_to_unicode_tree, - LUmapEUC_JIS_2004_combined, lengthof(LUmapEUC_JIS_2004_combined), + LUmapEUC_JIS_2004_combined, lengthof(LUmapEUC_JIS_2004_combined), NULL, PG_EUC_JIS_2004); @@ -61,7 +61,7 @@ utf8_to_euc_jis_2004(PG_FUNCTION_ARGS) UtfToLocal(src, len, dest, &euc_jis_2004_from_unicode_tree, - ULmapEUC_JIS_2004_combined, lengthof(ULmapEUC_JIS_2004_combined), + ULmapEUC_JIS_2004_combined, lengthof(ULmapEUC_JIS_2004_combined), NULL, PG_EUC_JIS_2004); diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_sjis2004/utf8_and_sjis2004.c b/src/backend/utils/mb/conversion_procs/utf8_and_sjis2004/utf8_and_sjis2004.c index f8eb7252018..adc1c9e93c4 100644 --- a/src/backend/utils/mb/conversion_procs/utf8_and_sjis2004/utf8_and_sjis2004.c +++ b/src/backend/utils/mb/conversion_procs/utf8_and_sjis2004/utf8_and_sjis2004.c @@ -43,7 +43,7 @@ shift_jis_2004_to_utf8(PG_FUNCTION_ARGS) LocalToUtf(src, len, dest, &shift_jis_2004_to_unicode_tree, - LUmapSHIFT_JIS_2004_combined, lengthof(LUmapSHIFT_JIS_2004_combined), + LUmapSHIFT_JIS_2004_combined, lengthof(LUmapSHIFT_JIS_2004_combined), NULL, PG_SHIFT_JIS_2004); @@ -61,7 +61,7 @@ utf8_to_shift_jis_2004(PG_FUNCTION_ARGS) UtfToLocal(src, len, dest, &shift_jis_2004_from_unicode_tree, - ULmapSHIFT_JIS_2004_combined, lengthof(ULmapSHIFT_JIS_2004_combined), + ULmapSHIFT_JIS_2004_combined, lengthof(ULmapSHIFT_JIS_2004_combined), NULL, PG_SHIFT_JIS_2004); diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c index 95644e3468a..ca7f129ebe1 100644 --- a/src/backend/utils/mb/mbutils.c +++ b/src/backend/utils/mb/mbutils.c @@ -374,8 +374,8 @@ pg_do_encoding_conversion(unsigned char *src, int len, ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("out of memory"), - errdetail("String of %d bytes is too long for encoding conversion.", - len))); + errdetail("String of %d bytes is too long for encoding conversion.", + len))); result = palloc(len * MAX_CONVERSION_GROWTH + 1); @@ -399,7 +399,7 @@ pg_convert_to(PG_FUNCTION_ARGS) Datum string = PG_GETARG_DATUM(0); Datum dest_encoding_name = PG_GETARG_DATUM(1); Datum src_encoding_name = DirectFunctionCall1(namein, - CStringGetDatum(DatabaseEncoding->name)); + CStringGetDatum(DatabaseEncoding->name)); Datum result; /* @@ -424,7 +424,7 @@ pg_convert_from(PG_FUNCTION_ARGS) Datum string = PG_GETARG_DATUM(0); Datum src_encoding_name = PG_GETARG_DATUM(1); Datum dest_encoding_name = DirectFunctionCall1(namein, - CStringGetDatum(DatabaseEncoding->name)); + CStringGetDatum(DatabaseEncoding->name)); Datum result; result = DirectFunctionCall3(pg_convert, string, @@ -606,9 +606,9 @@ pg_any_to_server(const char *s, int len, int encoding) if (s[i] == '\0' || IS_HIGHBIT_SET(s[i])) ereport(ERROR, (errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE), - errmsg("invalid byte value for encoding \"%s\": 0x%02x", - pg_enc2name_tbl[PG_SQL_ASCII].name, - (unsigned char) s[i]))); + errmsg("invalid byte value for encoding 
\"%s\": 0x%02x", + pg_enc2name_tbl[PG_SQL_ASCII].name, + (unsigned char) s[i]))); } } return (char *) s; @@ -707,8 +707,8 @@ perform_default_encoding_conversion(const char *src, int len, ereport(ERROR, (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED), errmsg("out of memory"), - errdetail("String of %d bytes is too long for encoding conversion.", - len))); + errdetail("String of %d bytes is too long for encoding conversion.", + len))); result = palloc(len * MAX_CONVERSION_GROWTH + 1); diff --git a/src/backend/utils/mb/wchar.c b/src/backend/utils/mb/wchar.c index b71b5e016a3..765815a1998 100644 --- a/src/backend/utils/mb/wchar.c +++ b/src/backend/utils/mb/wchar.c @@ -1785,8 +1785,8 @@ int pg_encoding_mblen(int encoding, const char *mbstr) { return (PG_VALID_ENCODING(encoding) ? - ((*pg_wchar_table[encoding].mblen) ((const unsigned char *) mbstr)) : - ((*pg_wchar_table[PG_SQL_ASCII].mblen) ((const unsigned char *) mbstr))); + ((*pg_wchar_table[encoding].mblen) ((const unsigned char *) mbstr)) : + ((*pg_wchar_table[PG_SQL_ASCII].mblen) ((const unsigned char *) mbstr))); } /* @@ -1796,8 +1796,8 @@ int pg_encoding_dsplen(int encoding, const char *mbstr) { return (PG_VALID_ENCODING(encoding) ? - ((*pg_wchar_table[encoding].dsplen) ((const unsigned char *) mbstr)) : - ((*pg_wchar_table[PG_SQL_ASCII].dsplen) ((const unsigned char *) mbstr))); + ((*pg_wchar_table[encoding].dsplen) ((const unsigned char *) mbstr)) : + ((*pg_wchar_table[PG_SQL_ASCII].dsplen) ((const unsigned char *) mbstr))); } /* diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index f96a556a5e2..82e54c084b8 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -972,7 +972,7 @@ static struct config_bool ConfigureNamesBool[] = {"fsync", PGC_SIGHUP, WAL_SETTINGS, gettext_noop("Forces synchronization of updates to disk."), gettext_noop("The server will use the fsync() system call in several places to make " - "sure that updates are physically written to disk. This insures " + "sure that updates are physically written to disk. This insures " "that a database cluster will recover to a consistent state after " "an operating system or hardware crash.") }, @@ -984,10 +984,10 @@ static struct config_bool ConfigureNamesBool[] = {"ignore_checksum_failure", PGC_SUSET, DEVELOPER_OPTIONS, gettext_noop("Continues processing after a checksum failure."), gettext_noop("Detection of a checksum failure normally causes PostgreSQL to " - "report an error, aborting the current transaction. Setting " + "report an error, aborting the current transaction. Setting " "ignore_checksum_failure to true causes the system to ignore the failure " - "(but still report a warning), and continue processing. This " - "behavior could cause crashes or other serious problems. Only " + "(but still report a warning), and continue processing. This " + "behavior could cause crashes or other serious problems. Only " "has an effect if checksums are enabled."), GUC_NOT_IN_SAMPLE }, @@ -999,7 +999,7 @@ static struct config_bool ConfigureNamesBool[] = {"zero_damaged_pages", PGC_SUSET, DEVELOPER_OPTIONS, gettext_noop("Continues processing past damaged page headers."), gettext_noop("Detection of a damaged page header normally causes PostgreSQL to " - "report an error, aborting the current transaction. Setting " + "report an error, aborting the current transaction. Setting " "zero_damaged_pages to true causes the system to instead report a " "warning, zero out the damaged page, and continue processing. 
This " "behavior will destroy data, namely all the rows on the damaged page."), @@ -1014,7 +1014,7 @@ static struct config_bool ConfigureNamesBool[] = gettext_noop("Writes full pages to WAL when first modified after a checkpoint."), gettext_noop("A page write in process during an operating system crash might be " "only partially written to disk. During recovery, the row changes " - "stored in WAL are not enough to recover. This option writes " + "stored in WAL are not enough to recover. This option writes " "pages when first modified after a checkpoint to WAL so full recovery " "is possible.") }, @@ -1330,8 +1330,8 @@ static struct config_bool ConfigureNamesBool[] = gettext_noop("Logs the host name in the connection logs."), gettext_noop("By default, connection logs only show the IP address " "of the connecting host. If you want them to show the host name you " - "can turn this on, but depending on your host name resolution " - "setup it might impose a non-negligible performance penalty.") + "can turn this on, but depending on your host name resolution " + "setup it might impose a non-negligible performance penalty.") }, &log_hostname, false, @@ -1341,9 +1341,9 @@ static struct config_bool ConfigureNamesBool[] = {"transform_null_equals", PGC_USERSET, COMPAT_OPTIONS_CLIENT, gettext_noop("Treats \"expr=NULL\" as \"expr IS NULL\"."), gettext_noop("When turned on, expressions of the form expr = NULL " - "(or NULL = expr) are treated as expr IS NULL, that is, they " - "return true if expr evaluates to the null value, and false " - "otherwise. The correct behavior of expr = NULL is to always " + "(or NULL = expr) are treated as expr IS NULL, that is, they " + "return true if expr evaluates to the null value, and false " + "otherwise. The correct behavior of expr = NULL is to always " "return null (unknown).") }, &Transform_null_equals, @@ -1608,7 +1608,7 @@ static struct config_bool ConfigureNamesBool[] = {"lo_compat_privileges", PGC_SUSET, COMPAT_OPTIONS_PREVIOUS, gettext_noop("Enables backward compatibility mode for privilege checks on large objects."), gettext_noop("Skips privilege checks when reading or modifying large objects, " - "for compatibility with PostgreSQL releases prior to 9.0.") + "for compatibility with PostgreSQL releases prior to 9.0.") }, &lo_compat_privileges, false, @@ -1700,7 +1700,7 @@ static struct config_int ConfigureNamesInt[] = {"default_statistics_target", PGC_USERSET, QUERY_TUNING_OTHER, gettext_noop("Sets the default statistics target."), gettext_noop("This applies to table columns that have not had a " - "column-specific target set via ALTER TABLE SET STATISTICS.") + "column-specific target set via ALTER TABLE SET STATISTICS.") }, &default_statistics_target, 100, 1, 10000, @@ -1711,7 +1711,7 @@ static struct config_int ConfigureNamesInt[] = gettext_noop("Sets the FROM-list size beyond which subqueries " "are not collapsed."), gettext_noop("The planner will merge subqueries into upper " - "queries if the resulting FROM list would have no more than " + "queries if the resulting FROM list would have no more than " "this many items.") }, &from_collapse_limit, @@ -2177,7 +2177,7 @@ static struct config_int ConfigureNamesInt[] = {"max_locks_per_transaction", PGC_POSTMASTER, LOCK_MANAGEMENT, gettext_noop("Sets the maximum number of locks per transaction."), gettext_noop("The shared lock table is sized on the assumption that " - "at most max_locks_per_transaction * max_connections distinct " + "at most max_locks_per_transaction * max_connections distinct " "objects will 
need to be locked at any one time.") }, &max_locks_per_xact, @@ -2212,7 +2212,7 @@ static struct config_int ConfigureNamesInt[] = {"max_pred_locks_per_page", PGC_SIGHUP, LOCK_MANAGEMENT, gettext_noop("Sets the maximum number of predicate-locked tuples per page."), gettext_noop("If more than this number of tuples on the same page are locked " - "by a connection, those locks are replaced by a page level lock.") + "by a connection, those locks are replaced by a page level lock.") }, &max_predicate_locks_per_page, 2, 0, INT_MAX, @@ -2290,7 +2290,7 @@ static struct config_int ConfigureNamesInt[] = gettext_noop("Enables warnings if checkpoint segments are filled more " "frequently than this."), gettext_noop("Write a message to the server log if checkpoints " - "caused by the filling of checkpoint segment files happens more " + "caused by the filling of checkpoint segment files happens more " "frequently than this number of seconds. Zero turns off the warning."), GUC_UNIT_S }, @@ -2403,7 +2403,7 @@ static struct config_int ConfigureNamesInt[] = {"extra_float_digits", PGC_USERSET, CLIENT_CONN_LOCALE, gettext_noop("Sets the number of digits displayed for floating-point values."), gettext_noop("This affects real, double precision, and geometric data types. " - "The parameter value is added to the standard number of digits " + "The parameter value is added to the standard number of digits " "(FLT_DIG or DBL_DIG as appropriate).") }, &extra_float_digits, @@ -2947,7 +2947,7 @@ static struct config_real ConfigureNamesReal[] = { {"parallel_tuple_cost", PGC_USERSET, QUERY_TUNING_COST, gettext_noop("Sets the planner's estimate of the cost of " - "passing each tuple (row) from worker to master backend."), + "passing each tuple (row) from worker to master backend."), NULL }, &parallel_tuple_cost, @@ -3899,7 +3899,7 @@ static struct config_enum ConfigureNamesEnum[] = {"password_encryption", PGC_USERSET, CONN_AUTH_SECURITY, gettext_noop("Encrypt passwords."), gettext_noop("When a password is specified in CREATE USER or " - "ALTER USER without writing either ENCRYPTED or UNENCRYPTED, " + "ALTER USER without writing either ENCRYPTED or UNENCRYPTED, " "this parameter determines whether the password is to be encrypted.") }, &Password_encryption, @@ -5736,8 +5736,8 @@ parse_and_validate_value(struct config_generic *record, { ereport(elevel, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("parameter \"%s\" requires a Boolean value", - name))); + errmsg("parameter \"%s\" requires a Boolean value", + name))); return false; } @@ -5756,8 +5756,8 @@ parse_and_validate_value(struct config_generic *record, { ereport(elevel, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid value for parameter \"%s\": \"%s\"", - name, value), + errmsg("invalid value for parameter \"%s\": \"%s\"", + name, value), hintmsg ? errhint("%s", _(hintmsg)) : 0)); return false; } @@ -5785,8 +5785,8 @@ parse_and_validate_value(struct config_generic *record, { ereport(elevel, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("parameter \"%s\" requires a numeric value", - name))); + errmsg("parameter \"%s\" requires a numeric value", + name))); return false; } @@ -5849,8 +5849,8 @@ parse_and_validate_value(struct config_generic *record, ereport(elevel, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("invalid value for parameter \"%s\": \"%s\"", - name, value), + errmsg("invalid value for parameter \"%s\": \"%s\"", + name, value), hintmsg ?
errhint("%s", _(hintmsg)) : 0)); if (hintmsg) @@ -5947,14 +5947,14 @@ set_config_option(const char *name, const char *value, if (IsInParallelMode() && changeVal && action != GUC_ACTION_SAVE) ereport(elevel, (errcode(ERRCODE_INVALID_TRANSACTION_STATE), - errmsg("cannot set parameters during a parallel operation"))); + errmsg("cannot set parameters during a parallel operation"))); record = find_option(name, true, elevel); if (record == NULL) { ereport(elevel, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("unrecognized configuration parameter \"%s\"", name))); + errmsg("unrecognized configuration parameter \"%s\"", name))); return 0; } @@ -6736,7 +6736,7 @@ GetConfigOption(const char *name, bool missing_ok, bool restrict_superuser) case PGC_ENUM: return config_enum_lookup_by_value((struct config_enum *) record, - *((struct config_enum *) record)->variable); + *((struct config_enum *) record)->variable); } return NULL; } @@ -6758,7 +6758,7 @@ GetConfigOptionResetString(const char *name) if (record == NULL) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("unrecognized configuration parameter \"%s\"", name))); + errmsg("unrecognized configuration parameter \"%s\"", name))); if ((record->flags & GUC_SUPERUSER_ONLY) && !is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_SETTINGS)) ereport(ERROR, @@ -6786,7 +6786,7 @@ GetConfigOptionResetString(const char *name) case PGC_ENUM: return config_enum_lookup_by_value((struct config_enum *) record, - ((struct config_enum *) record)->reset_val); + ((struct config_enum *) record)->reset_val); } return NULL; } @@ -7081,7 +7081,7 @@ AlterSystemSetConfigFile(AlterSystemStmt *altersysstmt) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - (errmsg("must be superuser to execute ALTER SYSTEM command")))); + (errmsg("must be superuser to execute ALTER SYSTEM command")))); /* * Extract statement arguments @@ -7293,7 +7293,7 @@ ExecSetVariableStmt(VariableSetStmt *stmt, bool isTopLevel) if (IsInParallelMode()) ereport(ERROR, (errcode(ERRCODE_INVALID_TRANSACTION_STATE), - errmsg("cannot set parameters during a parallel operation"))); + errmsg("cannot set parameters during a parallel operation"))); switch (stmt->kind) { @@ -7688,7 +7688,7 @@ reapply_stacked_values(struct config_generic *variable, case GUC_SET_LOCAL: /* first, apply the masked value as SET */ (void) set_config_option(name, stack->masked.val.stringval, - stack->masked_scontext, PGC_S_SESSION, + stack->masked_scontext, PGC_S_SESSION, GUC_ACTION_SET, true, WARNING, false); /* then apply the current value as LOCAL */ @@ -8048,7 +8048,7 @@ GetConfigOptionByName(const char *name, const char **varname, bool missing_ok) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("unrecognized configuration parameter \"%s\"", name))); + errmsg("unrecognized configuration parameter \"%s\"", name))); } if ((record->flags & GUC_SUPERUSER_ONLY) && @@ -8278,11 +8278,11 @@ GetConfigOptionByNum(int varnum, const char **values, bool *noshow) /* boot_val */ values[12] = pstrdup(config_enum_lookup_by_value(lconf, - lconf->boot_val)); + lconf->boot_val)); /* reset_val */ values[13] = pstrdup(config_enum_lookup_by_value(lconf, - lconf->reset_val)); + lconf->reset_val)); } break; @@ -9212,7 +9212,7 @@ serialize_variable(char **destptr, Size *maxbytes, struct config_enum *conf = (struct config_enum *) gconf; do_serialize(destptr, maxbytes, "%s", - config_enum_lookup_by_value(conf, *conf->variable)); + config_enum_lookup_by_value(conf, *conf->variable)); } break; } diff --git 
a/src/backend/utils/misc/help_config.c b/src/backend/utils/misc/help_config.c index 7bc8f92f6df..08d563d1ae6 100644 --- a/src/backend/utils/misc/help_config.c +++ b/src/backend/utils/misc/help_config.c @@ -124,7 +124,7 @@ printMixedStruct(mixedStruct *structToPrint) case PGC_ENUM: printf("ENUM\t%s\t\t\t", config_enum_lookup_by_value(&structToPrint->_enum, - structToPrint->_enum.boot_val)); + structToPrint->_enum.boot_val)); break; default: diff --git a/src/backend/utils/misc/pg_controldata.c b/src/backend/utils/misc/pg_controldata.c index 56ba301e5a1..0dbfe7f952d 100644 --- a/src/backend/utils/misc/pg_controldata.c +++ b/src/backend/utils/misc/pg_controldata.c @@ -167,8 +167,8 @@ pg_control_checkpoint(PG_FUNCTION_ARGS) nulls[6] = false; values[7] = CStringGetTextDatum(psprintf("%u:%u", - ControlFile->checkPointCopy.nextXidEpoch, - ControlFile->checkPointCopy.nextXid)); + ControlFile->checkPointCopy.nextXidEpoch, + ControlFile->checkPointCopy.nextXid)); nulls[7] = false; values[8] = ObjectIdGetDatum(ControlFile->checkPointCopy.nextOid); @@ -202,7 +202,7 @@ pg_control_checkpoint(PG_FUNCTION_ARGS) nulls[17] = false; values[18] = TimestampTzGetDatum( - time_t_to_timestamptz(ControlFile->checkPointCopy.time)); + time_t_to_timestamptz(ControlFile->checkPointCopy.time)); nulls[18] = false; htup = heap_form_tuple(tupdesc, values, nulls); diff --git a/src/backend/utils/misc/pg_rusage.c b/src/backend/utils/misc/pg_rusage.c index 98fa7ea9a8a..df643d798cb 100644 --- a/src/backend/utils/misc/pg_rusage.c +++ b/src/backend/utils/misc/pg_rusage.c @@ -63,9 +63,9 @@ pg_rusage_show(const PGRUsage *ru0) snprintf(result, sizeof(result), _("CPU: user: %d.%02d s, system: %d.%02d s, elapsed: %d.%02d s"), (int) (ru1.ru.ru_utime.tv_sec - ru0->ru.ru_utime.tv_sec), - (int) (ru1.ru.ru_utime.tv_usec - ru0->ru.ru_utime.tv_usec) / 10000, + (int) (ru1.ru.ru_utime.tv_usec - ru0->ru.ru_utime.tv_usec) / 10000, (int) (ru1.ru.ru_stime.tv_sec - ru0->ru.ru_stime.tv_sec), - (int) (ru1.ru.ru_stime.tv_usec - ru0->ru.ru_stime.tv_usec) / 10000, + (int) (ru1.ru.ru_stime.tv_usec - ru0->ru.ru_stime.tv_usec) / 10000, (int) (ru1.tv.tv_sec - ru0->tv.tv_sec), (int) (ru1.tv.tv_usec - ru0->tv.tv_usec) / 10000); diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c index 51668d88fec..7033042e2d8 100644 --- a/src/backend/utils/mmgr/aset.c +++ b/src/backend/utils/mmgr/aset.c @@ -1181,7 +1181,7 @@ AllocSetStats(MemoryContext context, int level, bool print, for (i = 0; i < level; i++) fprintf(stderr, " "); fprintf(stderr, - "%s: %zu total in %zd blocks; %zu free (%zd chunks); %zu used\n", + "%s: %zu total in %zd blocks; %zu free (%zd chunks); %zu used\n", set->header.name, totalspace, nblocks, freespace, freechunks, totalspace - freespace); } diff --git a/src/backend/utils/mmgr/dsa.c b/src/backend/utils/mmgr/dsa.c index 7852fde300d..b3327f676b1 100644 --- a/src/backend/utils/mmgr/dsa.c +++ b/src/backend/utils/mmgr/dsa.c @@ -1080,7 +1080,7 @@ dsa_dump(dsa_area *area) dsa_segment_index segment_index; fprintf(stderr, - " segment bin %zu (at least %d contiguous pages free):\n", + " segment bin %zu (at least %d contiguous pages free):\n", i, 1 << (i - 1)); segment_index = area->control->segment_bins[i]; while (segment_index != DSA_SEGMENT_INDEX_NONE) @@ -1120,7 +1120,7 @@ dsa_dump(dsa_area *area) fprintf(stderr, " pool for large object spans:\n"); else fprintf(stderr, - " pool for size class %zu (object size %hu bytes):\n", + " pool for size class %zu (object size %hu bytes):\n", i, dsa_size_classes[i]); for (j = 0; j < 
DSA_FULLNESS_CLASSES; ++j) { @@ -1734,7 +1734,7 @@ get_segment_by_index(dsa_area *area, dsa_segment_index index) /* It's an error to try to access an unused slot. */ if (handle == DSM_HANDLE_INVALID) elog(ERROR, - "dsa_area could not attach to a segment that has been freed"); + "dsa_area could not attach to a segment that has been freed"); segment = dsm_attach(handle); if (segment == NULL) diff --git a/src/backend/utils/mmgr/freepage.c b/src/backend/utils/mmgr/freepage.c index 2cd758178c0..7566a669708 100644 --- a/src/backend/utils/mmgr/freepage.c +++ b/src/backend/utils/mmgr/freepage.c @@ -135,7 +135,7 @@ static FreePageBtree *FreePageBtreeFindRightSibling(char *base, static Size FreePageBtreeFirstKey(FreePageBtree *btp); static FreePageBtree *FreePageBtreeGetRecycled(FreePageManager *fpm); static void FreePageBtreeInsertInternal(char *base, FreePageBtree *btp, - Size index, Size first_page, FreePageBtree *child); + Size index, Size first_page, FreePageBtree *child); static void FreePageBtreeInsertLeaf(FreePageBtree *btp, Size index, Size first_page, Size npages); static void FreePageBtreeRecycle(FreePageManager *fpm, Size pageno); @@ -1269,7 +1269,7 @@ FreePageManagerDumpBtree(FreePageManager *fpm, FreePageBtree *btp, if (btp->hdr.magic == FREE_PAGE_INTERNAL_MAGIC) appendStringInfo(buf, " %zu->%zu", btp->u.internal_key[index].first_page, - btp->u.internal_key[index].child.relptr_off / FPM_PAGE_SIZE); + btp->u.internal_key[index].child.relptr_off / FPM_PAGE_SIZE); else appendStringInfo(buf, " %zu(%zu)", btp->u.leaf_key[index].first_page, @@ -1359,7 +1359,7 @@ FreePageManagerGetInternal(FreePageManager *fpm, Size npages, Size *first_page) do { if (candidate->npages >= npages && (victim == NULL || - victim->npages > candidate->npages)) + victim->npages > candidate->npages)) { victim = candidate; if (victim->npages == npages) diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c index 6668bf135e9..5d173d7e60b 100644 --- a/src/backend/utils/mmgr/mcxt.c +++ b/src/backend/utils/mmgr/mcxt.c @@ -456,7 +456,7 @@ MemoryContextStatsDetail(MemoryContext context, int max_children) MemoryContextStatsInternal(context, 0, true, max_children, &grand_totals); fprintf(stderr, - "Grand total: %zu bytes in %zd blocks; %zu free (%zd chunks); %zu used\n", + "Grand total: %zu bytes in %zd blocks; %zu free (%zd chunks); %zu used\n", grand_totals.totalspace, grand_totals.nblocks, grand_totals.freespace, grand_totals.freechunks, grand_totals.totalspace - grand_totals.freespace); diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c index e1628f73795..5ebb6fb11ab 100644 --- a/src/backend/utils/sort/logtape.c +++ b/src/backend/utils/sort/logtape.c @@ -357,7 +357,7 @@ ltsReleaseBlock(LogicalTapeSet *lts, long blocknum) { lts->freeBlocksLen *= 2; lts->freeBlocks = (long *) repalloc(lts->freeBlocks, - lts->freeBlocksLen * sizeof(long)); + lts->freeBlocksLen * sizeof(long)); } /* diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index 5614d970bd6..a8a115c6b09 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -726,7 +726,7 @@ tuplesort_begin_common(int workMem, bool randomAccess) * see comments in grow_memtuples(). 
*/ state->memtupsize = Max(1024, - ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1); + ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1); state->growmemtuples = true; state->slabAllocatorUsed = false; @@ -1989,7 +1989,7 @@ tuplesort_gettuple_common(Tuplesortstate *state, bool forward, */ nmoved = LogicalTapeBackspace(state->tapeset, state->result_tape, - tuplen + 2 * sizeof(unsigned int)); + tuplen + 2 * sizeof(unsigned int)); if (nmoved == tuplen + sizeof(unsigned int)) { /* @@ -3971,7 +3971,7 @@ copytup_cluster(Tuplesortstate *state, SortTuple *stup, void *tup) tuple = (HeapTuple) mtup->tuple; mtup->datum1 = heap_getattr(tuple, - state->indexInfo->ii_KeyAttrNumbers[0], + state->indexInfo->ii_KeyAttrNumbers[0], state->tupDesc, &mtup->isnull1); } @@ -4143,7 +4143,7 @@ comparetup_index_btree(const SortTuple *a, const SortTuple *b, key_desc ? errdetail("Key %s is duplicated.", key_desc) : errdetail("Duplicate keys exist."), errtableconstraint(state->heapRel, - RelationGetRelationName(state->indexRel)))); + RelationGetRelationName(state->indexRel)))); } /* @@ -4348,7 +4348,7 @@ comparetup_datum(const SortTuple *a, const SortTuple *b, Tuplesortstate *state) if (state->sortKeys->abbrev_converter) compare = ApplySortAbbrevFullComparator(PointerGetDatum(a->tuple), a->isnull1, - PointerGetDatum(b->tuple), b->isnull1, + PointerGetDatum(b->tuple), b->isnull1, state->sortKeys); return compare; diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c index b3f6be74573..98c006b663e 100644 --- a/src/backend/utils/sort/tuplestore.c +++ b/src/backend/utils/sort/tuplestore.c @@ -866,7 +866,7 @@ tuplestore_puttuple_common(Tuplestorestate *state, void *tuple) SEEK_SET) != 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not seek in tuplestore temporary file: %m"))); + errmsg("could not seek in tuplestore temporary file: %m"))); state->status = TSS_WRITEFILE; /* @@ -1051,7 +1051,7 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward, SEEK_CUR) != 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not seek in tuplestore temporary file: %m"))); + errmsg("could not seek in tuplestore temporary file: %m"))); tup = READTUP(state, tuplen); return tup; @@ -1253,7 +1253,7 @@ tuplestore_rescan(Tuplestorestate *state) if (BufFileSeek(state->myfile, 0, 0L, SEEK_SET) != 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not seek in tuplestore temporary file: %m"))); + errmsg("could not seek in tuplestore temporary file: %m"))); break; default: elog(ERROR, "invalid tuplestore state"); @@ -1474,7 +1474,7 @@ getlen(Tuplestorestate *state, bool eofOK) if (nbytes != 0 || !eofOK) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read from tuplestore temporary file: %m"))); + errmsg("could not read from tuplestore temporary file: %m"))); return 0; } @@ -1526,7 +1526,7 @@ writetup_heap(Tuplestorestate *state, void *tup) sizeof(tuplen)) != sizeof(tuplen)) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not write to tuplestore temporary file: %m"))); + errmsg("could not write to tuplestore temporary file: %m"))); FREEMEM(state, GetMemoryChunkSpace(tuple)); heap_free_minimal_tuple(tuple); @@ -1547,12 +1547,12 @@ readtup_heap(Tuplestorestate *state, unsigned int len) tupbodylen) != (size_t) tupbodylen) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read from tuplestore temporary file: %m"))); + errmsg("could not read from tuplestore temporary file: %m"))); if (state->backward) /* need trailing length word? 
*/ if (BufFileRead(state->myfile, (void *) &tuplen, sizeof(tuplen)) != sizeof(tuplen)) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not read from tuplestore temporary file: %m"))); + errmsg("could not read from tuplestore temporary file: %m"))); return (void *) tuple; } diff --git a/src/backend/utils/time/combocid.c b/src/backend/utils/time/combocid.c index baff998641a..c7e4331efbd 100644 --- a/src/backend/utils/time/combocid.c +++ b/src/backend/utils/time/combocid.c @@ -129,7 +129,7 @@ HeapTupleHeaderGetCmax(HeapTupleHeader tup) * things too much. */ Assert(CritSectionCount > 0 || - TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(tup))); + TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(tup))); if (tup->t_infomask & HEAP_COMBOCID) return GetRealCmax(cid); diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c index 362f5896251..6369be78a31 100644 --- a/src/backend/utils/time/snapmgr.c +++ b/src/backend/utils/time/snapmgr.c @@ -416,7 +416,7 @@ GetOldestSnapshot(void) if (!pairingheap_is_empty(&RegisteredSnapshots)) { OldestRegisteredSnapshot = pairingheap_container(SnapshotData, ph_node, - pairingheap_first(&RegisteredSnapshots)); + pairingheap_first(&RegisteredSnapshots)); RegisteredLSN = OldestRegisteredSnapshot->lsn; } @@ -619,14 +619,14 @@ SetTransactionSnapshot(Snapshot sourcesnap, VirtualTransactionId *sourcevxid, ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("could not import the requested snapshot"), - errdetail("The source transaction is not running anymore."))); + errdetail("The source transaction is not running anymore."))); } else if (!ProcArrayInstallImportedXmin(CurrentSnapshot->xmin, sourcevxid)) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("could not import the requested snapshot"), - errdetail("The source process with pid %d is not running anymore.", - sourcepid))); + errdetail("The source process with pid %d is not running anymore.", + sourcepid))); /* * In transaction-snapshot mode, the first snapshot must live until end of @@ -989,7 +989,7 @@ SnapshotResetXmin(void) } minSnapshot = pairingheap_container(SnapshotData, ph_node, - pairingheap_first(&RegisteredSnapshots)); + pairingheap_first(&RegisteredSnapshots)); if (TransactionIdPrecedes(MyPgXact->xmin, minSnapshot->xmin)) MyPgXact->xmin = minSnapshot->xmin; @@ -1211,7 +1211,7 @@ ExportSnapshot(Snapshot snapshot) * inside the transaction from 1. 
*/ snprintf(path, sizeof(path), SNAPSHOT_EXPORT_DIR "/%08X-%08X-%d", - MyProc->backendId, MyProc->lxid, list_length(exportedSnapshots) + 1); + MyProc->backendId, MyProc->lxid, list_length(exportedSnapshots) + 1); /* * Copy the snapshot into TopTransactionContext, add it to the @@ -1453,7 +1453,7 @@ ImportSnapshot(const char *idstr) IsSubTransaction()) ereport(ERROR, (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION), - errmsg("SET TRANSACTION SNAPSHOT must be called before any query"))); + errmsg("SET TRANSACTION SNAPSHOT must be called before any query"))); /* * If we are in read committed mode then the next query would execute with @@ -1589,7 +1589,7 @@ ImportSnapshot(const char *idstr) if (src_dbid != MyDatabaseId) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("cannot import a snapshot from a different database"))); + errmsg("cannot import a snapshot from a different database"))); /* OK, install the snapshot */ SetTransactionSnapshot(&snapshot, &src_vxid, src_pid, NULL); @@ -1884,7 +1884,7 @@ MaintainOldSnapshotTimeMapping(TimestampTz whenTaken, TransactionId xmin) if (whenTaken < 0) { elog(DEBUG1, - "MaintainOldSnapshotTimeMapping called with negative whenTaken = %ld", + "MaintainOldSnapshotTimeMapping called with negative whenTaken = %ld", (long) whenTaken); return; }
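Every hunk in this section makes the same mechanical change: a statement-continuation line inside parentheses is re-indented so that it starts just to the right of the nearest unclosed left parenthesis, and nothing else about the code changes. As a minimal sketch of the resulting layout (illustrative only, not a line taken from the patch; spaces are used here for clarity, and the identifiers are the ones visible in the guc.c hunks above):

/* Illustrative sketch: continuation lines start just right of the preceding unclosed '(' */
ereport(elevel,
        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
         errmsg("invalid value for parameter \"%s\": \"%s\"",
                name, value),
         hintmsg ? errhint("%s", _(hintmsg)) : 0));

In every hunk shown here the removed and added lines differ only in their leading whitespace.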