| author | Bruce Momjian <bruce@momjian.us> | 2017-05-17 16:31:56 -0400 |
|---|---|---|
| committer | Bruce Momjian <bruce@momjian.us> | 2017-05-17 16:31:56 -0400 |
| commit | a6fd7b7a5f7bf3a8aa3f3d076cf09d922c1c6dd2 (patch) | |
| tree | d10454411c05d459abe06df161ab3c1156c5f477 /src/backend/utils | |
| parent | 8a943324780259757c77c56cfc597347d1150cdb (diff) | |
Post-PG 10 beta1 pgindent run
perltidy run not included.
Diffstat (limited to 'src/backend/utils')
31 files changed, 476 insertions, 440 deletions
diff --git a/src/backend/utils/adt/cash.c b/src/backend/utils/adt/cash.c index 5afadb65d11..5cb086e50e6 100644 --- a/src/backend/utils/adt/cash.c +++ b/src/backend/utils/adt/cash.c @@ -203,7 +203,7 @@ cash_in(PG_FUNCTION_ARGS) /* than the required number of decimal places */ if (isdigit((unsigned char) *s) && (!seen_dot || dec < fpoint)) { - Cash newvalue = (value * 10) - (*s - '0'); + Cash newvalue = (value * 10) - (*s - '0'); if (newvalue / 10 != value) ereport(ERROR, @@ -230,7 +230,7 @@ cash_in(PG_FUNCTION_ARGS) /* round off if there's another digit */ if (isdigit((unsigned char) *s) && *s >= '5') - value--; /* remember we build the value in the negative */ + value--; /* remember we build the value in the negative */ if (value > 0) ereport(ERROR, @@ -241,7 +241,7 @@ cash_in(PG_FUNCTION_ARGS) /* adjust for less than required decimal places */ for (; dec < fpoint; dec++) { - Cash newvalue = value * 10; + Cash newvalue = value * 10; if (newvalue / 10 != value) ereport(ERROR, @@ -279,8 +279,10 @@ cash_in(PG_FUNCTION_ARGS) "money", str))); } - /* If the value is supposed to be positive, flip the sign, but check for - * the most negative number. */ + /* + * If the value is supposed to be positive, flip the sign, but check for + * the most negative number. + */ if (sgn > 0) { result = -value; diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c index b0418b18dce..f0725860b4b 100644 --- a/src/backend/utils/adt/dbsize.c +++ b/src/backend/utils/adt/dbsize.c @@ -90,8 +90,8 @@ calculate_database_size(Oid dbOid) AclResult aclresult; /* - * User must have connect privilege for target database - * or be a member of pg_read_all_stats + * User must have connect privilege for target database or be a member of + * pg_read_all_stats */ aclresult = pg_database_aclcheck(dbOid, GetUserId(), ACL_CONNECT); if (aclresult != ACLCHECK_OK && @@ -180,8 +180,8 @@ calculate_tablespace_size(Oid tblspcOid) /* * User must be a member of pg_read_all_stats or have CREATE privilege for - * target tablespace, either explicitly granted or implicitly because - * it is default for current database. + * target tablespace, either explicitly granted or implicitly because it + * is default for current database. 
*/ if (tblspcOid != MyDatabaseTableSpace && !is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_STATS)) diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c index 1e21dd5c689..4127bece12a 100644 --- a/src/backend/utils/adt/formatting.c +++ b/src/backend/utils/adt/formatting.c @@ -1449,10 +1449,10 @@ str_numth(char *dest, char *num, int type) #ifdef USE_ICU -typedef int32_t (*ICU_Convert_Func)(UChar *dest, int32_t destCapacity, - const UChar *src, int32_t srcLength, - const char *locale, - UErrorCode *pErrorCode); +typedef int32_t (*ICU_Convert_Func) (UChar *dest, int32_t destCapacity, + const UChar *src, int32_t srcLength, + const char *locale, + UErrorCode *pErrorCode); static int32_t icu_convert_case(ICU_Convert_Func func, pg_locale_t mylocale, @@ -1461,7 +1461,7 @@ icu_convert_case(ICU_Convert_Func func, pg_locale_t mylocale, UErrorCode status; int32_t len_dest; - len_dest = len_source; /* try first with same length */ + len_dest = len_source; /* try first with same length */ *buff_dest = palloc(len_dest * sizeof(**buff_dest)); status = U_ZERO_ERROR; len_dest = func(*buff_dest, len_dest, buff_source, len_source, @@ -1491,7 +1491,7 @@ u_strToTitle_default_BI(UChar *dest, int32_t destCapacity, NULL, locale, pErrorCode); } -#endif /* USE_ICU */ +#endif /* USE_ICU */ /* * If the system provides the needed functions for wide-character manipulation @@ -1592,7 +1592,10 @@ str_tolower(const char *buff, size_t nbytes, Oid collid) workspace[curr_char] = towlower(workspace[curr_char]); } - /* Make result large enough; case change might change number of bytes */ + /* + * Make result large enough; case change might change number + * of bytes + */ result_size = curr_char * pg_database_encoding_max_length() + 1; result = palloc(result_size); @@ -1607,11 +1610,11 @@ str_tolower(const char *buff, size_t nbytes, Oid collid) result = pnstrdup(buff, nbytes); /* - * Note: we assume that tolower_l() will not be so broken as to need - * an isupper_l() guard test. When using the default collation, we - * apply the traditional Postgres behavior that forces ASCII-style - * treatment of I/i, but in non-default collations you get exactly - * what the collation says. + * Note: we assume that tolower_l() will not be so broken as + * to need an isupper_l() guard test. When using the default + * collation, we apply the traditional Postgres behavior that + * forces ASCII-style treatment of I/i, but in non-default + * collations you get exactly what the collation says. */ for (p = result; *p; p++) { @@ -1672,7 +1675,8 @@ str_toupper(const char *buff, size_t nbytes, Oid collid) #ifdef USE_ICU if (mylocale && mylocale->provider == COLLPROVIDER_ICU) { - int32_t len_uchar, len_conv; + int32_t len_uchar, + len_conv; UChar *buff_uchar; UChar *buff_conv; @@ -1711,7 +1715,10 @@ str_toupper(const char *buff, size_t nbytes, Oid collid) workspace[curr_char] = towupper(workspace[curr_char]); } - /* Make result large enough; case change might change number of bytes */ + /* + * Make result large enough; case change might change number + * of bytes + */ result_size = curr_char * pg_database_encoding_max_length() + 1; result = palloc(result_size); @@ -1726,11 +1733,11 @@ str_toupper(const char *buff, size_t nbytes, Oid collid) result = pnstrdup(buff, nbytes); /* - * Note: we assume that toupper_l() will not be so broken as to need - * an islower_l() guard test. 
When using the default collation, we - * apply the traditional Postgres behavior that forces ASCII-style - * treatment of I/i, but in non-default collations you get exactly - * what the collation says. + * Note: we assume that toupper_l() will not be so broken as + * to need an islower_l() guard test. When using the default + * collation, we apply the traditional Postgres behavior that + * forces ASCII-style treatment of I/i, but in non-default + * collations you get exactly what the collation says. */ for (p = result; *p; p++) { @@ -1792,7 +1799,8 @@ str_initcap(const char *buff, size_t nbytes, Oid collid) #ifdef USE_ICU if (mylocale && mylocale->provider == COLLPROVIDER_ICU) { - int32_t len_uchar, len_conv; + int32_t len_uchar, + len_conv; UChar *buff_uchar; UChar *buff_conv; @@ -1843,7 +1851,10 @@ str_initcap(const char *buff, size_t nbytes, Oid collid) } } - /* Make result large enough; case change might change number of bytes */ + /* + * Make result large enough; case change might change number + * of bytes + */ result_size = curr_char * pg_database_encoding_max_length() + 1; result = palloc(result_size); @@ -1858,11 +1869,11 @@ str_initcap(const char *buff, size_t nbytes, Oid collid) result = pnstrdup(buff, nbytes); /* - * Note: we assume that toupper_l()/tolower_l() will not be so broken - * as to need guard tests. When using the default collation, we apply - * the traditional Postgres behavior that forces ASCII-style treatment - * of I/i, but in non-default collations you get exactly what the - * collation says. + * Note: we assume that toupper_l()/tolower_l() will not be so + * broken as to need guard tests. When using the default + * collation, we apply the traditional Postgres behavior that + * forces ASCII-style treatment of I/i, but in non-default + * collations you get exactly what the collation says. 
*/ for (p = result; *p; p++) { diff --git a/src/backend/utils/adt/genfile.c b/src/backend/utils/adt/genfile.c index 32d6a666881..5b15562ba5b 100644 --- a/src/backend/utils/adt/genfile.c +++ b/src/backend/utils/adt/genfile.c @@ -486,7 +486,7 @@ pg_ls_dir_files(FunctionCallInfo fcinfo, char *dir) if (SRF_IS_FIRSTCALL()) { MemoryContext oldcontext; - TupleDesc tupdesc; + TupleDesc tupdesc; funcctx = SRF_FIRSTCALL_INIT(); oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx); @@ -523,7 +523,7 @@ pg_ls_dir_files(FunctionCallInfo fcinfo, char *dir) Datum values[3]; bool nulls[3]; char path[MAXPGPATH * 2]; - struct stat attrib; + struct stat attrib; HeapTuple tuple; /* Skip hidden files */ diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c index 9fb0e480bf0..0c6572d03ed 100644 --- a/src/backend/utils/adt/json.c +++ b/src/backend/utils/adt/json.c @@ -1400,7 +1400,7 @@ json_categorize_type(Oid typoid, if (OidIsValid(get_element_type(typoid)) || typoid == ANYARRAYOID || typoid == RECORDARRAYOID) *tcategory = JSONTYPE_ARRAY; - else if (type_is_rowtype(typoid)) /* includes RECORDOID */ + else if (type_is_rowtype(typoid)) /* includes RECORDOID */ *tcategory = JSONTYPE_COMPOSITE; else { diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c index 164f57ef770..952040d5bb1 100644 --- a/src/backend/utils/adt/jsonb.c +++ b/src/backend/utils/adt/jsonb.c @@ -647,7 +647,7 @@ jsonb_categorize_type(Oid typoid, if (OidIsValid(get_element_type(typoid)) || typoid == ANYARRAYOID || typoid == RECORDARRAYOID) *tcategory = JSONBTYPE_ARRAY; - else if (type_is_rowtype(typoid)) /* includes RECORDOID */ + else if (type_is_rowtype(typoid)) /* includes RECORDOID */ *tcategory = JSONBTYPE_COMPOSITE; else { diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index 3966e43dd5d..173584fef60 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -56,20 +56,20 @@ typedef struct OkeysState /* state for iterate_json_string_values function */ typedef struct IterateJsonStringValuesState { - JsonLexContext *lex; - JsonIterateStringValuesAction action; /* an action that will be applied - to each json value */ - void *action_state; /* any necessary context for iteration */ + JsonLexContext *lex; + JsonIterateStringValuesAction action; /* an action that will be + * applied to each json value */ + void *action_state; /* any necessary context for iteration */ } IterateJsonStringValuesState; /* state for transform_json_string_values function */ typedef struct TransformJsonStringValuesState { - JsonLexContext *lex; - StringInfo strval; /* resulting json */ - JsonTransformStringValuesAction action; /* an action that will be applied - to each json value */ - void *action_state; /* any necessary context for transformation */ + JsonLexContext *lex; + StringInfo strval; /* resulting json */ + JsonTransformStringValuesAction action; /* an action that will be + * applied to each json value */ + void *action_state; /* any necessary context for transformation */ } TransformJsonStringValuesState; /* state for json_get* functions */ @@ -154,29 +154,29 @@ typedef struct RecordIOData RecordIOData; /* structure to cache metadata needed for populate_array() */ typedef struct ArrayIOData { - ColumnIOData *element_info; /* metadata cache */ - Oid element_type; /* array element type id */ - int32 element_typmod; /* array element type modifier */ + ColumnIOData *element_info; /* metadata cache */ + Oid element_type; /* array element type id 
*/ + int32 element_typmod; /* array element type modifier */ } ArrayIOData; /* structure to cache metadata needed for populate_composite() */ typedef struct CompositeIOData { /* - * We use pointer to a RecordIOData here because variable-length - * struct RecordIOData can't be used directly in ColumnIOData.io union + * We use pointer to a RecordIOData here because variable-length struct + * RecordIOData can't be used directly in ColumnIOData.io union */ - RecordIOData *record_io; /* metadata cache for populate_record() */ - TupleDesc tupdesc; /* cached tuple descriptor */ + RecordIOData *record_io; /* metadata cache for populate_record() */ + TupleDesc tupdesc; /* cached tuple descriptor */ } CompositeIOData; /* structure to cache metadata needed for populate_domain() */ typedef struct DomainIOData { - ColumnIOData *base_io; /* metadata cache */ - Oid base_typid; /* base type id */ - int32 base_typmod; /* base type modifier */ - void *domain_info; /* opaque cache for domain checks */ + ColumnIOData *base_io; /* metadata cache */ + Oid base_typid; /* base type id */ + int32 base_typmod; /* base type modifier */ + void *domain_info; /* opaque cache for domain checks */ } DomainIOData; /* enumeration type categories */ @@ -193,17 +193,18 @@ typedef enum TypeCat /* structure to cache record metadata needed for populate_record_field() */ struct ColumnIOData { - Oid typid; /* column type id */ - int32 typmod; /* column type modifier */ - TypeCat typcat; /* column type category */ - ScalarIOData scalar_io; /* metadata cache for directi conversion - * through input function */ + Oid typid; /* column type id */ + int32 typmod; /* column type modifier */ + TypeCat typcat; /* column type category */ + ScalarIOData scalar_io; /* metadata cache for directi conversion + * through input function */ union { - ArrayIOData array; - CompositeIOData composite; - DomainIOData domain; - } io; /* metadata cache for various column type categories */ + ArrayIOData array; + CompositeIOData composite; + DomainIOData domain; + } io; /* metadata cache for various column type + * categories */ }; /* structure to cache record metadata needed for populate_record() */ @@ -234,31 +235,32 @@ typedef struct PopulateRecordsetState /* structure to cache metadata needed for populate_record_worker() */ typedef struct PopulateRecordCache { - Oid argtype; /* verified row type of the first argument */ + Oid argtype; /* verified row type of the first argument */ CompositeIOData io; /* metadata cache for populate_composite() */ } PopulateRecordCache; /* common data for populate_array_json() and populate_array_dim_jsonb() */ typedef struct PopulateArrayContext { - ArrayBuildState *astate; /* array build state */ - ArrayIOData *aio; /* metadata cache */ - MemoryContext acxt; /* array build memory context */ - MemoryContext mcxt; /* cache memory context */ - const char *colname; /* for diagnostics only */ - int *dims; /* dimensions */ - int *sizes; /* current dimension counters */ - int ndims; /* number of dimensions */ + ArrayBuildState *astate; /* array build state */ + ArrayIOData *aio; /* metadata cache */ + MemoryContext acxt; /* array build memory context */ + MemoryContext mcxt; /* cache memory context */ + const char *colname; /* for diagnostics only */ + int *dims; /* dimensions */ + int *sizes; /* current dimension counters */ + int ndims; /* number of dimensions */ } PopulateArrayContext; /* state for populate_array_json() */ typedef struct PopulateArrayState { - JsonLexContext *lex; /* json lexer */ + JsonLexContext *lex; /* 
json lexer */ PopulateArrayContext *ctx; /* context */ - char *element_start; /* start of the current array element */ - char *element_scalar; /* current array element token if it is a scalar */ - JsonTokenType element_type; /* current array element type */ + char *element_start; /* start of the current array element */ + char *element_scalar; /* current array element token if it is a + * scalar */ + JsonTokenType element_type; /* current array element type */ } PopulateArrayState; /* state for json_strip_nulls */ @@ -272,18 +274,18 @@ typedef struct StripnullState /* structure for generalized json/jsonb value passing */ typedef struct JsValue { - bool is_json; /* json/jsonb */ + bool is_json; /* json/jsonb */ union { struct { - char *str; /* json string */ - int len; /* json string length or -1 if null-terminated */ - JsonTokenType type; /* json type */ - } json; /* json value */ + char *str; /* json string */ + int len; /* json string length or -1 if null-terminated */ + JsonTokenType type; /* json type */ + } json; /* json value */ JsonbValue *jsonb; /* jsonb value */ - } val; + } val; } JsValue; typedef struct JsObject @@ -291,9 +293,9 @@ typedef struct JsObject bool is_json; /* json/jsonb */ union { - HTAB *json_hash; + HTAB *json_hash; JsonbContainer *jsonb_cont; - } val; + } val; } JsObject; /* useful macros for testing JsValue properties */ @@ -406,39 +408,39 @@ static void sn_scalar(void *state, char *token, JsonTokenType tokentype); static Datum populate_recordset_worker(FunctionCallInfo fcinfo, const char *funcname, bool have_record_arg); static Datum populate_record_worker(FunctionCallInfo fcinfo, const char *funcname, - bool have_record_arg); + bool have_record_arg); /* helper functions for populate_record[set] */ -static HeapTupleHeader populate_record(TupleDesc tupdesc, RecordIOData **record_info, - HeapTupleHeader template, MemoryContext mcxt, - JsObject *obj); +static HeapTupleHeader populate_record(TupleDesc tupdesc, RecordIOData **record_info, + HeapTupleHeader template, MemoryContext mcxt, + JsObject *obj); static Datum populate_record_field(ColumnIOData *col, Oid typid, int32 typmod, - const char *colname, MemoryContext mcxt, - Datum defaultval, JsValue *jsv, bool *isnull); + const char *colname, MemoryContext mcxt, + Datum defaultval, JsValue *jsv, bool *isnull); static void JsValueToJsObject(JsValue *jsv, JsObject *jso); static Datum populate_composite(CompositeIOData *io, Oid typid, int32 typmod, - const char *colname, MemoryContext mcxt, - HeapTupleHeader defaultval, JsValue *jsv); + const char *colname, MemoryContext mcxt, + HeapTupleHeader defaultval, JsValue *jsv); static Datum populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv); static void prepare_column_cache(ColumnIOData *column, Oid typid, int32 typmod, - MemoryContext mcxt, bool json); + MemoryContext mcxt, bool json); static Datum populate_record_field(ColumnIOData *col, Oid typid, int32 typmod, - const char *colname, MemoryContext mcxt, Datum defaultval, - JsValue *jsv, bool *isnull); -static RecordIOData * allocate_record_info(MemoryContext mcxt, int ncolumns); + const char *colname, MemoryContext mcxt, Datum defaultval, + JsValue *jsv, bool *isnull); +static RecordIOData *allocate_record_info(MemoryContext mcxt, int ncolumns); static bool JsObjectGetField(JsObject *obj, char *field, JsValue *jsv); static void populate_recordset_record(PopulateRecordsetState *state, JsObject *obj); static void populate_array_json(PopulateArrayContext *ctx, char *json, int len); -static void 
populate_array_dim_jsonb(PopulateArrayContext *ctx, JsonbValue *jbv, - int ndim); +static void populate_array_dim_jsonb(PopulateArrayContext *ctx, JsonbValue *jbv, + int ndim); static void populate_array_report_expected_array(PopulateArrayContext *ctx, int ndim); static void populate_array_assign_ndims(PopulateArrayContext *ctx, int ndims); static void populate_array_check_dimension(PopulateArrayContext *ctx, int ndim); static void populate_array_element(PopulateArrayContext *ctx, int ndim, JsValue *jsv); -static Datum populate_array(ArrayIOData *aio, const char *colname, - MemoryContext mcxt, JsValue *jsv); -static Datum populate_domain(DomainIOData *io, Oid typid, const char *colname, - MemoryContext mcxt, JsValue *jsv, bool isnull); +static Datum populate_array(ArrayIOData *aio, const char *colname, + MemoryContext mcxt, JsValue *jsv); +static Datum populate_domain(DomainIOData *io, Oid typid, const char *colname, + MemoryContext mcxt, JsValue *jsv, bool isnull); /* Worker that takes care of common setup for us */ static JsonbValue *findJsonbValueFromContainerLen(JsonbContainer *container, @@ -2319,8 +2321,8 @@ populate_array_report_expected_array(PopulateArrayContext *ctx, int ndim) } else { - StringInfoData indices; - int i; + StringInfoData indices; + int i; initStringInfo(&indices); @@ -2348,7 +2350,7 @@ populate_array_report_expected_array(PopulateArrayContext *ctx, int ndim) static void populate_array_assign_ndims(PopulateArrayContext *ctx, int ndims) { - int i; + int i; Assert(ctx->ndims <= 0); @@ -2360,17 +2362,17 @@ populate_array_assign_ndims(PopulateArrayContext *ctx, int ndims) ctx->sizes = palloc0(sizeof(int) * ndims); for (i = 0; i < ndims; i++) - ctx->dims[i] = -1; /* dimensions are unknown yet */ + ctx->dims[i] = -1; /* dimensions are unknown yet */ } /* check the populated subarray dimension */ static void populate_array_check_dimension(PopulateArrayContext *ctx, int ndim) { - int dim = ctx->sizes[ndim]; /* current dimension counter */ + int dim = ctx->sizes[ndim]; /* current dimension counter */ if (ctx->dims[ndim] == -1) - ctx->dims[ndim] = dim; /* assign dimension if not yet known */ + ctx->dims[ndim] = dim; /* assign dimension if not yet known */ else if (ctx->dims[ndim] != dim) ereport(ERROR, (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), @@ -2389,8 +2391,8 @@ populate_array_check_dimension(PopulateArrayContext *ctx, int ndim) static void populate_array_element(PopulateArrayContext *ctx, int ndim, JsValue *jsv) { - Datum element; - bool element_isnull; + Datum element; + bool element_isnull; /* populate the array element */ element = populate_record_field(ctx->aio->element_info, @@ -2400,10 +2402,10 @@ populate_array_element(PopulateArrayContext *ctx, int ndim, JsValue *jsv) jsv, &element_isnull); accumArrayResult(ctx->astate, element, element_isnull, - ctx->aio->element_type, ctx->acxt); + ctx->aio->element_type, ctx->acxt); Assert(ndim > 0); - ctx->sizes[ndim - 1]++; /* increment current dimension counter */ + ctx->sizes[ndim - 1]++; /* increment current dimension counter */ } /* json object start handler for populate_array_json() */ @@ -2411,7 +2413,7 @@ static void populate_array_object_start(void *_state) { PopulateArrayState *state = (PopulateArrayState *) _state; - int ndim = state->lex->lex_level; + int ndim = state->lex->lex_level; if (state->ctx->ndims <= 0) populate_array_assign_ndims(state->ctx, ndim); @@ -2423,9 +2425,9 @@ populate_array_object_start(void *_state) static void populate_array_array_end(void *_state) { - PopulateArrayState *state = 
(PopulateArrayState *) _state; - PopulateArrayContext *ctx = state->ctx; - int ndim = state->lex->lex_level; + PopulateArrayState *state = (PopulateArrayState *) _state; + PopulateArrayContext *ctx = state->ctx; + int ndim = state->lex->lex_level; if (ctx->ndims <= 0) populate_array_assign_ndims(ctx, ndim + 1); @@ -2439,7 +2441,7 @@ static void populate_array_element_start(void *_state, bool isnull) { PopulateArrayState *state = (PopulateArrayState *) _state; - int ndim = state->lex->lex_level; + int ndim = state->lex->lex_level; if (state->ctx->ndims <= 0 || ndim == state->ctx->ndims) { @@ -2454,9 +2456,9 @@ populate_array_element_start(void *_state, bool isnull) static void populate_array_element_end(void *_state, bool isnull) { - PopulateArrayState *state = (PopulateArrayState *) _state; - PopulateArrayContext *ctx = state->ctx; - int ndim = state->lex->lex_level; + PopulateArrayState *state = (PopulateArrayState *) _state; + PopulateArrayContext *ctx = state->ctx; + int ndim = state->lex->lex_level; Assert(ctx->ndims > 0); @@ -2476,7 +2478,7 @@ populate_array_element_end(void *_state, bool isnull) else if (state->element_scalar) { jsv.val.json.str = state->element_scalar; - jsv.val.json.len = -1; /* null-terminated */ + jsv.val.json.len = -1; /* null-terminated */ } else { @@ -2493,9 +2495,9 @@ populate_array_element_end(void *_state, bool isnull) static void populate_array_scalar(void *_state, char *token, JsonTokenType tokentype) { - PopulateArrayState *state = (PopulateArrayState *) _state; - PopulateArrayContext *ctx = state->ctx; - int ndim = state->lex->lex_level; + PopulateArrayState *state = (PopulateArrayState *) _state; + PopulateArrayContext *ctx = state->ctx; + int ndim = state->lex->lex_level; if (ctx->ndims <= 0) populate_array_assign_ndims(ctx, ndim); @@ -2515,8 +2517,8 @@ populate_array_scalar(void *_state, char *token, JsonTokenType tokentype) static void populate_array_json(PopulateArrayContext *ctx, char *json, int len) { - PopulateArrayState state; - JsonSemAction sem; + PopulateArrayState state; + JsonSemAction sem; state.lex = makeJsonLexContextCstringLen(json, len, true); state.ctx = ctx; @@ -2539,18 +2541,18 @@ populate_array_json(PopulateArrayContext *ctx, char *json, int len) /* * populate_array_dim_jsonb() -- Iterate recursively through jsonb sub-array - * elements and accumulate result using given ArrayBuildState. + * elements and accumulate result using given ArrayBuildState. */ static void -populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */ - JsonbValue *jbv, /* jsonb sub-array */ - int ndim) /* current dimension */ +populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */ + JsonbValue *jbv, /* jsonb sub-array */ + int ndim) /* current dimension */ { - JsonbContainer *jbc = jbv->val.binary.data; - JsonbIterator *it; - JsonbIteratorToken tok; - JsonbValue val; - JsValue jsv; + JsonbContainer *jbc = jbv->val.binary.data; + JsonbIterator *it; + JsonbIteratorToken tok; + JsonbValue val; + JsValue jsv; check_stack_depth(); @@ -2567,9 +2569,9 @@ populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */ tok = JsonbIteratorNext(&it, &val, true); /* - * If the number of dimensions is not yet known and - * we have found end of the array, or the first child element is not - * an array, then assign the number of dimensions now. + * If the number of dimensions is not yet known and we have found end of + * the array, or the first child element is not an array, then assign the + * number of dimensions now. 
*/ if (ctx->ndims <= 0 && (tok == WJB_END_ARRAY || @@ -2585,8 +2587,8 @@ populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */ while (tok == WJB_ELEM) { /* - * Recurse only if the dimensions of dimensions is still unknown or - * if it is not the innermost dimension. + * Recurse only if the dimensions of dimensions is still unknown or if + * it is not the innermost dimension. */ if (ctx->ndims > 0 && ndim >= ctx->ndims) populate_array_element(ctx, ndim, &jsv); @@ -2613,29 +2615,29 @@ populate_array_dim_jsonb(PopulateArrayContext *ctx, /* context */ /* recursively populate an array from json/jsonb */ static Datum -populate_array(ArrayIOData *aio, - const char *colname, - MemoryContext mcxt, - JsValue *jsv) -{ - PopulateArrayContext ctx; - Datum result; - int *lbs; - int i; +populate_array(ArrayIOData *aio, + const char *colname, + MemoryContext mcxt, + JsValue *jsv) +{ + PopulateArrayContext ctx; + Datum result; + int *lbs; + int i; ctx.aio = aio; ctx.mcxt = mcxt; ctx.acxt = CurrentMemoryContext; ctx.astate = initArrayResult(aio->element_type, ctx.acxt, true); ctx.colname = colname; - ctx.ndims = 0; /* unknown yet */ + ctx.ndims = 0; /* unknown yet */ ctx.dims = NULL; ctx.sizes = NULL; if (jsv->is_json) populate_array_json(&ctx, jsv->val.json.str, jsv->val.json.len >= 0 ? jsv->val.json.len - : strlen(jsv->val.json.str)); + : strlen(jsv->val.json.str)); else { populate_array_dim_jsonb(&ctx, jsv->val.jsonb, 1); @@ -2644,7 +2646,7 @@ populate_array(ArrayIOData *aio, Assert(ctx.ndims > 0); - lbs = palloc(sizeof(int) * ctx.ndims); + lbs = palloc(sizeof(int) * ctx.ndims); for (i = 0; i < ctx.ndims; i++) lbs[i] = 1; @@ -2668,11 +2670,11 @@ JsValueToJsObject(JsValue *jsv, JsObject *jso) { /* convert plain-text json into a hash table */ jso->val.json_hash = - get_json_object_as_hash(jsv->val.json.str, - jsv->val.json.len >= 0 - ? jsv->val.json.len - : strlen(jsv->val.json.str), - "populate_composite"); + get_json_object_as_hash(jsv->val.json.str, + jsv->val.json.len >= 0 + ? jsv->val.json.len + : strlen(jsv->val.json.str), + "populate_composite"); } else { @@ -2689,23 +2691,23 @@ JsValueToJsObject(JsValue *jsv, JsObject *jso) /* recursively populate a composite (row type) value from json/jsonb */ static Datum populate_composite(CompositeIOData *io, - Oid typid, - int32 typmod, - const char *colname, - MemoryContext mcxt, - HeapTupleHeader defaultval, - JsValue *jsv) + Oid typid, + int32 typmod, + const char *colname, + MemoryContext mcxt, + HeapTupleHeader defaultval, + JsValue *jsv) { - HeapTupleHeader tuple; - JsObject jso; + HeapTupleHeader tuple; + JsObject jso; /* acquire cached tuple descriptor */ if (!io->tupdesc || io->tupdesc->tdtypeid != typid || io->tupdesc->tdtypmod != typmod) { - TupleDesc tupdesc = lookup_rowtype_tupdesc(typid, typmod); - MemoryContext oldcxt; + TupleDesc tupdesc = lookup_rowtype_tupdesc(typid, typmod); + MemoryContext oldcxt; if (io->tupdesc) FreeTupleDesc(io->tupdesc); @@ -2750,8 +2752,8 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv) jsv->val.json.type == JSON_TOKEN_STRING) { /* - * Add quotes around string value (should be already escaped) - * if converting to json/jsonb. + * Add quotes around string value (should be already escaped) if + * converting to json/jsonb. 
*/ if (len < 0) @@ -2771,7 +2773,7 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv) str[len] = '\0'; } else - str = json; /* null-terminated string */ + str = json; /* null-terminated string */ } else { @@ -2779,7 +2781,8 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv) if (typid == JSONBOID) { - Jsonb *jsonb = JsonbValueToJsonb(jbv); /* directly use jsonb */ + Jsonb *jsonb = JsonbValueToJsonb(jbv); /* directly use jsonb */ + return JsonbGetDatum(jsonb); } /* convert jsonb to string for typio call */ @@ -2789,19 +2792,20 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv) * Convert scalar jsonb (non-scalars are passed here as jbvBinary) * to json string, preserving quotes around top-level strings. */ - Jsonb *jsonb = JsonbValueToJsonb(jbv); + Jsonb *jsonb = JsonbValueToJsonb(jbv); + str = JsonbToCString(NULL, &jsonb->root, VARSIZE(jsonb)); } - else if (jbv->type == jbvString) /* quotes are stripped */ + else if (jbv->type == jbvString) /* quotes are stripped */ str = pnstrdup(jbv->val.string.val, jbv->val.string.len); else if (jbv->type == jbvBool) str = pstrdup(jbv->val.boolean ? "true" : "false"); else if (jbv->type == jbvNumeric) str = DatumGetCString(DirectFunctionCall1(numeric_out, - PointerGetDatum(jbv->val.numeric))); + PointerGetDatum(jbv->val.numeric))); else if (jbv->type == jbvBinary) str = JsonbToCString(NULL, jbv->val.binary.data, - jbv->val.binary.len); + jbv->val.binary.len); else elog(ERROR, "unrecognized jsonb type: %d", (int) jbv->type); } @@ -2816,12 +2820,12 @@ populate_scalar(ScalarIOData *io, Oid typid, int32 typmod, JsValue *jsv) } static Datum -populate_domain(DomainIOData *io, - Oid typid, - const char *colname, - MemoryContext mcxt, - JsValue *jsv, - bool isnull) +populate_domain(DomainIOData *io, + Oid typid, + const char *colname, + MemoryContext mcxt, + JsValue *jsv, + bool isnull) { Datum res; @@ -2843,14 +2847,14 @@ populate_domain(DomainIOData *io, /* prepare column metadata cache for the given type */ static void -prepare_column_cache(ColumnIOData *column, - Oid typid, - int32 typmod, - MemoryContext mcxt, - bool json) +prepare_column_cache(ColumnIOData *column, + Oid typid, + int32 typmod, + MemoryContext mcxt, + bool json) { - HeapTuple tup; - Form_pg_type type; + HeapTuple tup; + Form_pg_type type; column->typid = typid; column->typmod = typmod; @@ -2867,7 +2871,7 @@ prepare_column_cache(ColumnIOData *column, column->io.domain.base_typid = type->typbasetype; column->io.domain.base_typmod = type->typtypmod; column->io.domain.base_io = MemoryContextAllocZero(mcxt, - sizeof(ColumnIOData)); + sizeof(ColumnIOData)); column->io.domain.domain_info = NULL; } else if (type->typtype == TYPTYPE_COMPOSITE || typid == RECORDOID) @@ -2880,7 +2884,7 @@ prepare_column_cache(ColumnIOData *column, { column->typcat = TYPECAT_ARRAY; column->io.array.element_info = MemoryContextAllocZero(mcxt, - sizeof(ColumnIOData)); + sizeof(ColumnIOData)); column->io.array.element_type = type->typelem; /* array element typemod stored in attribute's typmod */ column->io.array.element_typmod = typmod; @@ -2891,7 +2895,7 @@ prepare_column_cache(ColumnIOData *column, /* don't need input function when converting from jsonb to jsonb */ if (json || typid != JSONBOID) { - Oid typioproc; + Oid typioproc; getTypeInputInfo(typid, &typioproc, &column->scalar_io.typioparam); fmgr_info_cxt(typioproc, &column->scalar_io.typiofunc, mcxt); @@ -2903,13 +2907,13 @@ prepare_column_cache(ColumnIOData *column, /* recursively populate 
a record field or an array element from a json/jsonb value */ static Datum populate_record_field(ColumnIOData *col, - Oid typid, - int32 typmod, - const char *colname, - MemoryContext mcxt, - Datum defaultval, - JsValue *jsv, - bool *isnull) + Oid typid, + int32 typmod, + const char *colname, + MemoryContext mcxt, + Datum defaultval, + JsValue *jsv, + bool *isnull) { TypeCat typcat; @@ -2962,9 +2966,9 @@ static RecordIOData * allocate_record_info(MemoryContext mcxt, int ncolumns) { RecordIOData *data = (RecordIOData *) - MemoryContextAlloc(mcxt, - offsetof(RecordIOData, columns) + - ncolumns * sizeof(ColumnIOData)); + MemoryContextAlloc(mcxt, + offsetof(RecordIOData, columns) + + ncolumns * sizeof(ColumnIOData)); data->record_type = InvalidOid; data->record_typmod = 0; @@ -2986,7 +2990,7 @@ JsObjectGetField(JsObject *obj, char *field, JsValue *jsv) jsv->val.json.type = hashentry ? hashentry->type : JSON_TOKEN_NULL; jsv->val.json.str = jsv->val.json.type == JSON_TOKEN_NULL ? NULL : - hashentry->val; + hashentry->val; jsv->val.json.len = jsv->val.json.str ? -1 : 0; /* null-terminated */ return hashentry != NULL; @@ -2994,8 +2998,8 @@ JsObjectGetField(JsObject *obj, char *field, JsValue *jsv) else { jsv->val.jsonb = !obj->val.jsonb_cont ? NULL : - findJsonbValueFromContainerLen(obj->val.jsonb_cont, JB_FOBJECT, - field, strlen(field)); + findJsonbValueFromContainerLen(obj->val.jsonb_cont, JB_FOBJECT, + field, strlen(field)); return jsv->val.jsonb != NULL; } @@ -3003,23 +3007,23 @@ JsObjectGetField(JsObject *obj, char *field, JsValue *jsv) /* populate a record tuple from json/jsonb value */ static HeapTupleHeader -populate_record(TupleDesc tupdesc, - RecordIOData **precord, - HeapTupleHeader defaultval, - MemoryContext mcxt, - JsObject *obj) -{ - RecordIOData *record = *precord; - Datum *values; - bool *nulls; - HeapTuple res; - int ncolumns = tupdesc->natts; - int i; +populate_record(TupleDesc tupdesc, + RecordIOData **precord, + HeapTupleHeader defaultval, + MemoryContext mcxt, + JsObject *obj) +{ + RecordIOData *record = *precord; + Datum *values; + bool *nulls; + HeapTuple res; + int ncolumns = tupdesc->natts; + int i; /* - * if the input json is empty, we can only skip the rest if we were - * passed in a non-null record, since otherwise there may be issues - * with domain nulls. + * if the input json is empty, we can only skip the rest if we were passed + * in a non-null record, since otherwise there may be issues with domain + * nulls. */ if (defaultval && JsObjectIsEmpty(obj)) return defaultval; @@ -3034,7 +3038,7 @@ populate_record(TupleDesc tupdesc, record->record_typmod != tupdesc->tdtypmod) { MemSet(record, 0, offsetof(RecordIOData, columns) + - ncolumns * sizeof(ColumnIOData)); + ncolumns * sizeof(ColumnIOData)); record->record_type = tupdesc->tdtypeid; record->record_typmod = tupdesc->tdtypmod; record->ncolumns = ncolumns; @@ -3067,10 +3071,10 @@ populate_record(TupleDesc tupdesc, for (i = 0; i < ncolumns; ++i) { - Form_pg_attribute att = tupdesc->attrs[i]; - char *colname = NameStr(att->attname); - JsValue field = { 0 }; - bool found; + Form_pg_attribute att = tupdesc->attrs[i]; + char *colname = NameStr(att->attname); + JsValue field = {0}; + bool found; /* Ignore dropped columns in datatype */ if (att->attisdropped) @@ -3116,7 +3120,7 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname, { int json_arg_num = have_record_arg ? 
1 : 0; Oid jtype = get_fn_expr_argtype(fcinfo->flinfo, json_arg_num); - JsValue jsv = { 0 }; + JsValue jsv = {0}; HeapTupleHeader rec = NULL; Oid tupType; int32 tupTypmod; @@ -3134,7 +3138,7 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname, */ if (!cache) fcinfo->flinfo->fn_extra = cache = - MemoryContextAllocZero(fnmcxt, sizeof(*cache)); + MemoryContextAllocZero(fnmcxt, sizeof(*cache)); if (have_record_arg) { @@ -3210,7 +3214,8 @@ populate_record_worker(FunctionCallInfo fcinfo, const char *funcname, jsv.val.json.str = VARDATA_ANY(json); jsv.val.json.len = VARSIZE_ANY_EXHDR(json); - jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in populate_composite() */ + jsv.val.json.type = JSON_TOKEN_INVALID; /* not used in + * populate_composite() */ } else { @@ -3417,8 +3422,8 @@ json_to_recordset(PG_FUNCTION_ARGS) static void populate_recordset_record(PopulateRecordsetState *state, JsObject *obj) { - HeapTupleData tuple; - HeapTupleHeader tuphead = populate_record(state->ret_tdesc, + HeapTupleData tuple; + HeapTupleHeader tuphead = populate_record(state->ret_tdesc, state->my_extra, state->rec, state->fn_mcxt, @@ -4793,9 +4798,9 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls, void iterate_jsonb_string_values(Jsonb *jb, void *state, JsonIterateStringValuesAction action) { - JsonbIterator *it; - JsonbValue v; - JsonbIteratorToken type; + JsonbIterator *it; + JsonbValue v; + JsonbIteratorToken type; it = JsonbIteratorInit(&jb->root); @@ -4817,7 +4822,7 @@ iterate_json_string_values(text *json, void *action_state, JsonIterateStringValu { JsonLexContext *lex = makeJsonLexContext(json, true); JsonSemAction *sem = palloc0(sizeof(JsonSemAction)); - IterateJsonStringValuesState *state = palloc0(sizeof(IterateJsonStringValuesState)); + IterateJsonStringValuesState *state = palloc0(sizeof(IterateJsonStringValuesState)); state->lex = lex; state->action = action; @@ -4836,7 +4841,8 @@ iterate_json_string_values(text *json, void *action_state, JsonIterateStringValu static void iterate_string_values_scalar(void *state, char *token, JsonTokenType tokentype) { - IterateJsonStringValuesState *_state = (IterateJsonStringValuesState *) state; + IterateJsonStringValuesState *_state = (IterateJsonStringValuesState *) state; + if (tokentype == JSON_TOKEN_STRING) (*_state->action) (_state->action_state, token, strlen(token)); } @@ -4849,14 +4855,15 @@ iterate_string_values_scalar(void *state, char *token, JsonTokenType tokentype) */ Jsonb * transform_jsonb_string_values(Jsonb *jsonb, void *action_state, - JsonTransformStringValuesAction transform_action) + JsonTransformStringValuesAction transform_action) { - JsonbIterator *it; - JsonbValue v, *res = NULL; - JsonbIteratorToken type; - JsonbParseState *st = NULL; - text *out; - bool is_scalar = false; + JsonbIterator *it; + JsonbValue v, + *res = NULL; + JsonbIteratorToken type; + JsonbParseState *st = NULL; + text *out; + bool is_scalar = false; it = JsonbIteratorInit(&jsonb->root); is_scalar = it->isScalar; @@ -4928,6 +4935,7 @@ static void transform_string_values_object_start(void *state) { TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state; + appendStringInfoCharMacro(_state->strval, '{'); } @@ -4935,6 +4943,7 @@ static void transform_string_values_object_end(void *state) { TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state; + appendStringInfoCharMacro(_state->strval, '}'); } @@ -4942,6 +4951,7 @@ static void transform_string_values_array_start(void *state) 
{ TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state; + appendStringInfoCharMacro(_state->strval, '['); } @@ -4949,6 +4959,7 @@ static void transform_string_values_array_end(void *state) { TransformJsonStringValuesState *_state = (TransformJsonStringValuesState *) state; + appendStringInfoCharMacro(_state->strval, ']'); } @@ -4984,7 +4995,8 @@ transform_string_values_scalar(void *state, char *token, JsonTokenType tokentype if (tokentype == JSON_TOKEN_STRING) { - text *out = (*_state->action) (_state->action_state, token, strlen(token)); + text *out = (*_state->action) (_state->action_state, token, strlen(token)); + escape_json(_state->strval, text_to_cstring(out)); } else diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c index b9806069c21..d4d173480d0 100644 --- a/src/backend/utils/adt/like.c +++ b/src/backend/utils/adt/like.c @@ -180,7 +180,7 @@ Generic_Text_IC_like(text *str, text *pat, Oid collation) */ ereport(ERROR, (errcode(ERRCODE_INDETERMINATE_COLLATION), - errmsg("could not determine which collation to use for ILIKE"), + errmsg("could not determine which collation to use for ILIKE"), errhint("Use the COLLATE clause to set the collation explicitly."))); } locale = pg_newlocale_from_collation(collation); @@ -189,9 +189,9 @@ Generic_Text_IC_like(text *str, text *pat, Oid collation) /* * For efficiency reasons, in the single byte case we don't call lower() * on the pattern and text, but instead call SB_lower_char on each - * character. In the multi-byte case we don't have much choice :-(. - * Also, ICU does not support single-character case folding, so we go the - * long way. + * character. In the multi-byte case we don't have much choice :-(. Also, + * ICU does not support single-character case folding, so we go the long + * way. 
*/ if (pg_database_encoding_max_length() > 1 || (locale && locale->provider == COLLPROVIDER_ICU)) diff --git a/src/backend/utils/adt/mac.c b/src/backend/utils/adt/mac.c index eff4529a6a8..c2b52d80468 100644 --- a/src/backend/utils/adt/mac.c +++ b/src/backend/utils/adt/mac.c @@ -40,7 +40,7 @@ typedef struct bool estimating; /* true if estimating cardinality */ hyperLogLogState abbr_card; /* cardinality estimator */ -} macaddr_sortsupport_state; +} macaddr_sortsupport_state; static int macaddr_cmp_internal(macaddr *a1, macaddr *a2); static int macaddr_fast_cmp(Datum x, Datum y, SortSupport ssup); diff --git a/src/backend/utils/adt/mac8.c b/src/backend/utils/adt/mac8.c index c442eae6c10..1ed4183be7f 100644 --- a/src/backend/utils/adt/mac8.c +++ b/src/backend/utils/adt/mac8.c @@ -103,7 +103,7 @@ invalid_input: Datum macaddr8_in(PG_FUNCTION_ARGS) { - const unsigned char *str = (unsigned char*) PG_GETARG_CSTRING(0); + const unsigned char *str = (unsigned char *) PG_GETARG_CSTRING(0); const unsigned char *ptr = str; macaddr8 *result; unsigned char a = 0, diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c index e2ccac2d2a5..24ae3c6886e 100644 --- a/src/backend/utils/adt/pg_locale.c +++ b/src/backend/utils/adt/pg_locale.c @@ -1282,7 +1282,7 @@ pg_newlocale_from_collation(Oid collid) Form_pg_collation collform; const char *collcollate; const char *collctype pg_attribute_unused(); - pg_locale_t result; + pg_locale_t result; Datum collversion; bool isnull; @@ -1294,8 +1294,8 @@ pg_newlocale_from_collation(Oid collid) collcollate = NameStr(collform->collcollate); collctype = NameStr(collform->collctype); - result = malloc(sizeof(* result)); - memset(result, 0, sizeof(* result)); + result = malloc(sizeof(*result)); + memset(result, 0, sizeof(*result)); result->provider = collform->collprovider; if (collform->collprovider == COLLPROVIDER_LIBC) @@ -1308,7 +1308,7 @@ pg_newlocale_from_collation(Oid collid) /* Normal case where they're the same */ #ifndef WIN32 loc = newlocale(LC_COLLATE_MASK | LC_CTYPE_MASK, collcollate, - NULL); + NULL); #else loc = _create_locale(LC_ALL, collcollate); #endif @@ -1330,9 +1330,9 @@ pg_newlocale_from_collation(Oid collid) #else /* - * XXX The _create_locale() API doesn't appear to support this. - * Could perhaps be worked around by changing pg_locale_t to - * contain two separate fields. + * XXX The _create_locale() API doesn't appear to support + * this. Could perhaps be worked around by changing + * pg_locale_t to contain two separate fields. 
*/ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -1358,18 +1358,18 @@ pg_newlocale_from_collation(Oid collid) collator = ucol_open(collcollate, &status); if (U_FAILURE(status)) ereport(ERROR, - (errmsg("could not open collator for locale \"%s\": %s", - collcollate, u_errorName(status)))); + (errmsg("could not open collator for locale \"%s\": %s", + collcollate, u_errorName(status)))); result->info.icu.locale = strdup(collcollate); result->info.icu.ucol = collator; -#else /* not USE_ICU */ +#else /* not USE_ICU */ /* could get here if a collation was created by a build with ICU */ ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("ICU is not supported in this build"), \ - errhint("You need to rebuild PostgreSQL using --with-icu."))); -#endif /* not USE_ICU */ + errhint("You need to rebuild PostgreSQL using --with-icu."))); +#endif /* not USE_ICU */ } collversion = SysCacheGetAttr(COLLOID, tp, Anum_pg_collation_collversion, @@ -1382,9 +1382,11 @@ pg_newlocale_from_collation(Oid collid) actual_versionstr = get_collation_actual_version(collform->collprovider, collcollate); if (!actual_versionstr) { - /* This could happen when specifying a version in CREATE - * COLLATION for a libc locale, or manually creating a mess - * in the catalogs. */ + /* + * This could happen when specifying a version in CREATE + * COLLATION for a libc locale, or manually creating a mess in + * the catalogs. + */ ereport(ERROR, (errmsg("collation \"%s\" has no actual version, but a version was specified", NameStr(collform->collname)))); @@ -1396,13 +1398,13 @@ pg_newlocale_from_collation(Oid collid) (errmsg("collation \"%s\" has version mismatch", NameStr(collform->collname)), errdetail("The collation in the database was created using version %s, " - "but the operating system provides version %s.", + "but the operating system provides version %s.", collversionstr, actual_versionstr), errhint("Rebuild all objects affected by this collation and run " "ALTER COLLATION %s REFRESH VERSION, " - "or build PostgreSQL with the right library version.", + "or build PostgreSQL with the right library version.", quote_qualified_identifier(get_namespace_name(collform->collnamespace), - NameStr(collform->collname))))); + NameStr(collform->collname))))); } ReleaseSysCache(tp); @@ -1478,8 +1480,8 @@ init_icu_converter(void) conv = ucnv_open(icu_encoding_name, &status); if (U_FAILURE(status)) ereport(ERROR, - (errmsg("could not open ICU converter for encoding \"%s\": %s", - icu_encoding_name, u_errorName(status)))); + (errmsg("could not open ICU converter for encoding \"%s\": %s", + icu_encoding_name, u_errorName(status)))); icu_converter = conv; } @@ -1492,7 +1494,7 @@ icu_to_uchar(UChar **buff_uchar, const char *buff, size_t nbytes) init_icu_converter(); - len_uchar = 2 * nbytes; /* max length per docs */ + len_uchar = 2 * nbytes; /* max length per docs */ *buff_uchar = palloc(len_uchar * sizeof(**buff_uchar)); status = U_ZERO_ERROR; len_uchar = ucnv_toUChars(icu_converter, *buff_uchar, len_uchar, buff, nbytes, &status); diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index 43b14750351..9234bc2a971 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -1448,7 +1448,7 @@ pg_get_statisticsobjdef(PG_FUNCTION_ARGS) static char * pg_get_statisticsobj_worker(Oid statextid, bool missing_ok) { - Form_pg_statistic_ext statextrec; + Form_pg_statistic_ext statextrec; HeapTuple statexttup; StringInfoData buf; int colno; @@ -1477,7 +1477,7 @@ 
pg_get_statisticsobj_worker(Oid statextid, bool missing_ok) nsp = get_namespace_name(statextrec->stxnamespace); appendStringInfo(&buf, "CREATE STATISTICS %s", quote_qualified_identifier(nsp, - NameStr(statextrec->stxname))); + NameStr(statextrec->stxname))); /* * Decode the stxkind column so that we know which stats types to print. @@ -1735,11 +1735,11 @@ pg_get_partkeydef_worker(Oid relid, int prettyFlags, Datum pg_get_partition_constraintdef(PG_FUNCTION_ARGS) { - Oid relationId = PG_GETARG_OID(0); - Expr *constr_expr; - int prettyFlags; - List *context; - char *consrc; + Oid relationId = PG_GETARG_OID(0); + Expr *constr_expr; + int prettyFlags; + List *context; + char *consrc; constr_expr = get_partition_qual_relid(relationId); diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index 7028d6387c7..6e491bbc21e 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -170,7 +170,7 @@ static double eqjoinsel_semi(Oid operator, VariableStatData *vardata1, VariableStatData *vardata2, RelOptInfo *inner_rel); static bool estimate_multivariate_ndistinct(PlannerInfo *root, - RelOptInfo *rel, List **varinfos, double *ndistinct); + RelOptInfo *rel, List **varinfos, double *ndistinct); static bool convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue, Datum lobound, Datum hibound, Oid boundstypid, double *scaledlobound, double *scaledhibound); @@ -3364,8 +3364,8 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, List *relvarinfos = NIL; /* - * Split the list of varinfos in two - one for the current rel, - * one for remaining Vars on other rels. + * Split the list of varinfos in two - one for the current rel, one + * for remaining Vars on other rels. */ relvarinfos = lcons(varinfo1, relvarinfos); for_each_cell(l, lnext(list_head(varinfos))) @@ -3388,9 +3388,9 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, * Get the numdistinct estimate for the Vars of this rel. We * iteratively search for multivariate n-distinct with maximum number * of vars; assuming that each var group is independent of the others, - * we multiply them together. Any remaining relvarinfos after - * no more multivariate matches are found are assumed independent too, - * so their individual ndistinct estimates are multiplied also. + * we multiply them together. Any remaining relvarinfos after no more + * multivariate matches are found are assumed independent too, so + * their individual ndistinct estimates are multiplied also. * * While iterating, count how many separate numdistinct values we * apply. 
We apply a fudge factor below, but only if we multiplied @@ -3410,7 +3410,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows, } else { - foreach (l, relvarinfos) + foreach(l, relvarinfos) { GroupVarInfo *varinfo2 = (GroupVarInfo *) lfirst(l); @@ -3702,12 +3702,12 @@ estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel, } /* look for the ndistinct statistics matching the most vars */ - nmatches = 1; /* we require at least two matches */ + nmatches = 1; /* we require at least two matches */ foreach(lc, rel->statlist) { StatisticExtInfo *info = (StatisticExtInfo *) lfirst(lc); Bitmapset *shared; - int nshared; + int nshared; /* skip statistics of other kinds */ if (info->kind != STATS_EXT_NDISTINCT) @@ -3745,8 +3745,8 @@ estimate_multivariate_ndistinct(PlannerInfo *root, RelOptInfo *rel, */ if (stats) { - int i; - List *newlist = NIL; + int i; + List *newlist = NIL; MVNDistinctItem *item = NULL; /* Find the specific item that exactly matches the combination */ @@ -7766,8 +7766,8 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count, * * Because we can use all index quals equally when scanning, we can use * the largest correlation (in absolute value) among columns used by the - * query. Start at zero, the worst possible case. If we cannot find - * any correlation statistics, we will keep it as 0. + * query. Start at zero, the worst possible case. If we cannot find any + * correlation statistics, we will keep it as 0. */ *indexCorrelation = 0; @@ -7790,7 +7790,7 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count, */ if (HeapTupleIsValid(vardata.statsTuple) && !vardata.freefunc) elog(ERROR, - "no function provided to release variable stats with"); + "no function provided to release variable stats with"); } else { @@ -7813,11 +7813,11 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count, attnum = qinfo->indexcol + 1; if (get_index_stats_hook && - (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata)) + (*get_index_stats_hook) (root, index->indexoid, attnum, &vardata)) { /* - * The hook took control of acquiring a stats tuple. If it did - * supply a tuple, it'd better have supplied a freefunc. + * The hook took control of acquiring a stats tuple. If it + * did supply a tuple, it'd better have supplied a freefunc. */ if (HeapTupleIsValid(vardata.statsTuple) && !vardata.freefunc) @@ -7826,7 +7826,7 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count, else { vardata.statsTuple = SearchSysCache3(STATRELATTINH, - ObjectIdGetDatum(index->indexoid), + ObjectIdGetDatum(index->indexoid), Int16GetDatum(attnum), BoolGetDatum(false)); vardata.freefunc = ReleaseSysCache; @@ -7872,8 +7872,8 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count, /* * Now estimate the number of ranges that we'll touch by using the - * indexCorrelation from the stats. Careful not to divide by zero - * (note we're using the absolute value of the correlation). + * indexCorrelation from the stats. Careful not to divide by zero (note + * we're using the absolute value of the correlation). */ if (*indexCorrelation < 1.0e-10) estimatedRanges = indexRanges; @@ -7888,8 +7888,8 @@ brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count, *indexSelectivity = selec; /* - * Compute the index qual costs, much as in genericcostestimate, to add - * to the index costs. + * Compute the index qual costs, much as in genericcostestimate, to add to + * the index costs. 
*/ qual_arg_cost = other_operands_eval_cost(root, qinfos) + orderby_operands_eval_cost(root, path); diff --git a/src/backend/utils/adt/txid.c b/src/backend/utils/adt/txid.c index 5c64e327196..5dd996f62c4 100644 --- a/src/backend/utils/adt/txid.c +++ b/src/backend/utils/adt/txid.c @@ -147,8 +147,8 @@ TransactionIdInRecentPast(uint64 xid_with_epoch, TransactionId *extracted_xid) /* * If the transaction ID has wrapped around, it's definitely too old to * determine the commit status. Otherwise, we can compare it to - * ShmemVariableCache->oldestClogXid to determine whether the relevant CLOG - * entry is guaranteed to still exist. + * ShmemVariableCache->oldestClogXid to determine whether the relevant + * CLOG entry is guaranteed to still exist. */ if (xid_epoch + 1 < now_epoch || (xid_epoch + 1 == now_epoch && xid < now_epoch_last_xid) @@ -454,7 +454,7 @@ txid_current_if_assigned(PG_FUNCTION_ARGS) { txid val; TxidEpoch state; - TransactionId topxid = GetTopTransactionIdIfAny(); + TransactionId topxid = GetTopTransactionIdIfAny(); if (topxid == InvalidTransactionId) PG_RETURN_NULL(); @@ -741,9 +741,9 @@ txid_snapshot_xip(PG_FUNCTION_ARGS) Datum txid_status(PG_FUNCTION_ARGS) { - const char *status; - uint64 xid_with_epoch = PG_GETARG_INT64(0); - TransactionId xid; + const char *status; + uint64 xid_with_epoch = PG_GETARG_INT64(0); + TransactionId xid; /* * We must protect against concurrent truncation of clog entries to avoid @@ -770,8 +770,8 @@ txid_status(PG_FUNCTION_ARGS) * it's aborted if it isn't committed and is older than our * snapshot xmin. * - * Otherwise it must be in-progress (or have been at the time - * we checked commit/abort status). + * Otherwise it must be in-progress (or have been at the time we + * checked commit/abort status). */ if (TransactionIdPrecedes(xid, GetActiveSnapshot()->xmin)) status = gettext_noop("aborted"); diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index 0b0032787b2..be399f48f96 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -1557,8 +1557,10 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid) else #endif { - int32_t ulen1, ulen2; - UChar *uchar1, *uchar2; + int32_t ulen1, + ulen2; + UChar *uchar1, + *uchar2; ulen1 = icu_to_uchar(&uchar1, arg1, len1); ulen2 = icu_to_uchar(&uchar2, arg2, len2); @@ -1567,10 +1569,10 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid) uchar1, ulen1, uchar2, ulen2); } -#else /* not USE_ICU */ +#else /* not USE_ICU */ /* shouldn't happen */ elog(ERROR, "unsupported collprovider: %c", mylocale->provider); -#endif /* not USE_ICU */ +#endif /* not USE_ICU */ } else { @@ -2136,13 +2138,15 @@ varstrfastcmp_locale(Datum x, Datum y, SortSupport ssup) &status); if (U_FAILURE(status)) ereport(ERROR, - (errmsg("collation failed: %s", u_errorName(status)))); + (errmsg("collation failed: %s", u_errorName(status)))); } else #endif { - int32_t ulen1, ulen2; - UChar *uchar1, *uchar2; + int32_t ulen1, + ulen2; + UChar *uchar1, + *uchar2; ulen1 = icu_to_uchar(&uchar1, a1p, len1); ulen2 = icu_to_uchar(&uchar2, a2p, len2); @@ -2151,10 +2155,10 @@ varstrfastcmp_locale(Datum x, Datum y, SortSupport ssup) uchar1, ulen1, uchar2, ulen2); } -#else /* not USE_ICU */ +#else /* not USE_ICU */ /* shouldn't happen */ elog(ERROR, "unsupported collprovider: %c", sss->locale->provider); -#endif /* not USE_ICU */ +#endif /* not USE_ICU */ } else { @@ -2300,8 +2304,11 @@ varstr_abbrev_convert(Datum original, SortSupport ssup) } memcpy(sss->buf1, 
authoritative_data, len); - /* Just like strcoll(), strxfrm() expects a NUL-terminated string. - * Not necessary for ICU, but doesn't hurt. */ + + /* + * Just like strcoll(), strxfrm() expects a NUL-terminated string. Not + * necessary for ICU, but doesn't hurt. + */ sss->buf1[len] = '\0'; sss->last_len1 = len; @@ -2336,13 +2343,13 @@ varstr_abbrev_convert(Datum original, SortSupport ssup) UErrorCode status; uiter_setUTF8(&iter, sss->buf1, len); - state[0] = state[1] = 0; /* won't need that again */ + state[0] = state[1] = 0; /* won't need that again */ status = U_ZERO_ERROR; bsize = ucol_nextSortKeyPart(sss->locale->info.icu.ucol, &iter, state, (uint8_t *) sss->buf2, - Min(sizeof(Datum), sss->buflen2), + Min(sizeof(Datum), sss->buflen2), &status); if (U_FAILURE(status)) ereport(ERROR, @@ -2351,7 +2358,7 @@ varstr_abbrev_convert(Datum original, SortSupport ssup) else bsize = ucol_getSortKey(sss->locale->info.icu.ucol, uchar, ulen, - (uint8_t *) sss->buf2, sss->buflen2); + (uint8_t *) sss->buf2, sss->buflen2); } else #endif diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c index 42cffbbdd39..cdcd45419a4 100644 --- a/src/backend/utils/adt/xml.c +++ b/src/backend/utils/adt/xml.c @@ -2385,8 +2385,8 @@ database_get_xml_visible_tables(void) CppAsString2(RELKIND_RELATION) "," CppAsString2(RELKIND_MATVIEW) "," CppAsString2(RELKIND_VIEW) ")" - " AND pg_catalog.has_table_privilege(pg_class.oid, 'SELECT')" - " AND relnamespace IN (" XML_VISIBLE_SCHEMAS ");"); + " AND pg_catalog.has_table_privilege(pg_class.oid, 'SELECT')" + " AND relnamespace IN (" XML_VISIBLE_SCHEMAS ");"); } @@ -4518,9 +4518,8 @@ XmlTableGetValue(TableFuncScanState *state, int colnum, * This line ensure mapping of empty tags to PostgreSQL * value. Usually we would to map a empty tag to empty * string. But this mapping can create empty string when - * user doesn't expect it - when empty tag is enforced - * by libxml2 - when user uses a text() function for - * example. + * user doesn't expect it - when empty tag is enforced by + * libxml2 - when user uses a text() function for example. */ cstr = ""; } diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c index a1e6ea2a356..819121638ea 100644 --- a/src/backend/utils/cache/inval.c +++ b/src/backend/utils/cache/inval.c @@ -386,10 +386,9 @@ AddRelcacheInvalidationMessage(InvalidationListHeader *hdr, SharedInvalidationMessage msg; /* - * Don't add a duplicate item. - * We assume dbId need not be checked because it will never change. - * InvalidOid for relId means all relations so we don't need to add - * individual ones when it is present. + * Don't add a duplicate item. We assume dbId need not be checked because + * it will never change. InvalidOid for relId means all relations so we + * don't need to add individual ones when it is present. */ ProcessMessageList(hdr->rclist, if (msg->rc.id == SHAREDINVALRELCACHE_ID && @@ -523,8 +522,8 @@ RegisterRelcacheInvalidation(Oid dbId, Oid relId) /* * If the relation being invalidated is one of those cached in the local - * relcache init file, mark that we need to zap that file at commit. - * Same is true when we are invalidating whole relcache. + * relcache init file, mark that we need to zap that file at commit. Same + * is true when we are invalidating whole relcache. 
*/ if (OidIsValid(dbId) && (RelationIdIsInInitFile(relId) || relId == InvalidOid)) @@ -1139,8 +1138,8 @@ CacheInvalidateHeapTuple(Relation relation, RegisterCatcacheInvalidation); /* - * Now, is this tuple one of the primary definers of a relcache entry? - * See comments in file header for deeper explanation. + * Now, is this tuple one of the primary definers of a relcache entry? See + * comments in file header for deeper explanation. * * Note we ignore newtuple here; we assume an update cannot move a tuple * from being part of one relcache entry to being part of another. diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c index b94d4755055..4def73ddfbe 100644 --- a/src/backend/utils/cache/lsyscache.c +++ b/src/backend/utils/cache/lsyscache.c @@ -858,7 +858,7 @@ get_attidentity(Oid relid, AttrNumber attnum) if (HeapTupleIsValid(tp)) { Form_pg_attribute att_tup = (Form_pg_attribute) GETSTRUCT(tp); - char result; + char result; result = att_tup->attidentity; ReleaseSysCache(tp); diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index abff7474f55..4b5f8107ef0 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -89,7 +89,7 @@ static CachedPlanSource *first_saved_plan = NULL; static void ReleaseGenericPlan(CachedPlanSource *plansource); static List *RevalidateCachedQuery(CachedPlanSource *plansource, - QueryEnvironment *queryEnv); + QueryEnvironment *queryEnv); static bool CheckCachedPlan(CachedPlanSource *plansource); static CachedPlan *BuildCachedPlan(CachedPlanSource *plansource, List *qlist, ParamListInfo boundParams, QueryEnvironment *queryEnv); @@ -1520,7 +1520,7 @@ AcquireExecutorLocks(List *stmt_list, bool acquire) * acquire a non-conflicting lock. */ if (list_member_int(plannedstmt->resultRelations, rt_index) || - list_member_int(plannedstmt->nonleafResultRelations, rt_index)) + list_member_int(plannedstmt->nonleafResultRelations, rt_index)) lockmode = RowExclusiveLock; else if ((rc = get_plan_rowmark(plannedstmt->rowMarks, rt_index)) != NULL && RowMarkRequiresRowShareLock(rc->markType)) diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 0cd6289f916..c2e8361f2f4 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -4504,7 +4504,10 @@ RelationGetStatExtList(Relation relation) */ result = NIL; - /* Prepare to scan pg_statistic_ext for entries having stxrelid = this rel. */ + /* + * Prepare to scan pg_statistic_ext for entries having stxrelid = this + * rel. + */ ScanKeyInit(&skey, Anum_pg_statistic_ext_stxrelid, BTEqualStrategyNumber, F_OIDEQ, @@ -4603,9 +4606,10 @@ RelationSetIndexList(Relation relation, List *indexIds, Oid oidIndex) list_free(relation->rd_indexlist); relation->rd_indexlist = indexIds; relation->rd_oidindex = oidIndex; + /* - * For the moment, assume the target rel hasn't got a pk or replica - * index. We'll load them on demand in the API that wraps access to them. + * For the moment, assume the target rel hasn't got a pk or replica index. + * We'll load them on demand in the API that wraps access to them. 
*/ relation->rd_pkindex = InvalidOid; relation->rd_replidindex = InvalidOid; @@ -5169,7 +5173,7 @@ GetRelationPublicationActions(Relation relation) { List *puboids; ListCell *lc; - MemoryContext oldcxt; + MemoryContext oldcxt; PublicationActions *pubactions = palloc0(sizeof(PublicationActions)); if (relation->rd_pubactions) @@ -5200,8 +5204,8 @@ GetRelationPublicationActions(Relation relation) ReleaseSysCache(tup); /* - * If we know everything is replicated, there is no point to check - * for other publications. + * If we know everything is replicated, there is no point to check for + * other publications. */ if (pubactions->pubinsert && pubactions->pubupdate && pubactions->pubdelete) diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c index f0a16e309c0..922718c9d17 100644 --- a/src/backend/utils/cache/syscache.c +++ b/src/backend/utils/cache/syscache.c @@ -661,7 +661,7 @@ static const struct cachedesc cacheinfo[] = { }, 16 }, - {PublicationRelationId, /* PUBLICATIONOID */ + {PublicationRelationId, /* PUBLICATIONOID */ PublicationObjectIndexId, 1, { @@ -672,7 +672,7 @@ static const struct cachedesc cacheinfo[] = { }, 8 }, - {PublicationRelationId, /* PUBLICATIONNAME */ + {PublicationRelationId, /* PUBLICATIONNAME */ PublicationNameIndexId, 1, { @@ -683,7 +683,7 @@ static const struct cachedesc cacheinfo[] = { }, 8 }, - {PublicationRelRelationId, /* PUBLICATIONREL */ + {PublicationRelRelationId, /* PUBLICATIONREL */ PublicationRelObjectIndexId, 1, { @@ -694,7 +694,7 @@ static const struct cachedesc cacheinfo[] = { }, 64 }, - {PublicationRelRelationId, /* PUBLICATIONRELMAP */ + {PublicationRelRelationId, /* PUBLICATIONRELMAP */ PublicationRelPrrelidPrpubidIndexId, 2, { @@ -716,7 +716,7 @@ static const struct cachedesc cacheinfo[] = { }, 8 }, - {SequenceRelationId, /* SEQRELID */ + {SequenceRelationId, /* SEQRELID */ SequenceRelidIndexId, 1, { @@ -760,7 +760,7 @@ static const struct cachedesc cacheinfo[] = { }, 128 }, - {SubscriptionRelationId, /* SUBSCRIPTIONOID */ + {SubscriptionRelationId, /* SUBSCRIPTIONOID */ SubscriptionObjectIndexId, 1, { @@ -771,7 +771,7 @@ static const struct cachedesc cacheinfo[] = { }, 4 }, - {SubscriptionRelationId, /* SUBSCRIPTIONNAME */ + {SubscriptionRelationId, /* SUBSCRIPTIONNAME */ SubscriptionNameIndexId, 2, { @@ -782,7 +782,7 @@ static const struct cachedesc cacheinfo[] = { }, 4 }, - {SubscriptionRelRelationId, /* SUBSCRIPTIONRELMAP */ + {SubscriptionRelRelationId, /* SUBSCRIPTIONRELMAP */ SubscriptionRelSrrelidSrsubidIndexId, 2, { diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c index 9739c4c1447..28c2583f960 100644 --- a/src/backend/utils/fmgr/dfmgr.c +++ b/src/backend/utils/fmgr/dfmgr.c @@ -65,7 +65,7 @@ char *Dynamic_library_path; static void *internal_load_library(const char *libname); static void incompatible_module_error(const char *libname, - const Pg_magic_struct *module_magic_data) pg_attribute_noreturn(); + const Pg_magic_struct *module_magic_data) pg_attribute_noreturn(); static void internal_unload_library(const char *libname); static bool file_exists(const char *name); static char *expand_dynamic_library_name(const char *name); diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c index d9e3bf240db..f6d2b7d63ee 100644 --- a/src/backend/utils/fmgr/fmgr.c +++ b/src/backend/utils/fmgr/fmgr.c @@ -396,10 +396,10 @@ fetch_finfo_record(void *filehandle, const char *funcname) { ereport(ERROR, (errcode(ERRCODE_UNDEFINED_FUNCTION), - errmsg("could not find function 
information for function \"%s\"", - funcname), + errmsg("could not find function information for function \"%s\"", + funcname), errhint("SQL-callable functions need an accompanying PG_FUNCTION_INFO_V1(funcname)."))); - return NULL; /* silence compiler */ + return NULL; /* silence compiler */ } /* Found, so call it */ diff --git a/src/backend/utils/mb/conv.c b/src/backend/utils/mb/conv.c index 5ce5c9a9c25..d46330b2079 100644 --- a/src/backend/utils/mb/conv.c +++ b/src/backend/utils/mb/conv.c @@ -445,7 +445,7 @@ pg_mb_radix_conv(const pg_mb_radix_tree *rt, else return rt->chars16[b4 + rt->b1root - rt->b1_lower]; } - return 0; /* shouldn't happen */ + return 0; /* shouldn't happen */ } /* @@ -607,7 +607,8 @@ UtfToLocal(const unsigned char *utf, int len, /* Now check ordinary map */ if (map) { - uint32 converted = pg_mb_radix_conv(map, l, b1, b2, b3, b4); + uint32 converted = pg_mb_radix_conv(map, l, b1, b2, b3, b4); + if (converted) { iso = store_coded_char(iso, converted); @@ -731,7 +732,7 @@ LocalToUtf(const unsigned char *iso, int len, if (map) { - uint32 converted = pg_mb_radix_conv(map, l, b1, b2, b3, b4); + uint32 converted = pg_mb_radix_conv(map, l, b1, b2, b3, b4); if (converted) { diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c index 4a73ec4776f..ac0bc915ede 100644 --- a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c +++ b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c @@ -60,37 +60,37 @@ PG_FUNCTION_INFO_V1(utf8_to_iso8859); typedef struct { pg_enc encoding; - const pg_mb_radix_tree *map1; /* to UTF8 map name */ - const pg_mb_radix_tree *map2; /* from UTF8 map name */ + const pg_mb_radix_tree *map1; /* to UTF8 map name */ + const pg_mb_radix_tree *map2; /* from UTF8 map name */ } pg_conv_map; static const pg_conv_map maps[] = { {PG_LATIN2, &iso8859_2_to_unicode_tree, - &iso8859_2_from_unicode_tree}, /* ISO-8859-2 Latin 2 */ + &iso8859_2_from_unicode_tree}, /* ISO-8859-2 Latin 2 */ {PG_LATIN3, &iso8859_3_to_unicode_tree, - &iso8859_3_from_unicode_tree}, /* ISO-8859-3 Latin 3 */ + &iso8859_3_from_unicode_tree}, /* ISO-8859-3 Latin 3 */ {PG_LATIN4, &iso8859_4_to_unicode_tree, - &iso8859_4_from_unicode_tree}, /* ISO-8859-4 Latin 4 */ + &iso8859_4_from_unicode_tree}, /* ISO-8859-4 Latin 4 */ {PG_LATIN5, &iso8859_9_to_unicode_tree, - &iso8859_9_from_unicode_tree}, /* ISO-8859-9 Latin 5 */ + &iso8859_9_from_unicode_tree}, /* ISO-8859-9 Latin 5 */ {PG_LATIN6, &iso8859_10_to_unicode_tree, - &iso8859_10_from_unicode_tree}, /* ISO-8859-10 Latin 6 */ + &iso8859_10_from_unicode_tree}, /* ISO-8859-10 Latin 6 */ {PG_LATIN7, &iso8859_13_to_unicode_tree, - &iso8859_13_from_unicode_tree}, /* ISO-8859-13 Latin 7 */ + &iso8859_13_from_unicode_tree}, /* ISO-8859-13 Latin 7 */ {PG_LATIN8, &iso8859_14_to_unicode_tree, - &iso8859_14_from_unicode_tree}, /* ISO-8859-14 Latin 8 */ + &iso8859_14_from_unicode_tree}, /* ISO-8859-14 Latin 8 */ {PG_LATIN9, &iso8859_15_to_unicode_tree, - &iso8859_15_from_unicode_tree}, /* ISO-8859-15 Latin 9 */ + &iso8859_15_from_unicode_tree}, /* ISO-8859-15 Latin 9 */ {PG_LATIN10, &iso8859_16_to_unicode_tree, - &iso8859_16_from_unicode_tree}, /* ISO-8859-16 Latin 10 */ + &iso8859_16_from_unicode_tree}, /* ISO-8859-16 Latin 10 */ {PG_ISO_8859_5, &iso8859_5_to_unicode_tree, - &iso8859_5_from_unicode_tree}, /* ISO-8859-5 */ + &iso8859_5_from_unicode_tree}, /* ISO-8859-5 */ {PG_ISO_8859_6, &iso8859_6_to_unicode_tree, 
- &iso8859_6_from_unicode_tree}, /* ISO-8859-6 */ + &iso8859_6_from_unicode_tree}, /* ISO-8859-6 */ {PG_ISO_8859_7, &iso8859_7_to_unicode_tree, - &iso8859_7_from_unicode_tree}, /* ISO-8859-7 */ + &iso8859_7_from_unicode_tree}, /* ISO-8859-7 */ {PG_ISO_8859_8, &iso8859_8_to_unicode_tree, - &iso8859_8_from_unicode_tree}, /* ISO-8859-8 */ + &iso8859_8_from_unicode_tree}, /* ISO-8859-8 */ }; Datum diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c b/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c index 4c8893036c5..971de32f6c2 100644 --- a/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c +++ b/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c @@ -56,13 +56,13 @@ PG_FUNCTION_INFO_V1(utf8_to_win); typedef struct { pg_enc encoding; - const pg_mb_radix_tree *map1; /* to UTF8 map name */ - const pg_mb_radix_tree *map2; /* from UTF8 map name */ + const pg_mb_radix_tree *map1; /* to UTF8 map name */ + const pg_mb_radix_tree *map2; /* from UTF8 map name */ } pg_conv_map; static const pg_conv_map maps[] = { - {PG_WIN866, &win866_to_unicode_tree, &win866_from_unicode_tree}, - {PG_WIN874, &win874_to_unicode_tree, &win874_from_unicode_tree}, + {PG_WIN866, &win866_to_unicode_tree, &win866_from_unicode_tree}, + {PG_WIN874, &win874_to_unicode_tree, &win874_from_unicode_tree}, {PG_WIN1250, &win1250_to_unicode_tree, &win1250_from_unicode_tree}, {PG_WIN1251, &win1251_to_unicode_tree, &win1251_from_unicode_tree}, {PG_WIN1252, &win1252_to_unicode_tree, &win1252_from_unicode_tree}, diff --git a/src/backend/utils/mb/encnames.c b/src/backend/utils/mb/encnames.c index 444eec25b50..f97505e55af 100644 --- a/src/backend/utils/mb/encnames.c +++ b/src/backend/utils/mb/encnames.c @@ -412,43 +412,43 @@ const pg_enc2gettext pg_enc2gettext_tbl[] = * * NULL entries are not supported by ICU, or their mapping is unclear. 
*/ -static const char * const pg_enc2icu_tbl[] = +static const char *const pg_enc2icu_tbl[] = { - NULL, /* PG_SQL_ASCII */ - "EUC-JP", /* PG_EUC_JP */ - "EUC-CN", /* PG_EUC_CN */ - "EUC-KR", /* PG_EUC_KR */ - "EUC-TW", /* PG_EUC_TW */ - NULL, /* PG_EUC_JIS_2004 */ - "UTF-8", /* PG_UTF8 */ - NULL, /* PG_MULE_INTERNAL */ - "ISO-8859-1", /* PG_LATIN1 */ - "ISO-8859-2", /* PG_LATIN2 */ - "ISO-8859-3", /* PG_LATIN3 */ - "ISO-8859-4", /* PG_LATIN4 */ - "ISO-8859-9", /* PG_LATIN5 */ - "ISO-8859-10", /* PG_LATIN6 */ - "ISO-8859-13", /* PG_LATIN7 */ - "ISO-8859-14", /* PG_LATIN8 */ - "ISO-8859-15", /* PG_LATIN9 */ - NULL, /* PG_LATIN10 */ - "CP1256", /* PG_WIN1256 */ - "CP1258", /* PG_WIN1258 */ - "CP866", /* PG_WIN866 */ - NULL, /* PG_WIN874 */ - "KOI8-R", /* PG_KOI8R */ - "CP1251", /* PG_WIN1251 */ - "CP1252", /* PG_WIN1252 */ - "ISO-8859-5", /* PG_ISO_8859_5 */ - "ISO-8859-6", /* PG_ISO_8859_6 */ - "ISO-8859-7", /* PG_ISO_8859_7 */ - "ISO-8859-8", /* PG_ISO_8859_8 */ - "CP1250", /* PG_WIN1250 */ - "CP1253", /* PG_WIN1253 */ - "CP1254", /* PG_WIN1254 */ - "CP1255", /* PG_WIN1255 */ - "CP1257", /* PG_WIN1257 */ - "KOI8-U", /* PG_KOI8U */ + NULL, /* PG_SQL_ASCII */ + "EUC-JP", /* PG_EUC_JP */ + "EUC-CN", /* PG_EUC_CN */ + "EUC-KR", /* PG_EUC_KR */ + "EUC-TW", /* PG_EUC_TW */ + NULL, /* PG_EUC_JIS_2004 */ + "UTF-8", /* PG_UTF8 */ + NULL, /* PG_MULE_INTERNAL */ + "ISO-8859-1", /* PG_LATIN1 */ + "ISO-8859-2", /* PG_LATIN2 */ + "ISO-8859-3", /* PG_LATIN3 */ + "ISO-8859-4", /* PG_LATIN4 */ + "ISO-8859-9", /* PG_LATIN5 */ + "ISO-8859-10", /* PG_LATIN6 */ + "ISO-8859-13", /* PG_LATIN7 */ + "ISO-8859-14", /* PG_LATIN8 */ + "ISO-8859-15", /* PG_LATIN9 */ + NULL, /* PG_LATIN10 */ + "CP1256", /* PG_WIN1256 */ + "CP1258", /* PG_WIN1258 */ + "CP866", /* PG_WIN866 */ + NULL, /* PG_WIN874 */ + "KOI8-R", /* PG_KOI8R */ + "CP1251", /* PG_WIN1251 */ + "CP1252", /* PG_WIN1252 */ + "ISO-8859-5", /* PG_ISO_8859_5 */ + "ISO-8859-6", /* PG_ISO_8859_6 */ + "ISO-8859-7", /* PG_ISO_8859_7 */ + "ISO-8859-8", /* PG_ISO_8859_8 */ + "CP1250", /* PG_WIN1250 */ + "CP1253", /* PG_WIN1253 */ + "CP1254", /* PG_WIN1254 */ + "CP1255", /* PG_WIN1255 */ + "CP1257", /* PG_WIN1257 */ + "KOI8-U", /* PG_KOI8U */ }; bool @@ -476,7 +476,7 @@ get_encoding_name_for_icu(int encoding) return icu_encoding_name; } -#endif /* not FRONTEND */ +#endif /* not FRONTEND */ /* ---------- diff --git a/src/backend/utils/misc/backend_random.c b/src/backend/utils/misc/backend_random.c index dcc23638e17..d8556143dcd 100644 --- a/src/backend/utils/misc/backend_random.c +++ b/src/backend/utils/misc/backend_random.c @@ -53,7 +53,7 @@ bool pg_backend_random(char *dst, int len) { /* should not be called in postmaster */ - Assert (IsUnderPostmaster || !IsPostmasterEnvironment); + Assert(IsUnderPostmaster || !IsPostmasterEnvironment); return pg_strong_random(dst, len); } @@ -69,7 +69,7 @@ typedef struct { bool initialized; unsigned short seed[3]; -} BackendRandomShmemStruct; +} BackendRandomShmemStruct; static BackendRandomShmemStruct *BackendRandomShmem; @@ -106,7 +106,7 @@ pg_backend_random(char *dst, int len) char *end = dst + len; /* should not be called in postmaster */ - Assert (IsUnderPostmaster || !IsPostmasterEnvironment); + Assert(IsUnderPostmaster || !IsPostmasterEnvironment); LWLockAcquire(BackendRandomLock, LW_EXCLUSIVE); @@ -124,8 +124,8 @@ pg_backend_random(char *dst, int len) BackendRandomShmem->seed[2] = (unsigned short) (now.tv_usec >> 16); /* - * Mix in the cancel key, generated by the postmaster. 
This adds - * what little entropy the postmaster had to the seed. + * Mix in the cancel key, generated by the postmaster. This adds what + * little entropy the postmaster had to the seed. */ BackendRandomShmem->seed[0] ^= (MyCancelKey); BackendRandomShmem->seed[1] ^= (MyCancelKey >> 16); @@ -141,7 +141,7 @@ pg_backend_random(char *dst, int len) /* * pg_jrand48 returns a 32-bit integer. Fill the next 4 bytes from it. */ - r = (uint32) pg_jrand48(BackendRandomShmem->seed); + r = (uint32) pg_jrand48(BackendRandomShmem->seed); for (j = 0; j < 4 && dst < end; j++) { @@ -155,4 +155,4 @@ pg_backend_random(char *dst, int len) } -#endif /* HAVE_STRONG_RANDOM */ +#endif /* HAVE_STRONG_RANDOM */ diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index cb4e621c848..92e1d63b2f5 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -151,7 +151,7 @@ static bool check_log_destination(char **newval, void **extra, GucSource source) static void assign_log_destination(const char *newval, void *extra); static bool check_wal_consistency_checking(char **newval, void **extra, - GucSource source); + GucSource source); static void assign_wal_consistency_checking(const char *newval, void *extra); #ifdef HAVE_SYSLOG @@ -2212,7 +2212,7 @@ static struct config_int ConfigureNamesInt[] = {"max_pred_locks_per_page", PGC_SIGHUP, LOCK_MANAGEMENT, gettext_noop("Sets the maximum number of predicate-locked tuples per page."), gettext_noop("If more than this number of tuples on the same page are locked " - "by a connection, those locks are replaced by a page level lock.") + "by a connection, those locks are replaced by a page level lock.") }, &max_predicate_locks_per_page, 2, 0, INT_MAX, @@ -2259,7 +2259,7 @@ static struct config_int ConfigureNamesInt[] = GUC_UNIT_MB }, &min_wal_size_mb, - 5 * (XLOG_SEG_SIZE/ (1024 * 1024)), 2, MAX_KILOBYTES, + 5 * (XLOG_SEG_SIZE / (1024 * 1024)), 2, MAX_KILOBYTES, NULL, NULL, NULL }, @@ -2270,7 +2270,7 @@ static struct config_int ConfigureNamesInt[] = GUC_UNIT_MB }, &max_wal_size_mb, - 64 * (XLOG_SEG_SIZE/ (1024 * 1024)), 2, MAX_KILOBYTES, + 64 * (XLOG_SEG_SIZE / (1024 * 1024)), 2, MAX_KILOBYTES, NULL, assign_max_wal_size, NULL }, @@ -2452,7 +2452,7 @@ static struct config_int ConfigureNamesInt[] = NULL }, &bgwriter_lru_maxpages, - 100, 0, INT_MAX / 2, /* Same upper limit as shared_buffers */ + 100, 0, INT_MAX / 2, /* Same upper limit as shared_buffers */ NULL, NULL, NULL }, @@ -6714,7 +6714,7 @@ GetConfigOption(const char *name, bool missing_ok, bool restrict_superuser) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser or a member of pg_read_all_settings to examine \"%s\"", - name))); + name))); switch (record->vartype) { @@ -6764,7 +6764,7 @@ GetConfigOptionResetString(const char *name) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser or a member of pg_read_all_settings to examine \"%s\"", - name))); + name))); switch (record->vartype) { @@ -8056,7 +8056,7 @@ GetConfigOptionByName(const char *name, const char **varname, bool missing_ok) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser or a member of pg_read_all_settings to examine \"%s\"", - name))); + name))); if (varname) *varname = record->name; @@ -8083,7 +8083,7 @@ GetConfigOptionByNum(int varnum, const char **values, bool *noshow) { if ((conf->flags & GUC_NO_SHOW_ALL) || ((conf->flags & GUC_SUPERUSER_ONLY) && - !is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_SETTINGS))) + 
!is_member_of_role(GetUserId(), DEFAULT_ROLE_READ_ALL_SETTINGS))) *noshow = true; else *noshow = false; diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index 96feacc2579..8a8db0fd337 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -2327,8 +2327,8 @@ tuplesort_merge_order(int64 allowedMem) * which in turn can cause the same sort to need more runs, which makes * merging slower even if it can still be done in a single pass. Also, * high order merges are quite slow due to CPU cache effects; it can be - * faster to pay the I/O cost of a polyphase merge than to perform a single - * merge pass across many hundreds of tapes. + * faster to pay the I/O cost of a polyphase merge than to perform a + * single merge pass across many hundreds of tapes. */ mOrder = Max(mOrder, MINORDER); mOrder = Min(mOrder, MAXORDER); diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c index 5fa665eafc4..b3d4fe3ae2a 100644 --- a/src/backend/utils/time/snapmgr.c +++ b/src/backend/utils/time/snapmgr.c @@ -1137,10 +1137,9 @@ AtEOXact_Snapshot(bool isCommit, bool resetXmin) FirstSnapshotSet = false; /* - * During normal commit processing, we call - * ProcArrayEndTransaction() to reset the PgXact->xmin. That call - * happens prior to the call to AtEOXact_Snapshot(), so we need - * not touch xmin here at all. + * During normal commit processing, we call ProcArrayEndTransaction() to + * reset the PgXact->xmin. That call happens prior to the call to + * AtEOXact_Snapshot(), so we need not touch xmin here at all. */ if (resetXmin) SnapshotResetXmin();
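
For reference, the guard rewrapped in the brincostestimate() hunk reads naturally as a tiny standalone function. This is a minimal sketch, not the backend code itself; minimal_ranges and index_ranges are illustrative stand-ins for the values the estimator derives from the BRIN metadata:

#include <math.h>

/*
 * Divide-by-zero-safe range estimate, following the guard shown in the
 * diff: a near-zero absolute correlation means we assume every range
 * must be visited; otherwise the minimal estimate is scaled up and
 * capped at the total number of ranges.
 */
static double
estimate_ranges_touched(double minimal_ranges, double index_ranges,
						double correlation)
{
	double		abs_corr = fabs(correlation);

	if (abs_corr < 1.0e-10)
		return index_ranges;	/* worst case: touch everything */

	return fmin(minimal_ranges / abs_corr, index_ranges);
}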
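
The condition in the TransactionIdInRecentPast() hunk is compact enough to restate on its own: an epoch-qualified transaction ID is too old to check once it sits more than one epoch behind the current one, or exactly one epoch behind but already passed within that epoch. A self-contained restatement under that reading (parameter names are illustrative):

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch of the wraparound test quoted in the txid.c hunk; the
 * epoch/xid split mirrors the 64-bit txid-with-epoch convention used
 * there.
 */
static bool
xid_wrapped_around(uint32_t xid_epoch, uint32_t xid,
				   uint32_t now_epoch, uint32_t now_epoch_last_xid)
{
	if (xid_epoch + 1 < now_epoch)
		return true;			/* more than one epoch behind */

	if (xid_epoch + 1 == now_epoch && xid < now_epoch_last_xid)
		return true;			/* one epoch behind, already passed */

	return false;
}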
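
The varlena.c hunks reindent the ICU comparison path, where both strings are converted to UChar before being handed to the collator. The same public ICU calls can be exercised outside the backend; the locale, buffer sizes, and sample strings below are arbitrary choices for the sketch (link with -licuuc -licui18n):

#include <stdio.h>
#include <unicode/ucol.h>
#include <unicode/ustring.h>

int
main(void)
{
	UErrorCode	status = U_ZERO_ERROR;
	UCollator  *coll = ucol_open("en-US", &status);
	UChar		u1[64],
				u2[64];
	int32_t		len1,
				len2;

	if (U_FAILURE(status))
		return 1;

	/* Convert NUL-terminated UTF-8 into ICU's UChar representation. */
	u_strFromUTF8(u1, 64, &len1, "strasse", -1, &status);
	u_strFromUTF8(u2, 64, &len2, "straße", -1, &status);
	if (U_FAILURE(status))
		return 1;

	/* Prints UCOL_LESS (-1), UCOL_EQUAL (0), or UCOL_GREATER (1). */
	printf("compare: %d\n", (int) ucol_strcoll(coll, u1, len1, u2, len2));

	ucol_close(coll);
	return 0;
}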
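
backend_random.c's fallback path, visible in the hunks above, seeds a 48-bit PRNG from the clock, XORs in what little entropy the cancel key carries, and then peels four bytes out of each 32-bit pg_jrand48() result. Here is the same scheme sketched with POSIX jrand48(); cancel_key is an illustrative stand-in for MyCancelKey, and the shared-memory bookkeeping and locking are deliberately omitted:

#include <stdint.h>
#include <stdlib.h>
#include <sys/time.h>

static void
weak_random_bytes(char *dst, int len, uint32_t cancel_key)
{
	unsigned short seed[3];
	struct timeval now;

	gettimeofday(&now, NULL);

	/* Seed from the clock, then fold in the cancel key's bits. */
	seed[0] = (unsigned short) (now.tv_sec ^ cancel_key);
	seed[1] = (unsigned short) (now.tv_usec ^ (cancel_key >> 16));
	seed[2] = (unsigned short) (now.tv_usec >> 16);

	while (len > 0)
	{
		/* Each jrand48() call yields 32 bits: up to 4 output bytes. */
		uint32_t	r = (uint32_t) jrand48(seed);
		int			j;

		for (j = 0; j < 4 && len > 0; j++, len--)
		{
			*dst++ = (char) (r & 0xFF);
			r >>= 8;
		}
	}
}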
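
As a cross-check on the guc.c hunks, the defaults whose spacing was normalized work out as expected with the stock 16 MB XLOG_SEG_SIZE: 5 * (XLOG_SEG_SIZE / (1024 * 1024)) gives the familiar 80 MB default for min_wal_size, and 64 * (XLOG_SEG_SIZE / (1024 * 1024)) gives the 1024 MB (1 GB) default for max_wal_size.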