author:    Bruce Momjian <bruce@momjian.us>  2015-05-23 21:35:49 -0400
committer: Bruce Momjian <bruce@momjian.us>  2015-05-23 21:35:49 -0400
commit:    807b9e0dff663c5da875af7907a5106c0ff90673
tree:      89a0cfbd3c9801dcb04aae4ccf2fee935092f958  /src/backend/utils/adt
parent:    225892552bd3052982d2b97b749e5945ea71facc
download:  postgresql-807b9e0dff663c5da875af7907a5106c0ff90673.tar.gz
           postgresql-807b9e0dff663c5da875af7907a5106c0ff90673.zip
pgindent run for 9.5
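pgindent reflows the whole tree to PostgreSQL's layout conventions: tab-based indentation with variable declarations aligned in a column, block comments rewrapped to the line-length limit, pointer declarations written as "Type *name", and long argument lists re-wrapped. The hunks below are almost entirely whitespace changes of that kind. As a minimal sketch (not taken from this commit, identifiers invented for illustration), the style pgindent produces looks like this:

#include <stdio.h>

/*
 * Block comments are rewrapped so the text flows as a paragraph and each
 * line stays within the conventional width.
 */
static int
add_ints(int a, int b)
{
	int			result;					/* declarations aligned in a column */
	int		   *resptr = &result;		/* pointer style: "Type *name" */

	result = a + b;
	return *resptr;
}

int
main(void)
{
	printf("%d\n", add_ints(2, 3));
	return 0;
}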
Diffstat (limited to 'src/backend/utils/adt')
-rw-r--r--  src/backend/utils/adt/acl.c                    4
-rw-r--r--  src/backend/utils/adt/array_userfuncs.c       19
-rw-r--r--  src/backend/utils/adt/formatting.c            54
-rw-r--r--  src/backend/utils/adt/json.c                   6
-rw-r--r--  src/backend/utils/adt/jsonb.c                115
-rw-r--r--  src/backend/utils/adt/jsonb_util.c            11
-rw-r--r--  src/backend/utils/adt/jsonfuncs.c            103
-rw-r--r--  src/backend/utils/adt/levenshtein.c            4
-rw-r--r--  src/backend/utils/adt/lockfuncs.c              2
-rw-r--r--  src/backend/utils/adt/misc.c                   2
-rw-r--r--  src/backend/utils/adt/network_gist.c           8
-rw-r--r--  src/backend/utils/adt/numeric.c               59
-rw-r--r--  src/backend/utils/adt/pg_locale.c              2
-rw-r--r--  src/backend/utils/adt/pg_upgrade_support.c    26
-rw-r--r--  src/backend/utils/adt/pgstatfuncs.c           20
-rw-r--r--  src/backend/utils/adt/rangetypes_spgist.c     30
-rw-r--r--  src/backend/utils/adt/regexp.c                 5
-rw-r--r--  src/backend/utils/adt/regproc.c                8
-rw-r--r--  src/backend/utils/adt/ri_triggers.c           16
-rw-r--r--  src/backend/utils/adt/ruleutils.c             73
-rw-r--r--  src/backend/utils/adt/tsquery_op.c             5
-rw-r--r--  src/backend/utils/adt/txid.c                   6
-rw-r--r--  src/backend/utils/adt/varlena.c              184
-rw-r--r--  src/backend/utils/adt/xml.c                    4
24 files changed, 396 insertions, 370 deletions
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index e7aecc95c97..3ca168b4736 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -5202,7 +5202,7 @@ get_rolespec_tuple(const Node *node)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("role \"%s\" does not exist", role->rolename)));
+ errmsg("role \"%s\" does not exist", role->rolename)));
break;
case ROLESPEC_CURRENT_USER:
@@ -5221,7 +5221,7 @@ get_rolespec_tuple(const Node *node)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("role \"%s\" does not exist", "public")));
- tuple = NULL; /* make compiler happy */
+ tuple = NULL; /* make compiler happy */
default:
elog(ERROR, "unexpected role type %d", role->roletype);
diff --git a/src/backend/utils/adt/array_userfuncs.c b/src/backend/utils/adt/array_userfuncs.c
index f7b57da48e7..c14ea23dfbc 100644
--- a/src/backend/utils/adt/array_userfuncs.c
+++ b/src/backend/utils/adt/array_userfuncs.c
@@ -687,7 +687,7 @@ array_position_start(PG_FUNCTION_ARGS)
/*
* array_position_common
- * Common code for array_position and array_position_start
+ * Common code for array_position and array_position_start
*
* These are separate wrappers for the sake of opr_sanity regression test.
* They are not strict so we have to test for null inputs explicitly.
@@ -755,7 +755,8 @@ array_position_common(FunctionCallInfo fcinfo)
/*
* We arrange to look up type info for array_create_iterator only once per
- * series of calls, assuming the element type doesn't change underneath us.
+ * series of calls, assuming the element type doesn't change underneath
+ * us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
@@ -778,8 +779,8 @@ array_position_common(FunctionCallInfo fcinfo)
if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(element_type))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(element_type))));
my_extra->element_type = element_type;
fmgr_info(typentry->eq_opr_finfo.fn_oid, &my_extra->proc);
@@ -892,7 +893,8 @@ array_positions(PG_FUNCTION_ARGS)
/*
* We arrange to look up type info for array_create_iterator only once per
- * series of calls, assuming the element type doesn't change underneath us.
+ * series of calls, assuming the element type doesn't change underneath
+ * us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
@@ -915,15 +917,16 @@ array_positions(PG_FUNCTION_ARGS)
if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(element_type))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(element_type))));
my_extra->element_type = element_type;
fmgr_info(typentry->eq_opr_finfo.fn_oid, &my_extra->proc);
}
/*
- * Accumulate each array position iff the element matches the given element.
+ * Accumulate each array position iff the element matches the given
+ * element.
*/
array_iterator = array_create_iterator(array, 0, my_extra);
while (array_iterate(array_iterator, &value, &isnull))
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 84e4db8416a..5391ea0bf0b 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -920,7 +920,7 @@ typedef struct NUMProc
num_count, /* number of write digits */
num_in, /* is inside number */
num_curr, /* current position in number */
- out_pre_spaces, /* spaces before first digit */
+ out_pre_spaces, /* spaces before first digit */
read_dec, /* to_number - was read dec. point */
read_post, /* to_number - number of dec. digit */
@@ -981,7 +981,7 @@ static char *get_last_relevant_decnum(char *num);
static void NUM_numpart_from_char(NUMProc *Np, int id, int input_len);
static void NUM_numpart_to_char(NUMProc *Np, int id);
static char *NUM_processor(FormatNode *node, NUMDesc *Num, char *inout,
- char *number, int from_char_input_len, int to_char_out_pre_spaces,
+ char *number, int from_char_input_len, int to_char_out_pre_spaces,
int sign, bool is_to_char, Oid collid);
static DCHCacheEntry *DCH_cache_search(char *str);
static DCHCacheEntry *DCH_cache_getnew(char *str);
@@ -2541,14 +2541,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
break;
if (S_TM(n->suffix))
{
- char *str = str_toupper_z(localized_full_months[tm->tm_mon - 1], collid);
+ char *str = str_toupper_z(localized_full_months[tm->tm_mon - 1], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2561,14 +2561,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
break;
if (S_TM(n->suffix))
{
- char *str = str_initcap_z(localized_full_months[tm->tm_mon - 1], collid);
+ char *str = str_initcap_z(localized_full_months[tm->tm_mon - 1], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2581,14 +2581,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
break;
if (S_TM(n->suffix))
{
- char *str = str_tolower_z(localized_full_months[tm->tm_mon - 1], collid);
+ char *str = str_tolower_z(localized_full_months[tm->tm_mon - 1], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2601,14 +2601,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
break;
if (S_TM(n->suffix))
{
- char *str = str_toupper_z(localized_abbrev_months[tm->tm_mon - 1], collid);
+ char *str = str_toupper_z(localized_abbrev_months[tm->tm_mon - 1], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
strcpy(s, asc_toupper_z(months[tm->tm_mon - 1]));
@@ -2620,14 +2620,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
break;
if (S_TM(n->suffix))
{
- char *str = str_initcap_z(localized_abbrev_months[tm->tm_mon - 1], collid);
+ char *str = str_initcap_z(localized_abbrev_months[tm->tm_mon - 1], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
strcpy(s, months[tm->tm_mon - 1]);
@@ -2639,14 +2639,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
break;
if (S_TM(n->suffix))
{
- char *str = str_tolower_z(localized_abbrev_months[tm->tm_mon - 1], collid);
+ char *str = str_tolower_z(localized_abbrev_months[tm->tm_mon - 1], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
strcpy(s, asc_tolower_z(months[tm->tm_mon - 1]));
@@ -2662,14 +2662,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
INVALID_FOR_INTERVAL;
if (S_TM(n->suffix))
{
- char *str = str_toupper_z(localized_full_days[tm->tm_wday], collid);
+ char *str = str_toupper_z(localized_full_days[tm->tm_wday], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2680,14 +2680,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
INVALID_FOR_INTERVAL;
if (S_TM(n->suffix))
{
- char *str = str_initcap_z(localized_full_days[tm->tm_wday], collid);
+ char *str = str_initcap_z(localized_full_days[tm->tm_wday], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2698,14 +2698,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
INVALID_FOR_INTERVAL;
if (S_TM(n->suffix))
{
- char *str = str_tolower_z(localized_full_days[tm->tm_wday], collid);
+ char *str = str_tolower_z(localized_full_days[tm->tm_wday], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
sprintf(s, "%*s", S_FM(n->suffix) ? 0 : -9,
@@ -2716,14 +2716,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
INVALID_FOR_INTERVAL;
if (S_TM(n->suffix))
{
- char *str = str_toupper_z(localized_abbrev_days[tm->tm_wday], collid);
+ char *str = str_toupper_z(localized_abbrev_days[tm->tm_wday], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
strcpy(s, asc_toupper_z(days_short[tm->tm_wday]));
@@ -2733,14 +2733,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
INVALID_FOR_INTERVAL;
if (S_TM(n->suffix))
{
- char *str = str_initcap_z(localized_abbrev_days[tm->tm_wday], collid);
+ char *str = str_initcap_z(localized_abbrev_days[tm->tm_wday], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
strcpy(s, days_short[tm->tm_wday]);
@@ -2750,14 +2750,14 @@ DCH_to_char(FormatNode *node, bool is_interval, TmToChar *in, char *out, Oid col
INVALID_FOR_INTERVAL;
if (S_TM(n->suffix))
{
- char *str = str_tolower_z(localized_abbrev_days[tm->tm_wday], collid);
+ char *str = str_tolower_z(localized_abbrev_days[tm->tm_wday], collid);
if (strlen(str) <= (n->key->len + TM_SUFFIX_LEN) * DCH_MAX_ITEM_SIZ)
strcpy(s, str);
else
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("localized string format value too long")));
+ errmsg("localized string format value too long")));
}
else
strcpy(s, asc_tolower_z(days_short[tm->tm_wday]));
@@ -4572,7 +4572,7 @@ NUM_numpart_to_char(NUMProc *Np, int id)
static char *
NUM_processor(FormatNode *node, NUMDesc *Num, char *inout,
- char *number, int from_char_input_len, int to_char_out_pre_spaces,
+ char *number, int from_char_input_len, int to_char_out_pre_spaces,
int sign, bool is_to_char, Oid collid)
{
FormatNode *n;
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index f08e288c21d..26d38433693 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -1442,7 +1442,7 @@ datum_to_json(Datum val, bool is_null, StringInfo result,
if (DATE_NOT_FINITE(date))
{
/* we have to format infinity ourselves */
- appendStringInfoString(result,DT_INFINITY);
+ appendStringInfoString(result, DT_INFINITY);
}
else
{
@@ -1465,7 +1465,7 @@ datum_to_json(Datum val, bool is_null, StringInfo result,
if (TIMESTAMP_NOT_FINITE(timestamp))
{
/* we have to format infinity ourselves */
- appendStringInfoString(result,DT_INFINITY);
+ appendStringInfoString(result, DT_INFINITY);
}
else if (timestamp2tm(timestamp, NULL, &tm, &fsec, NULL, NULL) == 0)
{
@@ -1492,7 +1492,7 @@ datum_to_json(Datum val, bool is_null, StringInfo result,
if (TIMESTAMP_NOT_FINITE(timestamp))
{
/* we have to format infinity ourselves */
- appendStringInfoString(result,DT_INFINITY);
+ appendStringInfoString(result, DT_INFINITY);
}
else if (timestamp2tm(timestamp, &tz, &tm, &fsec, &tzn, NULL) == 0)
{
diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c
index bccc6696a4f..c0959a0ee2a 100644
--- a/src/backend/utils/adt/jsonb.c
+++ b/src/backend/utils/adt/jsonb.c
@@ -57,7 +57,7 @@ typedef enum /* type categories for datum_to_jsonb */
JSONBTYPE_COMPOSITE, /* composite */
JSONBTYPE_JSONCAST, /* something with an explicit cast to JSON */
JSONBTYPE_OTHER /* all else */
-} JsonbTypeCategory;
+} JsonbTypeCategory;
static inline Datum jsonb_from_cstring(char *json, int len);
static size_t checkStringLen(size_t len);
@@ -69,7 +69,7 @@ static void jsonb_in_object_field_start(void *pstate, char *fname, bool isnull);
static void jsonb_put_escaped_value(StringInfo out, JsonbValue *scalarVal);
static void jsonb_in_scalar(void *pstate, char *token, JsonTokenType tokentype);
static void jsonb_categorize_type(Oid typoid,
- JsonbTypeCategory * tcategory,
+ JsonbTypeCategory *tcategory,
Oid *outfuncoid);
static void composite_to_jsonb(Datum composite, JsonbInState *result);
static void array_dim_to_jsonb(JsonbInState *result, int dim, int ndims, int *dims,
@@ -77,14 +77,14 @@ static void array_dim_to_jsonb(JsonbInState *result, int dim, int ndims, int *di
JsonbTypeCategory tcategory, Oid outfuncoid);
static void array_to_jsonb_internal(Datum array, JsonbInState *result);
static void jsonb_categorize_type(Oid typoid,
- JsonbTypeCategory * tcategory,
+ JsonbTypeCategory *tcategory,
Oid *outfuncoid);
static void datum_to_jsonb(Datum val, bool is_null, JsonbInState *result,
JsonbTypeCategory tcategory, Oid outfuncoid,
bool key_scalar);
static void add_jsonb(Datum val, bool is_null, JsonbInState *result,
Oid val_type, bool key_scalar);
-static JsonbParseState * clone_parse_state(JsonbParseState * state);
+static JsonbParseState *clone_parse_state(JsonbParseState *state);
static char *JsonbToCStringWorker(StringInfo out, JsonbContainer *in, int estimated_len, bool indent);
static void add_indent(StringInfo out, bool indent, int level);
@@ -365,10 +365,12 @@ jsonb_in_scalar(void *pstate, char *token, JsonTokenType tokentype)
case JSON_TOKEN_TRUE:
v.type = jbvBool;
v.val.boolean = true;
+
break;
case JSON_TOKEN_FALSE:
v.type = jbvBool;
v.val.boolean = false;
+
break;
case JSON_TOKEN_NULL:
v.type = jbvNull;
@@ -448,15 +450,17 @@ JsonbToCStringWorker(StringInfo out, JsonbContainer *in, int estimated_len, bool
JsonbValue v;
int level = 0;
bool redo_switch = false;
+
/* If we are indenting, don't add a space after a comma */
int ispaces = indent ? 1 : 2;
+
/*
- * Don't indent the very first item. This gets set to the indent flag
- * at the bottom of the loop.
+ * Don't indent the very first item. This gets set to the indent flag at
+ * the bottom of the loop.
*/
- bool use_indent = false;
- bool raw_scalar = false;
- bool last_was_key = false;
+ bool use_indent = false;
+ bool raw_scalar = false;
+ bool last_was_key = false;
if (out == NULL)
out = makeStringInfo();
@@ -530,13 +534,13 @@ JsonbToCStringWorker(StringInfo out, JsonbContainer *in, int estimated_len, bool
appendBinaryStringInfo(out, ", ", ispaces);
first = false;
- if (! raw_scalar)
+ if (!raw_scalar)
add_indent(out, use_indent, level);
jsonb_put_escaped_value(out, &v);
break;
case WJB_END_ARRAY:
level--;
- if (! raw_scalar)
+ if (!raw_scalar)
{
add_indent(out, use_indent, level);
appendStringInfoCharMacro(out, ']');
@@ -580,11 +584,11 @@ add_indent(StringInfo out, bool indent, int level)
*
* Given the datatype OID, return its JsonbTypeCategory, as well as the type's
* output function OID. If the returned category is JSONBTYPE_JSONCAST,
- * we return the OID of the relevant cast function instead.
+ * we return the OID of the relevant cast function instead.
*/
static void
jsonb_categorize_type(Oid typoid,
- JsonbTypeCategory * tcategory,
+ JsonbTypeCategory *tcategory,
Oid *outfuncoid)
{
bool typisvarlena;
@@ -649,16 +653,16 @@ jsonb_categorize_type(Oid typoid,
*tcategory = JSONBTYPE_OTHER;
/*
- * but first let's look for a cast to json (note: not to jsonb)
- * if it's not built-in.
+ * but first let's look for a cast to json (note: not to
+ * jsonb) if it's not built-in.
*/
if (typoid >= FirstNormalObjectId)
{
- Oid castfunc;
+ Oid castfunc;
CoercionPathType ctype;
ctype = find_coercion_pathway(JSONOID, typoid,
- COERCION_EXPLICIT, &castfunc);
+ COERCION_EXPLICIT, &castfunc);
if (ctype == COERCION_PATH_FUNC && OidIsValid(castfunc))
{
*tcategory = JSONBTYPE_JSONCAST;
@@ -774,30 +778,30 @@ datum_to_jsonb(Datum val, bool is_null, JsonbInState *result,
}
}
break;
- case JSONBTYPE_DATE:
- {
- DateADT date;
- struct pg_tm tm;
- char buf[MAXDATELEN + 1];
+ case JSONBTYPE_DATE:
+ {
+ DateADT date;
+ struct pg_tm tm;
+ char buf[MAXDATELEN + 1];
- date = DatumGetDateADT(val);
- jb.type = jbvString;
+ date = DatumGetDateADT(val);
+ jb.type = jbvString;
- if (DATE_NOT_FINITE(date))
- {
- jb.val.string.len = strlen(DT_INFINITY);
- jb.val.string.val = pstrdup(DT_INFINITY);
- }
- else
- {
- j2date(date + POSTGRES_EPOCH_JDATE,
- &(tm.tm_year), &(tm.tm_mon), &(tm.tm_mday));
- EncodeDateOnly(&tm, USE_XSD_DATES, buf);
- jb.val.string.len = strlen(buf);
- jb.val.string.val = pstrdup(buf);
+ if (DATE_NOT_FINITE(date))
+ {
+ jb.val.string.len = strlen(DT_INFINITY);
+ jb.val.string.val = pstrdup(DT_INFINITY);
+ }
+ else
+ {
+ j2date(date + POSTGRES_EPOCH_JDATE,
+ &(tm.tm_year), &(tm.tm_mon), &(tm.tm_mday));
+ EncodeDateOnly(&tm, USE_XSD_DATES, buf);
+ jb.val.string.len = strlen(buf);
+ jb.val.string.val = pstrdup(buf);
+ }
}
- }
- break;
+ break;
case JSONBTYPE_TIMESTAMP:
{
Timestamp timestamp;
@@ -1534,9 +1538,11 @@ jsonb_object_two_arg(PG_FUNCTION_ARGS)
* change them.
*/
static JsonbParseState *
-clone_parse_state(JsonbParseState * state)
+clone_parse_state(JsonbParseState *state)
{
- JsonbParseState *result, *icursor, *ocursor;
+ JsonbParseState *result,
+ *icursor,
+ *ocursor;
if (state == NULL)
return NULL;
@@ -1544,14 +1550,14 @@ clone_parse_state(JsonbParseState * state)
result = palloc(sizeof(JsonbParseState));
icursor = state;
ocursor = result;
- for(;;)
+ for (;;)
{
ocursor->contVal = icursor->contVal;
ocursor->size = icursor->size;
icursor = icursor->next;
if (icursor == NULL)
break;
- ocursor->next= palloc(sizeof(JsonbParseState));
+ ocursor->next = palloc(sizeof(JsonbParseState));
ocursor = ocursor->next;
}
ocursor->next = NULL;
@@ -1652,15 +1658,16 @@ jsonb_agg_transfn(PG_FUNCTION_ARGS)
{
/* copy string values in the aggregate context */
char *buf = palloc(v.val.string.len + 1);
+
snprintf(buf, v.val.string.len + 1, "%s", v.val.string.val);
v.val.string.val = buf;
}
else if (v.type == jbvNumeric)
{
/* same for numeric */
- v.val.numeric =
+ v.val.numeric =
DatumGetNumeric(DirectFunctionCall1(numeric_uplus,
- NumericGetDatum(v.val.numeric)));
+ NumericGetDatum(v.val.numeric)));
}
result->res = pushJsonbValue(&result->parseState,
@@ -1693,15 +1700,15 @@ jsonb_agg_finalfn(PG_FUNCTION_ARGS)
/*
* We need to do a shallow clone of the argument in case the final
- * function is called more than once, so we avoid changing the argument.
- * A shallow clone is sufficient as we aren't going to change any of the
+ * function is called more than once, so we avoid changing the argument. A
+ * shallow clone is sufficient as we aren't going to change any of the
* values, just add the final array end marker.
*/
result.parseState = clone_parse_state(arg->parseState);
result.res = pushJsonbValue(&result.parseState,
- WJB_END_ARRAY, NULL);
+ WJB_END_ARRAY, NULL);
out = JsonbValueToJsonb(result.res);
@@ -1813,6 +1820,7 @@ jsonb_object_agg_transfn(PG_FUNCTION_ARGS)
{
/* copy string values in the aggregate context */
char *buf = palloc(v.val.string.len + 1);
+
snprintf(buf, v.val.string.len + 1, "%s", v.val.string.val);
v.val.string.val = buf;
}
@@ -1871,6 +1879,7 @@ jsonb_object_agg_transfn(PG_FUNCTION_ARGS)
{
/* copy string values in the aggregate context */
char *buf = palloc(v.val.string.len + 1);
+
snprintf(buf, v.val.string.len + 1, "%s", v.val.string.val);
v.val.string.val = buf;
}
@@ -1878,8 +1887,8 @@ jsonb_object_agg_transfn(PG_FUNCTION_ARGS)
{
/* same for numeric */
v.val.numeric =
- DatumGetNumeric(DirectFunctionCall1(numeric_uplus,
- NumericGetDatum(v.val.numeric)));
+ DatumGetNumeric(DirectFunctionCall1(numeric_uplus,
+ NumericGetDatum(v.val.numeric)));
}
result->res = pushJsonbValue(&result->parseState,
@@ -1900,7 +1909,7 @@ Datum
jsonb_object_agg_finalfn(PG_FUNCTION_ARGS)
{
JsonbInState *arg;
- JsonbInState result;
+ JsonbInState result;
Jsonb *out;
/* cannot be called directly because of internal-type argument */
@@ -1913,15 +1922,15 @@ jsonb_object_agg_finalfn(PG_FUNCTION_ARGS)
/*
* We need to do a shallow clone of the argument in case the final
- * function is called more than once, so we avoid changing the argument.
- * A shallow clone is sufficient as we aren't going to change any of the
+ * function is called more than once, so we avoid changing the argument. A
+ * shallow clone is sufficient as we aren't going to change any of the
* values, just add the final object end marker.
*/
result.parseState = clone_parse_state(arg->parseState);
result.res = pushJsonbValue(&result.parseState,
- WJB_END_OBJECT, NULL);
+ WJB_END_OBJECT, NULL);
out = JsonbValueToJsonb(result.res);
diff --git a/src/backend/utils/adt/jsonb_util.c b/src/backend/utils/adt/jsonb_util.c
index 974e3865249..4d733159d06 100644
--- a/src/backend/utils/adt/jsonb_util.c
+++ b/src/backend/utils/adt/jsonb_util.c
@@ -58,8 +58,8 @@ static int lengthCompareJsonbStringValue(const void *a, const void *b);
static int lengthCompareJsonbPair(const void *a, const void *b, void *arg);
static void uniqueifyJsonbObject(JsonbValue *object);
static JsonbValue *pushJsonbValueScalar(JsonbParseState **pstate,
- JsonbIteratorToken seq,
- JsonbValue *scalarVal);
+ JsonbIteratorToken seq,
+ JsonbValue *scalarVal);
/*
* Turn an in-memory JsonbValue into a Jsonb for on-disk storage.
@@ -518,7 +518,7 @@ pushJsonbValue(JsonbParseState **pstate, JsonbIteratorToken seq,
{
JsonbIterator *it;
JsonbValue *res = NULL;
- JsonbValue v;
+ JsonbValue v;
JsonbIteratorToken tok;
if (!jbval || (seq != WJB_ELEM && seq != WJB_VALUE) ||
@@ -543,7 +543,7 @@ pushJsonbValue(JsonbParseState **pstate, JsonbIteratorToken seq,
*/
static JsonbValue *
pushJsonbValueScalar(JsonbParseState **pstate, JsonbIteratorToken seq,
- JsonbValue *scalarVal)
+ JsonbValue *scalarVal)
{
JsonbValue *result = NULL;
@@ -1231,6 +1231,7 @@ JsonbHashScalarValue(const JsonbValue *scalarVal, uint32 *hash)
break;
case jbvBool:
tmp = scalarVal->val.boolean ? 0x02 : 0x04;
+
break;
default:
elog(ERROR, "invalid jsonb scalar type");
@@ -1304,7 +1305,7 @@ compareJsonbScalarValue(JsonbValue *aScalar, JsonbValue *bScalar)
case jbvBool:
if (aScalar->val.boolean == bScalar->val.boolean)
return 0;
- else if (aScalar->val.boolean > bScalar->val.boolean)
+ else if (aScalar->val.boolean >bScalar->val.boolean)
return 1;
else
return -1;
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index 9987c73784c..2f755744c13 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -110,8 +110,8 @@ static void sn_object_start(void *state);
static void sn_object_end(void *state);
static void sn_array_start(void *state);
static void sn_array_end(void *state);
-static void sn_object_field_start (void *state, char *fname, bool isnull);
-static void sn_array_element_start (void *state, bool isnull);
+static void sn_object_field_start(void *state, char *fname, bool isnull);
+static void sn_array_element_start(void *state, bool isnull);
static void sn_scalar(void *state, char *token, JsonTokenType tokentype);
/* worker function for populate_recordset and to_recordset */
@@ -126,18 +126,18 @@ static JsonbValue *findJsonbValueFromContainerLen(JsonbContainer *container,
/* functions supporting jsonb_delete, jsonb_replace and jsonb_concat */
static JsonbValue *IteratorConcat(JsonbIterator **it1, JsonbIterator **it2,
- JsonbParseState **state);
+ JsonbParseState **state);
static JsonbValue *walkJsonb(JsonbIterator **it, JsonbParseState **state, bool stop_at_level_zero);
static JsonbValue *replacePath(JsonbIterator **it, Datum *path_elems,
- bool *path_nulls, int path_len,
- JsonbParseState **st, int level, Jsonb *newval);
+ bool *path_nulls, int path_len,
+ JsonbParseState **st, int level, Jsonb *newval);
static void replacePathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
- int path_len, JsonbParseState **st, int level,
- Jsonb *newval, uint32 nelems);
+ int path_len, JsonbParseState **st, int level,
+ Jsonb *newval, uint32 nelems);
static void replacePathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
- int path_len, JsonbParseState **st, int level,
- Jsonb *newval, uint32 npairs);
-static void addJsonbToParseState(JsonbParseState **jbps, Jsonb * jb);
+ int path_len, JsonbParseState **st, int level,
+ Jsonb *newval, uint32 npairs);
+static void addJsonbToParseState(JsonbParseState **jbps, Jsonb *jb);
/* state for json_object_keys */
typedef struct OkeysState
@@ -250,10 +250,11 @@ typedef struct PopulateRecordsetState
} PopulateRecordsetState;
/* state for json_strip_nulls */
-typedef struct StripnullState{
+typedef struct StripnullState
+{
JsonLexContext *lex;
- StringInfo strval;
- bool skip_next_null;
+ StringInfo strval;
+ bool skip_next_null;
} StripnullState;
/* Turn a jsonb object into a record */
@@ -3045,6 +3046,7 @@ static void
sn_object_start(void *state)
{
StripnullState *_state = (StripnullState *) state;
+
appendStringInfoCharMacro(_state->strval, '{');
}
@@ -3052,6 +3054,7 @@ static void
sn_object_end(void *state)
{
StripnullState *_state = (StripnullState *) state;
+
appendStringInfoCharMacro(_state->strval, '}');
}
@@ -3059,6 +3062,7 @@ static void
sn_array_start(void *state)
{
StripnullState *_state = (StripnullState *) state;
+
appendStringInfoCharMacro(_state->strval, '[');
}
@@ -3066,21 +3070,21 @@ static void
sn_array_end(void *state)
{
StripnullState *_state = (StripnullState *) state;
+
appendStringInfoCharMacro(_state->strval, ']');
}
static void
-sn_object_field_start (void *state, char *fname, bool isnull)
+sn_object_field_start(void *state, char *fname, bool isnull)
{
StripnullState *_state = (StripnullState *) state;
if (isnull)
{
/*
- * The next thing must be a scalar or isnull couldn't be true,
- * so there is no danger of this state being carried down
- * into a nested object or array. The flag will be reset in the
- * scalar action.
+ * The next thing must be a scalar or isnull couldn't be true, so
+ * there is no danger of this state being carried down into a nested
+ * object or array. The flag will be reset in the scalar action.
*/
_state->skip_next_null = true;
return;
@@ -3090,16 +3094,16 @@ sn_object_field_start (void *state, char *fname, bool isnull)
appendStringInfoCharMacro(_state->strval, ',');
/*
- * Unfortunately we don't have the quoted and escaped string any more,
- * so we have to re-escape it.
+ * Unfortunately we don't have the quoted and escaped string any more, so
+ * we have to re-escape it.
*/
- escape_json(_state->strval,fname);
+ escape_json(_state->strval, fname);
appendStringInfoCharMacro(_state->strval, ':');
}
static void
-sn_array_element_start (void *state, bool isnull)
+sn_array_element_start(void *state, bool isnull)
{
StripnullState *_state = (StripnullState *) state;
@@ -3114,7 +3118,7 @@ sn_scalar(void *state, char *token, JsonTokenType tokentype)
if (_state->skip_next_null)
{
- Assert (tokentype == JSON_TOKEN_NULL);
+ Assert(tokentype == JSON_TOKEN_NULL);
_state->skip_next_null = false;
return;
}
@@ -3132,7 +3136,7 @@ Datum
json_strip_nulls(PG_FUNCTION_ARGS)
{
text *json = PG_GETARG_TEXT_P(0);
- StripnullState *state;
+ StripnullState *state;
JsonLexContext *lex;
JsonSemAction *sem;
@@ -3166,13 +3170,14 @@ json_strip_nulls(PG_FUNCTION_ARGS)
Datum
jsonb_strip_nulls(PG_FUNCTION_ARGS)
{
- Jsonb * jb = PG_GETARG_JSONB(0);
+ Jsonb *jb = PG_GETARG_JSONB(0);
JsonbIterator *it;
JsonbParseState *parseState = NULL;
JsonbValue *res = NULL;
- int type;
- JsonbValue v,k;
- bool last_was_key = false;
+ int type;
+ JsonbValue v,
+ k;
+ bool last_was_key = false;
if (JB_ROOT_IS_SCALAR(jb))
PG_RETURN_POINTER(jb);
@@ -3181,7 +3186,7 @@ jsonb_strip_nulls(PG_FUNCTION_ARGS)
while ((type = JsonbIteratorNext(&it, &v, false)) != WJB_DONE)
{
- Assert( ! (type == WJB_KEY && last_was_key));
+ Assert(!(type == WJB_KEY && last_was_key));
if (type == WJB_KEY)
{
@@ -3225,13 +3230,12 @@ jsonb_strip_nulls(PG_FUNCTION_ARGS)
* like getting jbvBinary values, so we can't just push jb as a whole.
*/
static void
-addJsonbToParseState(JsonbParseState **jbps, Jsonb * jb)
+addJsonbToParseState(JsonbParseState **jbps, Jsonb *jb)
{
-
JsonbIterator *it;
- JsonbValue *o = &(*jbps)->contVal;
- int type;
- JsonbValue v;
+ JsonbValue *o = &(*jbps)->contVal;
+ int type;
+ JsonbValue v;
it = JsonbIteratorInit(&jb->root);
@@ -3239,8 +3243,8 @@ addJsonbToParseState(JsonbParseState **jbps, Jsonb * jb)
if (JB_ROOT_IS_SCALAR(jb))
{
- (void) JsonbIteratorNext(&it, &v, false); /* skip array header */
- (void) JsonbIteratorNext(&it, &v, false); /* fetch scalar value */
+ (void) JsonbIteratorNext(&it, &v, false); /* skip array header */
+ (void) JsonbIteratorNext(&it, &v, false); /* fetch scalar value */
switch (o->type)
{
@@ -3297,8 +3301,8 @@ jsonb_concat(PG_FUNCTION_ARGS)
Jsonb *out = palloc(VARSIZE(jb1) + VARSIZE(jb2));
JsonbParseState *state = NULL;
JsonbValue *res;
- JsonbIterator *it1,
- *it2;
+ JsonbIterator *it1,
+ *it2;
/*
* If one of the jsonb is empty, just return other.
@@ -3453,7 +3457,7 @@ jsonb_delete_idx(PG_FUNCTION_ARGS)
res = pushJsonbValue(&state, r, r < WJB_BEGIN_ARRAY ? &v : NULL);
}
- Assert (res != NULL);
+ Assert(res != NULL);
PG_RETURN_JSONB(JsonbValueToJsonb(res));
}
@@ -3497,7 +3501,7 @@ jsonb_replace(PG_FUNCTION_ARGS)
res = replacePath(&it, path_elems, path_nulls, path_len, &st, 0, newval);
- Assert (res != NULL);
+ Assert(res != NULL);
PG_RETURN_JSONB(JsonbValueToJsonb(res));
}
@@ -3541,7 +3545,7 @@ jsonb_delete_path(PG_FUNCTION_ARGS)
res = replacePath(&it, path_elems, path_nulls, path_len, &st, 0, NULL);
- Assert (res != NULL);
+ Assert(res != NULL);
PG_RETURN_JSONB(JsonbValueToJsonb(res));
}
@@ -3687,7 +3691,7 @@ walkJsonb(JsonbIterator **it, JsonbParseState **state, bool stop_at_level_zero)
{
uint32 r,
level = 1;
- JsonbValue v;
+ JsonbValue v;
JsonbValue *res = NULL;
while ((r = JsonbIteratorNext(it, &v, false)) != WJB_DONE)
@@ -3758,7 +3762,7 @@ replacePath(JsonbIterator **it, Datum *path_elems,
static void
replacePathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
int path_len, JsonbParseState **st, int level,
- Jsonb *newval, uint32 nelems)
+ Jsonb *newval, uint32 nelems)
{
JsonbValue v;
int i;
@@ -3770,7 +3774,8 @@ replacePathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
for (i = 0; i < nelems; i++)
{
- int r = JsonbIteratorNext(it, &k, true);
+ int r = JsonbIteratorNext(it, &k, true);
+
Assert(r == WJB_KEY);
if (!done &&
@@ -3780,7 +3785,7 @@ replacePathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
{
if (level == path_len - 1)
{
- r = JsonbIteratorNext(it, &v, true); /* skip */
+ r = JsonbIteratorNext(it, &v, true); /* skip */
if (newval != NULL)
{
(void) pushJsonbValue(st, WJB_KEY, &k);
@@ -3801,7 +3806,7 @@ replacePathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
(void) pushJsonbValue(st, r, r < WJB_BEGIN_ARRAY ? &v : NULL);
if (r == WJB_BEGIN_ARRAY || r == WJB_BEGIN_OBJECT)
{
- int walking_level = 1;
+ int walking_level = 1;
while (walking_level != 0)
{
@@ -3859,13 +3864,13 @@ replacePathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
/* iterate over the array elements */
for (i = 0; i < npairs; i++)
{
- int r;
+ int r;
if (i == idx && level < path_len)
{
if (level == path_len - 1)
{
- r = JsonbIteratorNext(it, &v, true); /* skip */
+ r = JsonbIteratorNext(it, &v, true); /* skip */
if (newval != NULL)
addJsonbToParseState(st, newval);
}
@@ -3881,7 +3886,7 @@ replacePathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
if (r == WJB_BEGIN_ARRAY || r == WJB_BEGIN_OBJECT)
{
- int walking_level = 1;
+ int walking_level = 1;
while (walking_level != 0)
{
diff --git a/src/backend/utils/adt/levenshtein.c b/src/backend/utils/adt/levenshtein.c
index f6e2ca6452a..2c30b6c8e9d 100644
--- a/src/backend/utils/adt/levenshtein.c
+++ b/src/backend/utils/adt/levenshtein.c
@@ -96,8 +96,8 @@ varstr_levenshtein(const char *source, int slen, const char *target, int tlen,
#endif
/*
- * A common use for Levenshtein distance is to match attributes when building
- * diagnostic, user-visible messages. Restrict the size of
+ * A common use for Levenshtein distance is to match attributes when
+ * building diagnostic, user-visible messages. Restrict the size of
* MAX_LEVENSHTEIN_STRLEN at compile time so that this is guaranteed to
* work.
*/
diff --git a/src/backend/utils/adt/lockfuncs.c b/src/backend/utils/adt/lockfuncs.c
index 9d53a8b6a32..1705ff0d118 100644
--- a/src/backend/utils/adt/lockfuncs.c
+++ b/src/backend/utils/adt/lockfuncs.c
@@ -419,7 +419,7 @@ PreventAdvisoryLocksInParallelMode(void)
if (IsInParallelMode())
ereport(ERROR,
(errcode(ERRCODE_INVALID_TRANSACTION_STATE),
- errmsg("cannot use advisory locks during a parallel operation")));
+ errmsg("cannot use advisory locks during a parallel operation")));
}
/*
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index 61d609f9181..de68cdddf1d 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -187,7 +187,7 @@ pg_terminate_backend(PG_FUNCTION_ARGS)
if (r == SIGNAL_BACKEND_NOSUPERUSER)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be a superuser to terminate superuser process"))));
+ (errmsg("must be a superuser to terminate superuser process"))));
if (r == SIGNAL_BACKEND_NOPERMISSION)
ereport(ERROR,
diff --git a/src/backend/utils/adt/network_gist.c b/src/backend/utils/adt/network_gist.c
index 0fdb17f947f..756237e751c 100644
--- a/src/backend/utils/adt/network_gist.c
+++ b/src/backend/utils/adt/network_gist.c
@@ -595,10 +595,10 @@ inet_gist_decompress(PG_FUNCTION_ARGS)
Datum
inet_gist_fetch(PG_FUNCTION_ARGS)
{
- GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
- GistInetKey *key = DatumGetInetKeyP(entry->key);
- GISTENTRY *retval;
- inet *dst;
+ GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
+ GistInetKey *key = DatumGetInetKeyP(entry->key);
+ GISTENTRY *retval;
+ inet *dst;
dst = (inet *) palloc0(sizeof(inet));
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index 3cef3048eb3..7ce41b78888 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -1731,7 +1731,7 @@ numeric_abbrev_abort(int memtupcount, SortSupport ssup)
if (trace_sort)
elog(LOG,
"numeric_abbrev: aborting abbreviation at cardinality %f"
- " below threshold %f after " INT64_FORMAT " values (%d rows)",
+ " below threshold %f after " INT64_FORMAT " values (%d rows)",
abbr_card, nss->input_count / 10000.0 + 0.5,
nss->input_count, memtupcount);
#endif
@@ -3408,10 +3408,10 @@ numeric_accum_inv(PG_FUNCTION_ARGS)
#ifdef HAVE_INT128
typedef struct Int128AggState
{
- bool calcSumX2; /* if true, calculate sumX2 */
- int64 N; /* count of processed numbers */
- int128 sumX; /* sum of processed numbers */
- int128 sumX2; /* sum of squares of processed numbers */
+ bool calcSumX2; /* if true, calculate sumX2 */
+ int64 N; /* count of processed numbers */
+ int128 sumX; /* sum of processed numbers */
+ int128 sumX2; /* sum of squares of processed numbers */
} Int128AggState;
/*
@@ -3703,9 +3703,9 @@ Datum
numeric_poly_sum(PG_FUNCTION_ARGS)
{
#ifdef HAVE_INT128
- PolyNumAggState *state;
- Numeric res;
- NumericVar result;
+ PolyNumAggState *state;
+ Numeric res;
+ NumericVar result;
state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
@@ -3731,9 +3731,10 @@ Datum
numeric_poly_avg(PG_FUNCTION_ARGS)
{
#ifdef HAVE_INT128
- PolyNumAggState *state;
- NumericVar result;
- Datum countd, sumd;
+ PolyNumAggState *state;
+ NumericVar result;
+ Datum countd,
+ sumd;
state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
@@ -3962,8 +3963,8 @@ numeric_stddev_pop(PG_FUNCTION_ARGS)
#ifdef HAVE_INT128
static Numeric
numeric_poly_stddev_internal(Int128AggState *state,
- bool variance, bool sample,
- bool *is_null)
+ bool variance, bool sample,
+ bool *is_null)
{
NumericAggState numstate;
Numeric res;
@@ -3997,9 +3998,9 @@ Datum
numeric_poly_var_samp(PG_FUNCTION_ARGS)
{
#ifdef HAVE_INT128
- PolyNumAggState *state;
- Numeric res;
- bool is_null;
+ PolyNumAggState *state;
+ Numeric res;
+ bool is_null;
state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
@@ -4018,9 +4019,9 @@ Datum
numeric_poly_stddev_samp(PG_FUNCTION_ARGS)
{
#ifdef HAVE_INT128
- PolyNumAggState *state;
- Numeric res;
- bool is_null;
+ PolyNumAggState *state;
+ Numeric res;
+ bool is_null;
state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
@@ -4039,9 +4040,9 @@ Datum
numeric_poly_var_pop(PG_FUNCTION_ARGS)
{
#ifdef HAVE_INT128
- PolyNumAggState *state;
- Numeric res;
- bool is_null;
+ PolyNumAggState *state;
+ Numeric res;
+ bool is_null;
state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
@@ -4060,9 +4061,9 @@ Datum
numeric_poly_stddev_pop(PG_FUNCTION_ARGS)
{
#ifdef HAVE_INT128
- PolyNumAggState *state;
- Numeric res;
- bool is_null;
+ PolyNumAggState *state;
+ Numeric res;
+ bool is_null;
state = PG_ARGISNULL(0) ? NULL : (PolyNumAggState *) PG_GETARG_POINTER(0);
@@ -5306,10 +5307,10 @@ int64_to_numericvar(int64 val, NumericVar *var)
static void
int128_to_numericvar(int128 val, NumericVar *var)
{
- uint128 uval,
- newuval;
- NumericDigit *ptr;
- int ndigits;
+ uint128 uval,
+ newuval;
+ NumericDigit *ptr;
+ int ndigits;
/* int128 can require at most 39 decimal digits; add one for safety */
alloc_var(var, 40 / DEC_DIGITS);
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index d84969f770b..4be735e918d 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -635,7 +635,7 @@ cache_single_time(char **dst, const char *format, const struct tm * tm)
/*
* MAX_L10N_DATA is sufficient buffer space for every known locale, and
* POSIX defines no strftime() errors. (Buffer space exhaustion is not an
- * error.) An implementation might report errors (e.g. ENOMEM) by
+ * error.) An implementation might report errors (e.g. ENOMEM) by
* returning 0 (or, less plausibly, a negative value) and setting errno.
* Report errno just in case the implementation did that, but clear it in
* advance of the call so we don't emit a stale, unrelated errno.
diff --git a/src/backend/utils/adt/pg_upgrade_support.c b/src/backend/utils/adt/pg_upgrade_support.c
index d69fa53567b..883378e5240 100644
--- a/src/backend/utils/adt/pg_upgrade_support.c
+++ b/src/backend/utils/adt/pg_upgrade_support.c
@@ -20,19 +20,19 @@
#include "utils/builtins.h"
-Datum binary_upgrade_set_next_pg_type_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_array_pg_type_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_toast_pg_type_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_heap_pg_class_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_index_pg_class_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_toast_pg_class_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_pg_enum_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_set_next_pg_authid_oid(PG_FUNCTION_ARGS);
-Datum binary_upgrade_create_empty_extension(PG_FUNCTION_ARGS);
-
-
-#define CHECK_IS_BINARY_UPGRADE \
-do { \
+Datum binary_upgrade_set_next_pg_type_oid(PG_FUNCTION_ARGS);
+Datum binary_upgrade_set_next_array_pg_type_oid(PG_FUNCTION_ARGS);
+Datum binary_upgrade_set_next_toast_pg_type_oid(PG_FUNCTION_ARGS);
+Datum binary_upgrade_set_next_heap_pg_class_oid(PG_FUNCTION_ARGS);
+Datum binary_upgrade_set_next_index_pg_class_oid(PG_FUNCTION_ARGS);
+Datum binary_upgrade_set_next_toast_pg_class_oid(PG_FUNCTION_ARGS);
+Datum binary_upgrade_set_next_pg_enum_oid(PG_FUNCTION_ARGS);
+Datum binary_upgrade_set_next_pg_authid_oid(PG_FUNCTION_ARGS);
+Datum binary_upgrade_create_empty_extension(PG_FUNCTION_ARGS);
+
+
+#define CHECK_IS_BINARY_UPGRADE \
+do { \
if (!IsBinaryUpgrade) \
ereport(ERROR, \
(errcode(ERRCODE_CANT_CHANGE_RUNTIME_PARAM), \
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index 2b3778b03ad..f7c9bf63338 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -531,14 +531,14 @@ Datum
pg_stat_get_activity(PG_FUNCTION_ARGS)
{
#define PG_STAT_GET_ACTIVITY_COLS 22
- int num_backends = pgstat_fetch_stat_numbackends();
- int curr_backend;
- int pid = PG_ARGISNULL(0) ? -1 : PG_GETARG_INT32(0);
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- TupleDesc tupdesc;
- Tuplestorestate *tupstore;
- MemoryContext per_query_ctx;
- MemoryContext oldcontext;
+ int num_backends = pgstat_fetch_stat_numbackends();
+ int curr_backend;
+ int pid = PG_ARGISNULL(0) ? -1 : PG_GETARG_INT32(0);
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
+ Tuplestorestate *tupstore;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
/* check to see if caller supports us returning a tuplestore */
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
@@ -628,7 +628,7 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
if (beentry->st_ssl)
{
- values[16] = BoolGetDatum(true); /* ssl */
+ values[16] = BoolGetDatum(true); /* ssl */
values[17] = CStringGetTextDatum(beentry->st_sslstatus->ssl_version);
values[18] = CStringGetTextDatum(beentry->st_sslstatus->ssl_cipher);
values[19] = Int32GetDatum(beentry->st_sslstatus->ssl_bits);
@@ -637,7 +637,7 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
}
else
{
- values[16] = BoolGetDatum(false); /* ssl */
+ values[16] = BoolGetDatum(false); /* ssl */
nulls[17] = nulls[18] = nulls[19] = nulls[20] = nulls[21] = true;
}
diff --git a/src/backend/utils/adt/rangetypes_spgist.c b/src/backend/utils/adt/rangetypes_spgist.c
index 9281529d7a1..3b5529eb302 100644
--- a/src/backend/utils/adt/rangetypes_spgist.c
+++ b/src/backend/utils/adt/rangetypes_spgist.c
@@ -583,7 +583,7 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
*/
cmp = adjacent_inner_consistent(typcache, &lower,
&centroidUpper,
- prevCentroid ? &prevUpper : NULL);
+ prevCentroid ? &prevUpper : NULL);
if (cmp > 0)
which1 = (1 << 1) | (1 << 4);
else if (cmp < 0)
@@ -594,12 +594,12 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS)
/*
* Also search for ranges's adjacent to argument's upper
* bound. They will be found along the line adjacent to
- * (and just right of) X=upper, which falls in quadrants
- * 3 and 4, or 1 and 2.
+ * (and just right of) X=upper, which falls in quadrants 3
+ * and 4, or 1 and 2.
*/
cmp = adjacent_inner_consistent(typcache, &upper,
&centroidLower,
- prevCentroid ? &prevLower : NULL);
+ prevCentroid ? &prevLower : NULL);
if (cmp > 0)
which2 = (1 << 1) | (1 << 2);
else if (cmp < 0)
@@ -782,7 +782,7 @@ adjacent_cmp_bounds(TypeCacheEntry *typcache, RangeBound *arg,
Assert(arg->lower != centroid->lower);
- cmp = range_cmp_bounds(typcache, arg, centroid);
+ cmp = range_cmp_bounds(typcache, arg, centroid);
if (centroid->lower)
{
@@ -799,11 +799,11 @@ adjacent_cmp_bounds(TypeCacheEntry *typcache, RangeBound *arg,
* With the argument range [..., 500), the adjacent range we're
* searching for is [500, ...):
*
- * ARGUMENT CENTROID CMP ADJ
- * [..., 500) [498, ...) > (N) [500, ...) is to the right
- * [..., 500) [499, ...) = (N) [500, ...) is to the right
- * [..., 500) [500, ...) < Y [500, ...) is to the right
- * [..., 500) [501, ...) < N [500, ...) is to the left
+ * ARGUMENT CENTROID CMP ADJ
+ * [..., 500) [498, ...) > (N) [500, ...) is to the right
+ * [..., 500) [499, ...) = (N) [500, ...) is to the right
+ * [..., 500) [500, ...) < Y [500, ...) is to the right
+ * [..., 500) [501, ...) < N [500, ...) is to the left
*
* So, we must search left when the argument is smaller than, and not
* adjacent, to the centroid. Otherwise search right.
@@ -821,11 +821,11 @@ adjacent_cmp_bounds(TypeCacheEntry *typcache, RangeBound *arg,
* bounds. A matching adjacent upper bound must be *smaller* than the
* argument, but only just.
*
- * ARGUMENT CENTROID CMP ADJ
- * [500, ...) [..., 499) > (N) [..., 500) is to the right
- * [500, ...) [..., 500) > (Y) [..., 500) is to the right
- * [500, ...) [..., 501) = (N) [..., 500) is to the left
- * [500, ...) [..., 502) < (N) [..., 500) is to the left
+ * ARGUMENT CENTROID CMP ADJ
+ * [500, ...) [..., 499) > (N) [..., 500) is to the right
+ * [500, ...) [..., 500) > (Y) [..., 500) is to the right
+ * [500, ...) [..., 501) = (N) [..., 500) is to the left
+ * [500, ...) [..., 502) < (N) [..., 500) is to the left
*
* We must search left when the argument is smaller than or equal to
* the centroid. Otherwise search right. We don't need to check
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index 4f35992629e..6a0fcc20dab 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -696,7 +696,7 @@ similar_escape(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
errmsg("invalid escape string"),
- errhint("Escape string must be empty or one character.")));
+ errhint("Escape string must be empty or one character.")));
}
}
@@ -742,7 +742,8 @@ similar_escape(PG_FUNCTION_ARGS)
if (elen > 1)
{
- int mblen = pg_mblen(p);
+ int mblen = pg_mblen(p);
+
if (mblen > 1)
{
/* slow, multi-byte path */
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index f27131edd16..0bfeb5e3fd7 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -466,7 +466,7 @@ format_procedure_parts(Oid procedure_oid, List **objnames, List **objargs)
*objargs = NIL;
for (i = 0; i < nargs; i++)
{
- Oid thisargtype = procform->proargtypes.values[i];
+ Oid thisargtype = procform->proargtypes.values[i];
*objargs = lappend(*objargs, format_type_be_qualified(thisargtype));
}
@@ -1637,7 +1637,7 @@ regroleout(PG_FUNCTION_ARGS)
}
/*
- * regrolerecv - converts external binary format to regrole
+ * regrolerecv - converts external binary format to regrole
*/
Datum
regrolerecv(PG_FUNCTION_ARGS)
@@ -1647,7 +1647,7 @@ regrolerecv(PG_FUNCTION_ARGS)
}
/*
- * regrolesend - converts regrole to binary format
+ * regrolesend - converts regrole to binary format
*/
Datum
regrolesend(PG_FUNCTION_ARGS)
@@ -1680,7 +1680,7 @@ regnamespacein(PG_FUNCTION_ARGS)
strspn(nsp_name_or_oid, "0123456789") == strlen(nsp_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(nsp_name_or_oid)));
+ CStringGetDatum(nsp_name_or_oid)));
PG_RETURN_OID(result);
}
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index f6bec8be9bc..88dd3faf2d9 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -3274,7 +3274,7 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo,
{
int fnum = attnums[idx];
char *name,
- *val;
+ *val;
name = SPI_fname(tupdesc, fnum);
val = SPI_getvalue(violator, tupdesc, fnum);
@@ -3298,11 +3298,11 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo,
RelationGetRelationName(fk_rel),
NameStr(riinfo->conname)),
has_perm ?
- errdetail("Key (%s)=(%s) is not present in table \"%s\".",
- key_names.data, key_values.data,
- RelationGetRelationName(pk_rel)) :
- errdetail("Key is not present in table \"%s\".",
- RelationGetRelationName(pk_rel)),
+ errdetail("Key (%s)=(%s) is not present in table \"%s\".",
+ key_names.data, key_values.data,
+ RelationGetRelationName(pk_rel)) :
+ errdetail("Key is not present in table \"%s\".",
+ RelationGetRelationName(pk_rel)),
errtableconstraint(fk_rel, NameStr(riinfo->conname))));
else
ereport(ERROR,
@@ -3315,8 +3315,8 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo,
errdetail("Key (%s)=(%s) is still referenced from table \"%s\".",
key_names.data, key_values.data,
RelationGetRelationName(fk_rel)) :
- errdetail("Key is still referenced from table \"%s\".",
- RelationGetRelationName(fk_rel)),
+ errdetail("Key is still referenced from table \"%s\".",
+ RelationGetRelationName(fk_rel)),
errtableconstraint(fk_rel, NameStr(riinfo->conname))));
}
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 0585251d8fe..c404ae5e4c8 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -106,8 +106,8 @@ typedef struct
int wrapColumn; /* max line length, or -1 for no limit */
int indentLevel; /* current indent level for prettyprint */
bool varprefix; /* TRUE to print prefixes on Vars */
- ParseExprKind special_exprkind; /* set only for exprkinds needing */
- /* special handling */
+ ParseExprKind special_exprkind; /* set only for exprkinds needing */
+ /* special handling */
} deparse_context;
/*
@@ -350,7 +350,7 @@ static void make_ruledef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc,
static void make_viewdef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc,
int prettyFlags, int wrapColumn);
static void get_tablesample_def(TableSampleClause *tablesample,
- deparse_context *context);
+ deparse_context *context);
static void get_query_def(Query *query, StringInfo buf, List *parentnamespace,
TupleDesc resultDesc,
int prettyFlags, int wrapColumn, int startIndent);
@@ -361,8 +361,8 @@ static void get_select_query_def(Query *query, deparse_context *context,
static void get_insert_query_def(Query *query, deparse_context *context);
static void get_update_query_def(Query *query, deparse_context *context);
static void get_update_query_targetlist_def(Query *query, List *targetList,
- deparse_context *context,
- RangeTblEntry *rte);
+ deparse_context *context,
+ RangeTblEntry *rte);
static void get_delete_query_def(Query *query, deparse_context *context);
static void get_utility_query_def(Query *query, deparse_context *context);
static void get_basic_select_query(Query *query, deparse_context *context,
@@ -376,7 +376,7 @@ static Node *get_rule_sortgroupclause(Index ref, List *tlist,
bool force_colno,
deparse_context *context);
static void get_rule_groupingset(GroupingSet *gset, List *targetlist,
- bool omit_parens, deparse_context *context);
+ bool omit_parens, deparse_context *context);
static void get_rule_orderby(List *orderList, List *targetList,
bool force_colno, deparse_context *context);
static void get_rule_windowclause(Query *query, deparse_context *context);
@@ -424,9 +424,9 @@ static void printSubscripts(ArrayRef *aref, deparse_context *context);
static char *get_relation_name(Oid relid);
static char *generate_relation_name(Oid relid, List *namespaces);
static char *generate_function_name(Oid funcid, int nargs,
- List *argnames, Oid *argtypes,
- bool has_variadic, bool *use_variadic_p,
- ParseExprKind special_exprkind);
+ List *argnames, Oid *argtypes,
+ bool has_variadic, bool *use_variadic_p,
+ ParseExprKind special_exprkind);
static char *generate_operator_name(Oid operid, Oid arg1, Oid arg2);
static text *string_to_text(char *str);
static char *flatten_reloptions(Oid relid);
@@ -1963,7 +1963,7 @@ pg_get_functiondef(PG_FUNCTION_ARGS)
print_function_trftypes(&buf, proctup);
appendStringInfo(&buf, "\n LANGUAGE %s\n",
- quote_identifier(get_language_name(proc->prolang, false)));
+ quote_identifier(get_language_name(proc->prolang, false)));
/* Emit some miscellaneous options on one line */
oldlen = buf.len;
@@ -2364,13 +2364,13 @@ is_input_argument(int nth, const char *argmodes)
static void
print_function_trftypes(StringInfo buf, HeapTuple proctup)
{
- Oid *trftypes;
- int ntypes;
+ Oid *trftypes;
+ int ntypes;
ntypes = get_func_trftypes(proctup, &trftypes);
if (ntypes > 0)
{
- int i;
+ int i;
appendStringInfoString(buf, "\n TRANSFORM ");
for (i = 0; i < ntypes; i++)
@@ -4714,7 +4714,7 @@ get_basic_select_query(Query *query, deparse_context *context,
/* Add the GROUP BY clause if given */
if (query->groupClause != NULL || query->groupingSets != NULL)
{
- ParseExprKind save_exprkind;
+ ParseExprKind save_exprkind;
appendContextKeyword(context, " GROUP BY ",
-PRETTYINDENT_STD, PRETTYINDENT_STD, 1);
@@ -5045,13 +5045,13 @@ get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno,
expr = (Node *) tle->expr;
/*
- * Use column-number form if requested by caller. Otherwise, if expression
- * is a constant, force it to be dumped with an explicit cast as decoration
- * --- this is because a simple integer constant is ambiguous (and will be
- * misinterpreted by findTargetlistEntry()) if we dump it without any
- * decoration. If it's anything more complex than a simple Var, then force
- * extra parens around it, to ensure it can't be misinterpreted as a cube()
- * or rollup() construct.
+ * Use column-number form if requested by caller. Otherwise, if
+ * expression is a constant, force it to be dumped with an explicit cast
+ * as decoration --- this is because a simple integer constant is
+ * ambiguous (and will be misinterpreted by findTargetlistEntry()) if we
+ * dump it without any decoration. If it's anything more complex than a
+ * simple Var, then force extra parens around it, to ensure it can't be
+ * misinterpreted as a cube() or rollup() construct.
*/
if (force_colno)
{
@@ -5067,14 +5067,15 @@ get_rule_sortgroupclause(Index ref, List *tlist, bool force_colno,
/*
* We must force parens for function-like expressions even if
* PRETTY_PAREN is off, since those are the ones in danger of
- * misparsing. For other expressions we need to force them
- * only if PRETTY_PAREN is on, since otherwise the expression
- * will output them itself. (We can't skip the parens.)
+ * misparsing. For other expressions we need to force them only if
+ * PRETTY_PAREN is on, since otherwise the expression will output them
+ * itself. (We can't skip the parens.)
*/
- bool need_paren = (PRETTY_PAREN(context)
- || IsA(expr, FuncExpr)
- || IsA(expr, Aggref)
- || IsA(expr, WindowFunc));
+ bool need_paren = (PRETTY_PAREN(context)
+ || IsA(expr, FuncExpr)
+ ||IsA(expr, Aggref)
+ ||IsA(expr, WindowFunc));
+
if (need_paren)
appendStringInfoString(context->buf, "(");
get_rule_expr(expr, context, true);
@@ -5110,7 +5111,7 @@ get_rule_groupingset(GroupingSet *gset, List *targetlist,
foreach(l, gset->content)
{
- Index ref = lfirst_int(l);
+ Index ref = lfirst_int(l);
appendStringInfoString(buf, sep);
get_rule_sortgroupclause(ref, targetlist,
@@ -5502,7 +5503,7 @@ get_insert_query_def(Query *query, deparse_context *context)
}
else if (confl->constraint != InvalidOid)
{
- char *constraint = get_constraint_name(confl->constraint);
+ char *constraint = get_constraint_name(confl->constraint);
appendStringInfo(buf, " ON CONSTRAINT %s",
quote_qualified_identifier(NULL, constraint));
@@ -7917,9 +7918,9 @@ get_rule_expr(Node *node, deparse_context *context,
case T_InferenceElem:
{
- InferenceElem *iexpr = (InferenceElem *) node;
- bool varprefix = context->varprefix;
- bool need_parens;
+ InferenceElem *iexpr = (InferenceElem *) node;
+ bool varprefix = context->varprefix;
+ bool need_parens;
/*
* InferenceElem can only refer to target relation, so a
@@ -7948,13 +7949,13 @@ get_rule_expr(Node *node, deparse_context *context,
if (iexpr->infercollid)
appendStringInfo(buf, " COLLATE %s",
- generate_collation_name(iexpr->infercollid));
+ generate_collation_name(iexpr->infercollid));
/* Add the operator class name, if not default */
if (iexpr->inferopclass)
{
- Oid inferopclass = iexpr->inferopclass;
- Oid inferopcinputtype = get_opclass_input_type(iexpr->inferopclass);
+ Oid inferopclass = iexpr->inferopclass;
+ Oid inferopcinputtype = get_opclass_input_type(iexpr->inferopclass);
get_opclass_name(inferopclass, inferopcinputtype, buf);
}
diff --git a/src/backend/utils/adt/tsquery_op.c b/src/backend/utils/adt/tsquery_op.c
index bd6fc250990..8afd558db33 100644
--- a/src/backend/utils/adt/tsquery_op.c
+++ b/src/backend/utils/adt/tsquery_op.c
@@ -249,6 +249,7 @@ cmp_string(const void *a, const void *b)
{
const char *sa = *((const char **) a);
const char *sb = *((const char **) b);
+
return strcmp(sa, sb);
}
@@ -300,8 +301,8 @@ tsq_mcontains(PG_FUNCTION_ARGS)
result = false;
else
{
- int i;
- int j = 0;
+ int i;
+ int j = 0;
for (i = 0; i < ex_nvalues; i++)
{
diff --git a/src/backend/utils/adt/txid.c b/src/backend/utils/adt/txid.c
index 1d7bb02ca46..ce1d9abddea 100644
--- a/src/backend/utils/adt/txid.c
+++ b/src/backend/utils/adt/txid.c
@@ -142,8 +142,10 @@ cmp_txid(const void *aa, const void *bb)
static void
sort_snapshot(TxidSnapshot *snap)
{
- txid last = 0;
- int nxip, idx1, idx2;
+ txid last = 0;
+ int nxip,
+ idx1,
+ idx2;
if (snap->nxip > 1)
{
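A note on the hunk above: it only re-indents the declarations of sort_snapshot() (last, nxip, idx1, idx2), whose names suggest the usual sort-then-deduplicate pass over the snapshot's xip array using the cmp_txid() comparator named in the hunk header. A standalone sketch of that pattern, with uint64_t standing in for txid and the loop body reconstructed as an assumption (only the declarations appear in the diff), might look like:

    #include <stdint.h>
    #include <stdlib.h>

    /* comparator in the same shape as cmp_txid() */
    static int
    cmp_u64(const void *aa, const void *bb)
    {
        uint64_t a = *(const uint64_t *) aa;
        uint64_t b = *(const uint64_t *) bb;

        if (a < b)
            return -1;
        if (a > b)
            return 1;
        return 0;
    }

    /* sort ascending, drop adjacent duplicates, return the new length */
    static int
    sort_and_dedup(uint64_t *xip, int nxip)
    {
        uint64_t last;
        int      idx1,
                 idx2;

        if (nxip <= 1)
            return nxip;

        qsort(xip, nxip, sizeof(uint64_t), cmp_u64);

        last = xip[0];
        for (idx1 = 1, idx2 = 1; idx2 < nxip; idx2++)
        {
            if (xip[idx2] != last)
                last = xip[idx1++] = xip[idx2];
        }
        return idx1;
    }
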
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 5fd2bef617f..779729d724a 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -56,14 +56,15 @@ typedef struct
typedef struct
{
- char *buf1; /* 1st string, or abbreviation original string buf */
- char *buf2; /* 2nd string, or abbreviation strxfrm() buf */
- int buflen1;
- int buflen2;
- bool collate_c;
- hyperLogLogState abbr_card; /* Abbreviated key cardinality state */
- hyperLogLogState full_card; /* Full key cardinality state */
- double prop_card; /* Required cardinality proportion */
+ char *buf1; /* 1st string, or abbreviation original string
+ * buf */
+ char *buf2; /* 2nd string, or abbreviation strxfrm() buf */
+ int buflen1;
+ int buflen2;
+ bool collate_c;
+ hyperLogLogState abbr_card; /* Abbreviated key cardinality state */
+ hyperLogLogState full_card; /* Full key cardinality state */
+ double prop_card; /* Required cardinality proportion */
#ifdef HAVE_LOCALE_T
pg_locale_t locale;
#endif
@@ -82,9 +83,9 @@ typedef struct
#define PG_RETURN_UNKNOWN_P(x) PG_RETURN_POINTER(x)
static void btsortsupport_worker(SortSupport ssup, Oid collid);
-static int bttextfastcmp_c(Datum x, Datum y, SortSupport ssup);
-static int bttextfastcmp_locale(Datum x, Datum y, SortSupport ssup);
-static int bttextcmp_abbrev(Datum x, Datum y, SortSupport ssup);
+static int bttextfastcmp_c(Datum x, Datum y, SortSupport ssup);
+static int bttextfastcmp_locale(Datum x, Datum y, SortSupport ssup);
+static int bttextcmp_abbrev(Datum x, Datum y, SortSupport ssup);
static Datum bttext_abbrev_convert(Datum original, SortSupport ssup);
static bool bttext_abbrev_abort(int memtupcount, SortSupport ssup);
static int32 text_length(Datum str);
@@ -1415,8 +1416,8 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
}
/*
- * memcmp() can't tell us which of two unequal strings sorts first, but
- * it's a cheap way to tell if they're equal. Testing shows that
+ * memcmp() can't tell us which of two unequal strings sorts first,
+ * but it's a cheap way to tell if they're equal. Testing shows that
* memcmp() followed by strcoll() is only trivially slower than
* strcoll() by itself, so we don't lose much if this doesn't work out
* very often, and if it does - for example, because there are many
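The comment re-wrapped above captures a simple trick: memcmp() cannot order two unequal strings under a locale, but it is a cheap way to prove them identical before paying for strcoll(). A hedged sketch of that short-circuit for NUL-terminated buffers (the real varstr_cmp() additionally copies non-terminated varlena data into scratch buffers and has a separate Windows/UTF-8 path):

    #include <string.h>

    /* locale-aware compare with a cheap equality short-circuit */
    static int
    locale_compare(const char *s1, size_t len1, const char *s2, size_t len2)
    {
        /* byte-identical strings are equal in every collation */
        if (len1 == len2 && memcmp(s1, s2, len1) == 0)
            return 0;

        /* otherwise the collation decides the ordering */
        return strcoll(s1, s2);
    }
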
@@ -1726,9 +1727,9 @@ bttextcmp(PG_FUNCTION_ARGS)
Datum
bttextsortsupport(PG_FUNCTION_ARGS)
{
- SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
- Oid collid = ssup->ssup_collation;
- MemoryContext oldcontext;
+ SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
+ Oid collid = ssup->ssup_collation;
+ MemoryContext oldcontext;
oldcontext = MemoryContextSwitchTo(ssup->ssup_cxt);
@@ -1742,30 +1743,30 @@ bttextsortsupport(PG_FUNCTION_ARGS)
static void
btsortsupport_worker(SortSupport ssup, Oid collid)
{
- bool abbreviate = ssup->abbreviate;
- bool collate_c = false;
- TextSortSupport *tss;
+ bool abbreviate = ssup->abbreviate;
+ bool collate_c = false;
+ TextSortSupport *tss;
#ifdef HAVE_LOCALE_T
- pg_locale_t locale = 0;
+ pg_locale_t locale = 0;
#endif
/*
* If possible, set ssup->comparator to a function which can be used to
* directly compare two datums. If we can do this, we'll avoid the
- * overhead of a trip through the fmgr layer for every comparison,
- * which can be substantial.
+ * overhead of a trip through the fmgr layer for every comparison, which
+ * can be substantial.
*
- * Most typically, we'll set the comparator to bttextfastcmp_locale,
- * which uses strcoll() to perform comparisons. However, if LC_COLLATE
- * = C, we can make things quite a bit faster with bttextfastcmp_c,
- * which uses memcmp() rather than strcoll().
+ * Most typically, we'll set the comparator to bttextfastcmp_locale, which
+ * uses strcoll() to perform comparisons. However, if LC_COLLATE = C, we
+ * can make things quite a bit faster with bttextfastcmp_c, which uses
+ * memcmp() rather than strcoll().
*
- * There is a further exception on Windows. When the database encoding
- * is UTF-8 and we are not using the C collation, complex hacks are
- * required. We don't currently have a comparator that handles that case,
- * so we fall back on the slow method of having the sort code invoke
- * bttextcmp() via the fmgr trampoline.
+ * There is a further exception on Windows. When the database encoding is
+ * UTF-8 and we are not using the C collation, complex hacks are required.
+ * We don't currently have a comparator that handles that case, so we fall
+ * back on the slow method of having the sort code invoke bttextcmp() via
+ * the fmgr trampoline.
*/
if (lc_collate_is_c(collid))
{
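Beyond the re-wrapped wording, the comment above describes a one-time dispatch: pick a direct comparator when the sort starts so that every later comparison skips the fmgr trampoline. Reduced to its shape, with invented names standing in for SortSupport and the bttextfastcmp_* routines:

    #include <string.h>

    typedef int (*text_cmp_fn) (const char *a, const char *b);

    static int
    cmp_bytes(const char *a, const char *b)
    {
        /* C "collation": byte order is the defined ordering */
        return strcmp(a, b);
    }

    static int
    cmp_locale(const char *a, const char *b)
    {
        /* any other collation: ask the locale */
        return strcoll(a, b);
    }

    /* chosen once before sorting; NULL would mean "fall back to the slow fmgr path" */
    static text_cmp_fn
    choose_comparator(int collate_is_c)
    {
        return collate_is_c ? cmp_bytes : cmp_locale;
    }
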
@@ -1808,13 +1809,13 @@ btsortsupport_worker(SortSupport ssup, Oid collid)
* It's possible that there are platforms where the use of abbreviated
* keys should be disabled at compile time. Having only 4 byte datums
* could make worst-case performance drastically more likely, for example.
- * Moreover, Darwin's strxfrm() implementations is known to not effectively
- * concentrate a significant amount of entropy from the original string in
- * earlier transformed blobs. It's possible that other supported platforms
- * are similarly encumbered. However, even in those cases, the abbreviated
- * keys optimization may win, and if it doesn't, the "abort abbreviation"
- * code may rescue us. So, for now, we don't disable this anywhere on the
- * basis of performance.
+ * Moreover, Darwin's strxfrm() implementations is known to not
+ * effectively concentrate a significant amount of entropy from the
+ * original string in earlier transformed blobs. It's possible that other
+ * supported platforms are similarly encumbered. However, even in those
+ * cases, the abbreviated keys optimization may win, and if it doesn't,
+ * the "abort abbreviation" code may rescue us. So, for now, we don't
+ * disable this anywhere on the basis of performance.
*/
/*
@@ -1893,16 +1894,16 @@ bttextfastcmp_c(Datum x, Datum y, SortSupport ssup)
static int
bttextfastcmp_locale(Datum x, Datum y, SortSupport ssup)
{
- text *arg1 = DatumGetTextPP(x);
- text *arg2 = DatumGetTextPP(y);
- TextSortSupport *tss = (TextSortSupport *) ssup->ssup_extra;
+ text *arg1 = DatumGetTextPP(x);
+ text *arg2 = DatumGetTextPP(y);
+ TextSortSupport *tss = (TextSortSupport *) ssup->ssup_extra;
/* working state */
- char *a1p,
- *a2p;
- int len1,
- len2,
- result;
+ char *a1p,
+ *a2p;
+ int len1,
+ len2,
+ result;
a1p = VARDATA_ANY(arg1);
a2p = VARDATA_ANY(arg2);
@@ -1943,9 +1944,9 @@ bttextfastcmp_locale(Datum x, Datum y, SortSupport ssup)
result = strcoll(tss->buf1, tss->buf2);
/*
- * In some locales strcoll() can claim that nonidentical strings are equal.
- * Believing that would be bad news for a number of reasons, so we follow
- * Perl's lead and sort "equal" strings according to strcmp().
+ * In some locales strcoll() can claim that nonidentical strings are
+ * equal. Believing that would be bad news for a number of reasons, so we
+ * follow Perl's lead and sort "equal" strings according to strcmp().
*/
if (result == 0)
result = strcmp(tss->buf1, tss->buf2);
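The comment re-flowed just above records a real-world hazard: some locales make strcoll() report 0 for strings that are not byte-identical, so the result is tie-broken with strcmp() to keep the ordering total and deterministic. As a standalone sketch over NUL-terminated buffers:

    #include <string.h>

    /* strcoll() with a deterministic tie-break, as described above */
    static int
    compare_with_tiebreak(const char *buf1, const char *buf2)
    {
        int result = strcoll(buf1, buf2);

        /*
         * Some locales claim nonidentical strings are equal; following Perl's
         * lead, "equal" strings are then ordered by plain strcmp().
         */
        if (result == 0)
            result = strcmp(buf1, buf2);

        return result;
    }
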
@@ -1966,9 +1967,9 @@ done:
static int
bttextcmp_abbrev(Datum x, Datum y, SortSupport ssup)
{
- char *a = (char *) &x;
- char *b = (char *) &y;
- int result;
+ char *a = (char *) &x;
+ char *b = (char *) &y;
+ int result;
result = memcmp(a, b, sizeof(Datum));
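The declarations re-indented in this hunk belong to the abbreviated-key comparator, which simply memcmp()s the bytes of the two pass-by-value keys. A sketch with a plain 8-byte key type in place of Datum, assuming the key bytes were memcpy()'d from a memcmp()-orderable representation (the raw string under the C collation, or the start of a strxfrm() blob):

    #include <stdint.h>
    #include <string.h>

    typedef uint64_t abbrev_key;    /* stands in for a pass-by-value Datum */

    static int
    abbrev_cmp(abbrev_key x, abbrev_key y)
    {
        char   *a = (char *) &x;
        char   *b = (char *) &y;

        /*
         * Byte order of the stored prefix is the sort order.  A result of 0
         * only means "possibly equal": the caller must resolve such ties with
         * the full, authoritative comparator.
         */
        return memcmp(a, b, sizeof(abbrev_key));
    }
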
@@ -1989,15 +1990,15 @@ bttextcmp_abbrev(Datum x, Datum y, SortSupport ssup)
static Datum
bttext_abbrev_convert(Datum original, SortSupport ssup)
{
- TextSortSupport *tss = (TextSortSupport *) ssup->ssup_extra;
- text *authoritative = DatumGetTextPP(original);
- char *authoritative_data = VARDATA_ANY(authoritative);
+ TextSortSupport *tss = (TextSortSupport *) ssup->ssup_extra;
+ text *authoritative = DatumGetTextPP(original);
+ char *authoritative_data = VARDATA_ANY(authoritative);
/* working state */
- Datum res;
- char *pres;
- int len;
- uint32 hash;
+ Datum res;
+ char *pres;
+ int len;
+ uint32 hash;
/*
* Abbreviated key representation is a pass-by-value Datum that is treated
@@ -2009,8 +2010,8 @@ bttext_abbrev_convert(Datum original, SortSupport ssup)
len = VARSIZE_ANY_EXHDR(authoritative);
/*
- * If we're using the C collation, use memcmp(), rather than strxfrm(),
- * to abbreviate keys. The full comparator for the C locale is always
+ * If we're using the C collation, use memcmp(), rather than strxfrm(), to
+ * abbreviate keys. The full comparator for the C locale is always
* memcmp(), and we can't risk having this give a different answer.
* Besides, this should be faster, too.
*/
@@ -2018,7 +2019,7 @@ bttext_abbrev_convert(Datum original, SortSupport ssup)
memcpy(pres, authoritative_data, Min(len, sizeof(Datum)));
else
{
- Size bsize;
+ Size bsize;
/*
* We're not using the C collation, so fall back on strxfrm.
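This hunk is where the abbreviation itself is built: under the C collation the string's own leading bytes become the key (the memcpy of Min(len, sizeof(Datum)) above), otherwise the string goes through strxfrm(), whose output orders correctly under memcmp(), and the leading bytes of that blob are used. A hedged sketch with invented names, omitting the grow-and-retry loop the real code needs when the transform does not fit its buffer:

    #include <stdint.h>
    #include <string.h>

    #define KEY_BYTES sizeof(uint64_t)

    /* build an 8-byte abbreviated key for a NUL-terminated string */
    static uint64_t
    make_abbrev_key(const char *str, int collate_c)
    {
        uint64_t key = 0;           /* zero padding for short strings */
        char     blob[1024];
        size_t   len;

        if (collate_c)
        {
            /* C collation: the string's own leading bytes are the key */
            len = strlen(str);
            memcpy(&key, str, len < KEY_BYTES ? len : KEY_BYTES);
        }
        else
        {
            /* other collations: take the leading bytes of the strxfrm() blob */
            size_t bsize = strxfrm(blob, str, sizeof(blob));

            if (bsize >= sizeof(blob))
                bsize = 0;          /* blob indeterminate; real code retries with a bigger buffer */
            memcpy(&key, blob, bsize < KEY_BYTES ? bsize : KEY_BYTES);
        }
        return key;
    }

Keys built this way have to be compared with memcmp() of their bytes, as in the previous sketch, not as integers, which keeps the scheme endianness-agnostic.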
@@ -2075,8 +2076,8 @@ bttext_abbrev_convert(Datum original, SortSupport ssup)
/*
* Maintain approximate cardinality of both abbreviated keys and original,
* authoritative keys using HyperLogLog. Used as cheap insurance against
- * the worst case, where we do many string transformations for no saving in
- * full strcoll()-based comparisons. These statistics are used by
+ * the worst case, where we do many string transformations for no saving
+ * in full strcoll()-based comparisons. These statistics are used by
* bttext_abbrev_abort().
*
* First, Hash key proper, or a significant fraction of it. Mix in length
@@ -2094,8 +2095,8 @@ bttext_abbrev_convert(Datum original, SortSupport ssup)
/* Hash abbreviated key */
#if SIZEOF_DATUM == 8
{
- uint32 lohalf,
- hihalf;
+ uint32 lohalf,
+ hihalf;
lohalf = (uint32) res;
hihalf = (uint32) (res >> 32);
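The SIZEOF_DATUM == 8 block above splits the 64-bit abbreviated key into two 32-bit halves before it is fed to the HyperLogLog counter; the combine-and-hash step that follows these lines is outside the hunk, so the XOR below is an assumption about it, and the mixer is only a generic stand-in for PostgreSQL's hash_uint32():

    #include <stdint.h>

    /* generic 32-bit mixer; only a stand-in for hash_uint32() */
    static uint32_t
    mix32(uint32_t x)
    {
        x ^= x >> 16;
        x *= 0x7feb352dU;
        x ^= x >> 15;
        x *= 0x846ca68bU;
        x ^= x >> 16;
        return x;
    }

    /* reduce a 64-bit abbreviated key to one 32-bit hash input */
    static uint32_t
    hash_abbrev_key(uint64_t key)
    {
        uint32_t lohalf = (uint32_t) key;
        uint32_t hihalf = (uint32_t) (key >> 32);

        return mix32(lohalf ^ hihalf);
    }
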
@@ -2118,8 +2119,9 @@ bttext_abbrev_convert(Datum original, SortSupport ssup)
static bool
bttext_abbrev_abort(int memtupcount, SortSupport ssup)
{
- TextSortSupport *tss = (TextSortSupport *) ssup->ssup_extra;
- double abbrev_distinct, key_distinct;
+ TextSortSupport *tss = (TextSortSupport *) ssup->ssup_extra;
+ double abbrev_distinct,
+ key_distinct;
Assert(ssup->abbreviate);
@@ -2131,9 +2133,9 @@ bttext_abbrev_abort(int memtupcount, SortSupport ssup)
key_distinct = estimateHyperLogLog(&tss->full_card);
/*
- * Clamp cardinality estimates to at least one distinct value. While NULLs
- * are generally disregarded, if only NULL values were seen so far, that
- * might misrepresent costs if we failed to clamp.
+ * Clamp cardinality estimates to at least one distinct value. While
+ * NULLs are generally disregarded, if only NULL values were seen so far,
+ * that might misrepresent costs if we failed to clamp.
*/
if (abbrev_distinct <= 1.0)
abbrev_distinct = 1.0;
@@ -2149,7 +2151,7 @@ bttext_abbrev_abort(int memtupcount, SortSupport ssup)
#ifdef TRACE_SORT
if (trace_sort)
{
- double norm_abbrev_card = abbrev_distinct / (double) memtupcount;
+ double norm_abbrev_card = abbrev_distinct / (double) memtupcount;
elog(LOG, "bttext_abbrev: abbrev_distinct after %d: %f "
"(key_distinct: %f, norm_abbrev_card: %f, prop_card: %f)",
@@ -2180,26 +2182,26 @@ bttext_abbrev_abort(int memtupcount, SortSupport ssup)
* When we have exceeded 10,000 tuples, decay required cardinality
* aggressively for next call.
*
- * This is useful because the number of comparisons required on average
- * increases at a linearithmic rate, and at roughly 10,000 tuples that
- * factor will start to dominate over the linear costs of string
- * transformation (this is a conservative estimate). The decay rate is
- * chosen to be a little less aggressive than halving -- which (since
- * we're called at points at which memtupcount has doubled) would never
- * see the cost model actually abort past the first call following a
- * decay. This decay rate is mostly a precaution against a sudden,
- * violent swing in how well abbreviated cardinality tracks full key
- * cardinality. The decay also serves to prevent a marginal case from
- * being aborted too late, when too much has already been invested in
- * string transformation.
+ * This is useful because the number of comparisons required on
+ * average increases at a linearithmic rate, and at roughly 10,000
+ * tuples that factor will start to dominate over the linear costs of
+ * string transformation (this is a conservative estimate). The decay
+ * rate is chosen to be a little less aggressive than halving -- which
+ * (since we're called at points at which memtupcount has doubled)
+ * would never see the cost model actually abort past the first call
+ * following a decay. This decay rate is mostly a precaution against
+ * a sudden, violent swing in how well abbreviated cardinality tracks
+ * full key cardinality. The decay also serves to prevent a marginal
+ * case from being aborted too late, when too much has already been
+ * invested in string transformation.
*
- * It's possible for sets of several million distinct strings with mere
- * tens of thousands of distinct abbreviated keys to still benefit very
- * significantly. This will generally occur provided each abbreviated
- * key is a proxy for a roughly uniform number of the set's full keys.
- * If it isn't so, we hope to catch that early and abort. If it isn't
- * caught early, by the time the problem is apparent it's probably not
- * worth aborting.
+ * It's possible for sets of several million distinct strings with
+ * mere tens of thousands of distinct abbreviated keys to still
+ * benefit very significantly. This will generally occur provided
+ * each abbreviated key is a proxy for a roughly uniform number of the
+ * set's full keys. If it isn't so, we hope to catch that early and
+ * abort. If it isn't caught early, by the time the problem is
+ * apparent it's probably not worth aborting.
*/
if (memtupcount > 10000)
tss->prop_card *= 0.65;
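Taken together, the comments re-wrapped in these bttext_abbrev_abort() hunks spell out the abort cost model: clamp both HyperLogLog estimates to at least one distinct value, keep abbreviating while abbreviated-key cardinality stays high enough relative to full-key cardinality, and decay prop_card by the 0.65 factor above once more than 10,000 tuples have been seen. A compressed sketch of that decision; the struct and function names, the 100-tuple floor, and the exact form of the comparison against prop_card are assumptions filled in around the lines actually shown here, and the real function also consults trace_sort logging:

    #include <stdbool.h>

    typedef struct
    {
        double abbrev_distinct;     /* HyperLogLog estimate over abbreviated keys */
        double key_distinct;        /* HyperLogLog estimate over full keys */
        double prop_card;           /* required cardinality proportion */
    } abbrev_state;

    /* return true if abbreviation should be abandoned for this sort */
    static bool
    should_abort_abbreviation(abbrev_state *st, int memtupcount)
    {
        if (memtupcount < 100)
            return false;           /* too early to judge (floor is an assumption) */

        /* clamp: an all-NULL prefix must not look like zero distinct values */
        if (st->abbrev_distinct <= 1.0)
            st->abbrev_distinct = 1.0;
        if (st->key_distinct <= 1.0)
            st->key_distinct = 1.0;

        /* abbreviated keys still discriminate well enough: carry on */
        if (st->abbrev_distinct > st->key_distinct * st->prop_card)
        {
            /* past 10,000 tuples, demand proportionally less next time */
            if (memtupcount > 10000)
                st->prop_card *= 0.65;
            return false;
        }

        /* abbreviation is not paying for itself */
        return true;
    }
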
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index 8bb7144ecf9..99bc832ab82 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -1405,7 +1405,7 @@ xml_parse(text *data, XmlOptionType xmloption_arg, bool preserve_whitespace,
if (*(utf8string + count))
{
res_code = xmlParseBalancedChunkMemory(doc, NULL, NULL, 0,
- utf8string + count, NULL);
+ utf8string + count, NULL);
if (res_code != 0 || xmlerrcxt->err_occurred)
xml_ereport(xmlerrcxt, ERROR, ERRCODE_INVALID_XML_CONTENT,
"invalid XML content");
@@ -3697,7 +3697,7 @@ xml_xpathobjtoxmlarray(xmlXPathObjectPtr xpathobj,
for (i = 0; i < result; i++)
{
datum = PointerGetDatum(xml_xmlnodetoxmltype(xpathobj->nodesetval->nodeTab[i],
- xmlerrcxt));
+ xmlerrcxt));
(void) accumArrayResult(astate, datum, false,
XMLOID, CurrentMemoryContext);
}