author     Bruce Momjian <bruce@momjian.us>  2014-05-06 12:12:18 -0400
committer  Bruce Momjian <bruce@momjian.us>  2014-05-06 12:12:18 -0400
commit     0a7832005792fa6dad171f9cadb8d587fe0dd800 (patch)
tree       365cfc42c521a52607e41394b08ef44d338d8fc1 /src/backend/utils/adt
parent     fb85cd4320414c3f6e9c8bc69ec944200ae1e493 (diff)
pgindent run for 9.4
This includes removing tabs after periods in C comments, which was applied to back branches, so this change should not affect backpatching.
Diffstat (limited to 'src/backend/utils/adt')
-rw-r--r--  src/backend/utils/adt/acl.c  21
-rw-r--r--  src/backend/utils/adt/array_selfuncs.c  20
-rw-r--r--  src/backend/utils/adt/array_typanalyze.c  14
-rw-r--r--  src/backend/utils/adt/array_userfuncs.c  4
-rw-r--r--  src/backend/utils/adt/arrayfuncs.c  27
-rw-r--r--  src/backend/utils/adt/arrayutils.c  2
-rw-r--r--  src/backend/utils/adt/cash.c  92
-rw-r--r--  src/backend/utils/adt/char.c  2
-rw-r--r--  src/backend/utils/adt/date.c  6
-rw-r--r--  src/backend/utils/adt/datetime.c  34
-rw-r--r--  src/backend/utils/adt/datum.c  2
-rw-r--r--  src/backend/utils/adt/dbsize.c  3
-rw-r--r--  src/backend/utils/adt/domains.c  8
-rw-r--r--  src/backend/utils/adt/float.c  6
-rw-r--r--  src/backend/utils/adt/format_type.c  6
-rw-r--r--  src/backend/utils/adt/formatting.c  4
-rw-r--r--  src/backend/utils/adt/geo_ops.c  11
-rw-r--r--  src/backend/utils/adt/geo_selfuncs.c  4
-rw-r--r--  src/backend/utils/adt/inet_cidr_ntop.c  2
-rw-r--r--  src/backend/utils/adt/int.c  30
-rw-r--r--  src/backend/utils/adt/int8.c  44
-rw-r--r--  src/backend/utils/adt/json.c  12
-rw-r--r--  src/backend/utils/adt/jsonb.c  26
-rw-r--r--  src/backend/utils/adt/jsonb_gin.c  80
-rw-r--r--  src/backend/utils/adt/jsonb_op.c  13
-rw-r--r--  src/backend/utils/adt/jsonb_util.c  257
-rw-r--r--  src/backend/utils/adt/jsonfuncs.c  35
-rw-r--r--  src/backend/utils/adt/like.c  4
-rw-r--r--  src/backend/utils/adt/misc.c  14
-rw-r--r--  src/backend/utils/adt/nabstime.c  36
-rw-r--r--  src/backend/utils/adt/network.c  12
-rw-r--r--  src/backend/utils/adt/network_gist.c  10
-rw-r--r--  src/backend/utils/adt/numeric.c  63
-rw-r--r--  src/backend/utils/adt/oid.c  2
-rw-r--r--  src/backend/utils/adt/orderedsetaggs.c  8
-rw-r--r--  src/backend/utils/adt/pg_locale.c  29
-rw-r--r--  src/backend/utils/adt/pg_lsn.c  43
-rw-r--r--  src/backend/utils/adt/pg_lzcompress.c  12
-rw-r--r--  src/backend/utils/adt/pgstatfuncs.c  2
-rw-r--r--  src/backend/utils/adt/pseudotypes.c  6
-rw-r--r--  src/backend/utils/adt/rangetypes.c  8
-rw-r--r--  src/backend/utils/adt/rangetypes_gist.c  8
-rw-r--r--  src/backend/utils/adt/regexp.c  6
-rw-r--r--  src/backend/utils/adt/regproc.c  26
-rw-r--r--  src/backend/utils/adt/ri_triggers.c  14
-rw-r--r--  src/backend/utils/adt/rowtypes.c  31
-rw-r--r--  src/backend/utils/adt/ruleutils.c  98
-rw-r--r--  src/backend/utils/adt/selfuncs.c  143
-rw-r--r--  src/backend/utils/adt/timestamp.c  36
-rw-r--r--  src/backend/utils/adt/tsginidx.c  13
-rw-r--r--  src/backend/utils/adt/varchar.c  4
-rw-r--r--  src/backend/utils/adt/varlena.c  42
-rw-r--r--  src/backend/utils/adt/xml.c  64
53 files changed, 775 insertions(+), 724 deletions(-)
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index dfac1243a40..38cd5b89c99 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -123,7 +123,7 @@ static Oid get_role_oid_or_public(const char *rolname);
/*
* getid
* Consumes the first alphanumeric string (identifier) found in string
- * 's', ignoring any leading white space. If it finds a double quote
+ * 's', ignoring any leading white space. If it finds a double quote
* it returns the word inside the quotes.
*
* RETURNS:
@@ -229,7 +229,7 @@ putid(char *p, const char *s)
*
* RETURNS:
* the string position in 's' immediately following the ACL
- * specification. Also:
+ * specification. Also:
* - loads the structure pointed to by 'aip' with the appropriate
* UID/GID, id type identifier and mode type values.
*/
@@ -837,7 +837,7 @@ acldefault(GrantObjectType objtype, Oid ownerId)
/*
- * SQL-accessible version of acldefault(). Hackish mapping from "char" type to
+ * SQL-accessible version of acldefault(). Hackish mapping from "char" type to
* ACL_OBJECT_* values, but it's only used in the information schema, not
* documented for general use.
*/
@@ -1006,7 +1006,7 @@ aclupdate(const Acl *old_acl, const AclItem *mod_aip,
}
/*
- * Remove abandoned privileges (cascading revoke). Currently we can only
+ * Remove abandoned privileges (cascading revoke). Currently we can only
* handle this when the grantee is not PUBLIC.
*/
if ((old_goptions & ~new_goptions) != 0)
@@ -1072,7 +1072,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
/*
* If the old ACL contained any references to the new owner, then we may
- * now have generated an ACL containing duplicate entries. Find them and
+ * now have generated an ACL containing duplicate entries. Find them and
* merge them so that there are not duplicates. (This is relatively
* expensive since we use a stupid O(N^2) algorithm, but it's unlikely to
* be the normal case.)
@@ -1083,7 +1083,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
* remove privilege-free entries, should there be any in the input.) dst
* is the next output slot, targ is the currently considered input slot
* (always >= dst), and src scans entries to the right of targ looking for
- * duplicates. Once an entry has been emitted to dst it is known
+ * duplicates. Once an entry has been emitted to dst it is known
* duplicate-free and need not be considered anymore.
*/
if (newpresent)
@@ -2468,7 +2468,7 @@ column_privilege_check(Oid tableoid, AttrNumber attnum,
* existence of the pg_class row before risking calling pg_class_aclcheck.
* Note: it might seem there's a race condition against concurrent DROP,
* but really it's safe because there will be no syscache flush between
- * here and there. So if we see the row in the syscache, so will
+ * here and there. So if we see the row in the syscache, so will
* pg_class_aclcheck.
*/
if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(tableoid)))
@@ -4904,7 +4904,7 @@ is_member_of_role_nosuper(Oid member, Oid role)
/*
- * Is member an admin of role? That is, is member the role itself (subject to
+ * Is member an admin of role? That is, is member the role itself (subject to
* restrictions below), a member (directly or indirectly) WITH ADMIN OPTION,
* or a superuser?
*/
@@ -4919,6 +4919,7 @@ is_admin_of_role(Oid member, Oid role)
return true;
if (member == role)
+
/*
* A role can admin itself when it matches the session user and we're
* outside any security-restricted operation, SECURITY DEFINER or
@@ -5015,14 +5016,14 @@ count_one_bits(AclMode mask)
* The grantor must always be either the object owner or some role that has
* been explicitly granted grant options. This ensures that all granted
* privileges appear to flow from the object owner, and there are never
- * multiple "original sources" of a privilege. Therefore, if the would-be
+ * multiple "original sources" of a privilege. Therefore, if the would-be
* grantor is a member of a role that has the needed grant options, we have
* to do the grant as that role instead.
*
* It is possible that the would-be grantor is a member of several roles
* that have different subsets of the desired grant options, but no one
* role has 'em all. In this case we pick a role with the largest number
- * of desired options. Ties are broken in favor of closer ancestors.
+ * of desired options. Ties are broken in favor of closer ancestors.
*
* roleId: the role attempting to do the GRANT/REVOKE
* privileges: the privileges to be granted/revoked
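
The grantor-selection rule above picks, among the would-be grantor's roles, one holding the largest number of the desired grant options. A minimal sketch of the bit counting that ranking needs, assuming a 32-bit AclMode for self-containment (the real count_one_bits() in acl.c may differ in detail):

    #include <stdint.h>

    typedef uint32_t AclMode;   /* assumption: one privilege per bit */

    /* Count the number of privilege bits set in a mask. */
    static int
    count_one_bits(AclMode mask)
    {
        int nbits = 0;

        while (mask != 0)
        {
            nbits += mask & 1;
            mask >>= 1;
        }
        return nbits;
    }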
diff --git a/src/backend/utils/adt/array_selfuncs.c b/src/backend/utils/adt/array_selfuncs.c
index 20eb358a620..170a28a067c 100644
--- a/src/backend/utils/adt/array_selfuncs.c
+++ b/src/backend/utils/adt/array_selfuncs.c
@@ -524,7 +524,7 @@ mcelem_array_selec(ArrayType *array, TypeCacheEntry *typentry,
/*
* Estimate selectivity of "column @> const" and "column && const" based on
- * most common element statistics. This estimation assumes element
+ * most common element statistics. This estimation assumes element
* occurrences are independent.
*
* mcelem (of length nmcelem) and numbers (of length nnumbers) are from
@@ -689,7 +689,7 @@ mcelem_array_contain_overlap_selec(Datum *mcelem, int nmcelem,
* In the "column @> const" and "column && const" cases, we usually have a
* "const" with low number of elements (otherwise we have selectivity close
* to 0 or 1 respectively). That's why the effect of dependence related
- * to distinct element count distribution is negligible there. In the
+ * to distinct element count distribution is negligible there. In the
* "column <@ const" case, number of elements is usually high (otherwise we
* have selectivity close to 0). That's why we should do a correction with
* the array distinct element count distribution here.
@@ -848,7 +848,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
/*
* The presence of many distinct rare elements materially decreases
* selectivity. Use the Poisson distribution to estimate the probability
- * of a column value having zero occurrences of such elements. See above
+ * of a column value having zero occurrences of such elements. See above
* for the definition of "rest".
*/
mult *= exp(-rest);
@@ -856,7 +856,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
/*----------
* Using the distinct element count histogram requires
* O(unique_nitems * (nmcelem + unique_nitems))
- * operations. Beyond a certain computational cost threshold, it's
+ * operations. Beyond a certain computational cost threshold, it's
* reasonable to sacrifice accuracy for decreased planning time. We limit
* the number of operations to EFFORT * nmcelem; since nmcelem is limited
* by the column's statistics target, the work done is user-controllable.
@@ -868,7 +868,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
* elements to start with, we'd have to remove any discarded elements'
* frequencies from "mult", but since this is only an approximation
* anyway, we don't bother with that. Therefore it's sufficient to qsort
- * elem_selec[] and take the largest elements. (They will no longer match
+ * elem_selec[] and take the largest elements. (They will no longer match
* up with the elements of array_data[], but we don't care.)
*----------
*/
@@ -878,7 +878,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
unique_nitems > EFFORT * nmcelem / (nmcelem + unique_nitems))
{
/*
- * Use the quadratic formula to solve for largest allowable N. We
+ * Use the quadratic formula to solve for largest allowable N. We
* have A = 1, B = nmcelem, C = - EFFORT * nmcelem.
*/
double b = (double) nmcelem;
@@ -953,7 +953,7 @@ calc_hist(const float4 *hist, int nhist, int n)
/*
* frac is a probability contribution for each interval between histogram
- * values. We have nhist - 1 intervals, so contribution of each one will
+ * values. We have nhist - 1 intervals, so contribution of each one will
* be 1 / (nhist - 1).
*/
frac = 1.0f / ((float) (nhist - 1));
@@ -1020,8 +1020,8 @@ calc_hist(const float4 *hist, int nhist, int n)
* "rest" is the sum of the probabilities of all low-probability events not
* included in p.
*
- * Imagine matrix M of size (n + 1) x (m + 1). Element M[i,j] denotes the
- * probability that exactly j of first i events occur. Obviously M[0,0] = 1.
+ * Imagine matrix M of size (n + 1) x (m + 1). Element M[i,j] denotes the
+ * probability that exactly j of first i events occur. Obviously M[0,0] = 1.
* For any constant j, each increment of i increases the probability iff the
* event occurs. So, by the law of total probability:
* M[i,j] = M[i - 1, j] * (1 - p[i]) + M[i - 1, j - 1] * p[i]
@@ -1143,7 +1143,7 @@ floor_log2(uint32 n)
/*
* find_next_mcelem binary-searches a most common elements array, starting
- * from *index, for the first member >= value. It saves the position of the
+ * from *index, for the first member >= value. It saves the position of the
* match into *index and returns true if it's an exact match. (Note: we
* assume the mcelem elements are distinct so there can't be more than one
* exact match.)
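
The matrix comment above (M[i,j] = M[i-1,j] * (1 - p[i]) + M[i-1,j-1] * p[i], with M[0,0] = 1) is a textbook dynamic program for the count of independent events that occur. A self-contained sketch with illustrative names, keeping one row and updating it in place rather than storing the whole matrix:

    #include <stdlib.h>

    /*
     * Given independent event probabilities p[0..n-1], return a malloc'd
     * array m[0..n] where m[j] is the probability that exactly j of the
     * n events occur, via M[i,j] = M[i-1,j]*(1-p[i]) + M[i-1,j-1]*p[i].
     */
    static double *
    exact_count_distribution(const double *p, int n)
    {
        double *m = calloc(n + 1, sizeof(double));

        if (m == NULL)
            return NULL;
        m[0] = 1.0;
        for (int i = 0; i < n; i++)
        {
            /* walk j downward so m[j-1] still holds the previous row */
            for (int j = i + 1; j > 0; j--)
                m[j] = m[j] * (1.0 - p[i]) + m[j - 1] * p[i];
            m[0] *= 1.0 - p[i];
        }
        return m;
    }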
diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c
index 70aba1b5d8d..4d7e9c311fb 100644
--- a/src/backend/utils/adt/array_typanalyze.c
+++ b/src/backend/utils/adt/array_typanalyze.c
@@ -160,13 +160,13 @@ array_typanalyze(PG_FUNCTION_ARGS)
* compute_array_stats() -- compute statistics for a array column
*
* This function computes statistics useful for determining selectivity of
- * the array operators <@, &&, and @>. It is invoked by ANALYZE via the
+ * the array operators <@, &&, and @>. It is invoked by ANALYZE via the
* compute_stats hook after sample rows have been collected.
*
* We also invoke the standard compute_stats function, which will compute
* "scalar" statistics relevant to the btree-style array comparison operators.
* However, exact duplicates of an entire array may be rare despite many
- * arrays sharing individual elements. This especially afflicts long arrays,
+ * arrays sharing individual elements. This especially afflicts long arrays,
* which are also liable to lack all scalar statistics due to the low
* WIDTH_THRESHOLD used in analyze.c. So, in addition to the standard stats,
* we find the most common array elements and compute a histogram of distinct
@@ -201,7 +201,7 @@ array_typanalyze(PG_FUNCTION_ARGS)
* In the absence of a principled basis for other particular values, we
* follow ts_typanalyze() and use parameters s = 0.07/K, epsilon = s/10.
* But we leave out the correction for stopwords, which do not apply to
- * arrays. These parameters give bucket width w = K/0.007 and maximum
+ * arrays. These parameters give bucket width w = K/0.007 and maximum
* expected hashtable size of about 1000 * K.
*
* Elements may repeat within an array. Since duplicates do not change the
@@ -463,7 +463,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* Construct an array of the interesting hashtable items, that is,
- * those meeting the cutoff frequency (s - epsilon)*N. Also identify
+ * those meeting the cutoff frequency (s - epsilon)*N. Also identify
* the minimum and maximum frequencies among these items.
*
* Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
@@ -498,7 +498,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* If we obtained more elements than we really want, get rid of those
- * with least frequencies. The easiest way is to qsort the array into
+ * with least frequencies. The easiest way is to qsort the array into
* descending frequency order and truncate the array.
*/
if (num_mcelem < track_len)
@@ -532,7 +532,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* We sorted statistics on the element value, but we want to be
* able to find the minimal and maximal frequencies without going
- * through all the values. We also want the frequency of null
+ * through all the values. We also want the frequency of null
* elements. Store these three values at the end of mcelem_freqs.
*/
mcelem_values = (Datum *) palloc(num_mcelem * sizeof(Datum));
@@ -623,7 +623,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
* (compare the histogram-making loop in compute_scalar_stats()).
* But instead of that we have the sorted_count_items[] array,
* which holds unique DEC values with their frequencies (that is,
- * a run-length-compressed version of the full array). So we
+ * a run-length-compressed version of the full array). So we
* control advancing through sorted_count_items[] with the
* variable "frac", which is defined as (x - y) * (num_hist - 1),
* where x is the index in the notional DECs array corresponding
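
The parameters quoted above (s = 0.07/K, epsilon = s/10) fix the cutoff frequency (s - epsilon) * N and the bucket width 1/epsilon = K/0.007 used while collecting most common elements. A quick worked sketch of that arithmetic, with illustrative values only:

    #include <stdio.h>

    int
    main(void)
    {
        int     K = 100;          /* statistics target */
        double  N = 1000000.0;    /* total element occurrences seen */
        double  s = 0.07 / K;     /* minimum frequency of interest */
        double  epsilon = s / 10; /* allowed counting error */
        double  bucket_width = 1.0 / epsilon;   /* = K / 0.007 */
        double  cutoff = (s - epsilon) * N;     /* pruning threshold */

        printf("bucket width %.0f, cutoff count %.0f\n",
               bucket_width, cutoff);
        return 0;
    }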
diff --git a/src/backend/utils/adt/array_userfuncs.c b/src/backend/utils/adt/array_userfuncs.c
index c62e3fb1765..831466dec91 100644
--- a/src/backend/utils/adt/array_userfuncs.c
+++ b/src/backend/utils/adt/array_userfuncs.c
@@ -502,7 +502,7 @@ array_agg_transfn(PG_FUNCTION_ARGS)
/*
* The transition type for array_agg() is declared to be "internal", which
- * is a pass-by-value type the same size as a pointer. So we can safely
+ * is a pass-by-value type the same size as a pointer. So we can safely
* pass the ArrayBuildState pointer through nodeAgg.c's machinations.
*/
PG_RETURN_POINTER(state);
@@ -517,7 +517,7 @@ array_agg_finalfn(PG_FUNCTION_ARGS)
int lbs[1];
/*
- * Test for null before Asserting we are in right context. This is to
+ * Test for null before Asserting we are in right context. This is to
* avoid possible Assert failure in 8.4beta installations, where it is
* possible for users to create NULL constants of type internal.
*/
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index 91df1842427..f8e94ec3652 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -694,7 +694,7 @@ ReadArrayStr(char *arrayStr,
/*
* We have to remove " and \ characters to create a clean item value to
- * pass to the datatype input routine. We overwrite each item value
+ * pass to the datatype input routine. We overwrite each item value
* in-place within arrayStr to do this. srcptr is the current scan point,
* and dstptr is where we are copying to.
*
@@ -894,7 +894,7 @@ ReadArrayStr(char *arrayStr,
* referenced by Datums after copying them.
*
* If the input data is of varlena type, the caller must have ensured that
- * the values are not toasted. (Doing it here doesn't work since the
+ * the values are not toasted. (Doing it here doesn't work since the
* caller has already allocated space for the array...)
*/
static void
@@ -1747,6 +1747,7 @@ Datum
array_cardinality(PG_FUNCTION_ARGS)
{
ArrayType *v = PG_GETARG_ARRAYTYPE_P(0);
+
PG_RETURN_INT32(ArrayGetNItems(ARR_NDIM(v), ARR_DIMS(v)));
}
@@ -2002,7 +2003,7 @@ array_get_slice(ArrayType *array,
memcpy(ARR_DIMS(newarray), span, ndim * sizeof(int));
/*
- * Lower bounds of the new array are set to 1. Formerly (before 7.3) we
+ * Lower bounds of the new array are set to 1. Formerly (before 7.3) we
* copied the given lowerIndx values ... but that seems confusing.
*/
newlb = ARR_LBOUND(newarray);
@@ -2634,7 +2635,7 @@ array_set_slice(ArrayType *array,
/*
* array_map()
*
- * Map an array through an arbitrary function. Return a new array with
+ * Map an array through an arbitrary function. Return a new array with
* same dimensions and each source element transformed by fn(). Each
* source element is passed as the first argument to fn(); additional
* arguments to be passed to fn() can be specified by the caller.
@@ -2649,9 +2650,9 @@ array_set_slice(ArrayType *array,
* first argument position initially holds the input array value.
* * inpType: OID of element type of input array. This must be the same as,
* or binary-compatible with, the first argument type of fn().
- * * retType: OID of element type of output array. This must be the same as,
+ * * retType: OID of element type of output array. This must be the same as,
* or binary-compatible with, the result type of fn().
- * * amstate: workspace for array_map. Must be zeroed by caller before
+ * * amstate: workspace for array_map. Must be zeroed by caller before
* first call, and not touched after that.
*
* It is legitimate to pass a freshly-zeroed ArrayMapState on each call,
@@ -3505,7 +3506,7 @@ array_cmp(FunctionCallInfo fcinfo)
/*
* If arrays contain same data (up to end of shorter one), apply
- * additional rules to sort by dimensionality. The relative significance
+ * additional rules to sort by dimensionality. The relative significance
* of the different bits of information is historical; mainly we just care
* that we don't say "equal" for arrays of different dimensionality.
*/
@@ -3767,7 +3768,7 @@ array_contain_compare(ArrayType *array1, ArrayType *array2, Oid collation,
/*
* We assume that the comparison operator is strict, so a NULL can't
- * match anything. XXX this diverges from the "NULL=NULL" behavior of
+ * match anything. XXX this diverges from the "NULL=NULL" behavior of
* array_eq, should we act like that?
*/
if (isnull1)
@@ -4258,7 +4259,7 @@ array_copy(char *destptr, int nitems,
*
* Note: this could certainly be optimized using standard bitblt methods.
* However, it's not clear that the typical Postgres array has enough elements
- * to make it worth worrying too much. For the moment, KISS.
+ * to make it worth worrying too much. For the moment, KISS.
*/
void
array_bitmap_copy(bits8 *destbitmap, int destoffset,
@@ -4455,7 +4456,7 @@ array_extract_slice(ArrayType *newarray,
* Insert a slice into an array.
*
* ndim/dim[]/lb[] are dimensions of the original array. A new array with
- * those same dimensions is to be constructed. destArray must already
+ * those same dimensions is to be constructed. destArray must already
* have been allocated and its header initialized.
*
* st[]/endp[] identify the slice to be replaced. Elements within the slice
@@ -5123,7 +5124,7 @@ array_unnest(PG_FUNCTION_ARGS)
* Get the array value and detoast if needed. We can't do this
* earlier because if we have to detoast, we want the detoasted copy
* to be in multi_call_memory_ctx, so it will go away when we're done
- * and not before. (If no detoast happens, we assume the originally
+ * and not before. (If no detoast happens, we assume the originally
* passed array will stick around till then.)
*/
arr = PG_GETARG_ARRAYTYPE_P(0);
@@ -5199,7 +5200,7 @@ array_unnest(PG_FUNCTION_ARGS)
*
* Find all array entries matching (not distinct from) search/search_isnull,
* and delete them if remove is true, else replace them with
- * replace/replace_isnull. Comparisons are done using the specified
+ * replace/replace_isnull. Comparisons are done using the specified
* collation. fcinfo is passed only for caching purposes.
*/
static ArrayType *
@@ -5271,7 +5272,7 @@ array_replace_internal(ArrayType *array,
typalign = typentry->typalign;
/*
- * Detoast values if they are toasted. The replacement value must be
+ * Detoast values if they are toasted. The replacement value must be
* detoasted for insertion into the result array, while detoasting the
* search value only once saves cycles.
*/
diff --git a/src/backend/utils/adt/arrayutils.c b/src/backend/utils/adt/arrayutils.c
index 5b1afa0d8f2..477ccadfb85 100644
--- a/src/backend/utils/adt/arrayutils.c
+++ b/src/backend/utils/adt/arrayutils.c
@@ -193,7 +193,7 @@ mda_next_tuple(int n, int *curr, const int *span)
/*
* ArrayGetIntegerTypmods: verify that argument is a 1-D cstring array,
- * and get the contents converted to integers. Returns a palloc'd array
+ * and get the contents converted to integers. Returns a palloc'd array
* and places the length at *n.
*/
int32 *
diff --git a/src/backend/utils/adt/cash.c b/src/backend/utils/adt/cash.c
index 015875875be..6aba20de851 100644
--- a/src/backend/utils/adt/cash.c
+++ b/src/backend/utils/adt/cash.c
@@ -382,79 +382,79 @@ cash_out(PG_FUNCTION_ARGS)
case 0:
if (cs_precedes)
result = psprintf("(%s%s%s)",
- csymbol,
- (sep_by_space == 1) ? " " : "",
- bufptr);
+ csymbol,
+ (sep_by_space == 1) ? " " : "",
+ bufptr);
else
result = psprintf("(%s%s%s)",
- bufptr,
- (sep_by_space == 1) ? " " : "",
- csymbol);
+ bufptr,
+ (sep_by_space == 1) ? " " : "",
+ csymbol);
break;
case 1:
default:
if (cs_precedes)
result = psprintf("%s%s%s%s%s",
- signsymbol,
- (sep_by_space == 2) ? " " : "",
- csymbol,
- (sep_by_space == 1) ? " " : "",
- bufptr);
+ signsymbol,
+ (sep_by_space == 2) ? " " : "",
+ csymbol,
+ (sep_by_space == 1) ? " " : "",
+ bufptr);
else
result = psprintf("%s%s%s%s%s",
- signsymbol,
- (sep_by_space == 2) ? " " : "",
- bufptr,
- (sep_by_space == 1) ? " " : "",
- csymbol);
+ signsymbol,
+ (sep_by_space == 2) ? " " : "",
+ bufptr,
+ (sep_by_space == 1) ? " " : "",
+ csymbol);
break;
case 2:
if (cs_precedes)
result = psprintf("%s%s%s%s%s",
- csymbol,
- (sep_by_space == 1) ? " " : "",
- bufptr,
- (sep_by_space == 2) ? " " : "",
- signsymbol);
+ csymbol,
+ (sep_by_space == 1) ? " " : "",
+ bufptr,
+ (sep_by_space == 2) ? " " : "",
+ signsymbol);
else
result = psprintf("%s%s%s%s%s",
- bufptr,
- (sep_by_space == 1) ? " " : "",
- csymbol,
- (sep_by_space == 2) ? " " : "",
- signsymbol);
+ bufptr,
+ (sep_by_space == 1) ? " " : "",
+ csymbol,
+ (sep_by_space == 2) ? " " : "",
+ signsymbol);
break;
case 3:
if (cs_precedes)
result = psprintf("%s%s%s%s%s",
- signsymbol,
- (sep_by_space == 2) ? " " : "",
- csymbol,
- (sep_by_space == 1) ? " " : "",
- bufptr);
+ signsymbol,
+ (sep_by_space == 2) ? " " : "",
+ csymbol,
+ (sep_by_space == 1) ? " " : "",
+ bufptr);
else
result = psprintf("%s%s%s%s%s",
- bufptr,
- (sep_by_space == 1) ? " " : "",
- signsymbol,
- (sep_by_space == 2) ? " " : "",
- csymbol);
+ bufptr,
+ (sep_by_space == 1) ? " " : "",
+ signsymbol,
+ (sep_by_space == 2) ? " " : "",
+ csymbol);
break;
case 4:
if (cs_precedes)
result = psprintf("%s%s%s%s%s",
- csymbol,
- (sep_by_space == 2) ? " " : "",
- signsymbol,
- (sep_by_space == 1) ? " " : "",
- bufptr);
+ csymbol,
+ (sep_by_space == 2) ? " " : "",
+ signsymbol,
+ (sep_by_space == 1) ? " " : "",
+ bufptr);
else
result = psprintf("%s%s%s%s%s",
- bufptr,
- (sep_by_space == 1) ? " " : "",
- csymbol,
- (sep_by_space == 2) ? " " : "",
- signsymbol);
+ bufptr,
+ (sep_by_space == 1) ? " " : "",
+ csymbol,
+ (sep_by_space == 2) ? " " : "",
+ signsymbol);
break;
}
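
The reindented switch above implements the POSIX localeconv() sign_posn convention. For reference, the layout each case produces, shown with assumed values csymbol = "$", signsymbol = "-", cs_precedes = 1, and sep_by_space = 0 (illustrations, not output from the commit; cs_precedes = 0 mirrors these with the symbol after the amount, per the else branches):

    static const char *const sign_posn_examples[] = {
        "($123)",               /* 0: value and symbol in parentheses  */
        "-$123",                /* 1: sign precedes everything (default) */
        "$123-",                /* 2: sign follows everything          */
        "-$123",                /* 3: sign immediately precedes symbol */
        "$-123",                /* 4: sign immediately follows symbol  */
    };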
diff --git a/src/backend/utils/adt/char.c b/src/backend/utils/adt/char.c
index 99191e1d90c..e0d974eea5a 100644
--- a/src/backend/utils/adt/char.c
+++ b/src/backend/utils/adt/char.c
@@ -59,7 +59,7 @@ charout(PG_FUNCTION_ARGS)
* charrecv - converts external binary format to char
*
* The external representation is one byte, with no character set
- * conversion. This is somewhat dubious, perhaps, but in many
+ * conversion. This is somewhat dubious, perhaps, but in many
* cases people use char for a 1-byte binary type.
*/
Datum
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index 06cc0cda0f0..073104d4bac 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -1358,7 +1358,7 @@ AdjustTimeForTypmod(TimeADT *time, int32 typmod)
* Note: this round-to-nearest code is not completely consistent about
* rounding values that are exactly halfway between integral values.
* On most platforms, rint() will implement round-to-nearest-even, but
- * the integer code always rounds up (away from zero). Is it worth
+ * the integer code always rounds up (away from zero). Is it worth
* trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
@@ -1706,7 +1706,7 @@ time_interval(PG_FUNCTION_ARGS)
* Convert interval to time data type.
*
* This is defined as producing the fractional-day portion of the interval.
- * Therefore, we can just ignore the months field. It is not real clear
+ * Therefore, we can just ignore the months field. It is not real clear
* what to do with negative intervals, but we choose to subtract the floor,
* so that, say, '-2 hours' becomes '22:00:00'.
*/
@@ -2695,7 +2695,7 @@ timetz_zone(PG_FUNCTION_ARGS)
pg_tz *tzp;
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
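
The AdjustTimeForTypmod hunk above notes that rint() usually rounds halfway cases to nearest-even while the integer path rounds away from zero. A standalone illustration of the discrepancy, assuming the IEEE default rounding mode:

    #include <math.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* round-to-nearest-even: prints 2 under the default mode */
        printf("rint(2.5) = %.0f\n", rint(2.5));

        /* "integer code" style: add 0.5 and truncate, prints 3 */
        printf("(long) (2.5 + 0.5) = %ld\n", (long) (2.5 + 0.5));
        return 0;
    }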
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index d200437e628..7632d1177e6 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -351,7 +351,7 @@ j2date(int jd, int *year, int *month, int *day)
* j2day - convert Julian date to day-of-week (0..6 == Sun..Sat)
*
* Note: various places use the locution j2day(date - 1) to produce a
- * result according to the convention 0..6 = Mon..Sun. This is a bit of
+ * result according to the convention 0..6 = Mon..Sun. This is a bit of
* a crock, but will work as long as the computation here is just a modulo.
*/
int
@@ -819,10 +819,11 @@ DecodeDateTime(char **field, int *ftype, int nf,
switch (ftype[i])
{
case DTK_DATE:
+
/*
- * Integral julian day with attached time zone?
- * All other forms with JD will be separated into
- * distinct fields, so we handle just this case here.
+ * Integral julian day with attached time zone? All other
+ * forms with JD will be separated into distinct fields, so we
+ * handle just this case here.
*/
if (ptype == DTK_JULIAN)
{
@@ -849,6 +850,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
ptype = 0;
break;
}
+
/*
* Already have a date? Then this might be a time zone name
* with embedded punctuation (e.g. "America/New_York") or a
@@ -1158,17 +1160,18 @@ DecodeDateTime(char **field, int *ftype, int nf,
if (dterr < 0)
return dterr;
}
+
/*
* Is this a YMD or HMS specification, or a year number?
* YMD and HMS are required to be six digits or more, so
* if it is 5 digits, it is a year. If it is six or more
* more digits, we assume it is YMD or HMS unless no date
- * and no time values have been specified. This forces
- * 6+ digit years to be at the end of the string, or to use
+ * and no time values have been specified. This forces 6+
+ * digit years to be at the end of the string, or to use
* the ISO date specification.
*/
else if (flen >= 6 && (!(fmask & DTK_DATE_M) ||
- !(fmask & DTK_TIME_M)))
+ !(fmask & DTK_TIME_M)))
{
dterr = DecodeNumberField(flen, field[i], fmask,
&tmask, tm,
@@ -2490,7 +2493,7 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
/*
* Nothing so far; make a decision about what we think the input
- * is. There used to be lots of heuristics here, but the
+ * is. There used to be lots of heuristics here, but the
* consensus now is to be paranoid. It *must* be either
* YYYY-MM-DD (with a more-than-two-digit year field), or the
* field order defined by DateOrder.
@@ -2523,9 +2526,9 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
{
/*
* We are at the first numeric field of a date that included a
- * textual month name. We want to support the variants
+ * textual month name. We want to support the variants
* MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as unambiguous
- * inputs. We will also accept MON-DD-YY or DD-MON-YY in
+ * inputs. We will also accept MON-DD-YY or DD-MON-YY in
* either DMY or MDY modes, as well as YY-MON-DD in YMD mode.
*/
if (flen >= 3 || DateOrder == DATEORDER_YMD)
@@ -2654,6 +2657,7 @@ DecodeNumberField(int len, char *str, int fmask,
if (len >= 6)
{
*tmask = DTK_DATE_M;
+
/*
* Start from end and consider first 2 as Day, next 2 as Month,
* and the rest as Year.
@@ -2890,7 +2894,7 @@ DecodeInterval(char **field, int *ftype, int nf, int range,
Assert(*field[i] == '-' || *field[i] == '+');
/*
- * Check for signed hh:mm or hh:mm:ss. If so, process exactly
+ * Check for signed hh:mm or hh:mm:ss. If so, process exactly
* like DTK_TIME case above, plus handling the sign.
*/
if (strchr(field[i] + 1, ':') != NULL &&
@@ -2978,8 +2982,8 @@ DecodeInterval(char **field, int *ftype, int nf, int range,
type = DTK_MONTH;
if (*field[i] == '-')
val2 = -val2;
- if (((double)val * MONTHS_PER_YEAR + val2) > INT_MAX ||
- ((double)val * MONTHS_PER_YEAR + val2) < INT_MIN)
+ if (((double) val * MONTHS_PER_YEAR + val2) > INT_MAX ||
+ ((double) val * MONTHS_PER_YEAR + val2) < INT_MIN)
return DTERR_FIELD_OVERFLOW;
val = val * MONTHS_PER_YEAR + val2;
fval = 0;
@@ -3327,7 +3331,7 @@ DecodeISO8601Interval(char *str,
return dterr;
/*
- * Note: we could step off the end of the string here. Code below
+ * Note: we could step off the end of the string here. Code below
* *must* exit the loop if unit == '\0'.
*/
unit = *str++;
@@ -4130,7 +4134,7 @@ EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str)
/*
* We've been burnt by stupid errors in the ordering of the datetkn tables
- * once too often. Arrange to check them during postmaster start.
+ * once too often. Arrange to check them during postmaster start.
*/
static bool
CheckDateTokenTable(const char *tablename, const datetkn *base, int nel)
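
The DecodeInterval hunk above (((double) val * MONTHS_PER_YEAR + val2) compared with INT_MAX/INT_MIN) performs its range check in double precision, where the multiply-add cannot overflow, before committing to int arithmetic. A minimal sketch of that idiom; the wrapper name is hypothetical:

    #include <limits.h>
    #include <stdbool.h>

    #define MONTHS_PER_YEAR 12

    /* Fold years and months into total months, reporting overflow. */
    static bool
    year_month_to_months(int val, int val2, int *months)
    {
        if (((double) val * MONTHS_PER_YEAR + val2) > INT_MAX ||
            ((double) val * MONTHS_PER_YEAR + val2) < INT_MIN)
            return false;       /* would overflow int */
        *months = val * MONTHS_PER_YEAR + val2;
        return true;
    }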
diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c
index 4b5d65c5ff5..a79d5d587cc 100644
--- a/src/backend/utils/adt/datum.c
+++ b/src/backend/utils/adt/datum.c
@@ -181,7 +181,7 @@ datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen)
/*
* just compare the two datums. NOTE: just comparing "len" bytes will
* not do the work, because we do not know how these bytes are aligned
- * inside the "Datum". We assume instead that any given datatype is
+ * inside the "Datum". We assume instead that any given datatype is
* consistent about how it fills extraneous bits in the Datum.
*/
res = (value1 == value2);
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index 68ab0e19061..8c663379ae7 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -358,6 +358,7 @@ calculate_toast_table_size(Oid toastrelid)
foreach(lc, indexlist)
{
Relation toastIdxRel;
+
toastIdxRel = relation_open(lfirst_oid(lc),
AccessShareLock);
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
@@ -689,7 +690,7 @@ pg_size_pretty_numeric(PG_FUNCTION_ARGS)
* This is expected to be used in queries like
* SELECT pg_relation_filenode(oid) FROM pg_class;
* That leads to a couple of choices. We work from the pg_class row alone
- * rather than actually opening each relation, for efficiency. We don't
+ * rather than actually opening each relation, for efficiency. We don't
* fail if we can't find the relation --- some rows might be visible in
* the query's MVCC snapshot even though the relations have been dropped.
* (Note: we could avoid using the catcache, but there's little point
diff --git a/src/backend/utils/adt/domains.c b/src/backend/utils/adt/domains.c
index 515481805a7..bbca5d68baf 100644
--- a/src/backend/utils/adt/domains.c
+++ b/src/backend/utils/adt/domains.c
@@ -12,11 +12,11 @@
* The overhead required for constraint checking can be high, since examining
* the catalogs to discover the constraints for a given domain is not cheap.
* We have three mechanisms for minimizing this cost:
- * 1. In a nest of domains, we flatten the checking of all the levels
+ * 1. In a nest of domains, we flatten the checking of all the levels
* into just one operation.
- * 2. We cache the list of constraint items in the FmgrInfo struct
+ * 2. We cache the list of constraint items in the FmgrInfo struct
* passed by the caller.
- * 3. If there are CHECK constraints, we cache a standalone ExprContext
+ * 3. If there are CHECK constraints, we cache a standalone ExprContext
* to evaluate them in.
*
*
@@ -311,7 +311,7 @@ domain_recv(PG_FUNCTION_ARGS)
/*
* domain_check - check that a datum satisfies the constraints of a
- * domain. extra and mcxt can be passed if they are available from,
+ * domain. extra and mcxt can be passed if they are available from,
* say, a FmgrInfo structure, or they can be NULL, in which case the
* setup is repeated for each call.
*/
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index 774267ed5d2..41b3eaa2135 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -276,7 +276,7 @@ float4in(PG_FUNCTION_ARGS)
/*
* Some platforms return ERANGE for denormalized numbers (those
* that are not zero, but are too close to zero to have full
- * precision). We'd prefer not to throw error for that, so try to
+ * precision). We'd prefer not to throw error for that, so try to
* detect whether it's a "real" out-of-range condition by checking
* to see if the result is zero or huge.
*/
@@ -475,7 +475,7 @@ float8in(PG_FUNCTION_ARGS)
/*
* Some platforms return ERANGE for denormalized numbers (those
* that are not zero, but are too close to zero to have full
- * precision). We'd prefer not to throw error for that, so try to
+ * precision). We'd prefer not to throw error for that, so try to
* detect whether it's a "real" out-of-range condition by checking
* to see if the result is zero or huge.
*/
@@ -2054,7 +2054,7 @@ float8_stddev_samp(PG_FUNCTION_ARGS)
* in that order. Note that Y is the first argument to the aggregates!
*
* It might seem attractive to optimize this by having multiple accumulator
- * functions that only calculate the sums actually needed. But on most
+ * functions that only calculate the sums actually needed. But on most
* modern machines, a couple of extra floating-point multiplies will be
* insignificant compared to the other per-tuple overhead, so I've chosen
* to minimize code space instead.
diff --git a/src/backend/utils/adt/format_type.c b/src/backend/utils/adt/format_type.c
index 5b75d34dcbc..e1763a37642 100644
--- a/src/backend/utils/adt/format_type.c
+++ b/src/backend/utils/adt/format_type.c
@@ -44,14 +44,14 @@ static char *printTypmod(const char *typname, int32 typmod, Oid typmodout);
* double quoted if it contains funny characters or matches a keyword.
*
* If typemod is NULL then we are formatting a type name in a context where
- * no typemod is available, eg a function argument or result type. This
+ * no typemod is available, eg a function argument or result type. This
* yields a slightly different result from specifying typemod = -1 in some
* cases. Given typemod = -1 we feel compelled to produce an output that
* the parser will interpret as having typemod -1, so that pg_dump will
- * produce CREATE TABLE commands that recreate the original state. But
+ * produce CREATE TABLE commands that recreate the original state. But
* given NULL typemod, we assume that the parser's interpretation of
* typemod doesn't matter, and so we are willing to output a slightly
- * "prettier" representation of the same type. For example, type = bpchar
+ * "prettier" representation of the same type. For example, type = bpchar
* and typemod = NULL gets you "character", whereas typemod = -1 gets you
* "bpchar" --- the former will be interpreted as character(1) by the
* parser, which does not yield typemod -1.
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 2099ad0c302..15bcefd0021 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1823,7 +1823,7 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
/*
* Note: we assume that toupper_l()/tolower_l() will not be so broken
- * as to need guard tests. When using the default collation, we apply
+ * as to need guard tests. When using the default collation, we apply
* the traditional Postgres behavior that forces ASCII-style treatment
* of I/i, but in non-default collations you get exactly what the
* collation says.
@@ -3629,7 +3629,7 @@ do_to_timestamp(text *date_txt, text *fmt,
{
/*
* The month and day field have not been set, so we use the
- * day-of-year field to populate them. Depending on the date mode,
+ * day-of-year field to populate them. Depending on the date mode,
* this field may be interpreted as a Gregorian day-of-year, or an ISO
* week date day-of-year.
*/
diff --git a/src/backend/utils/adt/geo_ops.c b/src/backend/utils/adt/geo_ops.c
index 72cb4e991fc..54391fd7aba 100644
--- a/src/backend/utils/adt/geo_ops.c
+++ b/src/backend/utils/adt/geo_ops.c
@@ -32,7 +32,10 @@
* Internal routines
*/
-enum path_delim { PATH_NONE, PATH_OPEN, PATH_CLOSED };
+enum path_delim
+{
+ PATH_NONE, PATH_OPEN, PATH_CLOSED
+};
static int point_inside(Point *p, int npts, Point *plist);
static int lseg_crossing(double x, double y, double px, double py);
@@ -1024,7 +1027,7 @@ line_out(PG_FUNCTION_ARGS)
Datum
line_recv(PG_FUNCTION_ARGS)
{
- StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+ StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
LINE *line;
line = (LINE *) palloc(sizeof(LINE));
@@ -1386,7 +1389,7 @@ path_in(PG_FUNCTION_ARGS)
}
base_size = sizeof(path->p[0]) * npts;
- size = offsetof(PATH, p[0]) + base_size;
+ size = offsetof(PATH, p[0]) +base_size;
/* Check for integer overflow */
if (base_size / npts != sizeof(path->p[0]) || size <= base_size)
@@ -3448,7 +3451,7 @@ poly_in(PG_FUNCTION_ARGS)
errmsg("invalid input syntax for type polygon: \"%s\"", str)));
base_size = sizeof(poly->p[0]) * npts;
- size = offsetof(POLYGON, p[0]) + base_size;
+ size = offsetof(POLYGON, p[0]) +base_size;
/* Check for integer overflow */
if (base_size / npts != sizeof(poly->p[0]) || size <= base_size)
diff --git a/src/backend/utils/adt/geo_selfuncs.c b/src/backend/utils/adt/geo_selfuncs.c
index 99ca8edbd04..4a2156d4669 100644
--- a/src/backend/utils/adt/geo_selfuncs.c
+++ b/src/backend/utils/adt/geo_selfuncs.c
@@ -22,7 +22,7 @@
/*
- * Selectivity functions for geometric operators. These are bogus -- unless
+ * Selectivity functions for geometric operators. These are bogus -- unless
* we know the actual key distribution in the index, we can't make a good
* prediction of the selectivity of these operators.
*
@@ -34,7 +34,7 @@
* In general, GiST needs to search multiple subtrees in order to guarantee
* that all occurrences of the same key have been found. Because of this,
* the estimated cost for scanning the index ought to be higher than the
- * output selectivity would indicate. gistcostestimate(), over in selfuncs.c,
+ * output selectivity would indicate. gistcostestimate(), over in selfuncs.c,
* ought to be adjusted accordingly --- but until we can generate somewhat
* realistic numbers here, it hardly matters...
*/
diff --git a/src/backend/utils/adt/inet_cidr_ntop.c b/src/backend/utils/adt/inet_cidr_ntop.c
index 5f2a3d361d9..d33534ec173 100644
--- a/src/backend/utils/adt/inet_cidr_ntop.c
+++ b/src/backend/utils/adt/inet_cidr_ntop.c
@@ -196,7 +196,7 @@ inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
}
else
{
- /* Copy src to private buffer. Zero host part. */
+ /* Copy src to private buffer. Zero host part. */
p = (bits + 7) / 8;
memcpy(inbuf, src, p);
memset(inbuf + p, 0, 16 - p);
diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c
index 669355e4540..b8f56e5c2e1 100644
--- a/src/backend/utils/adt/int.c
+++ b/src/backend/utils/adt/int.c
@@ -642,7 +642,7 @@ int4pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -663,8 +663,8 @@ int4mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -684,7 +684,7 @@ int4mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There are two cases where this fails: arg2 = 0 (which cannot
* overflow) and arg1 = INT_MIN, arg2 = -1 (where the division itself will
* overflow and thus incorrectly match).
@@ -794,7 +794,7 @@ int2pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -815,8 +815,8 @@ int2mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -897,7 +897,7 @@ int24pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -918,8 +918,8 @@ int24mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -939,7 +939,7 @@ int24mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
@@ -985,7 +985,7 @@ int42pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -1006,8 +1006,8 @@ int42mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1027,7 +1027,7 @@ int42mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
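
The int.c hunks above keep restating two overflow-check idioms: a sign comparison for addition and subtraction, and a trial division for multiplication. A condensed sketch of both, assuming wraparound on signed overflow (PostgreSQL adds -fwrapv on compilers that support it to guarantee this):

    #include <limits.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SAMESIGN(a,b)   (((a) < 0) == ((b) < 0))

    /* Addition: a sum of same-signed operands must keep that sign. */
    static bool
    add_overflows(int32_t a, int32_t b)
    {
        int32_t r = a + b;      /* wraps rather than trapping */

        return SAMESIGN(a, b) && !SAMESIGN(r, a);
    }

    /*
     * Multiplication: check that result / b recovers a.  The trial
     * division misfires only for b == 0 (which cannot overflow) and for
     * a == INT32_MIN, b == -1 (where the division itself overflows), so
     * both are handled first.
     */
    static bool
    mul_overflows(int32_t a, int32_t b)
    {
        int32_t r = a * b;      /* wraps rather than trapping */

        if (b == 0)
            return false;
        if (a == INT32_MIN && b == -1)
            return true;
        return r / b != a;
    }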
diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c
index e78eb2a2022..96146e0fda0 100644
--- a/src/backend/utils/adt/int8.c
+++ b/src/backend/utils/adt/int8.c
@@ -73,7 +73,7 @@ scanint8(const char *str, bool errorOK, int64 *result)
ptr++;
/*
- * Do an explicit check for INT64_MIN. Ugly though this is, it's
+ * Do an explicit check for INT64_MIN. Ugly though this is, it's
* cleaner than trying to get the loop below to handle it portably.
*/
if (strncmp(ptr, "9223372036854775808", 19) == 0)
@@ -519,7 +519,7 @@ int8pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -540,8 +540,8 @@ int8mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -561,7 +561,7 @@ int8mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There are two cases where this fails: arg2 = 0 (which cannot
* overflow) and arg1 = INT64_MIN, arg2 = -1 (where the division itself
* will overflow and thus incorrectly match).
@@ -764,7 +764,7 @@ int8dec(PG_FUNCTION_ARGS)
/*
* These functions are exactly like int8inc/int8dec but are used for
- * aggregates that count only non-null values. Since the functions are
+ * aggregates that count only non-null values. Since the functions are
* declared strict, the null checks happen before we ever get here, and all we
* need do is increment the state value. We could actually make these pg_proc
* entries point right at int8inc/int8dec, but then the opr_sanity regression
@@ -824,7 +824,7 @@ int84pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -845,8 +845,8 @@ int84mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -866,7 +866,7 @@ int84mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
@@ -933,7 +933,7 @@ int48pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -954,8 +954,8 @@ int48mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -975,7 +975,7 @@ int48mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
@@ -1021,7 +1021,7 @@ int82pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -1042,8 +1042,8 @@ int82mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1063,7 +1063,7 @@ int82mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
@@ -1130,7 +1130,7 @@ int28pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -1151,8 +1151,8 @@ int28mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1172,7 +1172,7 @@ int28mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
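
scanint8's special case above exists because |INT64_MIN| exceeds INT64_MAX, so a parse-positive-then-negate digit loop cannot reach it without overflowing on the last digit. A sketch of the check, simplified from the hunk:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Recognize the one magnitude with no positive int64 representation. */
    static bool
    parse_int64_min(const char *ptr, bool neg, int64_t *result)
    {
        if (neg && strncmp(ptr, "9223372036854775808", 19) == 0 &&
            ptr[19] == '\0')
        {
            *result = INT64_MIN;
            return true;
        }
        return false;           /* defer to the ordinary digit loop */
    }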
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index c34a1bb50be..16f4eccc06e 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -598,10 +598,10 @@ json_lex(JsonLexContext *lex)
/*
* We're not dealing with a string, number, legal
- * punctuation mark, or end of string. The only legal
+ * punctuation mark, or end of string. The only legal
* tokens we might find here are true, false, and null,
* but for error reporting purposes we scan until we see a
- * non-alphanumeric character. That way, we can report
+ * non-alphanumeric character. That way, we can report
* the whole word as an unexpected token, rather than just
* some unintuitive prefix thereof.
*/
@@ -897,12 +897,12 @@ json_lex_string(JsonLexContext *lex)
* begin with a '0'.
*
* (3) An optional decimal part, consisting of a period ('.') followed by
- * one or more digits. (Note: While this part can be omitted
+ * one or more digits. (Note: While this part can be omitted
* completely, it's not OK to have only the decimal point without
* any digits afterwards.)
*
* (4) An optional exponent part, consisting of 'e' or 'E', optionally
- * followed by '+' or '-', followed by one or more digits. (Note:
+ * followed by '+' or '-', followed by one or more digits. (Note:
* As with the decimal part, if 'e' or 'E' is present, it must be
* followed by at least one digit.)
*
@@ -980,7 +980,7 @@ json_lex_number(JsonLexContext *lex, char *s, bool *num_err)
}
/*
- * Check for trailing garbage. As in json_lex(), any alphanumeric stuff
+ * Check for trailing garbage. As in json_lex(), any alphanumeric stuff
* here should be considered part of the token for error-reporting
* purposes.
*/
@@ -1805,7 +1805,7 @@ json_agg_transfn(PG_FUNCTION_ARGS)
/*
* The transition type for array_agg() is declared to be "internal", which
- * is a pass-by-value type the same size as a pointer. So we can safely
+ * is a pass-by-value type the same size as a pointer. So we can safely
* pass the ArrayBuildState pointer through nodeAgg.c's machinations.
*/
PG_RETURN_POINTER(state);
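
The number-grammar comment above tracks the standard JSON definition. A few concrete literals, assuming those rules (illustrations, not test data from the commit):

    static const char *const json_number_ok[] = {
        "0", "1", "10.25", "2e10", "1.5E+3",
    };
    static const char *const json_number_bad[] = {
        "1.",   /* decimal point must be followed by digits   */
        ".5",   /* integer part is required                   */
        "1e",   /* exponent marker must be followed by digits */
        "01",   /* nonzero numbers may not begin with '0'     */
    };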
diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c
index 781ab66ef2c..cf5d6f23264 100644
--- a/src/backend/utils/adt/jsonb.c
+++ b/src/backend/utils/adt/jsonb.c
@@ -22,7 +22,7 @@ typedef struct JsonbInState
{
JsonbParseState *parseState;
JsonbValue *res;
-} JsonbInState;
+} JsonbInState;
static inline Datum jsonb_from_cstring(char *json, int len);
static size_t checkStringLen(size_t len);
@@ -31,9 +31,9 @@ static void jsonb_in_object_end(void *pstate);
static void jsonb_in_array_start(void *pstate);
static void jsonb_in_array_end(void *pstate);
static void jsonb_in_object_field_start(void *pstate, char *fname, bool isnull);
-static void jsonb_put_escaped_value(StringInfo out, JsonbValue * scalarVal);
+static void jsonb_put_escaped_value(StringInfo out, JsonbValue *scalarVal);
static void jsonb_in_scalar(void *pstate, char *token, JsonTokenType tokentype);
-char *JsonbToCString(StringInfo out, char *in, int estimated_len);
+char *JsonbToCString(StringInfo out, char *in, int estimated_len);
/*
* jsonb type input function
@@ -245,7 +245,7 @@ jsonb_in_object_field_start(void *pstate, char *fname, bool isnull)
JsonbInState *_state = (JsonbInState *) pstate;
JsonbValue v;
- Assert (fname != NULL);
+ Assert(fname != NULL);
v.type = jbvString;
v.val.string.len = checkStringLen(strlen(fname));
v.val.string.val = pnstrdup(fname, v.val.string.len);
@@ -255,7 +255,7 @@ jsonb_in_object_field_start(void *pstate, char *fname, bool isnull)
}
static void
-jsonb_put_escaped_value(StringInfo out, JsonbValue * scalarVal)
+jsonb_put_escaped_value(StringInfo out, JsonbValue *scalarVal)
{
switch (scalarVal->type)
{
@@ -267,8 +267,8 @@ jsonb_put_escaped_value(StringInfo out, JsonbValue * scalarVal)
break;
case jbvNumeric:
appendStringInfoString(out,
- DatumGetCString(DirectFunctionCall1(numeric_out,
- PointerGetDatum(scalarVal->val.numeric))));
+ DatumGetCString(DirectFunctionCall1(numeric_out,
+ PointerGetDatum(scalarVal->val.numeric))));
break;
case jbvBool:
if (scalarVal->val.boolean)
@@ -296,21 +296,23 @@ jsonb_in_scalar(void *pstate, char *token, JsonTokenType tokentype)
{
case JSON_TOKEN_STRING:
- Assert (token != NULL);
+ Assert(token != NULL);
v.type = jbvString;
v.val.string.len = checkStringLen(strlen(token));
v.val.string.val = pnstrdup(token, v.val.string.len);
v.estSize += v.val.string.len;
break;
case JSON_TOKEN_NUMBER:
+
/*
- * No need to check size of numeric values, because maximum numeric
- * size is well below the JsonbValue restriction
+ * No need to check size of numeric values, because maximum
+ * numeric size is well below the JsonbValue restriction
*/
- Assert (token != NULL);
+ Assert(token != NULL);
v.type = jbvNumeric;
v.val.numeric = DatumGetNumeric(DirectFunctionCall3(numeric_in, CStringGetDatum(token), 0, -1));
- v.estSize += VARSIZE_ANY(v.val.numeric) + sizeof(JEntry) /* alignment */ ;
+
+ v.estSize += VARSIZE_ANY(v.val.numeric) +sizeof(JEntry) /* alignment */ ;
break;
case JSON_TOKEN_TRUE:
v.type = jbvBool;
diff --git a/src/backend/utils/adt/jsonb_gin.c b/src/backend/utils/adt/jsonb_gin.c
index 62546ebaf28..9f8c178ab10 100644
--- a/src/backend/utils/adt/jsonb_gin.c
+++ b/src/backend/utils/adt/jsonb_gin.c
@@ -22,12 +22,12 @@
typedef struct PathHashStack
{
- uint32 hash;
+ uint32 hash;
struct PathHashStack *parent;
-} PathHashStack;
+} PathHashStack;
static text *make_text_key(const char *str, int len, char flag);
-static text *make_scalar_key(const JsonbValue * scalarVal, char flag);
+static text *make_scalar_key(const JsonbValue *scalarVal, char flag);
/*
*
@@ -97,14 +97,14 @@ gin_extract_jsonb(PG_FUNCTION_ARGS)
* JsonbExistsStrategyNumber. Our definition of existence does not
* allow for checking the existence of a non-jbvString element (just
* like the definition of the underlying operator), because the
- * operator takes a text rhs argument (which is taken as a proxy for an
- * equivalent Jsonb string).
+ * operator takes a text rhs argument (which is taken as a proxy for
+ * an equivalent Jsonb string).
*
* The way existence is represented does not preclude an alternative
	 * existence operator that takes as its rhs value an arbitrarily
- * internally-typed Jsonb. The only reason that isn't the case here is
- * that the existence operator is only really intended to determine if
- * an object has a certain key (object pair keys are of course
+ * internally-typed Jsonb. The only reason that isn't the case here
+ * is that the existence operator is only really intended to determine
+ * if an object has a certain key (object pair keys are of course
* invariably strings), which is extended to jsonb arrays. You could
* think of the default Jsonb definition of existence as being
* equivalent to a definition where all types of scalar array elements
@@ -116,11 +116,11 @@ gin_extract_jsonb(PG_FUNCTION_ARGS)
* JsonbExistsStrategyNumber, since we know that keys are strings for
* both objects and arrays, and don't have to further account for type
* mismatch. Not having to set the reset flag makes it less than
- * tempting to tighten up the definition of existence to preclude array
- * elements entirely, which would arguably be a simpler alternative.
- * In any case the infrastructure used to implement the existence
- * operator could trivially support this hypothetical, slightly
- * distinct definition of existence.
+ * tempting to tighten up the definition of existence to preclude
+ * array elements entirely, which would arguably be a simpler
+ * alternative. In any case the infrastructure used to implement the
+ * existence operator could trivially support this hypothetical,
+ * slightly distinct definition of existence.
*/
switch (r)
{
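[Editor's illustration, not part of this commit: the reason a text rhs can proxy for an equivalent jsonb string is that every extracted GIN key is a flag byte prepended to the text, per make_text_key() above. A hedged sketch of that layout; the flag values in the comment are illustrative only.]

#include "postgres.h"

static text *
make_flagged_key(const char *str, int len, char flag)
{
	text	   *item = (text *) palloc(VARHDRSZ + len + 1);

	SET_VARSIZE(item, VARHDRSZ + len + 1);
	*VARDATA(item) = flag;		/* e.g. 'K' for keys, 'E' for array elements */
	memcpy(VARDATA(item) + 1, str, len);
	return item;
}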
@@ -290,8 +290,10 @@ gin_triconsistent_jsonb(PG_FUNCTION_ARGS)
{
GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
+
/* Jsonb *query = PG_GETARG_JSONB(2); */
int32 nkeys = PG_GETARG_INT32(3);
+
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
GinTernaryValue res = GIN_TRUE;
@@ -299,7 +301,7 @@ gin_triconsistent_jsonb(PG_FUNCTION_ARGS)
if (strategy == JsonbContainsStrategyNumber)
{
- bool has_maybe = false;
+ bool has_maybe = false;
/*
* All extracted keys must be present. Combination of GIN_MAYBE and
@@ -323,8 +325,9 @@ gin_triconsistent_jsonb(PG_FUNCTION_ARGS)
/*
* Index doesn't have information about correspondence of Jsonb keys
* and values (as distinct from GIN keys, which a key/value pair is
- * stored as), so invariably we recheck. This is also reflected in how
- * GIN_MAYBE is given in response to there being no GIN_MAYBE input.
+ * stored as), so invariably we recheck. This is also reflected in
+ * how GIN_MAYBE is given in response to there being no GIN_MAYBE
+ * input.
*/
if (!has_maybe && res == GIN_TRUE)
res = GIN_MAYBE;
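[Editor's illustration, not part of this commit: the tri-state containment rule the comments above describe, reduced to its skeleton; the function name is hypothetical.]

static GinTernaryValue
contains_consistent_sketch(const GinTernaryValue *check, int32 nkeys)
{
	int32		i;

	for (i = 0; i < nkeys; i++)
		if (check[i] == GIN_FALSE)
			return GIN_FALSE;	/* some required key is definitely absent */

	/*
	 * All keys are at least possibly present, but the index cannot prove
	 * how keys and values pair up, so report GIN_MAYBE (forcing a heap
	 * recheck) even when every input was GIN_TRUE.
	 */
	return GIN_MAYBE;
}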
@@ -379,8 +382,10 @@ gin_consistent_jsonb_hash(PG_FUNCTION_ARGS)
{
bool *check = (bool *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
+
/* Jsonb *query = PG_GETARG_JSONB(2); */
int32 nkeys = PG_GETARG_INT32(3);
+
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
bool *recheck = (bool *) PG_GETARG_POINTER(5);
bool res = true;
@@ -390,13 +395,13 @@ gin_consistent_jsonb_hash(PG_FUNCTION_ARGS)
elog(ERROR, "unrecognized strategy number: %d", strategy);
/*
- * jsonb_hash_ops index doesn't have information about correspondence
- * of Jsonb keys and values (as distinct from GIN keys, which a
- * key/value pair is stored as), so invariably we recheck. Besides,
- * there are some special rules around the containment of raw scalar
- * arrays and regular arrays that are not represented here. However,
- * if all of the keys are not present, that's sufficient reason to
- * return false and finish immediately.
+ * jsonb_hash_ops index doesn't have information about correspondence of
+ * Jsonb keys and values (as distinct from GIN keys, which a key/value
+ * pair is stored as), so invariably we recheck. Besides, there are some
+ * special rules around the containment of raw scalar arrays and regular
+ * arrays that are not represented here. However, if all of the keys are
+ * not present, that's sufficient reason to return false and finish
+ * immediately.
*/
*recheck = true;
for (i = 0; i < nkeys; i++)
@@ -416,12 +421,14 @@ gin_triconsistent_jsonb_hash(PG_FUNCTION_ARGS)
{
GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
+
/* Jsonb *query = PG_GETARG_JSONB(2); */
int32 nkeys = PG_GETARG_INT32(3);
+
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
GinTernaryValue res = GIN_TRUE;
- int32 i;
- bool has_maybe = false;
+ int32 i;
+ bool has_maybe = false;
if (strategy != JsonbContainsStrategyNumber)
elog(ERROR, "unrecognized strategy number: %d", strategy);
@@ -447,10 +454,10 @@ gin_triconsistent_jsonb_hash(PG_FUNCTION_ARGS)
/*
* jsonb_hash_ops index doesn't have information about correspondence of
- * Jsonb keys and values (as distinct from GIN keys, which for this opclass
- * are a hash of a pair, or a hash of just an element), so invariably we
- * recheck. This is also reflected in how GIN_MAYBE is given in response
- * to there being no GIN_MAYBE input.
+ * Jsonb keys and values (as distinct from GIN keys, which for this
+ * opclass are a hash of a pair, or a hash of just an element), so
+ * invariably we recheck. This is also reflected in how GIN_MAYBE is
+ * given in response to there being no GIN_MAYBE input.
*/
if (!has_maybe && res == GIN_TRUE)
res = GIN_MAYBE;
@@ -488,7 +495,7 @@ gin_extract_jsonb_hash(PG_FUNCTION_ARGS)
while ((r = JsonbIteratorNext(&it, &v, false)) != WJB_DONE)
{
- PathHashStack *tmp;
+ PathHashStack *tmp;
if (i >= total)
{
@@ -513,10 +520,10 @@ gin_extract_jsonb_hash(PG_FUNCTION_ARGS)
/*
* We pass forward hashes from previous container nesting
* levels so that nested arrays with an outermost nested
- * object will have element hashes mixed with the outermost
- * key. It's also somewhat useful to have nested objects
- * innermost values have hashes that are a function of not
- * just their own key, but outer keys too.
+ * object will have element hashes mixed with the
+ * outermost key. It's also somewhat useful to have
+					 * nested objects' innermost values have hashes that are a
+ * function of not just their own key, but outer keys too.
*/
stack->hash = tmp->hash;
}
@@ -526,7 +533,7 @@ gin_extract_jsonb_hash(PG_FUNCTION_ARGS)
				 * At the least-nested (outermost) level, initialize with a
				 * stable container-type proxy value
*/
- stack->hash = (r == WJB_BEGIN_ARRAY)? JB_FARRAY:JB_FOBJECT;
+ stack->hash = (r == WJB_BEGIN_ARRAY) ? JB_FARRAY : JB_FOBJECT;
}
stack->parent = tmp;
break;
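[Editor's illustration, not part of this commit: the hash-forwarding described above, in isolation. Each stack level starts from its parent's hash, so a deeply nested value's GIN key reflects the entire path of outer keys. Names are hypothetical, and the one-bit rotate-XOR combining step is the illustrative choice, mirroring JsonbHashScalarValue()'s rule.]

typedef struct PathHashSketch
{
	uint32		hash;
	struct PathHashSketch *parent;
} PathHashSketch;

static uint32
path_hash_for_value(const PathHashSketch *level, uint32 value_hash)
{
	uint32		h = level->hash;	/* already folds in all outer keys */

	h = (h << 1) | (h >> 31);		/* rotate left one bit */
	return h ^ value_hash;			/* mix in this value's own hash */
}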
@@ -607,7 +614,7 @@ make_text_key(const char *str, int len, char flag)
* Create a textual representation of a jsonbValue for GIN storage.
*/
static text *
-make_scalar_key(const JsonbValue * scalarVal, char flag)
+make_scalar_key(const JsonbValue *scalarVal, char flag)
{
text *item;
char *cstr;
@@ -621,6 +628,7 @@ make_scalar_key(const JsonbValue * scalarVal, char flag)
item = make_text_key(scalarVal->val.boolean ? "t" : "f", 1, flag);
break;
case jbvNumeric:
+
/*
				 * A normalized textual representation, free of trailing
				 * zeroes, is required.
diff --git a/src/backend/utils/adt/jsonb_op.c b/src/backend/utils/adt/jsonb_op.c
index cfddccbbbbf..38bd5676739 100644
--- a/src/backend/utils/adt/jsonb_op.c
+++ b/src/backend/utils/adt/jsonb_op.c
@@ -69,7 +69,7 @@ jsonb_exists_any(PG_FUNCTION_ARGS)
if (findJsonbValueFromSuperHeader(VARDATA(jb),
JB_FOBJECT | JB_FARRAY,
plowbound,
- arrKey->val.array.elems + i) != NULL)
+ arrKey->val.array.elems + i) != NULL)
PG_RETURN_BOOL(true);
}
@@ -103,7 +103,7 @@ jsonb_exists_all(PG_FUNCTION_ARGS)
if (findJsonbValueFromSuperHeader(VARDATA(jb),
JB_FOBJECT | JB_FARRAY,
plowbound,
- arrKey->val.array.elems + i) == NULL)
+ arrKey->val.array.elems + i) == NULL)
PG_RETURN_BOOL(false);
}
@@ -116,7 +116,8 @@ jsonb_contains(PG_FUNCTION_ARGS)
Jsonb *val = PG_GETARG_JSONB(0);
Jsonb *tmpl = PG_GETARG_JSONB(1);
- JsonbIterator *it1, *it2;
+ JsonbIterator *it1,
+ *it2;
if (JB_ROOT_COUNT(val) < JB_ROOT_COUNT(tmpl) ||
JB_ROOT_IS_OBJECT(val) != JB_ROOT_IS_OBJECT(tmpl))
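[Editor's illustration, not part of this commit: after the root-count and root-type pre-checks above, containment boils down to walking two iterators in tandem. A sketch; e.g. {"a": 1, "b": 2} contains {"a": 1}, but not the reverse.]

static bool
contains_sketch(Jsonb *val, Jsonb *tmpl)
{
	JsonbIterator *it1 = JsonbIteratorInit(VARDATA(val));
	JsonbIterator *it2 = JsonbIteratorInit(VARDATA(tmpl));

	return JsonbDeepContains(&it1, &it2);
}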
@@ -135,7 +136,8 @@ jsonb_contained(PG_FUNCTION_ARGS)
Jsonb *tmpl = PG_GETARG_JSONB(0);
Jsonb *val = PG_GETARG_JSONB(1);
- JsonbIterator *it1, *it2;
+ JsonbIterator *it1,
+ *it2;
if (JB_ROOT_COUNT(val) < JB_ROOT_COUNT(tmpl) ||
JB_ROOT_IS_OBJECT(val) != JB_ROOT_IS_OBJECT(tmpl))
@@ -209,7 +211,6 @@ jsonb_le(PG_FUNCTION_ARGS)
Datum
jsonb_ge(PG_FUNCTION_ARGS)
{
-
Jsonb *jba = PG_GETARG_JSONB(0);
Jsonb *jbb = PG_GETARG_JSONB(1);
bool res;
@@ -270,7 +271,7 @@ jsonb_hash(PG_FUNCTION_ARGS)
{
switch (r)
{
- /* Rotation is left to JsonbHashScalarValue() */
+ /* Rotation is left to JsonbHashScalarValue() */
case WJB_BEGIN_ARRAY:
hash ^= JB_FARRAY;
break;
diff --git a/src/backend/utils/adt/jsonb_util.c b/src/backend/utils/adt/jsonb_util.c
index 1ac145b1cd9..1caaa4a9cc3 100644
--- a/src/backend/utils/adt/jsonb_util.c
+++ b/src/backend/utils/adt/jsonb_util.c
@@ -45,10 +45,10 @@
*/
typedef struct convertLevel
{
- uint32 i; /* Iterates once per element, or once per pair */
- uint32 *header; /* Pointer to current container header */
- JEntry *meta; /* This level's metadata */
- char *begin; /* Pointer into convertState.buffer */
+ uint32 i; /* Iterates once per element, or once per pair */
+ uint32 *header; /* Pointer to current container header */
+ JEntry *meta; /* This level's metadata */
+ char *begin; /* Pointer into convertState.buffer */
} convertLevel;
/*
@@ -57,41 +57,41 @@ typedef struct convertLevel
typedef struct convertState
{
/* Preallocated buffer in which to form varlena/Jsonb value */
- Jsonb *buffer;
+ Jsonb *buffer;
/* Pointer into buffer */
- char *ptr;
+ char *ptr;
	/* State for each nesting level of the conversion */
- convertLevel *allState, /* Overall state array */
- *contPtr; /* Cur container pointer (in allState) */
+ convertLevel *allState, /* Overall state array */
+ *contPtr; /* Cur container pointer (in allState) */
/* Current size of buffer containing allState array */
- Size levelSz;
-
-} convertState;
-
-static int compareJsonbScalarValue(JsonbValue * a, JsonbValue * b);
-static int lexicalCompareJsonbStringValue(const void *a, const void *b);
-static Size convertJsonb(JsonbValue * val, Jsonb* buffer);
-static inline short addPaddingInt(convertState * cstate);
-static void walkJsonbValueConversion(JsonbValue * val, convertState * cstate,
- uint32 nestlevel);
-static void putJsonbValueConversion(convertState * cstate, JsonbValue * val,
- uint32 flags, uint32 level);
-static void putScalarConversion(convertState * cstate, JsonbValue * scalarVal,
- uint32 level, uint32 i);
-static void iteratorFromContainerBuf(JsonbIterator * it, char *buffer);
-static bool formIterIsContainer(JsonbIterator ** it, JsonbValue * val,
- JEntry * ent, bool skipNested);
-static JsonbIterator *freeAndGetParent(JsonbIterator * it);
-static JsonbParseState *pushState(JsonbParseState ** pstate);
-static void appendKey(JsonbParseState * pstate, JsonbValue * scalarVal);
-static void appendValue(JsonbParseState * pstate, JsonbValue * scalarVal);
-static void appendElement(JsonbParseState * pstate, JsonbValue * scalarVal);
-static int lengthCompareJsonbStringValue(const void *a, const void *b, void *arg);
-static int lengthCompareJsonbPair(const void *a, const void *b, void *arg);
-static void uniqueifyJsonbObject(JsonbValue * object);
-static void uniqueifyJsonbArray(JsonbValue * array);
+ Size levelSz;
+
+} convertState;
+
+static int compareJsonbScalarValue(JsonbValue *a, JsonbValue *b);
+static int lexicalCompareJsonbStringValue(const void *a, const void *b);
+static Size convertJsonb(JsonbValue *val, Jsonb *buffer);
+static inline short addPaddingInt(convertState *cstate);
+static void walkJsonbValueConversion(JsonbValue *val, convertState *cstate,
+ uint32 nestlevel);
+static void putJsonbValueConversion(convertState *cstate, JsonbValue *val,
+ uint32 flags, uint32 level);
+static void putScalarConversion(convertState *cstate, JsonbValue *scalarVal,
+ uint32 level, uint32 i);
+static void iteratorFromContainerBuf(JsonbIterator *it, char *buffer);
+static bool formIterIsContainer(JsonbIterator **it, JsonbValue *val,
+ JEntry *ent, bool skipNested);
+static JsonbIterator *freeAndGetParent(JsonbIterator *it);
+static JsonbParseState *pushState(JsonbParseState **pstate);
+static void appendKey(JsonbParseState *pstate, JsonbValue *scalarVal);
+static void appendValue(JsonbParseState *pstate, JsonbValue *scalarVal);
+static void appendElement(JsonbParseState *pstate, JsonbValue *scalarVal);
+static int lengthCompareJsonbStringValue(const void *a, const void *b, void *arg);
+static int lengthCompareJsonbPair(const void *a, const void *b, void *arg);
+static void uniqueifyJsonbObject(JsonbValue *object);
+static void uniqueifyJsonbArray(JsonbValue *array);
/*
* Turn an in-memory JsonbValue into a Jsonb for on-disk storage.
@@ -107,7 +107,7 @@ static void uniqueifyJsonbArray(JsonbValue * array);
* inconvenient to deal with a great amount of other state.
*/
Jsonb *
-JsonbValueToJsonb(JsonbValue * val)
+JsonbValueToJsonb(JsonbValue *val)
{
Jsonb *out;
Size sz;
@@ -164,7 +164,7 @@ int
compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b)
{
JsonbIterator *ita,
- *itb;
+ *itb;
int res = 0;
ita = JsonbIteratorInit(a);
@@ -182,9 +182,9 @@ compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b)
/*
* To a limited extent we'll redundantly iterate over an array/object
- * while re-performing the same test without any reasonable expectation
- * of the same container types having differing lengths (as when we
- * process a WJB_BEGIN_OBJECT, and later the corresponding
+ * while re-performing the same test without any reasonable
+ * expectation of the same container types having differing lengths
+ * (as when we process a WJB_BEGIN_OBJECT, and later the corresponding
* WJB_END_OBJECT), but no matter.
*/
if (ra == rb)
@@ -208,9 +208,10 @@ compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b)
res = compareJsonbScalarValue(&va, &vb);
break;
case jbvArray:
+
/*
- * This could be a "raw scalar" pseudo array. That's a
- * special case here though, since we still want the
+ * This could be a "raw scalar" pseudo array. That's
+ * a special case here though, since we still want the
* general type-based comparisons to apply, and as far
* as we're concerned a pseudo array is just a scalar.
*/
@@ -258,12 +259,14 @@ compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b)
while (ita != NULL)
{
JsonbIterator *i = ita->parent;
+
pfree(ita);
ita = i;
}
while (itb != NULL)
{
JsonbIterator *i = itb->parent;
+
pfree(itb);
itb = i;
}
@@ -313,12 +316,12 @@ compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b)
*/
JsonbValue *
findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags,
- uint32 *lowbound, JsonbValue * key)
+ uint32 *lowbound, JsonbValue *key)
{
- uint32 superheader = *(uint32 *) sheader;
- JEntry *array = (JEntry *) (sheader + sizeof(uint32));
- int count = (superheader & JB_CMASK);
- JsonbValue *result = palloc(sizeof(JsonbValue));
+ uint32 superheader = *(uint32 *) sheader;
+ JEntry *array = (JEntry *) (sheader + sizeof(uint32));
+ int count = (superheader & JB_CMASK);
+ JsonbValue *result = palloc(sizeof(JsonbValue));
Assert((flags & ~(JB_FARRAY | JB_FOBJECT)) == 0);
@@ -347,6 +350,7 @@ findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags,
{
result->type = jbvNumeric;
result->val.numeric = (Numeric) (data + INTALIGN(JBE_OFF(*e)));
+
result->estSize = 2 * sizeof(JEntry) +
VARSIZE_ANY(result->val.numeric);
}
@@ -381,8 +385,8 @@ findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags,
JsonbValue candidate;
/*
- * Note how we compensate for the fact that we're iterating through
- * pairs (not entries) throughout.
+ * Note how we compensate for the fact that we're iterating
+ * through pairs (not entries) throughout.
*/
stopMiddle = stopLow + (count - stopLow) / 2;
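[Editor's illustration, not part of this commit: the "pairs, not entries" compensation noted above, as a standalone binary search. Object JEntrys are laid out key, value, key, value, ..., so pair i's key is entry 2*i and its value entry 2*i + 1; pair_key_cmp() is a hypothetical comparator.]

static JEntry *
find_pair_value(JEntry *array, uint32 count, const JsonbValue *key)
{
	uint32		stopLow = 0;

	while (stopLow < count)
	{
		uint32		stopMiddle = stopLow + (count - stopLow) / 2;
		int			cmp = pair_key_cmp(key, array, stopMiddle);

		if (cmp == 0)
			return &array[stopMiddle * 2 + 1];	/* the matching pair's value */
		else if (cmp > 0)
			stopLow = stopMiddle + 1;
		else
			count = stopMiddle;
	}
	return NULL;				/* key not present */
}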
@@ -419,6 +423,7 @@ findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags,
{
result->type = jbvNumeric;
result->val.numeric = (Numeric) (data + INTALIGN(JBE_OFF(*v)));
+
result->estSize = 2 * sizeof(JEntry) +
VARSIZE_ANY(result->val.numeric);
}
@@ -431,8 +436,8 @@ findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags,
else
{
/*
- * See header comments to understand why this never happens
- * with arrays
+ * See header comments to understand why this never
+ * happens with arrays
*/
result->type = jbvBinary;
result->val.binary.data = data + INTALIGN(JBE_OFF(*v));
@@ -508,6 +513,7 @@ getIthJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 i)
{
result->type = jbvNumeric;
result->val.numeric = (Numeric) (data + INTALIGN(JBE_OFF(*e)));
+
result->estSize = 2 * sizeof(JEntry) + VARSIZE_ANY(result->val.numeric);
}
else if (JBE_ISBOOL(*e))
@@ -541,7 +547,7 @@ getIthJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 i)
* "raw scalar" pseudo array to append that.
*/
JsonbValue *
-pushJsonbValue(JsonbParseState ** pstate, int seq, JsonbValue * scalarVal)
+pushJsonbValue(JsonbParseState **pstate, int seq, JsonbValue *scalarVal)
{
JsonbValue *result = NULL;
@@ -555,7 +561,7 @@ pushJsonbValue(JsonbParseState ** pstate, int seq, JsonbValue * scalarVal)
(*pstate)->contVal.estSize = 3 * sizeof(JEntry);
(*pstate)->contVal.val.array.nElems = 0;
(*pstate)->contVal.val.array.rawScalar = (scalarVal &&
- scalarVal->val.array.rawScalar);
+ scalarVal->val.array.rawScalar);
if (scalarVal && scalarVal->val.array.nElems > 0)
{
/* Assume that this array is still really a scalar */
@@ -567,7 +573,7 @@ pushJsonbValue(JsonbParseState ** pstate, int seq, JsonbValue * scalarVal)
(*pstate)->size = 4;
}
(*pstate)->contVal.val.array.elems = palloc(sizeof(JsonbValue) *
- (*pstate)->size);
+ (*pstate)->size);
break;
case WJB_BEGIN_OBJECT:
Assert(!scalarVal);
@@ -578,7 +584,7 @@ pushJsonbValue(JsonbParseState ** pstate, int seq, JsonbValue * scalarVal)
(*pstate)->contVal.val.object.nPairs = 0;
(*pstate)->size = 4;
(*pstate)->contVal.val.object.pairs = palloc(sizeof(JsonbPair) *
- (*pstate)->size);
+ (*pstate)->size);
break;
case WJB_KEY:
Assert(scalarVal->type == jbvString);
@@ -674,9 +680,9 @@ JsonbIteratorInit(JsonbSuperHeader sheader)
* garbage.
*/
int
-JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
+JsonbIteratorNext(JsonbIterator **it, JsonbValue *val, bool skipNested)
{
- JsonbIterState state;
+ JsonbIterState state;
/* Guard against stack overflow due to overly complex Jsonb */
check_stack_depth();
@@ -694,9 +700,10 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
/* Set v to array on first array call */
val->type = jbvArray;
val->val.array.nElems = (*it)->nElems;
+
/*
- * v->val.array.elems is not actually set, because we aren't doing a
- * full conversion
+ * v->val.array.elems is not actually set, because we aren't doing
+ * a full conversion
*/
val->val.array.rawScalar = (*it)->isScalar;
(*it)->i = 0;
@@ -709,8 +716,8 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
if ((*it)->i >= (*it)->nElems)
{
/*
- * All elements within array already processed. Report this to
- * caller, and give it back original parent iterator (which
+ * All elements within array already processed. Report this
+ * to caller, and give it back original parent iterator (which
* independently tracks iteration progress at its level of
* nesting).
*/
@@ -741,6 +748,7 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
/* Set v to object on first object call */
val->type = jbvObject;
val->val.object.nPairs = (*it)->nElems;
+
/*
* v->val.object.pairs is not actually set, because we aren't
* doing a full conversion
@@ -756,9 +764,9 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
{
/*
* All pairs within object already processed. Report this to
- * caller, and give it back original containing iterator (which
- * independently tracks iteration progress at its level of
- * nesting).
+ * caller, and give it back original containing iterator
+ * (which independently tracks iteration progress at its level
+ * of nesting).
*/
*it = freeAndGetParent(*it);
return WJB_END_OBJECT;
@@ -787,8 +795,8 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
/*
* Value may be a container, in which case we recurse with new,
- * child iterator. If it is, don't bother !skipNested callers with
- * dealing with the jbvBinary representation.
+ * child iterator. If it is, don't bother !skipNested callers
+ * with dealing with the jbvBinary representation.
*/
if (formIterIsContainer(it, val, &(*it)->meta[((*it)->i++) * 2 + 1],
skipNested))
@@ -815,17 +823,18 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
* We determine if mContained is contained within val.
*/
bool
-JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
+JsonbDeepContains(JsonbIterator **val, JsonbIterator **mContained)
{
uint32 rval,
rcont;
JsonbValue vval,
vcontained;
+
/*
* Guard against stack overflow due to overly complex Jsonb.
*
- * Functions called here independently take this precaution, but that might
- * not be sufficient since this is also a recursive function.
+ * Functions called here independently take this precaution, but that
+ * might not be sufficient since this is also a recursive function.
*/
check_stack_depth();
@@ -898,7 +907,8 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
else
{
/* Nested container value (object or array) */
- JsonbIterator *nestval, *nestContained;
+ JsonbIterator *nestval,
+ *nestContained;
Assert(lhsVal->type == jbvBinary);
Assert(vcontained.type == jbvBinary);
@@ -922,8 +932,9 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
* In other words, the mapping of container nodes in the rhs
* "vcontained" Jsonb to internal nodes on the lhs is
* injective, and parent-child edges on the rhs must be mapped
- * to parent-child edges on the lhs to satisfy the condition of
- * containment (plus of course the mapped nodes must be equal).
+ * to parent-child edges on the lhs to satisfy the condition
+ * of containment (plus of course the mapped nodes must be
+ * equal).
*/
if (!JsonbDeepContains(&nestval, &nestContained))
return false;
@@ -942,10 +953,10 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
* arrays.
*
* A raw scalar may contain another raw scalar, and an array may
- * contain a raw scalar, but a raw scalar may not contain an array. We
- * don't do something like this for the object case, since objects can
- * only contain pairs, never raw scalars (a pair is represented by an
- * rhs object argument with a single contained pair).
+ * contain a raw scalar, but a raw scalar may not contain an array.
+ * We don't do something like this for the object case, since objects
+ * can only contain pairs, never raw scalars (a pair is represented by
+ * an rhs object argument with a single contained pair).
*/
if (vval.val.array.rawScalar && !vcontained.val.array.rawScalar)
return false;
@@ -956,8 +967,9 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
rcont = JsonbIteratorNext(mContained, &vcontained, true);
/*
- * When we get through caller's rhs "is it contained within?" array
- * without failing to find one of its values, it's contained.
+ * When we get through caller's rhs "is it contained within?"
+ * array without failing to find one of its values, it's
+ * contained.
*/
if (rcont == WJB_END_ARRAY)
return true;
@@ -989,7 +1001,7 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
for (i = 0; i < nLhsElems; i++)
{
- /* Store all lhs elements in temp array*/
+ /* Store all lhs elements in temp array */
rcont = JsonbIteratorNext(val, &vval, true);
Assert(rcont == WJB_ELEM);
@@ -1009,8 +1021,9 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
for (i = 0; i < nLhsElems; i++)
{
/* Nested container value (object or array) */
- JsonbIterator *nestval, *nestContained;
- bool contains;
+ JsonbIterator *nestval,
+ *nestContained;
+ bool contains;
nestval = JsonbIteratorInit(lhsConts[i].val.binary.data);
nestContained = JsonbIteratorInit(vcontained.val.binary.data);
@@ -1069,9 +1082,9 @@ arrayToJsonbSortedArray(ArrayType *array)
/*
* A text array uses at least eight bytes per element, so any overflow in
* "key_count * sizeof(JsonbPair)" is small enough for palloc() to catch.
- * However, credible improvements to the array format could invalidate that
- * assumption. Therefore, use an explicit check rather than relying on
- * palloc() to complain.
+ * However, credible improvements to the array format could invalidate
+ * that assumption. Therefore, use an explicit check rather than relying
+ * on palloc() to complain.
*/
if (elem_count > JSONB_MAX_PAIRS)
ereport(ERROR,
@@ -1108,9 +1121,9 @@ arrayToJsonbSortedArray(ArrayType *array)
* flags.
*/
void
-JsonbHashScalarValue(const JsonbValue * scalarVal, uint32 * hash)
+JsonbHashScalarValue(const JsonbValue *scalarVal, uint32 *hash)
{
- int tmp;
+ int tmp;
/*
* Combine hash values of successive keys, values and elements by rotating
@@ -1131,11 +1144,11 @@ JsonbHashScalarValue(const JsonbValue * scalarVal, uint32 * hash)
case jbvNumeric:
/* Must be unaffected by trailing zeroes */
tmp = DatumGetInt32(DirectFunctionCall1(hash_numeric,
- NumericGetDatum(scalarVal->val.numeric)));
+ NumericGetDatum(scalarVal->val.numeric)));
*hash ^= tmp;
return;
case jbvBool:
- *hash ^= scalarVal->val.boolean? 0x02:0x04;
+ *hash ^= scalarVal->val.boolean ? 0x02 : 0x04;
return;
default:
elog(ERROR, "invalid jsonb scalar type");
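[Editor's illustration, not part of this commit: the rotate-then-XOR combining rule described above, in isolation. Rotating the accumulator before each XOR makes the hash order-sensitive, so ["a","b"] and ["b","a"] hash differently; the name is hypothetical and the one-bit rotation is the illustrative choice.]

static void
combine_hash_sketch(uint32 *hash, uint32 scalar_hash)
{
	*hash = (*hash << 1) | (*hash >> 31);	/* rotate left one bit */
	*hash ^= scalar_hash;					/* fold in this scalar */
}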
@@ -1150,7 +1163,7 @@ JsonbHashScalarValue(const JsonbValue * scalarVal, uint32 * hash)
* within a single jsonb.
*/
static int
-compareJsonbScalarValue(JsonbValue * aScalar, JsonbValue * bScalar)
+compareJsonbScalarValue(JsonbValue *aScalar, JsonbValue *bScalar)
{
if (aScalar->type == bScalar->type)
{
@@ -1162,8 +1175,8 @@ compareJsonbScalarValue(JsonbValue * aScalar, JsonbValue * bScalar)
return lengthCompareJsonbStringValue(aScalar, bScalar, NULL);
case jbvNumeric:
return DatumGetInt32(DirectFunctionCall2(numeric_cmp,
- PointerGetDatum(aScalar->val.numeric),
- PointerGetDatum(bScalar->val.numeric)));
+ PointerGetDatum(aScalar->val.numeric),
+ PointerGetDatum(bScalar->val.numeric)));
case jbvBool:
if (aScalar->val.boolean != bScalar->val.boolean)
return (aScalar->val.boolean > bScalar->val.boolean) ? 1 : -1;
@@ -1201,10 +1214,10 @@ lexicalCompareJsonbStringValue(const void *a, const void *b)
* sufficiently large to fit the value
*/
static Size
-convertJsonb(JsonbValue * val, Jsonb *buffer)
+convertJsonb(JsonbValue *val, Jsonb *buffer)
{
- convertState state;
- Size len;
+ convertState state;
+ Size len;
/* Should not already have binary representation */
Assert(val->type != jbvBinary);
@@ -1232,7 +1245,7 @@ convertJsonb(JsonbValue * val, Jsonb *buffer)
* token (in a manner similar to generic iteration).
*/
static void
-walkJsonbValueConversion(JsonbValue * val, convertState * cstate,
+walkJsonbValueConversion(JsonbValue *val, convertState *cstate,
uint32 nestlevel)
{
int i;
@@ -1290,9 +1303,11 @@ walkJsonbValueConversion(JsonbValue * val, convertState * cstate,
* access to conversion buffer.
*/
static inline
-short addPaddingInt(convertState * cstate)
+short
+addPaddingInt(convertState *cstate)
{
- short padlen, p;
+ short padlen,
+ p;
padlen = INTALIGN(cstate->ptr - VARDATA(cstate->buffer)) -
(cstate->ptr - VARDATA(cstate->buffer));
@@ -1320,14 +1335,14 @@ short addPaddingInt(convertState * cstate)
* and the end (i.e. there is one call per sequential processing WJB_* token).
*/
static void
-putJsonbValueConversion(convertState * cstate, JsonbValue * val, uint32 flags,
+putJsonbValueConversion(convertState *cstate, JsonbValue *val, uint32 flags,
uint32 level)
{
if (level == cstate->levelSz)
{
cstate->levelSz *= 2;
cstate->allState = repalloc(cstate->allState,
- sizeof(convertLevel) * cstate->levelSz);
+ sizeof(convertLevel) * cstate->levelSz);
}
cstate->contPtr = cstate->allState + level;
@@ -1385,9 +1400,9 @@ putJsonbValueConversion(convertState * cstate, JsonbValue * val, uint32 flags,
}
else if (flags & (WJB_END_ARRAY | WJB_END_OBJECT))
{
- convertLevel *prevPtr; /* Prev container pointer */
- uint32 len,
- i;
+ convertLevel *prevPtr; /* Prev container pointer */
+ uint32 len,
+ i;
Assert(((flags & WJB_END_ARRAY) && val->type == jbvArray) ||
((flags & WJB_END_OBJECT) && val->type == jbvObject));
@@ -1443,10 +1458,10 @@ putJsonbValueConversion(convertState * cstate, JsonbValue * val, uint32 flags,
* metadata peculiar to each scalar type.
*/
static void
-putScalarConversion(convertState * cstate, JsonbValue * scalarVal, uint32 level,
+putScalarConversion(convertState *cstate, JsonbValue *scalarVal, uint32 level,
uint32 i)
{
- int numlen;
+ int numlen;
short padlen;
cstate->contPtr = cstate->allState + level;
@@ -1509,7 +1524,7 @@ putScalarConversion(convertState * cstate, JsonbValue * scalarVal, uint32 level,
* container type.
*/
static void
-iteratorFromContainerBuf(JsonbIterator * it, JsonbSuperHeader sheader)
+iteratorFromContainerBuf(JsonbIterator *it, JsonbSuperHeader sheader)
{
uint32 superheader = *(uint32 *) sheader;
@@ -1531,6 +1546,7 @@ iteratorFromContainerBuf(JsonbIterator * it, JsonbSuperHeader sheader)
Assert(!it->isScalar || it->nElems == 1);
break;
case JB_FOBJECT:
+
/*
* Offset reflects that nElems indicates JsonbPairs in an object.
* Each key and each value contain Jentry metadata just the same.
@@ -1562,7 +1578,7 @@ iteratorFromContainerBuf(JsonbIterator * it, JsonbSuperHeader sheader)
* anywhere).
*/
static bool
-formIterIsContainer(JsonbIterator ** it, JsonbValue * val, JEntry * ent,
+formIterIsContainer(JsonbIterator **it, JsonbValue *val, JEntry *ent,
bool skipNested)
{
if (JBE_ISNULL(*ent))
@@ -1585,6 +1601,7 @@ formIterIsContainer(JsonbIterator ** it, JsonbValue * val, JEntry * ent,
{
val->type = jbvNumeric;
val->val.numeric = (Numeric) ((*it)->dataProper + INTALIGN(JBE_OFF(*ent)));
+
val->estSize = 2 * sizeof(JEntry) + VARSIZE_ANY(val->val.numeric);
return false;
@@ -1609,8 +1626,8 @@ formIterIsContainer(JsonbIterator ** it, JsonbValue * val, JEntry * ent,
else
{
/*
- * Must be container type, so setup caller's iterator to point to that,
- * and return indication of that.
+		 * Must be container type, so set up caller's iterator to point to
+		 * that, and return indication of that.
*
* Get child iterator.
*/
@@ -1627,11 +1644,11 @@ formIterIsContainer(JsonbIterator ** it, JsonbValue * val, JEntry * ent,
}
/*
- * JsonbIteratorNext() worker: Return parent, while freeing memory for current
+ * JsonbIteratorNext() worker: Return parent, while freeing memory for current
* iterator
*/
static JsonbIterator *
-freeAndGetParent(JsonbIterator * it)
+freeAndGetParent(JsonbIterator *it)
{
JsonbIterator *v = it->parent;
@@ -1643,7 +1660,7 @@ freeAndGetParent(JsonbIterator * it)
* pushJsonbValue() worker: Iteration-like forming of Jsonb
*/
static JsonbParseState *
-pushState(JsonbParseState ** pstate)
+pushState(JsonbParseState **pstate)
{
JsonbParseState *ns = palloc(sizeof(JsonbParseState));
@@ -1655,7 +1672,7 @@ pushState(JsonbParseState ** pstate)
* pushJsonbValue() worker: Append a pair key to state when generating a Jsonb
*/
static void
-appendKey(JsonbParseState * pstate, JsonbValue * string)
+appendKey(JsonbParseState *pstate, JsonbValue *string)
{
JsonbValue *object = &pstate->contVal;
@@ -1672,7 +1689,7 @@ appendKey(JsonbParseState * pstate, JsonbValue * string)
{
pstate->size *= 2;
object->val.object.pairs = repalloc(object->val.object.pairs,
- sizeof(JsonbPair) * pstate->size);
+ sizeof(JsonbPair) * pstate->size);
}
object->val.object.pairs[object->val.object.nPairs].key = *string;
@@ -1686,7 +1703,7 @@ appendKey(JsonbParseState * pstate, JsonbValue * string)
* Jsonb
*/
static void
-appendValue(JsonbParseState * pstate, JsonbValue * scalarVal)
+appendValue(JsonbParseState *pstate, JsonbValue *scalarVal)
{
JsonbValue *object = &pstate->contVal;
@@ -1700,7 +1717,7 @@ appendValue(JsonbParseState * pstate, JsonbValue * scalarVal)
* pushJsonbValue() worker: Append an element to state when generating a Jsonb
*/
static void
-appendElement(JsonbParseState * pstate, JsonbValue * scalarVal)
+appendElement(JsonbParseState *pstate, JsonbValue *scalarVal)
{
JsonbValue *array = &pstate->contVal;
@@ -1716,7 +1733,7 @@ appendElement(JsonbParseState * pstate, JsonbValue * scalarVal)
{
pstate->size *= 2;
array->val.array.elems = repalloc(array->val.array.elems,
- sizeof(JsonbValue) * pstate->size);
+ sizeof(JsonbValue) * pstate->size);
}
array->val.array.elems[array->val.array.nElems++] = *scalarVal;
@@ -1797,7 +1814,7 @@ lengthCompareJsonbPair(const void *a, const void *b, void *binequal)
* Sort and unique-ify pairs in JsonbValue object
*/
static void
-uniqueifyJsonbObject(JsonbValue * object)
+uniqueifyJsonbObject(JsonbValue *object)
{
bool hasNonUniq = false;
@@ -1838,15 +1855,15 @@ uniqueifyJsonbObject(JsonbValue * object)
* Sorting uses internal ordering.
*/
static void
-uniqueifyJsonbArray(JsonbValue * array)
+uniqueifyJsonbArray(JsonbValue *array)
{
- bool hasNonUniq = false;
+ bool hasNonUniq = false;
Assert(array->type == jbvArray);
/*
- * Actually sort values, determining if any were equal on the basis of full
- * binary equality (rather than just having the same string length).
+ * Actually sort values, determining if any were equal on the basis of
+ * full binary equality (rather than just having the same string length).
*/
if (array->val.array.nElems > 1)
qsort_arg(array->val.array.elems, array->val.array.nElems,
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index 2423b737c9e..6b1ce9b3a9f 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -104,11 +104,12 @@ static void populate_recordset_array_element_start(void *state, bool isnull);
/* worker function for populate_recordset and to_recordset */
static inline Datum populate_recordset_worker(FunctionCallInfo fcinfo,
bool have_record_arg);
+
/* Worker that takes care of common setup for us */
static JsonbValue *findJsonbValueFromSuperHeaderLen(JsonbSuperHeader sheader,
- uint32 flags,
- char *key,
- uint32 keylen);
+ uint32 flags,
+ char *key,
+ uint32 keylen);
/* search type classification for json_get* functions */
typedef enum
@@ -235,8 +236,8 @@ typedef struct PopulateRecordsetState
} PopulateRecordsetState;
/* Turn a jsonb object into a record */
-static void make_row_from_rec_and_jsonb(Jsonb * element,
- PopulateRecordsetState *state);
+static void make_row_from_rec_and_jsonb(Jsonb *element,
+ PopulateRecordsetState *state);
/*
* SQL function json_object_keys
@@ -791,7 +792,7 @@ get_path_all(FunctionCallInfo fcinfo, bool as_text)
result = get_worker(json, NULL, -1, tpath, ipath, npath, as_text);
if (result != NULL)
- PG_RETURN_TEXT_P(result);
+ PG_RETURN_TEXT_P(result);
else
/* null is NULL, regardless */
PG_RETURN_NULL();
@@ -1178,7 +1179,7 @@ get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text)
jbvp = findJsonbValueFromSuperHeaderLen(superHeader,
JB_FOBJECT,
VARDATA_ANY(pathtext[i]),
- VARSIZE_ANY_EXHDR(pathtext[i]));
+ VARSIZE_ANY_EXHDR(pathtext[i]));
}
else if (have_array)
{
@@ -1209,8 +1210,8 @@ get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text)
if (jbvp->type == jbvBinary)
{
- JsonbIterator *it = JsonbIteratorInit(jbvp->val.binary.data);
- int r;
+ JsonbIterator *it = JsonbIteratorInit(jbvp->val.binary.data);
+ int r;
r = JsonbIteratorNext(&it, &tv, true);
superHeader = (JsonbSuperHeader) jbvp->val.binary.data;
@@ -1932,7 +1933,7 @@ elements_array_element_end(void *state, bool isnull)
text *val;
HeapTuple tuple;
Datum values[1];
- bool nulls[1] = {false};
+ bool nulls[1] = {false};
/* skip over nested objects */
if (_state->lex->lex_level != 1)
@@ -2035,7 +2036,7 @@ json_to_record(PG_FUNCTION_ARGS)
static inline Datum
populate_record_worker(FunctionCallInfo fcinfo, bool have_record_arg)
{
- int json_arg_num = have_record_arg ? 1 : 0;
+ int json_arg_num = have_record_arg ? 1 : 0;
Oid jtype = get_fn_expr_argtype(fcinfo->flinfo, json_arg_num);
text *json;
Jsonb *jb = NULL;
@@ -2060,7 +2061,7 @@ populate_record_worker(FunctionCallInfo fcinfo, bool have_record_arg)
if (have_record_arg)
{
- Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
+ Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
if (!type_is_rowtype(argtype))
ereport(ERROR,
@@ -2275,7 +2276,7 @@ populate_record_worker(FunctionCallInfo fcinfo, bool have_record_arg)
s = pnstrdup((v->val.boolean) ? "t" : "f", 1);
else if (v->type == jbvNumeric)
s = DatumGetCString(DirectFunctionCall1(numeric_out,
- PointerGetDatum(v->val.numeric)));
+ PointerGetDatum(v->val.numeric)));
else if (!use_json_as_text)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -2476,7 +2477,7 @@ json_to_recordset(PG_FUNCTION_ARGS)
}
static void
-make_row_from_rec_and_jsonb(Jsonb * element, PopulateRecordsetState *state)
+make_row_from_rec_and_jsonb(Jsonb *element, PopulateRecordsetState *state)
{
Datum *values;
bool *nulls;
@@ -2575,7 +2576,7 @@ make_row_from_rec_and_jsonb(Jsonb * element, PopulateRecordsetState *state)
s = pnstrdup((v->val.boolean) ? "t" : "f", 1);
else if (v->type == jbvNumeric)
s = DatumGetCString(DirectFunctionCall1(numeric_out,
- PointerGetDatum(v->val.numeric)));
+ PointerGetDatum(v->val.numeric)));
else if (!state->use_json_as_text)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -2603,7 +2604,7 @@ make_row_from_rec_and_jsonb(Jsonb * element, PopulateRecordsetState *state)
static inline Datum
populate_recordset_worker(FunctionCallInfo fcinfo, bool have_record_arg)
{
- int json_arg_num = have_record_arg ? 1 : 0;
+ int json_arg_num = have_record_arg ? 1 : 0;
Oid jtype = get_fn_expr_argtype(fcinfo->flinfo, json_arg_num);
bool use_json_as_text;
ReturnSetInfo *rsi;
@@ -2620,7 +2621,7 @@ populate_recordset_worker(FunctionCallInfo fcinfo, bool have_record_arg)
if (have_record_arg)
{
- Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
+ Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
if (!type_is_rowtype(argtype))
ereport(ERROR,
diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c
index 3d5f3d538b6..bcd9e2182d0 100644
--- a/src/backend/utils/adt/like.c
+++ b/src/backend/utils/adt/like.c
@@ -76,12 +76,12 @@ wchareq(char *p1, char *p2)
/*
* Formerly we had a routine iwchareq() here that tried to do case-insensitive
- * comparison of multibyte characters. It did not work at all, however,
+ * comparison of multibyte characters. It did not work at all, however,
* because it relied on tolower() which has a single-byte API ... and
* towlower() wouldn't be much better since we have no suitably cheap way
* of getting a single character transformed to the system's wchar_t format.
* So now, we just downcase the strings using lower() and apply regular LIKE
- * comparison. This should be revisited when we install better locale support.
+ * comparison. This should be revisited when we install better locale support.
*/
/*
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index 241f738d608..4eeb6314fae 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -96,7 +96,7 @@ pg_signal_backend(int pid, int sig)
/*
* BackendPidGetProc returns NULL if the pid isn't valid; but by the time
* we reach kill(), a process for which we get a valid proc here might
- * have terminated on its own. There's no way to acquire a lock on an
+ * have terminated on its own. There's no way to acquire a lock on an
* arbitrary process to prevent that. But since so far all the callers of
* this mechanism involve some request for ending the process anyway, that
* it might end on its own first is not a problem.
@@ -120,7 +120,7 @@ pg_signal_backend(int pid, int sig)
* recycled for a new process, before reaching here? Then we'd be trying
* to kill the wrong thing. Seems near impossible when sequential pid
* assignment and wraparound is used. Perhaps it could happen on a system
- * where pid re-use is randomized. That race condition possibility seems
+ * where pid re-use is randomized. That race condition possibility seems
* too unlikely to worry about.
*/
@@ -140,7 +140,7 @@ pg_signal_backend(int pid, int sig)
}
/*
- * Signal to cancel a backend process. This is allowed if you are superuser or
+ * Signal to cancel a backend process. This is allowed if you are superuser or
* have the same role as the process being canceled.
*/
Datum
@@ -254,7 +254,7 @@ pg_tablespace_databases(PG_FUNCTION_ARGS)
fctx->location = psprintf("base");
else
fctx->location = psprintf("pg_tblspc/%u/%s", tablespaceOid,
- TABLESPACE_VERSION_DIRECTORY);
+ TABLESPACE_VERSION_DIRECTORY);
fctx->dirdesc = AllocateDir(fctx->location);
@@ -326,7 +326,7 @@ pg_tablespace_location(PG_FUNCTION_ARGS)
/*
* It's useful to apply this function to pg_class.reltablespace, wherein
- * zero means "the database's default tablespace". So, rather than
+ * zero means "the database's default tablespace". So, rather than
* throwing an error for zero, we choose to assume that's what is meant.
*/
if (tablespaceOid == InvalidOid)
@@ -384,7 +384,7 @@ pg_sleep(PG_FUNCTION_ARGS)
* loop.
*
* By computing the intended stop time initially, we avoid accumulation of
- * extra delay across multiple sleeps. This also ensures we won't delay
+ * extra delay across multiple sleeps. This also ensures we won't delay
* less than the specified time when WaitLatch is terminated early by a
* non-query-cancelling signal such as SIGHUP.
*/
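[Editor's illustration, not part of this commit: the fixed-deadline technique above as a standalone POSIX sketch; the server itself uses GetCurrentTimestamp() and WaitLatch(), and all names here are hypothetical.]

#include <time.h>
#include <unistd.h>

static double
now_seconds(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec + ts.tv_nsec / 1e9;
}

static void
sleep_until_deadline(double secs)
{
	double		deadline = now_seconds() + secs;	/* computed once */

	for (;;)
	{
		double		remain = deadline - now_seconds();

		if (remain <= 0)
			break;
		/* sleep in bounded chunks; early wakeups just loop again */
		usleep(remain > 0.1 ? 100000 : (useconds_t) (remain * 1e6));
	}
}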
@@ -547,7 +547,7 @@ pg_relation_is_updatable(PG_FUNCTION_ARGS)
* pg_column_is_updatable - determine whether a column is updatable
*
* This function encapsulates the decision about just what
- * information_schema.columns.is_updatable actually means. It's not clear
+ * information_schema.columns.is_updatable actually means. It's not clear
* whether deletability of the column's relation should be required, so
* we want that decision in C code where we could change it without initdb.
*/
diff --git a/src/backend/utils/adt/nabstime.c b/src/backend/utils/adt/nabstime.c
index 74d24aa0651..a6d30851df9 100644
--- a/src/backend/utils/adt/nabstime.c
+++ b/src/backend/utils/adt/nabstime.c
@@ -118,26 +118,24 @@ abstime2tm(AbsoluteTime _time, int *tzp, struct pg_tm * tm, char **tzn)
if (tzp != NULL)
{
- *tzp = -tm->tm_gmtoff; /* tm_gmtoff is Sun/DEC-ism */
+ *tzp = -tm->tm_gmtoff; /* tm_gmtoff is Sun/DEC-ism */
+ /*
+ * XXX FreeBSD man pages indicate that this should work - tgl 97/04/23
+ */
+ if (tzn != NULL)
+ {
/*
- * XXX FreeBSD man pages indicate that this should work - tgl
- * 97/04/23
+ * Copy no more than MAXTZLEN bytes of timezone to tzn, in case it
+ * contains an error message, which doesn't fit in the buffer
*/
- if (tzn != NULL)
- {
- /*
- * Copy no more than MAXTZLEN bytes of timezone to tzn, in
- * case it contains an error message, which doesn't fit in the
- * buffer
- */
- StrNCpy(*tzn, tm->tm_zone, MAXTZLEN + 1);
- if (strlen(tm->tm_zone) > MAXTZLEN)
- ereport(WARNING,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid time zone name: \"%s\"",
- tm->tm_zone)));
- }
+ StrNCpy(*tzn, tm->tm_zone, MAXTZLEN + 1);
+ if (strlen(tm->tm_zone) > MAXTZLEN)
+ ereport(WARNING,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("invalid time zone name: \"%s\"",
+ tm->tm_zone)));
+ }
}
else
tm->tm_isdst = -1;
@@ -175,7 +173,7 @@ tm2abstime(struct pg_tm * tm, int tz)
sec = tm->tm_sec + tz + (tm->tm_min + (day * HOURS_PER_DAY + tm->tm_hour) * MINS_PER_HOUR) * SECS_PER_MINUTE;
/*
- * check for overflow. We need a little slop here because the H/M/S plus
+ * check for overflow. We need a little slop here because the H/M/S plus
* TZ offset could add up to more than 1 day.
*/
if ((day >= MAX_DAYNUM - 10 && sec < 0) ||
@@ -1140,7 +1138,7 @@ tintervalsame(PG_FUNCTION_ARGS)
* 1. The interval length computations overflow at 2^31 seconds, causing
* intervals longer than that to sort oddly compared to those shorter.
* 2. infinity and minus infinity (NOEND_ABSTIME and NOSTART_ABSTIME) are
- * just ordinary integers. Since this code doesn't handle them specially,
+ * just ordinary integers. Since this code doesn't handle them specially,
* it's possible for [a b] to be considered longer than [c infinity] for
* finite abstimes a, b, c. In combination with the previous point, the
* interval [-infinity infinity] is treated as being shorter than many finite
diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c
index 8bdf5778d89..69c7ac182f0 100644
--- a/src/backend/utils/adt/network.c
+++ b/src/backend/utils/adt/network.c
@@ -39,7 +39,7 @@ network_in(char *src, bool is_cidr)
dst = (inet *) palloc0(sizeof(inet));
/*
- * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses
+ * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses
* will have a : somewhere in them (several, in fact) so if there is one
* present, assume it's V6, otherwise assume it's V4.
*/
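[Editor's illustration, not part of this commit: the family sniff above is just a colon test; the real code maps the answer onto its own address-family constants.]

#include <stdbool.h>
#include <string.h>

static bool
looks_like_ipv6(const char *src)
{
	return strchr(src, ':') != NULL;	/* textual IPv6 always contains ':' */
}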
@@ -144,7 +144,7 @@ cidr_out(PG_FUNCTION_ARGS)
* family, bits, is_cidr, address length, address in network byte order.
*
* Presence of is_cidr is largely for historical reasons, though it might
- * allow some code-sharing on the client side. We send it correctly on
+ * allow some code-sharing on the client side. We send it correctly on
* output, but ignore the value on input.
*/
static inet *
@@ -1401,7 +1401,7 @@ inetmi(PG_FUNCTION_ARGS)
/*
* We form the difference using the traditional complement, increment,
* and add rule, with the increment part being handled by starting the
- * carry off at 1. If you don't think integer arithmetic is done in
+ * carry off at 1. If you don't think integer arithmetic is done in
* two's complement, too bad.
*/
int nb = ip_addrsize(ip);
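[Editor's illustration, not part of this commit: the complement-increment-add rule above, over big-endian (network-order) byte arrays. res = a - b is computed as a + ~b + 1, with the "+1" supplied by starting the carry at 1; the name is hypothetical.]

static void
byte_subtract(const unsigned char *a, const unsigned char *b,
			  unsigned char *res, int nb)
{
	int			carry = 1;		/* the "increment" part of the rule */

	while (--nb >= 0)			/* least-significant byte comes last */
	{
		int			sum = a[nb] + (unsigned char) ~b[nb] + carry;

		res[nb] = sum & 0xFF;
		carry = sum >> 8;
	}
}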
@@ -1423,7 +1423,7 @@ inetmi(PG_FUNCTION_ARGS)
else
{
/*
- * Input wider than int64: check for overflow. All bytes to
+ * Input wider than int64: check for overflow. All bytes to
* the left of what will fit should be 0 or 0xFF, depending on
* sign of the now-complete result.
*/
@@ -1454,9 +1454,9 @@ inetmi(PG_FUNCTION_ARGS)
* XXX This should go away someday!
*
* This is a kluge needed because we don't yet support zones in stored inet
- * values. Since the result of getnameinfo() might include a zone spec,
+ * values. Since the result of getnameinfo() might include a zone spec,
* call this to remove it anywhere we want to feed getnameinfo's output to
- * network_in. Beats failing entirely.
+ * network_in. Beats failing entirely.
*
* An alternative approach would be to let network_in ignore %-parts for
* itself, but that would mean we'd silently drop zone specs in user input,
diff --git a/src/backend/utils/adt/network_gist.c b/src/backend/utils/adt/network_gist.c
index 0a826ae90a2..69b9d104749 100644
--- a/src/backend/utils/adt/network_gist.c
+++ b/src/backend/utils/adt/network_gist.c
@@ -7,7 +7,7 @@
* "union" of a set of INET/CIDR values. It works like this:
* 1. If the values are not all of the same IP address family, the "union"
* is a dummy value with family number zero, minbits zero, commonbits zero,
- * address all zeroes. Otherwise:
+ * address all zeroes. Otherwise:
* 2. The union has the common IP address family number.
* 3. The union's minbits value is the smallest netmask length ("ip_bits")
* of all the input values.
@@ -202,8 +202,8 @@ inet_gist_consistent(PG_FUNCTION_ARGS)
*
* Compare available common prefix bits to the query, but not beyond
* either the query's netmask or the minimum netmask among the represented
- * values. If these bits don't match the query, we have our answer (and
- * may or may not need to descend, depending on the operator). If they do
+ * values. If these bits don't match the query, we have our answer (and
+ * may or may not need to descend, depending on the operator). If they do
* match, and we are not at a leaf, we descend in all cases.
*
* Note this is the final check for operators that only consider the
@@ -682,7 +682,7 @@ inet_gist_picksplit(PG_FUNCTION_ARGS)
{
/*
* If there's more than 2 families, all but maxfamily go into the
- * left union. This could only happen if the inputs include some
+ * left union. This could only happen if the inputs include some
* IPv4, some IPv6, and some already-multiple-family unions.
*/
tmp = DatumGetInetKeyP(ent[i].key);
@@ -741,7 +741,7 @@ inet_gist_picksplit(PG_FUNCTION_ARGS)
}
/*
- * Compute the union value for each side from scratch. In most cases we
+ * Compute the union value for each side from scratch. In most cases we
* could approximate the union values with what we already know, but this
* ensures that each side has minbits and commonbits set as high as
* possible.
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index bf4f29d14d7..19d0bdcbb98 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -50,7 +50,7 @@
* Numeric values are represented in a base-NBASE floating point format.
* Each "digit" ranges from 0 to NBASE-1. The type NumericDigit is signed
* and wide enough to store a digit. We assume that NBASE*NBASE can fit in
- * an int. Although the purely calculational routines could handle any even
+ * an int. Although the purely calculational routines could handle any even
* NBASE that's less than sqrt(INT_MAX), in practice we are only interested
* in NBASE a power of ten, so that I/O conversions and decimal rounding
* are easy. Also, it's actually more efficient if NBASE is rather less than
@@ -95,11 +95,11 @@ typedef int16 NumericDigit;
* If the high bits of the first word of a NumericChoice (n_header, or
* n_short.n_header, or n_long.n_sign_dscale) are NUMERIC_SHORT, then the
* numeric follows the NumericShort format; if they are NUMERIC_POS or
- * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN,
+ * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN,
* it is a NaN. We currently always store a NaN using just two bytes (i.e.
* only n_header), but previous releases used only the NumericLong format,
* so we might find 4-byte NaNs on disk if a database has been migrated using
- * pg_upgrade. In either case, when the high bits indicate a NaN, the
+ * pg_upgrade. In either case, when the high bits indicate a NaN, the
* remaining bits are never examined. Currently, we always initialize these
* to zero, but it might be possible to use them for some other purpose in
* the future.
@@ -207,19 +207,19 @@ struct NumericData
: ((n)->choice.n_long.n_weight))
/* ----------
- * NumericVar is the format we use for arithmetic. The digit-array part
+ * NumericVar is the format we use for arithmetic. The digit-array part
* is the same as the NumericData storage format, but the header is more
* complex.
*
* The value represented by a NumericVar is determined by the sign, weight,
* ndigits, and digits[] array.
* Note: the first digit of a NumericVar's value is assumed to be multiplied
- * by NBASE ** weight. Another way to say it is that there are weight+1
+ * by NBASE ** weight. Another way to say it is that there are weight+1
* digits before the decimal point. It is possible to have weight < 0.
*
* buf points at the physical start of the palloc'd digit buffer for the
- * NumericVar. digits points at the first digit in actual use (the one
- * with the specified weight). We normally leave an unused digit or two
+ * NumericVar. digits points at the first digit in actual use (the one
+ * with the specified weight). We normally leave an unused digit or two
* (preset to zeroes) between buf and digits, so that there is room to store
* a carry out of the top digit without reallocating space. We just need to
* decrement digits (and increment weight) to make room for the carry digit.
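[Editor's worked example, not part of this commit: with NBASE = 10000, the value 12345.678 is stored as digits {1, 2345, 6780} with weight = 1, i.e. weight+1 = 2 digits before the decimal point. A self-contained check:]

#include <math.h>
#include <stdio.h>

int
main(void)
{
	short		digits[] = {1, 2345, 6780};
	int			weight = 1;
	double		val = 0.0;

	for (int i = 0; i < 3; i++)
		val += digits[i] * pow(10000.0, weight - i);
	printf("%.3f\n", val);		/* prints 12345.678 */
	return 0;
}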
@@ -596,7 +596,7 @@ numeric_maximum_size(int32 typmod)
* In most cases, the size of a numeric will be smaller than the value
* computed below, because the varlena header will typically get toasted
* down to a single byte before being stored on disk, and it may also be
- * possible to use a short numeric header. But our job here is to compute
+ * possible to use a short numeric header. But our job here is to compute
* the worst case.
*/
return NUMERIC_HDRSZ + (numeric_digits * sizeof(NumericDigit));
@@ -636,7 +636,8 @@ numeric_normalize(Numeric num)
{
NumericVar x;
char *str;
- int orig, last;
+ int orig,
+ last;
/*
* Handle NaN
@@ -754,7 +755,7 @@ numeric_send(PG_FUNCTION_ARGS)
*
* Flatten calls to numeric's length coercion function that solely represent
* increases in allowable precision. Scale changes mutate every datum, so
- * they are unoptimizable. Some values, e.g. 1E-1001, can only fit into an
+ * they are unoptimizable. Some values, e.g. 1E-1001, can only fit into an
* unconstrained numeric, so a change from an unconstrained numeric to any
* constrained numeric is also unoptimizable.
*/
@@ -784,7 +785,7 @@ numeric_transform(PG_FUNCTION_ARGS)
* If new_typmod < VARHDRSZ, the destination is unconstrained; that's
* always OK. If old_typmod >= VARHDRSZ, the source is constrained,
* and we're OK if the scale is unchanged and the precision is not
- * decreasing. See further notes in function header comment.
+ * decreasing. See further notes in function header comment.
*/
if (new_typmod < (int32) VARHDRSZ ||
(old_typmod >= (int32) VARHDRSZ &&
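[Editor's illustration, not part of this commit: numeric typmods pack ((precision << 16) | scale) + VARHDRSZ, so the test above unpacks to "same scale, precision not decreasing". A sketch with a hypothetical name:]

static bool
length_coercion_is_noop(int32 old_typmod, int32 new_typmod)
{
	int32		o = old_typmod - VARHDRSZ;
	int32		n = new_typmod - VARHDRSZ;

	if (new_typmod < (int32) VARHDRSZ)
		return true;			/* destination is unconstrained */
	if (old_typmod < (int32) VARHDRSZ)
		return false;			/* would constrain an unconstrained source */
	return (o & 0xffff) == (n & 0xffff) &&				/* scale unchanged */
		((o >> 16) & 0xffff) <= ((n >> 16) & 0xffff);	/* precision grows */
}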
@@ -996,7 +997,7 @@ numeric_uminus(PG_FUNCTION_ARGS)
/*
* The packed format is known to be totally zero digit trimmed always. So
- * we can identify a ZERO by the fact that there are no digits at all. Do
+ * we can identify a ZERO by the fact that there are no digits at all. Do
* nothing to a zero.
*/
if (NUMERIC_NDIGITS(num) != 0)
@@ -1972,7 +1973,7 @@ numeric_sqrt(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a scale
+ * Unpack the argument and determine the result scale. We choose a scale
* to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
* case not less than the input's dscale.
*/
@@ -2023,7 +2024,7 @@ numeric_exp(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a scale
+ * Unpack the argument and determine the result scale. We choose a scale
* to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
* case not less than the input's dscale.
*/
@@ -2517,7 +2518,7 @@ typedef struct NumericAggState
NumericVar sumX; /* sum of processed numbers */
NumericVar sumX2; /* sum of squares of processed numbers */
int maxScale; /* maximum scale seen so far */
- int64 maxScaleCount; /* number of values seen with maximum scale */
+ int64 maxScaleCount; /* number of values seen with maximum scale */
int64 NaNcount; /* count of NaN values (not included in N!) */
} NumericAggState;
@@ -2652,8 +2653,8 @@ do_numeric_discard(NumericAggState *state, Numeric newval)
if (state->maxScaleCount > 1 || state->maxScale == 0)
{
/*
- * Some remaining inputs have same dscale, or dscale hasn't
- * gotten above zero anyway
+ * Some remaining inputs have same dscale, or dscale hasn't gotten
+ * above zero anyway
*/
state->maxScaleCount--;
}
@@ -2767,9 +2768,9 @@ numeric_accum_inv(PG_FUNCTION_ARGS)
/*
* Integer data types all use Numeric accumulators to share code and
- * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation
+ * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation
* is overkill for the N and sum(X) values, but definitely not overkill
- * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only
+ * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only
* for stddev/variance --- there are faster special-purpose accumulator
* routines for SUM and AVG of these datatypes.
*/
@@ -2965,7 +2966,7 @@ numeric_avg(PG_FUNCTION_ARGS)
if (state == NULL || (state->N + state->NaNcount) == 0)
PG_RETURN_NULL();
- if (state->NaNcount > 0) /* there was at least one NaN input */
+ if (state->NaNcount > 0) /* there was at least one NaN input */
PG_RETURN_NUMERIC(make_result(&const_nan));
N_datum = DirectFunctionCall1(int8_numeric, Int64GetDatum(state->N));
@@ -2985,7 +2986,7 @@ numeric_sum(PG_FUNCTION_ARGS)
if (state == NULL || (state->N + state->NaNcount) == 0)
PG_RETURN_NULL();
- if (state->NaNcount > 0) /* there was at least one NaN input */
+ if (state->NaNcount > 0) /* there was at least one NaN input */
PG_RETURN_NUMERIC(make_result(&const_nan));
PG_RETURN_NUMERIC(make_result(&(state->sumX)));
@@ -3167,7 +3168,7 @@ numeric_stddev_pop(PG_FUNCTION_ARGS)
* the initial condition of the transition data value needs to be NULL. This
* means we can't rely on ExecAgg to automatically insert the first non-null
* data value into the transition data: it doesn't know how to do the type
- * conversion. The upshot is that these routines have to be marked non-strict
+ * conversion. The upshot is that these routines have to be marked non-strict
* and handle substitution of the first non-null input themselves.
*
* Note: these functions are used only in plain aggregation mode.
@@ -3653,7 +3654,7 @@ set_var_from_str(const char *str, const char *cp, NumericVar *dest)
/*
* We first parse the string to extract decimal digits and determine the
- * correct decimal weight. Then convert to NBASE representation.
+ * correct decimal weight. Then convert to NBASE representation.
*/
switch (*cp)
{
@@ -4261,7 +4262,7 @@ apply_typmod(NumericVar *var, int32 typmod)
/*
* Convert numeric to int8, rounding if needed.
*
- * If overflow, return FALSE (no error is raised). Return TRUE if okay.
+ * If overflow, return FALSE (no error is raised). Return TRUE if okay.
*/
static bool
numericvar_to_int8(NumericVar *var, int64 *result)
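[Reader's aid, not part of the patch] A minimal sketch of the same round-then-range-check contract, written for double inputs as an assumed illustration (the real routine works on NumericVar digits):

#include <math.h>
#include <stdbool.h>
#include <stdint.h>

/* Round to nearest; return false on overflow instead of raising an error. */
static bool
double_to_int64(double v, int64_t *result)
{
	double	rounded = rint(v);

	/* int64 range is [-2^63, 2^63); both bounds are exact doubles.
	 * The negated test also rejects NaN. */
	if (!(rounded >= -9223372036854775808.0 &&
		  rounded < 9223372036854775808.0))
		return false;
	*result = (int64_t) rounded;
	return true;
}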
@@ -4732,7 +4733,7 @@ sub_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
* mul_var() -
*
* Multiplication on variable level. Product of var1 * var2 is stored
- * in result. Result is rounded to no more than rscale fractional digits.
+ * in result. Result is rounded to no more than rscale fractional digits.
*/
static void
mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
@@ -4776,7 +4777,7 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* Determine number of result digits to compute. If the exact result
* would have more than rscale fractional digits, truncate the computation
- * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one
+ * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one
* or both inputs have fewer digits than they really do.
*/
res_ndigits = var1ndigits + var2ndigits + 1;
@@ -5019,7 +5020,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
*
* We need the first divisor digit to be >= NBASE/2. If it isn't,
* make it so by scaling up both the divisor and dividend by the
- * factor "d". (The reason for allocating dividend[0] above is to
+ * factor "d". (The reason for allocating dividend[0] above is to
* leave room for possible carry here.)
*/
if (divisor[1] < HALF_NBASE)
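[Reader's aid, not part of the patch] The "factor d" is the standard normalization step of Knuth's Algorithm D; a sketch of the shape, assumed from the comment rather than quoted from the file:

/* Sketch: scale both operands so divisor[1] >= NBASE/2.  The quotient is
 * unchanged, and dividend[0] absorbs any carry out of the top digit. */
if (divisor[1] < HALF_NBASE)
{
	int		d = NBASE / (divisor[1] + 1);	/* d >= 2 whenever we get here */

	/* multiply every digit of divisor[] and dividend[] by d, propagating
	 * carries from the least significant digit upward */
}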
@@ -5063,7 +5064,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* If next2digits are 0, then quotient digit must be 0 and there's
- * no need to adjust the working dividend. It's worth testing
+ * no need to adjust the working dividend. It's worth testing
* here to fall out ASAP when processing trailing zeroes in a
* dividend.
*/
@@ -5081,7 +5082,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* Adjust quotient digit if it's too large. Knuth proves that
* after this step, the quotient digit will be either correct or
- * just one too large. (Note: it's OK to use dividend[j+2] here
+ * just one too large. (Note: it's OK to use dividend[j+2] here
* because we know the divisor length is at least 2.)
*/
while (divisor2 * qhat >
@@ -5256,7 +5257,7 @@ div_var_fast(NumericVar *var1, NumericVar *var2, NumericVar *result,
* dividend's digits (plus appended zeroes to reach the desired precision
* including guard digits). Each step of the main loop computes an
* (approximate) quotient digit and stores it into div[], removing one
- * position of dividend space. A final pass of carry propagation takes
+ * position of dividend space. A final pass of carry propagation takes
* care of any mistaken quotient digits.
*/
div = (int *) palloc0((div_ndigits + 1) * sizeof(int));
@@ -6106,7 +6107,7 @@ power_var_int(NumericVar *base, int exp, NumericVar *result, int rscale)
/*
* The general case repeatedly multiplies base according to the bit
- * pattern of exp. We do the multiplications with some extra precision.
+ * pattern of exp. We do the multiplications with some extra precision.
*/
neg = (exp < 0);
exp = Abs(exp);
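[Reader's aid, not part of the patch] The "bit pattern" walk is plain binary exponentiation (square-and-multiply); a self-contained sketch with machine integers, whereas the real code multiplies NumericVars at extra precision:

/* Sketch: square-and-multiply.  Overflow is unchecked; illustration only. */
static long long
int_pow(long long base, unsigned int exp)
{
	long long	result = 1;

	while (exp > 0)
	{
		if (exp & 1)
			result *= base;		/* this bit of exp is set: fold power in */
		base *= base;			/* square for the next bit */
		exp >>= 1;
	}
	return result;
}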
diff --git a/src/backend/utils/adt/oid.c b/src/backend/utils/adt/oid.c
index 8945ef43f01..2badb558f03 100644
--- a/src/backend/utils/adt/oid.c
+++ b/src/backend/utils/adt/oid.c
@@ -318,7 +318,7 @@ oidparse(Node *node)
/*
* Values too large for int4 will be represented as Float
- * constants by the lexer. Accept these if they are valid OID
+ * constants by the lexer. Accept these if they are valid OID
* strings.
*/
return oidin_subr(strVal(node), NULL);
diff --git a/src/backend/utils/adt/orderedsetaggs.c b/src/backend/utils/adt/orderedsetaggs.c
index 99577a549e6..efb0411c228 100644
--- a/src/backend/utils/adt/orderedsetaggs.c
+++ b/src/backend/utils/adt/orderedsetaggs.c
@@ -462,7 +462,7 @@ percentile_disc_final(PG_FUNCTION_ARGS)
/*
* Note: we *cannot* clean up the tuplesort object here, because the value
- * to be returned is allocated inside its sortcontext. We could use
+ * to be returned is allocated inside its sortcontext. We could use
* datumCopy to copy it out of there, but it doesn't seem worth the
* trouble, since the cleanup callback will clear the tuplesort later.
*/
@@ -580,7 +580,7 @@ percentile_cont_final_common(FunctionCallInfo fcinfo,
/*
* Note: we *cannot* clean up the tuplesort object here, because the value
- * to be returned may be allocated inside its sortcontext. We could use
+ * to be returned may be allocated inside its sortcontext. We could use
* datumCopy to copy it out of there, but it doesn't seem worth the
* trouble, since the cleanup callback will clear the tuplesort later.
*/
@@ -1086,7 +1086,7 @@ mode_final(PG_FUNCTION_ARGS)
/*
* Note: we *cannot* clean up the tuplesort object here, because the value
- * to be returned is allocated inside its sortcontext. We could use
+ * to be returned is allocated inside its sortcontext. We could use
* datumCopy to copy it out of there, but it doesn't seem worth the
* trouble, since the cleanup callback will clear the tuplesort later.
*/
@@ -1331,7 +1331,7 @@ hypothetical_dense_rank_final(PG_FUNCTION_ARGS)
/*
* We alternate fetching into tupslot and extraslot so that we have the
- * previous row available for comparisons. This is accomplished by
+ * previous row available for comparisons. This is accomplished by
* swapping the slot pointer variables after each row.
*/
extraslot = MakeSingleTupleTableSlot(osastate->qstate->tupdesc);
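[Reader's aid, not part of the patch] The swap the comment describes is just an exchange of the two slot pointers after each fetch; sketched:

/* Sketch: after processing a row, swap so the old "current" slot
 * survives as the comparable "previous" row. */
TupleTableSlot *tmpslot = tupslot;

tupslot = extraslot;
extraslot = tmpslot;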
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 0c8474df54a..94bb5a47bb7 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -20,12 +20,12 @@
*
* The other categories, LC_MONETARY, LC_NUMERIC, and LC_TIME are also
* settable at run-time. However, we don't actually set those locale
- * categories permanently. This would have bizarre effects like no
+ * categories permanently. This would have bizarre effects like no
* longer accepting standard floating-point literals in some locales.
* Instead, we only set the locales briefly when needed, cache the
* required information obtained from localeconv(), and set them back.
* The cached information is only used by the formatting functions
- * (to_char, etc.) and the money type. For the user, this should all be
+ * (to_char, etc.) and the money type. For the user, this should all be
* transparent.
*
* !!! NOW HEAR THIS !!!
@@ -39,7 +39,7 @@
* fail = true;
* setlocale(category, save);
* DOES NOT WORK RELIABLY: on some platforms the second setlocale() call
- * will change the memory save is pointing at. To do this sort of thing
+ * will change the memory save is pointing at. To do this sort of thing
* safely, you *must* pstrdup what setlocale returns the first time.
*
* FYI, The Open Group locale standard is defined here:
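[Reader's aid, not part of the patch] A minimal sketch of the reliable pattern the warning calls for; pstrdup/pfree are the backend's allocators, and the category, locale name, and fail flag are placeholders echoing the comment's own example:

bool	fail = false;
char   *save = setlocale(LC_MONETARY, NULL);

if (save)
	save = pstrdup(save);		/* copy now; later calls may clobber it */

if (setlocale(LC_MONETARY, "de_DE") == NULL)	/* hypothetical target */
	fail = true;

/* ... read localeconv() results here ... */

if (save)
{
	setlocale(LC_MONETARY, save);	/* restore from our private copy */
	pfree(save);
}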
@@ -243,7 +243,7 @@ pg_perm_setlocale(int category, const char *locale)
* Is the locale name valid for the locale category?
*
* If successful, and canonname isn't NULL, a palloc'd copy of the locale's
- * canonical name is stored there. This is especially useful for figuring out
+ * canonical name is stored there. This is especially useful for figuring out
* what locale name "" means (ie, the server environment value). (Actually,
* it seems that on most implementations that's the only thing it's good for;
* we could wish that setlocale gave back a canonically spelled version of
@@ -286,7 +286,7 @@ check_locale(int category, const char *locale, char **canonname)
*
* For most locale categories, the assign hook doesn't actually set the locale
* permanently, just reset flags so that the next use will cache the
- * appropriate values. (See explanation at the top of this file.)
+ * appropriate values. (See explanation at the top of this file.)
*
* Note: we accept value = "" as selecting the postmaster's environment
* value, whatever it was (so long as the environment setting is legal).
@@ -463,6 +463,7 @@ PGLC_localeconv(void)
save_lc_numeric = pstrdup(save_lc_numeric);
#ifdef WIN32
+
/*
* Ideally, monetary and numeric local symbols could be returned in any
* server encoding. Unfortunately, the WIN32 API does not allow
@@ -644,6 +645,7 @@ cache_locale_time(void)
save_lc_time = pstrdup(save_lc_time);
#ifdef WIN32
+
/*
* On WIN32, there is no way to get locale-specific time values in a
* specified locale, like we do for monetary/numeric. We can only get
@@ -729,13 +731,13 @@ cache_locale_time(void)
* Convert a Windows setlocale() argument to a Unix-style one.
*
* Regardless of platform, we install message catalogs under a Unix-style
- * LL[_CC][.ENCODING][@VARIANT] naming convention. Only LC_MESSAGES settings
+ * LL[_CC][.ENCODING][@VARIANT] naming convention. Only LC_MESSAGES settings
* following that style will elicit localized interface strings.
*
* Before Visual Studio 2012 (msvcr110.dll), Windows setlocale() accepted "C"
* (but not "c") and strings of the form <Language>[_<Country>][.<CodePage>],
* case-insensitive. setlocale() returns the fully-qualified form; for
- * example, setlocale("thaI") returns "Thai_Thailand.874". Internally,
+ * example, setlocale("thaI") returns "Thai_Thailand.874". Internally,
* setlocale() and _create_locale() select a "locale identifier"[1] and store
* it in an undocumented _locale_t field. From that LCID, we can retrieve the
* ISO 639 language and the ISO 3166 country. Character encoding does not
@@ -746,12 +748,12 @@ cache_locale_time(void)
* Studio 2012, setlocale() accepts locale names in addition to the strings it
* accepted historically. It does not standardize them; setlocale("Th-tH")
* returns "Th-tH". setlocale(category, "") still returns a traditional
- * string. Furthermore, msvcr110.dll changed the undocumented _locale_t
+ * string. Furthermore, msvcr110.dll changed the undocumented _locale_t
* content to carry locale names instead of locale identifiers.
*
* MinGW headers declare _create_locale(), but msvcrt.dll lacks that symbol.
* IsoLocaleName() always fails in a MinGW-built postgres.exe, so only
- * Unix-style values of the lc_messages GUC can elicit localized messages. In
+ * Unix-style values of the lc_messages GUC can elicit localized messages. In
* particular, every lc_messages setting that initdb can select automatically
* will yield only C-locale messages. XXX This could be fixed by running the
* fully-qualified locale name through a lookup table.
@@ -795,7 +797,7 @@ IsoLocaleName(const char *winlocname)
* need not standardize letter case here. So long as we do not ship
* message catalogs for which it would matter, we also need not
* translate the script/variant portion, e.g. uz-Cyrl-UZ to
- * uz_UZ@cyrillic. Simply replace the hyphen with an underscore.
+ * uz_UZ@cyrillic. Simply replace the hyphen with an underscore.
*
* Note that the locale name can be less-specific than the value we
* would derive under earlier Visual Studio releases. For example,
@@ -850,7 +852,7 @@ IsoLocaleName(const char *winlocname)
* could fail if the locale is C, so str_tolower() shouldn't call it
* in that case.
*
- * Note that we currently lack any way to flush the cache. Since we don't
+ * Note that we currently lack any way to flush the cache. Since we don't
* support ALTER COLLATION, this is OK. The worst case is that someone
* drops a collation, and a useless cache entry hangs around in existing
* backends.
@@ -1044,7 +1046,7 @@ report_newlocale_failure(const char *localename)
/*
- * Create a locale_t from a collation OID. Results are cached for the
+ * Create a locale_t from a collation OID. Results are cached for the
* lifetime of the backend. Thus, do not free the result with freelocale().
*
* As a special optimization, the default/database collation returns 0.
@@ -1170,6 +1172,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale)
return 0;
#ifdef WIN32
+
/*
* On Windows, the "Unicode" locales assume UTF16 not UTF8 encoding, and
* for some reason mbstowcs and wcstombs won't do this for us, so we use
@@ -1226,7 +1229,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale)
* This has almost the API of mbstowcs_l(), except that *from need not be
* null-terminated; instead, the number of input bytes is specified as
* fromlen. Also, we ereport() rather than returning -1 for invalid
- * input encoding. tolen is the maximum number of wchar_t's to store at *to.
+ * input encoding. tolen is the maximum number of wchar_t's to store at *to.
* The output will be zero-terminated iff there is room.
*/
size_t
diff --git a/src/backend/utils/adt/pg_lsn.c b/src/backend/utils/adt/pg_lsn.c
index e2b528a2435..d1448aee7bd 100644
--- a/src/backend/utils/adt/pg_lsn.c
+++ b/src/backend/utils/adt/pg_lsn.c
@@ -29,8 +29,10 @@ Datum
pg_lsn_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
- int len1, len2;
- uint32 id, off;
+ int len1,
+ len2;
+ uint32 id,
+ off;
XLogRecPtr result;
/* Sanity check input format. */
@@ -38,12 +40,12 @@ pg_lsn_in(PG_FUNCTION_ARGS)
if (len1 < 1 || len1 > MAXPG_LSNCOMPONENT || str[len1] != '/')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type pg_lsn: \"%s\"", str)));
+ errmsg("invalid input syntax for type pg_lsn: \"%s\"", str)));
len2 = strspn(str + len1 + 1, "0123456789abcdefABCDEF");
if (len2 < 1 || len2 > MAXPG_LSNCOMPONENT || str[len1 + 1 + len2] != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type pg_lsn: \"%s\"", str)));
+ errmsg("invalid input syntax for type pg_lsn: \"%s\"", str)));
/* Decode result. */
id = (uint32) strtoul(str, NULL, 16);
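[Reader's aid, not part of the patch] An LSN's text form is two hex halves joined by '/'; a sketch of the rest of the decode, assuming the input was validated as above (len1 is the length of the first hex field):

/* Sketch: "16/B374D848" -> ((uint64) 0x16 << 32) | 0xB374D848 */
uint32		id = (uint32) strtoul(str, NULL, 16);				/* high 32 bits */
uint32		off = (uint32) strtoul(str + len1 + 1, NULL, 16);	/* low 32 bits */
XLogRecPtr	result = ((uint64) id << 32) | off;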
@@ -59,7 +61,8 @@ pg_lsn_out(PG_FUNCTION_ARGS)
XLogRecPtr lsn = PG_GETARG_LSN(0);
char buf[MAXPG_LSNLEN + 1];
char *result;
- uint32 id, off;
+ uint32 id,
+ off;
/* Decode ID and offset */
id = (uint32) (lsn >> 32);
@@ -83,7 +86,7 @@ pg_lsn_recv(PG_FUNCTION_ARGS)
Datum
pg_lsn_send(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn = PG_GETARG_LSN(0);
+ XLogRecPtr lsn = PG_GETARG_LSN(0);
StringInfoData buf;
pq_begintypsend(&buf);
@@ -99,8 +102,8 @@ pg_lsn_send(PG_FUNCTION_ARGS)
Datum
pg_lsn_eq(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 == lsn2);
}
@@ -108,8 +111,8 @@ pg_lsn_eq(PG_FUNCTION_ARGS)
Datum
pg_lsn_ne(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 != lsn2);
}
@@ -117,8 +120,8 @@ pg_lsn_ne(PG_FUNCTION_ARGS)
Datum
pg_lsn_lt(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 < lsn2);
}
@@ -126,8 +129,8 @@ pg_lsn_lt(PG_FUNCTION_ARGS)
Datum
pg_lsn_gt(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 > lsn2);
}
@@ -135,8 +138,8 @@ pg_lsn_gt(PG_FUNCTION_ARGS)
Datum
pg_lsn_le(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 <= lsn2);
}
@@ -144,8 +147,8 @@ pg_lsn_le(PG_FUNCTION_ARGS)
Datum
pg_lsn_ge(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 >= lsn2);
}
@@ -158,8 +161,8 @@ pg_lsn_ge(PG_FUNCTION_ARGS)
Datum
pg_lsn_mi(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
char buf[256];
Datum result;
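[Reader's aid, not part of the patch] The body (cut off by the hunk) renders the byte difference in decimal and feeds it to numeric_in; a sketch of the sign handling, assumed from the declarations above:

/* Sketch: difference in bytes.  lsn2 > lsn1 must yield a negative result,
 * which is why the value goes through a signed decimal string rather than
 * a raw uint64. */
if (lsn1 < lsn2)
	snprintf(buf, sizeof(buf), "-" UINT64_FORMAT, lsn2 - lsn1);
else
	snprintf(buf, sizeof(buf), UINT64_FORMAT, lsn1 - lsn2);
/* then convert buf with numeric_in and PG_RETURN_NUMERIC the result */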
diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c
index 30f1c0ab1fe..fe088901f03 100644
--- a/src/backend/utils/adt/pg_lzcompress.c
+++ b/src/backend/utils/adt/pg_lzcompress.c
@@ -576,9 +576,9 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
/*
* Experiments suggest that these hash sizes work pretty well. A large
- * hash table minimizes collision, but has a higher startup cost. For
- * a small input, the startup cost dominates. The table size must be
- * a power of two.
+ * hash table minimizes collisions, but has a higher startup cost. For a
+ * small input, the startup cost dominates. The table size must be a power
+ * of two.
*/
if (slen < 128)
hashsz = 512;
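[Reader's aid, not part of the patch] A sketch of a power-of-two ladder in that spirit; the cutoffs beyond the 128/512 pair shown above are illustrative, not quoted from the file:

/* Sketch: smaller inputs get smaller tables; size stays a power of two. */
static int
choose_hash_size(int slen)
{
	int		hashsz = 512;		/* floor: cheap startup for tiny inputs */

	while (hashsz < 8192 && slen >= hashsz / 4)
		hashsz <<= 1;			/* doubling keeps it a power of two */
	return hashsz;
}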
@@ -615,7 +615,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
/*
* If we've emitted more than first_success_by bytes without finding
- * anything compressible at all, fail. This lets us fall out
+ * anything compressible at all, fail. This lets us fall out
* reasonably quickly when looking at incompressible input (such as
* pre-compressed data).
*/
@@ -639,7 +639,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
hist_next, hist_recycle,
dp, dend, mask);
dp++; /* Do not do this ++ in the line above! */
- /* The macro would do it four times - Jan. */
+ /* The macro would do it four times - Jan. */
}
found_match = true;
}
@@ -653,7 +653,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
hist_next, hist_recycle,
dp, dend, mask);
dp++; /* Do not do this ++ in the line above! */
- /* The macro would do it four times - Jan. */
+ /* The macro would do it four times - Jan. */
}
}
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index bf3084fce67..44ccd37e998 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -1797,5 +1797,5 @@ pg_stat_get_archiver(PG_FUNCTION_ARGS)
/* Returns the record as Datum */
PG_RETURN_DATUM(HeapTupleGetDatum(
- heap_form_tuple(tupdesc, values, nulls)));
+ heap_form_tuple(tupdesc, values, nulls)));
}
diff --git a/src/backend/utils/adt/pseudotypes.c b/src/backend/utils/adt/pseudotypes.c
index a553c1abf1c..475ce13abf4 100644
--- a/src/backend/utils/adt/pseudotypes.c
+++ b/src/backend/utils/adt/pseudotypes.c
@@ -6,7 +6,7 @@
* A pseudo-type isn't really a type and never has any operations, but
* we do need to supply input and output functions to satisfy the links
* in the pseudo-type's entry in pg_type. In most cases the functions
- * just throw an error if invoked. (XXX the error messages here cover
+ * just throw an error if invoked. (XXX the error messages here cover
* the most common case, but might be confusing in some contexts. Can
* we do better?)
*
@@ -139,7 +139,7 @@ anyarray_out(PG_FUNCTION_ARGS)
* anyarray_recv - binary input routine for pseudo-type ANYARRAY.
*
* XXX this could actually be made to work, since the incoming array
- * data will contain the element type OID. Need to think through
+ * data will contain the element type OID. Need to think through
* type-safety issues before allowing it, however.
*/
Datum
@@ -216,7 +216,7 @@ anyrange_out(PG_FUNCTION_ARGS)
* void_in - input routine for pseudo-type VOID.
*
* We allow this so that PL functions can return VOID without any special
- * hack in the PL handler. Whatever value the PL thinks it's returning
+ * hack in the PL handler. Whatever value the PL thinks it's returning
* will just be ignored.
*/
Datum
diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c
index 38b51035aec..bc8a480ed3e 100644
--- a/src/backend/utils/adt/rangetypes.c
+++ b/src/backend/utils/adt/rangetypes.c
@@ -1441,7 +1441,7 @@ tstzrange_subdiff(PG_FUNCTION_ARGS)
*
* This is for use by range-related functions that follow the convention
* of using the fn_extra field as a pointer to the type cache entry for
- * the range type. Functions that need to cache more information than
+ * the range type. Functions that need to cache more information than
* that must fend for themselves.
*/
TypeCacheEntry *
@@ -1465,7 +1465,7 @@ range_get_typcache(FunctionCallInfo fcinfo, Oid rngtypid)
* range_serialize: construct a range value from bounds and empty-flag
*
* This does not force canonicalization of the range value. In most cases,
- * external callers should only be canonicalization functions. Note that
+ * the only external callers should be canonicalization functions. Note that
* we perform some datatype-independent canonicalization checks anyway.
*/
RangeType *
@@ -1802,7 +1802,7 @@ range_cmp_bounds(TypeCacheEntry *typcache, RangeBound *b1, RangeBound *b2)
* Compare two range boundary point values, returning <0, 0, or >0 according
* to whether b1 is less than, equal to, or greater than b2.
*
- * This is similar to but simpler than range_cmp_bounds(). We just compare
+ * This is similar to but simpler than range_cmp_bounds(). We just compare
* the values held in b1 and b2, ignoring inclusive/exclusive flags. The
* lower/upper flags only matter for infinities, where they tell us if the
* infinity is plus or minus.
@@ -2283,7 +2283,7 @@ range_contains_elem_internal(TypeCacheEntry *typcache, RangeType *r, Datum val)
/*
* datum_compute_size() and datum_write() are used to insert the bound
- * values into a range object. They are modeled after heaptuple.c's
+ * values into a range object. They are modeled after heaptuple.c's
* heap_compute_data_size() and heap_fill_tuple(), but we need not handle
* null values here. TYPE_IS_PACKABLE must test the same conditions as
* heaptuple.c's ATT_IS_PACKABLE macro.
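[Reader's aid, not part of the patch] A sketch of the condition the comment names, matching heaptuple.c's ATT_IS_PACKABLE convention: a varlena (typlen == -1) whose storage strategy is not 'p' (PLAIN) can use a short header:

/* Sketch of the packability test; 'p' means PLAIN (uncompressed, inline). */
#define TYPE_IS_PACKABLE(typlen, typstorage) \
	((typlen) == -1 && (typstorage) != 'p')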
diff --git a/src/backend/utils/adt/rangetypes_gist.c b/src/backend/utils/adt/rangetypes_gist.c
index 13c87ea4a34..2bd28f50389 100644
--- a/src/backend/utils/adt/rangetypes_gist.c
+++ b/src/backend/utils/adt/rangetypes_gist.c
@@ -300,7 +300,7 @@ range_gist_penalty(PG_FUNCTION_ARGS)
else if (orig_lower.infinite && orig_upper.infinite)
{
/*
- * Original range requires broadening. (-inf; +inf) is most far
+ * Original range requires broadening. (-inf; +inf) is farthest
* from a normal range in this case.
*/
*penalty = 2 * CONTAIN_EMPTY_PENALTY;
@@ -497,7 +497,7 @@ range_gist_penalty(PG_FUNCTION_ARGS)
/*
* The GiST PickSplit method for ranges
*
- * Primarily, we try to segregate ranges of different classes. If splitting
+ * Primarily, we try to segregate ranges of different classes. If splitting
* ranges of the same class, use the appropriate split method for that class.
*/
Datum
@@ -668,7 +668,7 @@ range_gist_same(PG_FUNCTION_ARGS)
/*
* range_eq will ignore the RANGE_CONTAIN_EMPTY flag, so we have to check
- * that for ourselves. More generally, if the entries have been properly
+ * that for ourselves. More generally, if the entries have been properly
* normalized, then unequal flags bytes must mean unequal ranges ... so
* let's just test all the flag bits at once.
*/
@@ -816,7 +816,7 @@ range_gist_consistent_int(TypeCacheEntry *typcache, StrategyNumber strategy,
/*
* Empty ranges are contained by anything, so if key is or
- * contains any empty ranges, we must descend into it. Otherwise,
+ * contains any empty ranges, we must descend into it. Otherwise,
* descend only if key overlaps the query.
*/
if (RangeIsOrContainsEmpty(key))
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index 7c5b0d53bcf..caf45ef85f9 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -142,7 +142,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation)
char errMsg[100];
/*
- * Look for a match among previously compiled REs. Since the data
+ * Look for a match among previously compiled REs. Since the data
* structure is self-organizing with most-used entries at the front, our
* search strategy can just be to scan from the front.
*/
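[Reader's aid, not part of the patch] A sketch of the self-organizing (move-to-front) scan described above; entry_matches, cache, num_cached, and cached_re_t are placeholders, not the file's real identifiers:

/* Sketch: scan hottest-first; on a hit, shift the earlier entries down one
 * slot and promote the hit to slot 0, so frequent patterns stay cheap. */
for (i = 0; i < num_cached; i++)
{
	if (entry_matches(&cache[i], text_re, cflags, collation))	/* placeholder */
	{
		cached_re_t hit = cache[i];

		memmove(&cache[1], &cache[0], i * sizeof(cached_re_t));
		cache[0] = hit;
		return &cache[0].regex;
	}
}
/* miss: compile, evict the last slot if the cache is full, insert at front */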
@@ -192,7 +192,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation)
/*
* Here and in other places in this file, do CHECK_FOR_INTERRUPTS
- * before reporting a regex error. This is so that if the regex
+ * before reporting a regex error. This is so that if the regex
* library aborts and returns REG_CANCEL, we don't print an error
* message that implies the regex was invalid.
*/
@@ -298,7 +298,7 @@ RE_wchar_execute(regex_t *re, pg_wchar *data, int data_len,
* dat_len --- the length of the data string
* nmatch, pmatch --- optional return area for match details
*
- * Data is given in the database encoding. We internally
+ * Data is given in the database encoding. We internally
* convert to array of pg_wchar which is what Spencer's regex package wants.
*/
static bool
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index 6210f45a195..c0314ee5322 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -85,7 +85,7 @@ regprocin(PG_FUNCTION_ARGS)
/*
* In bootstrap mode we assume the given name is not schema-qualified, and
- * just search pg_proc for a unique match. This is needed for
+ * just search pg_proc for a unique match. This is needed for
* initializing other system catalogs (pg_namespace may not exist yet, and
* certainly there are no schemas other than pg_catalog).
*/
@@ -165,8 +165,8 @@ to_regproc(PG_FUNCTION_ARGS)
FuncCandidateList clist;
/*
- * Parse the name into components and see if it matches any pg_proc entries
- * in the current search path.
+ * Parse the name into components and see if it matches any pg_proc
+ * entries in the current search path.
*/
names = stringToQualifiedNameList(pro_name);
clist = FuncnameGetCandidates(names, -1, NIL, false, false, true);
@@ -295,7 +295,7 @@ regprocedurein(PG_FUNCTION_ARGS)
/*
* Else it's a name and arguments. Parse the name and arguments, look up
* potential matches in the current namespace search list, and scan to see
- * which one exactly matches the given argument types. (There will not be
+ * which one exactly matches the given argument types. (There will not be
* more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
@@ -339,7 +339,7 @@ to_regprocedure(PG_FUNCTION_ARGS)
/*
* Parse the name and arguments, look up potential matches in the current
* namespace search list, and scan to see which one exactly matches the
- * given argument types. (There will not be more than one match.)
+ * given argument types. (There will not be more than one match.)
*/
parseNameAndArgTypes(pro_name, false, &names, &nargs, argtypes);
@@ -376,7 +376,7 @@ format_procedure_qualified(Oid procedure_oid)
* Routine to produce regprocedure names; see format_procedure above.
*
* force_qualify says whether to schema-qualify; if true, the name is always
- * qualified regardless of search_path visibility. Otherwise the name is only
+ * qualified regardless of search_path visibility. Otherwise the name is only
* qualified if the function is not in path.
*/
static char *
@@ -510,7 +510,7 @@ regoperin(PG_FUNCTION_ARGS)
/*
* In bootstrap mode we assume the given name is not schema-qualified, and
- * just search pg_operator for a unique match. This is needed for
+ * just search pg_operator for a unique match. This is needed for
* initializing other system catalogs (pg_namespace may not exist yet, and
* certainly there are no schemas other than pg_catalog).
*/
@@ -724,7 +724,7 @@ regoperatorin(PG_FUNCTION_ARGS)
/*
* Else it's a name and arguments. Parse the name and arguments, look up
* potential matches in the current namespace search list, and scan to see
- * which one exactly matches the given argument types. (There will not be
+ * which one exactly matches the given argument types. (There will not be
* more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
@@ -770,7 +770,7 @@ to_regoperator(PG_FUNCTION_ARGS)
/*
* Parse the name and arguments, look up potential matches in the current
* namespace search list, and scan to see which one exactly matches the
- * given argument types. (There will not be more than one match.)
+ * given argument types. (There will not be more than one match.)
*/
parseNameAndArgTypes(opr_name_or_oid, true, &names, &nargs, argtypes);
if (nargs == 1)
@@ -1006,8 +1006,8 @@ to_regclass(PG_FUNCTION_ARGS)
List *names;
/*
- * Parse the name into components and see if it matches any pg_class entries
- * in the current search path.
+ * Parse the name into components and see if it matches any pg_class
+ * entries in the current search path.
*/
names = stringToQualifiedNameList(class_name);
@@ -1045,7 +1045,7 @@ regclassout(PG_FUNCTION_ARGS)
/*
* In bootstrap mode, skip the fancy namespace stuff and just return
- * the class name. (This path is only needed for debugging output
+ * the class name. (This path is only needed for debugging output
* anyway.)
*/
if (IsBootstrapProcessingMode())
@@ -1560,7 +1560,7 @@ stringToQualifiedNameList(const char *string)
/*
* Given a C string, parse it into a qualified function or operator name
- * followed by a parenthesized list of type names. Reduce the
+ * followed by a parenthesized list of type names. Reduce the
* type names to an array of OIDs (returned into *nargs and *argtypes;
* the argtypes array should be of size FUNC_MAX_ARGS). The function or
* operator name is returned to *names as a List of Strings.
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 1e1e616fa48..d30847b34e6 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -698,7 +698,7 @@ ri_restrict_del(TriggerData *trigdata, bool is_no_action)
/*
* If another PK row now exists providing the old key values, we
- * should not do anything. However, this check should only be
+ * should not do anything. However, this check should only be
* made in the NO ACTION case; in RESTRICT cases we don't wish to
* allow another row to be substituted.
*/
@@ -922,7 +922,7 @@ ri_restrict_upd(TriggerData *trigdata, bool is_no_action)
/*
* If another PK row now exists providing the old key values, we
- * should not do anything. However, this check should only be
+ * should not do anything. However, this check should only be
* made in the NO ACTION case; in RESTRICT cases we don't wish to
* allow another row to be substituted.
*/
@@ -1850,7 +1850,7 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
* believe no check is necessary. So we need to do another lookup
* now and in case a reference still exists, abort the operation.
* That is already implemented in the NO ACTION trigger, so just
- * run it. (This recheck is only needed in the SET DEFAULT case,
+ * run it. (This recheck is only needed in the SET DEFAULT case,
* since CASCADE would remove such rows, while SET NULL is certain
* to result in rows that satisfy the FK constraint.)
*/
@@ -2041,7 +2041,7 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
* believe no check is necessary. So we need to do another lookup
* now and in case a reference still exists, abort the operation.
* That is already implemented in the NO ACTION trigger, so just
- * run it. (This recheck is only needed in the SET DEFAULT case,
+ * run it. (This recheck is only needed in the SET DEFAULT case,
* since CASCADE must change the FK key values, while SET NULL is
* certain to result in rows that satisfy the FK constraint.)
*/
@@ -2397,7 +2397,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
* Temporarily increase work_mem so that the check query can be executed
* more efficiently. It seems okay to do this because the query is simple
* enough to not use a multiple of work_mem, and one typically would not
- * have many large foreign-key validations happening concurrently. So
+ * have many large foreign-key validations happening concurrently. So
* this seems to meet the criteria for being considered a "maintenance"
* operation, and accordingly we use maintenance_work_mem.
*
@@ -2451,7 +2451,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
/*
* The columns to look at in the result tuple are 1..N, not whatever
- * they are in the fk_rel. Hack up riinfo so that the subroutines
+ * they are in the fk_rel. Hack up riinfo so that the subroutines
* called here will behave properly.
*
* In addition to this, we have to pass the correct tupdesc to
@@ -3180,7 +3180,7 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo,
errhint("This is most likely due to a rule having rewritten the query.")));
/*
- * Determine which relation to complain about. If tupdesc wasn't passed
+ * Determine which relation to complain about. If tupdesc wasn't passed
* by caller, assume the violator tuple came from there.
*/
onfk = (queryno == RI_PLAN_CHECK_LOOKUPPK);
diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c
index 521c3daea7e..9543d01d492 100644
--- a/src/backend/utils/adt/rowtypes.c
+++ b/src/backend/utils/adt/rowtypes.c
@@ -279,7 +279,7 @@ record_in(PG_FUNCTION_ARGS)
/*
* We cannot return tuple->t_data because heap_form_tuple allocates it as
* part of a larger chunk, and our caller may expect to be able to pfree
- * our result. So must copy the info into a new palloc chunk.
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
@@ -623,7 +623,7 @@ record_recv(PG_FUNCTION_ARGS)
/*
* We cannot return tuple->t_data because heap_form_tuple allocates it as
* part of a larger chunk, and our caller may expect to be able to pfree
- * our result. So must copy the info into a new palloc chunk.
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
@@ -861,7 +861,7 @@ record_cmp(FunctionCallInfo fcinfo)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
@@ -1097,7 +1097,7 @@ record_eq(PG_FUNCTION_ARGS)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
@@ -1356,7 +1356,7 @@ record_image_cmp(FunctionCallInfo fcinfo)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
@@ -1390,11 +1390,12 @@ record_image_cmp(FunctionCallInfo fcinfo)
format_type_be(tupdesc2->attrs[i2]->atttypid),
j + 1)));
- /*
- * The same type should have the same length (or both should be variable).
- */
- Assert(tupdesc1->attrs[i1]->attlen ==
- tupdesc2->attrs[i2]->attlen);
+ /*
+ * The same type should have the same length (or both should be
+ * variable).
+ */
+ Assert(tupdesc1->attrs[i1]->attlen ==
+ tupdesc2->attrs[i2]->attlen);
/*
* We consider two NULLs equal; NULL > not-NULL.
@@ -1421,8 +1422,8 @@ record_image_cmp(FunctionCallInfo fcinfo)
{
Size len1,
len2;
- struct varlena *arg1val;
- struct varlena *arg2val;
+ struct varlena *arg1val;
+ struct varlena *arg2val;
len1 = toast_raw_datum_size(values1[i1]);
len2 = toast_raw_datum_size(values2[i2]);
@@ -1632,7 +1633,7 @@ record_image_eq(PG_FUNCTION_ARGS)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
@@ -1690,8 +1691,8 @@ record_image_eq(PG_FUNCTION_ARGS)
result = false;
else
{
- struct varlena *arg1val;
- struct varlena *arg2val;
+ struct varlena *arg1val;
+ struct varlena *arg2val;
arg1val = PG_DETOAST_DATUM_PACKED(values1[i1]);
arg2val = PG_DETOAST_DATUM_PACKED(values2[i2]);
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 36d9953108b..a30d8febf85 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -155,11 +155,11 @@ typedef struct
*
* Selecting aliases is unreasonably complicated because of the need to dump
* rules/views whose underlying tables may have had columns added, deleted, or
- * renamed since the query was parsed. We must nonetheless print the rule/view
+ * renamed since the query was parsed. We must nonetheless print the rule/view
* in a form that can be reloaded and will produce the same results as before.
*
* For each RTE used in the query, we must assign column aliases that are
- * unique within that RTE. SQL does not require this of the original query,
+ * unique within that RTE. SQL does not require this of the original query,
* but due to factors such as *-expansion we need to be able to uniquely
* reference every column in a decompiled query. As long as we qualify all
* column references, per-RTE uniqueness is sufficient for that.
@@ -214,8 +214,8 @@ typedef struct
/*
* new_colnames is an array containing column aliases to use for columns
* that would exist if the query was re-parsed against the current
- * definitions of its base tables. This is what to print as the column
- * alias list for the RTE. This array does not include dropped columns,
+ * definitions of its base tables. This is what to print as the column
+ * alias list for the RTE. This array does not include dropped columns,
* but it will include columns added since original parsing. Indexes in
* it therefore have little to do with current varattno values. As above,
* entries are unique unless this is for an unnamed JOIN RTE. (In such an
@@ -1077,7 +1077,7 @@ pg_get_indexdef_worker(Oid indexrelid, int colno,
context = deparse_context_for(get_relation_name(indrelid), indrelid);
/*
- * Start the index definition. Note that the index's name should never be
+ * Start the index definition. Note that the index's name should never be
* schema-qualified, but the indexed rel's name may be.
*/
initStringInfo(&buf);
@@ -1304,9 +1304,9 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
HeapTuple tup;
Form_pg_constraint conForm;
StringInfoData buf;
- SysScanDesc scandesc;
+ SysScanDesc scandesc;
ScanKeyData scankey[1];
- Snapshot snapshot = RegisterSnapshot(GetTransactionSnapshot());
+ Snapshot snapshot = RegisterSnapshot(GetTransactionSnapshot());
Relation relation = heap_open(ConstraintRelationId, AccessShareLock);
ScanKeyInit(&scankey[0],
@@ -1315,15 +1315,15 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
ObjectIdGetDatum(constraintId));
scandesc = systable_beginscan(relation,
- ConstraintOidIndexId,
- true,
- snapshot,
- 1,
- scankey);
+ ConstraintOidIndexId,
+ true,
+ snapshot,
+ 1,
+ scankey);
/*
- * We later use the tuple with SysCacheGetAttr() as if we
- * had obtained it via SearchSysCache, which works fine.
+ * We later use the tuple with SysCacheGetAttr() as if we had obtained it
+ * via SearchSysCache, which works fine.
*/
tup = systable_getnext(scandesc);
@@ -1806,7 +1806,7 @@ pg_get_serial_sequence(PG_FUNCTION_ARGS)
SysScanDesc scan;
HeapTuple tup;
- /* Look up table name. Can't lock it - we might not have privileges. */
+ /* Look up table name. Can't lock it - we might not have privileges. */
tablerv = makeRangeVarFromNameList(textToQualifiedNameList(tablename));
tableOid = RangeVarGetRelid(tablerv, NoLock, false);
@@ -2406,8 +2406,10 @@ pg_get_function_arg_default(PG_FUNCTION_ARGS)
proc = (Form_pg_proc) GETSTRUCT(proctup);
- /* Calculate index into proargdefaults: proargdefaults corresponds to the
- * last N input arguments, where N = pronargdefaults. */
+ /*
+ * Calculate index into proargdefaults: proargdefaults corresponds to the
+ * last N input arguments, where N = pronargdefaults.
+ */
nth_default = nth_inputarg - 1 - (proc->pronargs - proc->pronargdefaults);
if (nth_default < 0 || nth_default >= list_length(argdefaults))
@@ -2444,7 +2446,7 @@ deparse_expression(Node *expr, List *dpcontext,
* tree (ie, not the raw output of gram.y).
*
* dpcontext is a list of deparse_namespace nodes representing the context
- * for interpreting Vars in the node tree. It can be NIL if no Vars are
+ * for interpreting Vars in the node tree. It can be NIL if no Vars are
* expected.
*
* forceprefix is TRUE to force all Vars to be prefixed with their table names.
@@ -2484,7 +2486,7 @@ deparse_expression_pretty(Node *expr, List *dpcontext,
*
* Given the reference name (alias) and OID of a relation, build deparsing
* context for an expression referencing only that relation (as varno 1,
- * varlevelsup 0). This is sufficient for many uses of deparse_expression.
+ * varlevelsup 0). This is sufficient for many uses of deparse_expression.
* ----------
*/
List *
@@ -2555,7 +2557,7 @@ deparse_context_for_planstate(Node *planstate, List *ancestors,
dpns->ctes = NIL;
/*
- * Set up column name aliases. We will get rather bogus results for join
+ * Set up column name aliases. We will get rather bogus results for join
* RTEs, but that doesn't matter because plan trees don't contain any join
* alias Vars.
*/
@@ -3113,7 +3115,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
/*
* Scan the columns, select a unique alias for each one, and store it in
* colinfo->colnames and colinfo->new_colnames. The former array has NULL
- * entries for dropped columns, the latter omits them. Also mark
+ * entries for dropped columns, the latter omits them. Also mark
* new_colnames entries as to whether they are new since parse time; this
* is the case for entries beyond the length of rte->eref->colnames.
*/
@@ -3168,7 +3170,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
/*
* For a relation RTE, we need only print the alias column names if any
- * are different from the underlying "real" names. For a function RTE,
+ * are different from the underlying "real" names. For a function RTE,
* always emit a complete column alias list; this is to protect against
* possible instability of the default column names (eg, from altering
* parameter names). For other RTE types, print if we changed anything OR
@@ -3631,7 +3633,7 @@ identify_join_columns(JoinExpr *j, RangeTblEntry *jrte,
/*
* If there's a USING clause, deconstruct the join quals to identify the
- * merged columns. This is a tad painful but if we cannot rely on the
+ * merged columns. This is a tad painful but if we cannot rely on the
* column names, there is no other representation of which columns were
* joined by USING. (Unless the join type is FULL, we can't tell from the
* joinaliasvars list which columns are merged.) Note: we assume that the
@@ -3765,7 +3767,7 @@ set_deparse_planstate(deparse_namespace *dpns, PlanState *ps)
* We special-case Append and MergeAppend to pretend that the first child
* plan is the OUTER referent; we have to interpret OUTER Vars in their
* tlists according to one of the children, and the first one is the most
- * natural choice. Likewise special-case ModifyTable to pretend that the
+ * natural choice. Likewise special-case ModifyTable to pretend that the
* first child plan is the OUTER referent; this is to support RETURNING
* lists containing references to non-target relations.
*/
@@ -4167,8 +4169,8 @@ get_query_def(Query *query, StringInfo buf, List *parentnamespace,
/*
* Before we begin to examine the query, acquire locks on referenced
- * relations, and fix up deleted columns in JOIN RTEs. This ensures
- * consistent results. Note we assume it's OK to scribble on the passed
+ * relations, and fix up deleted columns in JOIN RTEs. This ensures
+ * consistent results. Note we assume it's OK to scribble on the passed
* querytree!
*
* We are only deparsing the query (we are not about to execute it), so we
@@ -4641,7 +4643,7 @@ get_target_list(List *targetList, deparse_context *context,
}
/*
- * Figure out what the result column should be called. In the context
+ * Figure out what the result column should be called. In the context
* of a view, use the view's tuple descriptor (so as to pick up the
* effects of any column RENAME that's been done on the view).
* Otherwise, just use what we can find in the TLE.
@@ -4863,7 +4865,7 @@ get_rule_sortgroupclause(SortGroupClause *srt, List *tlist, bool force_colno,
* expression is a constant, force it to be dumped with an explicit cast
* as decoration --- this is because a simple integer constant is
* ambiguous (and will be misinterpreted by findTargetlistEntry()) if we
- * dump it without any decoration. Otherwise, just dump the expression
+ * dump it without any decoration. Otherwise, just dump the expression
* normally.
*/
if (force_colno)
@@ -5558,8 +5560,8 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
* If it's an unnamed join, look at the expansion of the alias variable.
* If it's a simple reference to one of the input vars, then recursively
- * print the name of that var instead. When it's not a simple reference,
- * we have to just print the unqualified join column name. (This can only
+ * print the name of that var instead. When it's not a simple reference,
+ * we have to just print the unqualified join column name. (This can only
* happen with "dangerous" merged columns in a JOIN USING; we took pains
* previously to make the unqualified column name unique in such cases.)
*
@@ -5587,7 +5589,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
* Unnamed join has no refname. (Note: since it's unnamed, there is
* no way the user could have referenced it to create a whole-row Var
- * for it. So we don't have to cover that case below.)
+ * for it. So we don't have to cover that case below.)
*/
Assert(refname == NULL);
}
@@ -5628,7 +5630,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
- * Get the name of a field of an expression of composite type. The
+ * Get the name of a field of an expression of composite type. The
* expression is usually a Var, but we handle other cases too.
*
* levelsup is an extra offset to interpret the Var's varlevelsup correctly.
@@ -5638,7 +5640,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
* could also be RECORD. Since no actual table or view column is allowed to
* have type RECORD, a Var of type RECORD must refer to a JOIN or FUNCTION RTE
* or to a subquery output. We drill down to find the ultimate defining
- * expression and attempt to infer the field name from it. We ereport if we
+ * expression and attempt to infer the field name from it. We ereport if we
* can't determine the name.
*
* Similarly, a PARAM of type RECORD has to refer to some expression of
@@ -6003,7 +6005,7 @@ get_name_for_var_field(Var *var, int fieldno,
/*
* We now have an expression we can't expand any more, so see if
- * get_expr_result_type() can do anything with it. If not, pass to
+ * get_expr_result_type() can do anything with it. If not, pass to
* lookup_rowtype_tupdesc() which will probably fail, but will give an
* appropriate error message while failing.
*/
@@ -6021,7 +6023,7 @@ get_name_for_var_field(Var *var, int fieldno,
* reference a parameter supplied by an upper NestLoop or SubPlan plan node.
*
* If successful, return the expression and set *dpns_p and *ancestor_cell_p
- * appropriately for calling push_ancestor_plan(). If no referent can be
+ * appropriately for calling push_ancestor_plan(). If no referent can be
* found, return NULL.
*/
static Node *
@@ -6153,7 +6155,7 @@ get_parameter(Param *param, deparse_context *context)
/*
* If it's a PARAM_EXEC parameter, try to locate the expression from which
- * the parameter was computed. Note that failing to find a referent isn't
+ * the parameter was computed. Note that failing to find a referent isn't
* an error, since the Param might well be a subplan output rather than an
* input.
*/
@@ -6631,10 +6633,10 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* If there's a refassgnexpr, we want to print the node in the
- * format "array[subscripts] := refassgnexpr". This is not
+ * format "array[subscripts] := refassgnexpr". This is not
* legal SQL, so decompilation of INSERT or UPDATE statements
* should always use processIndirection as part of the
- * statement-level syntax. We should only see this when
+ * statement-level syntax. We should only see this when
* EXPLAIN tries to print the targetlist of a plan resulting
* from such a statement.
*/
@@ -6793,7 +6795,7 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* We cannot see an already-planned subplan in rule deparsing,
- * only while EXPLAINing a query plan. We don't try to
+ * only while EXPLAINing a query plan. We don't try to
* reconstruct the original SQL, just reference the subplan
* that appears elsewhere in EXPLAIN's result.
*/
@@ -6866,14 +6868,14 @@ get_rule_expr(Node *node, deparse_context *context,
* There is no good way to represent a FieldStore as real SQL,
* so decompilation of INSERT or UPDATE statements should
* always use processIndirection as part of the
- * statement-level syntax. We should only get here when
+ * statement-level syntax. We should only get here when
* EXPLAIN tries to print the targetlist of a plan resulting
* from such a statement. The plan case is even harder than
* ordinary rules would be, because the planner tries to
* collapse multiple assignments to the same field or subfield
* into one FieldStore; so we can see a list of target fields
* not just one, and the arguments could be FieldStores
- * themselves. We don't bother to try to print the target
+ * themselves. We don't bother to try to print the target
* field names; we just print the source arguments, with a
* ROW() around them if there's more than one. This isn't
* terribly complete, but it's probably good enough for
@@ -7668,7 +7670,7 @@ get_agg_expr(Aggref *aggref, deparse_context *context)
{
/*
* Ordered-set aggregates do not use "*" syntax. Also, we needn't
- * worry about inserting VARIADIC. So we can just dump the direct
+ * worry about inserting VARIADIC. So we can just dump the direct
* args as-is.
*/
Assert(!aggref->aggvariadic);
@@ -7810,7 +7812,7 @@ get_coercion_expr(Node *arg, deparse_context *context,
* Since parse_coerce.c doesn't immediately collapse application of
* length-coercion functions to constants, what we'll typically see in
* such cases is a Const with typmod -1 and a length-coercion function
- * right above it. Avoid generating redundant output. However, beware of
+ * right above it. Avoid generating redundant output. However, beware of
* suppressing casts when the user actually wrote something like
* 'foo'::text::char(3).
*/
@@ -7892,7 +7894,7 @@ get_const_expr(Const *constval, deparse_context *context, int showtype)
/*
* These types are printed without quotes unless they contain
* values that aren't accepted by the scanner unquoted (e.g.,
- * 'NaN'). Note that strtod() and friends might accept NaN,
+ * 'NaN'). Note that strtod() and friends might accept NaN,
* so we can't use that to test.
*
* In reality we only need to defend against infinity and NaN,
@@ -8416,7 +8418,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
else if (rte->rtekind == RTE_FUNCTION)
{
/*
- * For a function RTE, always print alias. This covers possible
+ * For a function RTE, always print alias. This covers possible
* renaming of the function and/or instability of the
* FigureColname rules for things that aren't simple functions.
* Note we'd need to force it anyway for the columndef list case.
@@ -8672,7 +8674,7 @@ get_opclass_name(Oid opclass, Oid actual_datatype,
if (!OidIsValid(actual_datatype) ||
GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass)
{
- /* Okay, we need the opclass name. Do we need to qualify it? */
+ /* Okay, we need the opclass name. Do we need to qualify it? */
opcname = NameStr(opcrec->opcname);
if (OpclassIsVisible(opclass))
appendStringInfo(buf, " %s", quote_identifier(opcname));
@@ -8967,13 +8969,13 @@ generate_relation_name(Oid relid, List *namespaces)
* generate_function_name
* Compute the name to display for a function specified by OID,
* given that it is being called with the specified actual arg names and
- * types. (Those matter because of ambiguous-function resolution rules.)
+ * types. (Those matter because of ambiguous-function resolution rules.)
*
* If we're dealing with a potentially variadic function (in practice, this
* means a FuncExpr or Aggref, not some other way of calling a function), then
* has_variadic must specify whether variadic arguments have been merged,
* and *use_variadic_p will be set to indicate whether to print VARIADIC in
- * the output. For non-FuncExpr cases, has_variadic should be FALSE and
+ * the output. For non-FuncExpr cases, has_variadic should be FALSE and
* use_variadic_p can be NULL.
*
* The result includes all necessary quoting and schema-prefixing.
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 1ffc0160b77..e932ccf0da5 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -72,7 +72,7 @@
* float8 oprjoin (internal, oid, internal, int2, internal);
*
* (Before Postgres 8.4, join estimators had only the first four of these
- * parameters. That signature is still allowed, but deprecated.) The
+ * parameters. That signature is still allowed, but deprecated.) The
* relationship between jointype and sjinfo is explained in the comments for
* clause_selectivity() --- the short version is that jointype is usually
* best ignored in favor of examining sjinfo.
@@ -209,7 +209,7 @@ static List *add_predicate_to_quals(IndexOptInfo *index, List *indexQuals);
*
* Note: this routine is also used to estimate selectivity for some
* operators that are not "=" but have comparable selectivity behavior,
- * such as "~=" (geometric approximate-match). Even for "=", we must
+ * such as "~=" (geometric approximate-match). Even for "=", we must
* keep in mind that the left and right datatypes may differ.
*/
Datum
@@ -273,7 +273,7 @@ var_eq_const(VariableStatData *vardata, Oid operator,
/*
* If we matched the var to a unique index or DISTINCT clause, assume
- * there is exactly one match regardless of anything else. (This is
+ * there is exactly one match regardless of anything else. (This is
* slightly bogus, since the index or clause's equality operator might be
* different from ours, but it's much more likely to be right than
* ignoring the information.)
@@ -296,7 +296,7 @@ var_eq_const(VariableStatData *vardata, Oid operator,
/*
* Is the constant "=" to any of the column's most common values?
* (Although the given operator may not really be "=", we will assume
- * that seeing whether it returns TRUE is an appropriate test. If you
+ * that seeing whether it returns TRUE is an appropriate test. If you
* don't like this, maybe you shouldn't be using eqsel for your
* operator...)
*/
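
The MCV probe this comment describes boils down to applying the operator between each most-common value and the constant (a sketch, assuming values[]/nvalues were pulled from pg_statistic and eqproc was set up with fmgr_info):

	match = false;
	for (i = 0; i < nvalues; i++)
	{
		/* the operator may not really be "=", but treat TRUE as a match */
		if (DatumGetBool(FunctionCall2Coll(&eqproc,
										   DEFAULT_COLLATION_OID,
										   values[i], constval)))
		{
			match = true;
			break;
		}
	}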
@@ -408,7 +408,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
/*
* If we matched the var to a unique index or DISTINCT clause, assume
- * there is exactly one match regardless of anything else. (This is
+ * there is exactly one match regardless of anything else. (This is
* slightly bogus, since the index or clause's equality operator might be
* different from ours, but it's much more likely to be right than
* ignoring the information.)
@@ -432,7 +432,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
* result averaged over all possible values whether common or
* uncommon. (Essentially, we are assuming that the not-yet-known
* comparison value is equally likely to be any of the possible
- * values, regardless of their frequency in the table. Is that a good
+ * values, regardless of their frequency in the table. Is that a good
* idea?)
*/
selec = 1.0 - stats->stanullfrac;
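
Averaged over all possible comparison values, the estimate reduces to spreading the non-null fraction evenly across the distinct values, roughly:

	selec = 1.0 - stats->stanullfrac;
	ndistinct = get_variable_numdistinct(vardata, &isdefault);
	if (ndistinct > 1)
		selec /= ndistinct;		/* each distinct value assumed equally likely */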
@@ -655,7 +655,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
* essentially using the histogram just as a representative sample. However,
* small histograms are unlikely to be all that representative, so the caller
* should be prepared to fall back on some other estimation approach when the
- * histogram is missing or very small. It may also be prudent to combine this
+ * histogram is missing or very small. It may also be prudent to combine this
* approach with another one when the histogram is small.
*
* If the actual histogram size is not at least min_hist_size, we won't bother
@@ -673,7 +673,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
*
* Note that the result disregards both the most-common-values (if any) and
* null entries. The caller is expected to combine this result with
- * statistics for those portions of the column population. It may also be
+ * statistics for those portions of the column population. It may also be
* prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
*/
double
@@ -786,7 +786,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
*
* If the binary search accesses the first or last histogram
* entry, we try to replace that endpoint with the true column min
- * or max as found by get_actual_variable_range(). This
+ * or max as found by get_actual_variable_range(). This
* ameliorates misestimates when the min or max is moving as a
* result of changes since the last ANALYZE. Note that this could
* result in effectively including MCVs into the histogram that
@@ -890,7 +890,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
/*
* Watch out for the possibility that we got a NaN or
- * Infinity from the division. This can happen
+ * Infinity from the division. This can happen
* despite the previous checks, if for example "low"
* is -Infinity.
*/
@@ -905,7 +905,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
* Ideally we'd produce an error here, on the grounds that
* the given operator shouldn't have scalarXXsel
* registered as its selectivity func unless we can deal
- * with its operand types. But currently, all manner of
+ * with its operand types. But currently, all manner of
* stuff is invoking scalarXXsel, so give a default
* estimate until that can be fixed.
*/
@@ -931,7 +931,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
/*
* The histogram boundaries are only approximate to begin with,
- * and may well be out of date anyway. Therefore, don't believe
+ * and may well be out of date anyway. Therefore, don't believe
* extremely small or large selectivity estimates --- unless we
* got actual current endpoint values from the table.
*/
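
The clamp described here keeps the histogram-derived fraction away from the extremes unless real endpoint values were obtained; schematically:

	if (have_end)
		CLAMP_PROBABILITY(histfrac);	/* actual endpoints: trust the result */
	else if (histfrac < 0.0001)
		histfrac = 0.0001;				/* disbelieve near-zero estimates */
	else if (histfrac > 0.9999)
		histfrac = 0.9999;				/* ...and near-one estimates */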
@@ -1128,7 +1128,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* If this is for a NOT LIKE or similar operator, get the corresponding
- * positive-match operator and work with that. Set result to the correct
+ * positive-match operator and work with that. Set result to the correct
* default estimate, too.
*/
if (negate)
@@ -1214,7 +1214,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* Pull out any fixed prefix implied by the pattern, and estimate the
- * fractional selectivity of the remainder of the pattern. Unlike many of
+ * fractional selectivity of the remainder of the pattern. Unlike many of
* the other functions in this file, we use the pattern operator's actual
* collation for this step. This is not because we expect the collation
* to make a big difference in the selectivity estimate (it seldom would),
@@ -1332,7 +1332,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* If we have most-common-values info, add up the fractions of the MCV
* entries that satisfy MCV OP PATTERN. These fractions contribute
- * directly to the result selectivity. Also add up the total fraction
+ * directly to the result selectivity. Also add up the total fraction
* represented by MCV entries.
*/
mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true,
@@ -1838,7 +1838,7 @@ scalararraysel(PlannerInfo *root,
/*
* For generic operators, we assume the probability of success is
- * independent for each array element. But for "= ANY" or "<> ALL",
+ * independent for each array element. But for "= ANY" or "<> ALL",
* if the array elements are distinct (which'd typically be the case)
* then the probabilities are disjoint, and we should just sum them.
*
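
The two combination rules differ in how a new per-element selectivity s2 folds into the running total s1 (a sketch of the OR case; names follow the style of the surrounding code rather than quoting it exactly):

		if (isEquality)
			s1 = s1 + s2;				/* "= ANY": disjoint events, just sum */
		else
			s1 = s1 + s2 - s1 * s2;		/* generic OR: independent events */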
@@ -2253,9 +2253,9 @@ eqjoinsel_inner(Oid operator,
if (have_mcvs1 && have_mcvs2)
{
/*
- * We have most-common-value lists for both relations. Run through
+ * We have most-common-value lists for both relations. Run through
* the lists to see which MCVs actually join to each other with the
- * given operator. This allows us to determine the exact join
+ * given operator. This allows us to determine the exact join
* selectivity for the portion of the relations represented by the MCV
* lists. We still have to estimate for the remaining population, but
* in a skewed distribution this gives us a big leg up in accuracy.
@@ -2287,7 +2287,7 @@ eqjoinsel_inner(Oid operator,
/*
* Note we assume that each MCV will match at most one member of the
- * other MCV list. If the operator isn't really equality, there could
+ * other MCV list. If the operator isn't really equality, there could
* be multiple matches --- but we don't look for them, both for speed
* and because the math wouldn't add up...
*/
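
The matching pass the comment refers to is a nested loop that stops at the first partner for each MCV, accumulating the joint frequency of matched pairs (a sketch, assuming the MCV arrays and eqproc were already extracted):

	matchprodfreq = 0.0;
	for (i = 0; i < nvalues1; i++)
	{
		int			j;

		for (j = 0; j < nvalues2; j++)
		{
			if (hasmatch2[j])
				continue;
			if (DatumGetBool(FunctionCall2Coll(&eqproc,
											   DEFAULT_COLLATION_OID,
											   values1[i], values2[j])))
			{
				hasmatch1[i] = hasmatch2[j] = true;
				matchprodfreq += numbers1[i] * numbers2[j];
				break;			/* at most one match per MCV, as noted */
			}
		}
	}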
@@ -2452,7 +2452,7 @@ eqjoinsel_semi(Oid operator,
/*
* We clamp nd2 to be not more than what we estimate the inner relation's
- * size to be. This is intuitively somewhat reasonable since obviously
+ * size to be. This is intuitively somewhat reasonable since obviously
* there can't be more than that many distinct values coming from the
* inner rel. The reason for the asymmetry (ie, that we don't clamp nd1
* likewise) is that this is the only pathway by which restriction clauses
@@ -2497,9 +2497,9 @@ eqjoinsel_semi(Oid operator,
if (have_mcvs1 && have_mcvs2 && OidIsValid(operator))
{
/*
- * We have most-common-value lists for both relations. Run through
+ * We have most-common-value lists for both relations. Run through
* the lists to see which MCVs actually join to each other with the
- * given operator. This allows us to determine the exact join
+ * given operator. This allows us to determine the exact join
* selectivity for the portion of the relations represented by the MCV
* lists. We still have to estimate for the remaining population, but
* in a skewed distribution this gives us a big leg up in accuracy.
@@ -2530,7 +2530,7 @@ eqjoinsel_semi(Oid operator,
/*
* Note we assume that each MCV will match at most one member of the
- * other MCV list. If the operator isn't really equality, there could
+ * other MCV list. If the operator isn't really equality, there could
* be multiple matches --- but we don't look for them, both for speed
* and because the math wouldn't add up...
*/
@@ -2567,7 +2567,7 @@ eqjoinsel_semi(Oid operator,
/*
* Now we need to estimate the fraction of relation 1 that has at
- * least one join partner. We know for certain that the matched MCVs
+ * least one join partner. We know for certain that the matched MCVs
* do, so that gives us a lower bound, but we're really in the dark
* about everything else. Our crude approach is: if nd1 <= nd2 then
* assume all non-null rel1 rows have join partners, else assume for
@@ -3165,11 +3165,11 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* case (all possible cross-product terms actually appear as groups) since
* very often the grouped-by Vars are highly correlated. Our current approach
* is as follows:
- * 1. Expressions yielding boolean are assumed to contribute two groups,
+ * 1. Expressions yielding boolean are assumed to contribute two groups,
* independently of their content, and are ignored in the subsequent
- * steps. This is mainly because tests like "col IS NULL" break the
+ * steps. This is mainly because tests like "col IS NULL" break the
* heuristic used in step 2 especially badly.
- * 2. Reduce the given expressions to a list of unique Vars used. For
+ * 2. Reduce the given expressions to a list of unique Vars used. For
* example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
* It is clearly correct not to count the same Var more than once.
* It is also reasonable to treat f(x) the same as x: f() cannot
@@ -3179,14 +3179,14 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* As a special case, if a GROUP BY expression can be matched to an
* expressional index for which we have statistics, then we treat the
* whole expression as though it were just a Var.
- * 3. If the list contains Vars of different relations that are known equal
+ * 3. If the list contains Vars of different relations that are known equal
* due to equivalence classes, then drop all but one of the Vars from each
* known-equal set, keeping the one with smallest estimated # of values
* (since the extra values of the others can't appear in joined rows).
* Note the reason we only consider Vars of different relations is that
* if we considered ones of the same rel, we'd be double-counting the
* restriction selectivity of the equality in the next step.
- * 4. For Vars within a single source rel, we multiply together the numbers
+ * 4. For Vars within a single source rel, we multiply together the numbers
* of values, clamp to the number of rows in the rel (divided by 10 if
* more than one Var), and then multiply by the selectivity of the
* restriction clauses for that rel. When there's more than one Var,
@@ -3197,7 +3197,7 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* by the restriction selectivity is effectively assuming that the
* restriction clauses are independent of the grouping, which is a crummy
* assumption, but it's hard to do better.
- * 5. If there are Vars from multiple rels, we repeat step 4 for each such
+ * 5. If there are Vars from multiple rels, we repeat step 4 for each such
* rel, and multiply the results together.
* Note that rels not containing grouped Vars are ignored completely, as are
* join clauses. Such rels cannot increase the number of groups, and we
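
Steps 4 and 5 amount to the following per-relation computation (a simplified sketch; GroupVarInfo is the struct this file uses, but the variable names here are illustrative):

		reldistinct = 1.0;
		foreach(l, relvarinfos)
			reldistinct *= ((GroupVarInfo *) lfirst(l))->ndistinct;

		clamp = rel->tuples;
		if (relvarcount > 1)
			clamp *= 0.1;			/* divide by 10 when multiple Vars */
		if (reldistinct > clamp)
			reldistinct = clamp;

		/* scale by the rel's restriction selectivity, then fold in (step 5) */
		reldistinct *= rel->rows / rel->tuples;
		numdistinct *= reldistinct;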
@@ -3228,7 +3228,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
return 1.0;
/*
- * Count groups derived from boolean grouping expressions. For other
+ * Count groups derived from boolean grouping expressions. For other
* expressions, find the unique Vars used, treating an expression as a Var
* if we can find stats for it. For each one, record the statistical
* estimate of number of distinct values (total in its table, without
@@ -3317,7 +3317,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
* Group Vars by relation and estimate total numdistinct.
*
* For each iteration of the outer loop, we process the frontmost Var in
- * varinfos, plus all other Vars in the same relation. We remove these
+ * varinfos, plus all other Vars in the same relation. We remove these
* Vars from the newvarinfos list for the next iteration. This is the
* easiest way to group Vars of same rel together.
*/
@@ -3418,11 +3418,11 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
* distribution, so this will have to do for now.
*
* We are passed the number of buckets the executor will use for the given
- * input relation. If the data were perfectly distributed, with the same
+ * input relation. If the data were perfectly distributed, with the same
* number of tuples going into each available bucket, then the bucketsize
* fraction would be 1/nbuckets. But this happy state of affairs will occur
* only if (a) there are at least nbuckets distinct data values, and (b)
- * we have a not-too-skewed data distribution. Otherwise the buckets will
+ * we have a not-too-skewed data distribution. Otherwise the buckets will
* be nonuniformly occupied. If the other relation in the join has a key
* distribution similar to this one's, then the most-loaded buckets are
* exactly those that will be probed most often. Therefore, the "average"
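
The idealized starting point, before any skew corrections, is just the reciprocal described above (a sketch):

	if (ndistinct > (double) nbuckets)
		estfract = 1.0 / (double) nbuckets;		/* every bucket in use */
	else
		estfract = 1.0 / ndistinct;				/* one value per bucket */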
@@ -3595,7 +3595,7 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
* operators to estimate selectivity for the other's. This is outright
* wrong in some cases --- in particular signed versus unsigned
* interpretation could trip us up. But it's useful enough in the
- * majority of cases that we do it anyway. Should think about more
+ * majority of cases that we do it anyway. Should think about more
* rigorous ways to do it.
*/
switch (valuetypid)
@@ -3950,6 +3950,7 @@ convert_string_datum(Datum value, Oid typid)
xfrmlen = strxfrm(NULL, val, 0);
#endif
#ifdef WIN32
+
/*
* On Windows, strxfrm returns INT_MAX when an error occurs. Instead
* of trying to allocate this much memory (and fail), just return the
@@ -4178,7 +4179,7 @@ get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
right = (Node *) lsecond(args);
/*
- * Examine both sides. Note that when varRelid is nonzero, Vars of other
+ * Examine both sides. Note that when varRelid is nonzero, Vars of other
* relations will be treated as pseudoconstants.
*/
examine_variable(root, left, varRelid, vardata);
@@ -4323,7 +4324,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
/*
* Okay, it's a more complicated expression. Determine variable
- * membership. Note that when varRelid isn't zero, only vars of that
+ * membership. Note that when varRelid isn't zero, only vars of that
* relation are considered "real" vars.
*/
varnos = pull_varnos(basenode);
@@ -4372,13 +4373,13 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
if (onerel)
{
/*
- * We have an expression in vars of a single relation. Try to match
+ * We have an expression in vars of a single relation. Try to match
* it to expressional index columns, in hopes of finding some
* statistics.
*
* XXX it's conceivable that there are multiple matches with different
* index opfamilies; if so, we need to pick one that matches the
- * operator we are estimating for. FIXME later.
+ * operator we are estimating for. FIXME later.
*/
ListCell *ilist;
@@ -4580,7 +4581,7 @@ examine_simple_variable(PlannerInfo *root, Var *var,
*
* This is probably a harsher restriction than necessary; it's
* certainly OK for the selectivity estimator (which is a C function,
- * and therefore omnipotent anyway) to look at the statistics. But
+ * and therefore omnipotent anyway) to look at the statistics. But
* many selectivity estimators will happily *invoke the operator
* function* to try to work out a good estimate - and that's not OK.
* So for now, don't dig down for stats.
@@ -4633,7 +4634,7 @@ get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
*isdefault = false;
/*
- * Determine the stadistinct value to use. There are cases where we can
+ * Determine the stadistinct value to use. There are cases where we can
* get an estimate even without a pg_statistic entry, or can get a better
* value than is in pg_statistic.
*/
@@ -4757,7 +4758,7 @@ get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop,
/*
* XXX It's very tempting to try to use the actual column min and max, if
- * we can get them relatively-cheaply with an index probe. However, since
+ * we can get them relatively-cheaply with an index probe. However, since
* this function is called many times during join planning, that could
* have unpleasant effects on planning speed. Need more investigation
* before enabling this.
@@ -5008,7 +5009,7 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
* and it can be very expensive if a lot of uncommitted rows
* exist at the end of the index (because we'll laboriously
* fetch each one and reject it). What seems like a good
- * compromise is to use SnapshotDirty. That will accept
+ * compromise is to use SnapshotDirty. That will accept
* uncommitted rows, and thus avoid fetching multiple heap
* tuples in this scenario. On the other hand, it will reject
* known-dead rows, and thus not give a bogus answer when the
@@ -5147,7 +5148,7 @@ find_join_input_rel(PlannerInfo *root, Relids relids)
* Check whether char is a letter (and, hence, subject to case-folding)
*
* In multibyte character sets, we can't use isalpha, and it does not seem
- * worth trying to convert to wchar_t to use iswalpha. Instead, just assume
+ * worth trying to convert to wchar_t to use iswalpha. Instead, just assume
* any multibyte char is potentially case-varying.
*/
static int
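
The policy stated in this comment can be read as (a sketch only, not the exact function body; IS_HIGHBIT_SET is the usual backend test for a non-ASCII lead byte):

	if (IS_HIGHBIT_SET(c))
		return true;				/* multibyte: assume it may case-fold */
	return isalpha((unsigned char) c);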
@@ -5399,7 +5400,7 @@ pattern_fixed_prefix(Const *patt, Pattern_Type ptype, Oid collation,
* together with info about MCVs and NULLs.
*
* We use the >= and < operators from the specified btree opfamily to do the
- * estimation. The given variable and Const must be of the associated
+ * estimation. The given variable and Const must be of the associated
* datatype.
*
* XXX Note: we make use of the upper bound to estimate operator selectivity
@@ -5458,7 +5459,7 @@ prefix_selectivity(PlannerInfo *root, VariableStatData *vardata,
/*
* Merge the two selectivities in the same way as for a range query
- * (see clauselist_selectivity()). Note that we don't need to worry
+ * (see clauselist_selectivity()). Note that we don't need to worry
* about double-exclusion of nulls, since ineq_histogram_selectivity
* doesn't count those anyway.
*/
@@ -5695,7 +5696,7 @@ byte_increment(unsigned char *ptr, int len)
* that is not a bulletproof guarantee that an extension of the string might
* not sort after it; an example is that "foo " is less than "foo!", but it
* is not clear that a "dictionary" sort ordering will consider "foo!" less
- * than "foo bar". CAUTION: Therefore, this function should be used only for
+ * than "foo bar". CAUTION: Therefore, this function should be used only for
* estimation purposes when working in a non-C collation.
*
* To try to catch most cases where an extended string might otherwise sort
@@ -5952,7 +5953,7 @@ string_to_bytea_const(const char *str, size_t str_len)
* genericcostestimate is a general-purpose estimator that can be used for
* most index types. In some cases we use genericcostestimate as the base
* code and then incorporate additional index-type-specific knowledge in
- * the type-specific calling function. To avoid code duplication, we make
+ * the type-specific calling function. To avoid code duplication, we make
* genericcostestimate return a number of intermediate values as well as
* its preliminary estimates of the output cost values. The GenericCosts
* struct includes all these values.
@@ -6072,7 +6073,7 @@ genericcostestimate(PlannerInfo *root,
*
* In practice access to upper index levels is often nearly free because
* those tend to stay in cache under load; moreover, the cost involved is
- * highly dependent on index type. We therefore ignore such costs here
+ * highly dependent on index type. We therefore ignore such costs here
* and leave it to the caller to add a suitable charge if needed.
*/
if (index->pages > 1 && index->tuples > 1)
@@ -6091,9 +6092,9 @@ genericcostestimate(PlannerInfo *root,
* The above calculations are all per-index-scan. However, if we are in a
* nestloop inner scan, we can expect the scan to be repeated (with
* different search keys) for each row of the outer relation. Likewise,
- * ScalarArrayOpExpr quals result in multiple index scans. This creates
+ * ScalarArrayOpExpr quals result in multiple index scans. This creates
* the potential for cache effects to reduce the number of disk page
- * fetches needed. We want to estimate the average per-scan I/O cost in
+ * fetches needed. We want to estimate the average per-scan I/O cost in
* the presence of caching.
*
* We use the Mackert-Lohman formula (see costsize.c for details) to
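
Schematically, the averaging uses index_pages_fetched() over the whole series of scans and then charges one scan its proportional share (a sketch; the real code also folds ScalarArrayOpExpr repetitions into the scan count):

		pages_fetched = index_pages_fetched(numIndexTuples * num_scans,
											(BlockNumber) index->pages,
											(double) index->pages, root);
		indexTotalCost = (pages_fetched * spc_random_page_cost) / num_scans;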
@@ -6140,7 +6141,7 @@ genericcostestimate(PlannerInfo *root,
* evaluated once at the start of the scan to reduce them to runtime keys
* to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
* CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
- * indexqual operator. Because we have numIndexTuples as a per-scan
+ * indexqual operator. Because we have numIndexTuples as a per-scan
* number, we have to multiply by num_sa_scans to get the correct result
* for ScalarArrayOpExpr cases. Similarly add in costs for any index
* ORDER BY expressions.
@@ -6187,16 +6188,16 @@ genericcostestimate(PlannerInfo *root,
* ANDing the index predicate with the explicitly given indexquals produces
* a more accurate idea of the index's selectivity. However, we need to be
* careful not to insert redundant clauses, because clauselist_selectivity()
- * is easily fooled into computing a too-low selectivity estimate. Our
+ * is easily fooled into computing a too-low selectivity estimate. Our
* approach is to add only the predicate clause(s) that cannot be proven to
- * be implied by the given indexquals. This successfully handles cases such
+ * be implied by the given indexquals. This successfully handles cases such
* as a qual "x = 42" used with a partial index "WHERE x >= 40 AND x < 50".
* There are many other cases where we won't detect redundancy, leading to a
* too-low selectivity estimate, which will bias the system in favor of using
- * partial indexes where possible. That is not necessarily bad though.
+ * partial indexes where possible. That is not necessarily bad though.
*
* Note that indexQuals contains RestrictInfo nodes while the indpred
- * does not, so the output list will be mixed. This is OK for both
+ * does not, so the output list will be mixed. This is OK for both
* predicate_implied_by() and clauselist_selectivity(), but might be
* problematic if the result were passed to other things.
*/
@@ -6255,7 +6256,7 @@ btcostestimate(PG_FUNCTION_ARGS)
* the index scan). Additional quals can suppress visits to the heap, so
* it's OK to count them in indexSelectivity, but they should not count
* for estimating numIndexTuples. So we must examine the given indexquals
- * to find out which ones count as boundary quals. We rely on the
+ * to find out which ones count as boundary quals. We rely on the
* knowledge that they are given in index column order.
*
* For a RowCompareExpr, we consider only the first column, just as
@@ -6594,7 +6595,7 @@ hashcostestimate(PG_FUNCTION_ARGS)
* because the hash AM makes sure that's always one page.
*
* Likewise, we could consider charging some CPU for each index tuple in
- * the bucket, if we knew how many there were. But the per-tuple cost is
+ * the bucket, if we knew how many there were. But the per-tuple cost is
* just a hash value comparison, not a general datatype-dependent
* comparison, so any such charge ought to be quite a bit less than
* cpu_operator_cost; which makes it probably not worth worrying about.
@@ -6652,7 +6653,7 @@ gistcostestimate(PG_FUNCTION_ARGS)
/*
* Add a CPU-cost component to represent the costs of initial descent. We
* just use log(N) here not log2(N) since the branching factor isn't
- * necessarily two anyway. As for btree, charge once per SA scan.
+ * necessarily two anyway. As for btree, charge once per SA scan.
*/
if (index->tuples > 1) /* avoid computing log(0) */
{
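
The charge added inside this block follows the pattern used for btree (a sketch of the body):

		descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
		costs.indexStartupCost += descentCost;
		costs.indexTotalCost += costs.num_sa_scans * descentCost;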
@@ -6714,7 +6715,7 @@ spgcostestimate(PG_FUNCTION_ARGS)
/*
* Add a CPU-cost component to represent the costs of initial descent. We
* just use log(N) here not log2(N) since the branching factor isn't
- * necessarily two anyway. As for btree, charge once per SA scan.
+ * necessarily two anyway. As for btree, charge once per SA scan.
*/
if (index->tuples > 1) /* avoid computing log(0) */
{
@@ -6791,7 +6792,7 @@ gincost_pattern(IndexOptInfo *index, int indexcol,
/*
* Get the operator's strategy number and declared input data types within
- * the index opfamily. (We don't need the latter, but we use
+ * the index opfamily. (We don't need the latter, but we use
* get_op_opfamily_properties because it will throw error if it fails to
* find a matching pg_amop entry.)
*/
@@ -6937,7 +6938,7 @@ gincost_opexpr(PlannerInfo *root, IndexOptInfo *index, OpExpr *clause,
* each of which involves one value from the RHS array, plus all the
* non-array quals (if any). To model this, we average the counts across
* the RHS elements, and add the averages to the counts in *counts (which
- * correspond to per-indexscan costs). We also multiply counts->arrayScans
+ * correspond to per-indexscan costs). We also multiply counts->arrayScans
* by N, causing gincostestimate to scale up its estimates accordingly.
*/
static bool
@@ -7107,7 +7108,7 @@ gincostestimate(PG_FUNCTION_ARGS)
/*
* nPendingPages can be trusted, but the other fields are as of the last
- * VACUUM. Scale them by the ratio numPages / nTotalPages to account for
+ * VACUUM. Scale them by the ratio numPages / nTotalPages to account for
* growth since then. If the fields are zero (implying no VACUUM at all,
* and an index created pre-9.1), assume all pages are entry pages.
*/
@@ -7252,7 +7253,7 @@ gincostestimate(PG_FUNCTION_ARGS)
/*
* Add an estimate of entry pages read by partial match algorithm. It's a
- * scan over leaf pages in entry tree. We haven't any useful stats here,
+ * scan over leaf pages in entry tree. We haven't any useful stats here,
* so estimate it as proportion.
*/
entryPagesFetched += ceil(numEntryPages * counts.partialEntries / numEntries);
@@ -7294,17 +7295,17 @@ gincostestimate(PG_FUNCTION_ARGS)
*
* We assume every entry to have the same number of items, and that there
* is no overlap between them. (XXX: tsvector and array opclasses collect
- * statistics on the frequency of individual keys; it would be nice to
- * use those here.)
+ * statistics on the frequency of individual keys; it would be nice to use
+ * those here.)
*/
dataPagesFetched = ceil(numDataPages * counts.exactEntries / numEntries);
/*
- * If there is a lot of overlap among the entries, in particular if one
- * of the entries is very frequent, the above calculation can grossly
- * under-estimate. As a simple cross-check, calculate a lower bound
- * based on the overall selectivity of the quals. At a minimum, we must
- * read one item pointer for each matching entry.
+ * If there is a lot of overlap among the entries, in particular if one of
+ * the entries is very frequent, the above calculation can grossly
+ * under-estimate. As a simple cross-check, calculate a lower bound based
+ * on the overall selectivity of the quals. At a minimum, we must read
+ * one item pointer for each matching entry.
*
* The width of each item pointer varies, based on the level of
* compression. We don't have statistics on that, but an average of
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index efc1e9b9925..11007c6d894 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -41,7 +41,7 @@
#error -ffast-math is known to break this code
#endif
-#define SAMESIGN(a,b) (((a) < 0) == ((b) < 0))
+#define SAMESIGN(a,b) (((a) < 0) == ((b) < 0))
#ifndef INT64_MAX
#define INT64_MAX INT64CONST(0x7FFFFFFFFFFFFFFF)
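
SAMESIGN is the usual overflow idiom: if two addends share a sign but their sum does not, the addition wrapped. It is used in this file along these lines (paraphrasing interval_pl):

	result->month = span1->month + span2->month;
	if (SAMESIGN(span1->month, span2->month) &&
		!SAMESIGN(result->month, span1->month))
		ereport(ERROR,
				(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
				 errmsg("interval out of range")));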
@@ -391,7 +391,7 @@ AdjustTimestampForTypmod(Timestamp *time, int32 typmod)
* Note: this round-to-nearest code is not completely consistent about
* rounding values that are exactly halfway between integral values.
* On most platforms, rint() will implement round-to-nearest-even, but
- * the integer code always rounds up (away from zero). Is it worth
+ * the integer code always rounds up (away from zero). Is it worth
* trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
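
The two halfway behaviors being contrasted look like this for a value rounded to a multiple of scale (illustrative names, not the function's own):

	int64	i_rounded = ((value + scale / 2) / scale) * scale;	/* halves away from zero, value >= 0 */
	double	f_rounded = rint((double) value / scale) * scale;	/* rint: halves to nearest even */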
@@ -488,7 +488,7 @@ timestamptz_in(PG_FUNCTION_ARGS)
* if it's acceptable. Otherwise, an error is thrown.
*/
static int
-parse_sane_timezone(struct pg_tm *tm, text *zone)
+parse_sane_timezone(struct pg_tm * tm, text *zone)
{
char tzname[TZ_STRLEN_MAX + 1];
int rt;
@@ -497,7 +497,7 @@ parse_sane_timezone(struct pg_tm *tm, text *zone)
text_to_cstring_buffer(zone, tzname, sizeof(tzname));
/*
- * Look up the requested timezone. First we try to interpret it as a
+ * Look up the requested timezone. First we try to interpret it as a
* numeric timezone specification; if DecodeTimezone decides it doesn't
* like the format, we look in the date token table (to handle cases like
* "EST"), and if that also fails, we look in the timezone database (to
@@ -507,7 +507,7 @@ parse_sane_timezone(struct pg_tm *tm, text *zone)
* offset abbreviations.)
*
* Note pg_tzset happily parses numeric input that DecodeTimezone would
- * reject. To avoid having it accept input that would otherwise be seen
+ * reject. To avoid having it accept input that would otherwise be seen
* as invalid, it's enough to disallow having a digit in the first
* position of our input string.
*/
@@ -528,7 +528,7 @@ parse_sane_timezone(struct pg_tm *tm, text *zone)
if (rt == DTERR_TZDISP_OVERFLOW)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("numeric time zone \"%s\" out of range", tzname)));
+ errmsg("numeric time zone \"%s\" out of range", tzname)));
else if (rt != DTERR_BAD_FORMAT)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -997,7 +997,7 @@ interval_send(PG_FUNCTION_ARGS)
/*
* The interval typmod stores a "range" in its high 16 bits and a "precision"
- * in its low 16 bits. Both contribute to defining the resolution of the
+ * in its low 16 bits. Both contribute to defining the resolution of the
* type. Range addresses resolution granules larger than one second, and
* precision specifies resolution below one second. This representation can
* express all SQL standard resolutions, but we implement them all in terms of
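
The packing can be pictured with a pair of illustrative macros (demo names only; the real INTERVAL_TYPMOD family lives in the backend headers and differs in detail):

#define DEMO_INTERVAL_TYPMOD(prec, range)	(((range) << 16) | (prec))
#define DEMO_INTERVAL_RANGE(t)				(((t) >> 16) & 0xFFFF)
#define DEMO_INTERVAL_PRECISION(t)			((t) & 0xFFFF)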
@@ -1205,7 +1205,7 @@ interval_transform(PG_FUNCTION_ARGS)
/*
* Temporally-smaller fields occupy higher positions in the range
- * bitmap. Since only the temporally-smallest bit matters for length
+ * bitmap. Since only the temporally-smallest bit matters for length
* coercion purposes, we compare the last-set bits in the ranges.
* Precision, which is to say, sub-second precision, only affects
* ranges that include SECOND.
@@ -1294,7 +1294,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
* that fields to the right of the last one specified are zeroed out,
* but those to the left of it remain valid. Thus for example there
* is no operational difference between INTERVAL YEAR TO MONTH and
- * INTERVAL MONTH. In some cases we could meaningfully enforce that
+ * INTERVAL MONTH. In some cases we could meaningfully enforce that
* higher-order fields are zero; for example INTERVAL DAY could reject
* nonzero "month" field. However that seems a bit pointless when we
* can't do it consistently. (We cannot enforce a range limit on the
@@ -1304,9 +1304,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
*
* Note: before PG 8.4 we interpreted a limited set of fields as
* actually causing a "modulo" operation on a given value, potentially
- * losing high-order as well as low-order information. But there is
+ * losing high-order as well as low-order information. But there is
* no support for such behavior in the standard, and it seems fairly
- * undesirable on data consistency grounds anyway. Now we only
+ * undesirable on data consistency grounds anyway. Now we only
* perform truncation or rounding of low-order fields.
*/
if (range == INTERVAL_FULL_RANGE)
@@ -1426,7 +1426,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
/*
* Note: this round-to-nearest code is not completely consistent
* about rounding values that are exactly halfway between integral
- * values. On most platforms, rint() will implement
+ * values. On most platforms, rint() will implement
* round-to-nearest-even, but the integer code always rounds up
* (away from zero). Is it worth trying to be consistent?
*/
@@ -1470,7 +1470,7 @@ make_interval(PG_FUNCTION_ARGS)
Interval *result;
/*
- * Reject out-of-range inputs. We really ought to check the integer
+ * Reject out-of-range inputs. We really ought to check the integer
* inputs as well, but it's not entirely clear what limits to apply.
*/
if (isinf(secs) || isnan(secs))
@@ -1718,7 +1718,7 @@ timestamptz_to_time_t(TimestampTz t)
* Produce a C-string representation of a TimestampTz.
*
* This is mostly for use in emitting messages. The primary difference
- * from timestamptz_out is that we force the output format to ISO. Note
+ * from timestamptz_out is that we force the output format to ISO. Note
* also that the result is in a static buffer, not pstrdup'd.
*/
const char *
@@ -1862,7 +1862,7 @@ recalc_t:
*
* First, convert to an integral timestamp, avoiding possibly
* platform-specific roundoff-in-wrong-direction errors, and adjust to
- * Unix epoch. Then see if we can convert to pg_time_t without loss. This
+ * Unix epoch. Then see if we can convert to pg_time_t without loss. This
* coding avoids hardwiring any assumptions about the width of pg_time_t,
* so it should behave sanely on machines without int64.
*/
@@ -2010,7 +2010,7 @@ recalc:
int
tm2interval(struct pg_tm * tm, fsec_t fsec, Interval *span)
{
- double total_months = (double)tm->tm_year * MONTHS_PER_YEAR + tm->tm_mon;
+ double total_months = (double) tm->tm_year * MONTHS_PER_YEAR + tm->tm_mon;
if (total_months > INT_MAX || total_months < INT_MIN)
return -1;
@@ -4888,7 +4888,7 @@ timestamp_zone(PG_FUNCTION_ARGS)
PG_RETURN_TIMESTAMPTZ(timestamp);
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
@@ -5061,7 +5061,7 @@ timestamptz_zone(PG_FUNCTION_ARGS)
PG_RETURN_TIMESTAMP(timestamp);
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
diff --git a/src/backend/utils/adt/tsginidx.c b/src/backend/utils/adt/tsginidx.c
index df47105d0b2..bdef47f093c 100644
--- a/src/backend/utils/adt/tsginidx.c
+++ b/src/backend/utils/adt/tsginidx.c
@@ -204,9 +204,12 @@ checkcondition_gin(void *checkval, QueryOperand *val)
*/
static GinTernaryValue
TS_execute_ternary(QueryItem *curitem, void *checkval,
- GinTernaryValue (*chkcond) (void *checkval, QueryOperand *val))
+ GinTernaryValue (*chkcond) (void *checkval, QueryOperand *val))
{
- GinTernaryValue val1, val2, result;
+ GinTernaryValue val1,
+ val2,
+ result;
+
/* since this function recurses, it could be driven to stack overflow */
check_stack_depth();
@@ -223,7 +226,7 @@ TS_execute_ternary(QueryItem *curitem, void *checkval,
case OP_AND:
val1 = TS_execute_ternary(curitem + curitem->qoperator.left,
- checkval, chkcond);
+ checkval, chkcond);
if (val1 == GIN_FALSE)
return GIN_FALSE;
val2 = TS_execute_ternary(curitem + 1, checkval, chkcond);
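
The hunk cuts off just before the AND result is combined; the three-valued rule it implements is that FALSE short-circuits, both-TRUE yields TRUE, and anything else is MAYBE (paraphrasing the function body):

			if (val2 == GIN_FALSE)
				return GIN_FALSE;
			if (val1 == GIN_TRUE && val2 == GIN_TRUE)
				return GIN_TRUE;
			return GIN_MAYBE;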
@@ -236,7 +239,7 @@ TS_execute_ternary(QueryItem *curitem, void *checkval,
case OP_OR:
val1 = TS_execute_ternary(curitem + curitem->qoperator.left,
- checkval, chkcond);
+ checkval, chkcond);
if (val1 == GIN_TRUE)
return GIN_TRUE;
val2 = TS_execute_ternary(curitem + 1, checkval, chkcond);
@@ -339,7 +342,7 @@ gin_tsquery_triconsistent(PG_FUNCTION_ARGS)
* Formerly, gin_extract_tsvector had only two arguments. Now it has three,
* but we still need a pg_proc entry with two args to support reloading
* pre-9.1 contrib/tsearch2 opclass declarations. This compatibility
- * function should go away eventually. (Note: you might say "hey, but the
+ * function should go away eventually. (Note: you might say "hey, but the
* code above is only *using* two args, so let's just declare it that way".
* If you try that you'll find the opr_sanity regression test complains.)
*/
diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c
index 502ca44e04a..72b9f99dbc9 100644
--- a/src/backend/utils/adt/varchar.c
+++ b/src/backend/utils/adt/varchar.c
@@ -257,7 +257,7 @@ bpcharsend(PG_FUNCTION_ARGS)
*
* Truncation rules: for an explicit cast, silently truncate to the given
* length; for an implicit cast, raise error unless extra characters are
- * all spaces. (This is sort-of per SQL: the spec would actually have us
+ * all spaces. (This is sort-of per SQL: the spec would actually have us
* raise a "completion condition" for the explicit cast case, but Postgres
* hasn't got such a concept.)
*/
@@ -584,7 +584,7 @@ varchar_transform(PG_FUNCTION_ARGS)
*
* Truncation rules: for an explicit cast, silently truncate to the given
* length; for an implicit cast, raise error unless extra characters are
- * all spaces. (This is sort-of per SQL: the spec would actually have us
+ * all spaces. (This is sort-of per SQL: the spec would actually have us
* raise a "completion condition" for the explicit cast case, but Postgres
* hasn't got such a concept.)
*/
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index aab4897f618..f8d9fec34e4 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -591,7 +591,7 @@ textlen(PG_FUNCTION_ARGS)
* Does the real work for textlen()
*
* This is broken out so it can be called directly by other string processing
- * functions. Note that the argument is passed as a Datum, to indicate that
+ * functions. Note that the argument is passed as a Datum, to indicate that
* it may still be in compressed form. We can avoid decompressing it at all
* in some cases.
*/
@@ -763,7 +763,7 @@ text_substr_no_len(PG_FUNCTION_ARGS)
* Does the real work for text_substr() and text_substr_no_len()
*
* This is broken out so it can be called directly by other string processing
- * functions. Note that the argument is passed as a Datum, to indicate that
+ * functions. Note that the argument is passed as a Datum, to indicate that
* it may still be in compressed/toasted form. We can avoid detoasting all
* of it in some cases.
*
@@ -1113,7 +1113,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
* searched (t1) and the "needle" is the pattern being sought (t2).
*
* If the needle is empty or bigger than the haystack then there is no
- * point in wasting cycles initializing the table. We also choose not to
+ * point in wasting cycles initializing the table. We also choose not to
* use B-M-H for needles of length 1, since the skip table can't possibly
* save anything in that case.
*/
@@ -1129,7 +1129,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
* declaration of TextPositionState allows up to 256 elements, but for
* short search problems we don't really want to have to initialize so
* many elements --- it would take too long in comparison to the
- * actual search time. So we choose a useful skip table size based on
+ * actual search time. So we choose a useful skip table size based on
* the haystack length minus the needle length. The closer the needle
* length is to the haystack length the less useful skipping becomes.
*
@@ -1161,7 +1161,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
state->skiptable[i] = len2;
/*
- * Now examine the needle. For each character except the last one,
+ * Now examine the needle. For each character except the last one,
* set the corresponding table element to the appropriate skip
* distance. Note that when two characters share the same skip table
* entry, the one later in the needle must determine the skip
@@ -1249,11 +1249,11 @@ text_position_next(int start_pos, TextPositionState *state)
/*
* No match, so use the haystack char at hptr to decide how
- * far to advance. If the needle had any occurrence of that
+ * far to advance. If the needle had any occurrence of that
* character (or more precisely, one sharing the same
* skiptable entry) before its last character, then we advance
* far enough to align the last such needle character with
- * that haystack position. Otherwise we can advance by the
+ * that haystack position. Otherwise we can advance by the
* whole needle length.
*/
hptr += state->skiptable[(unsigned char) *hptr & skiptablemask];
@@ -1305,11 +1305,11 @@ text_position_next(int start_pos, TextPositionState *state)
/*
* No match, so use the haystack char at hptr to decide how
- * far to advance. If the needle had any occurrence of that
+ * far to advance. If the needle had any occurrence of that
* character (or more precisely, one sharing the same
* skiptable entry) before its last character, then we advance
* far enough to align the last such needle character with
- * that haystack position. Otherwise we can advance by the
+ * that haystack position. Otherwise we can advance by the
* whole needle length.
*/
hptr += state->skiptable[*hptr & skiptablemask];
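
Stripped of the state machinery, the Boyer-Moore-Horspool search these comments describe fits in a few self-contained lines (a standalone sketch for single-byte text, not the file's exact code):

#include <stddef.h>
#include <string.h>

/* find the first occurrence of needle in haystack, or NULL */
static const char *
bmh_search(const char *haystack, int hlen, const char *needle, int nlen)
{
	int		skip[256];
	int		i;

	if (nlen <= 0 || nlen > hlen)
		return NULL;
	for (i = 0; i < 256; i++)
		skip[i] = nlen;				/* chars absent from needle: full jump */
	for (i = 0; i < nlen - 1; i++)
		skip[(unsigned char) needle[i]] = nlen - 1 - i;

	i = 0;
	while (i <= hlen - nlen)
	{
		if (memcmp(haystack + i, needle, nlen) == 0)
			return haystack + i;
		/* advance by the skip for the haystack char under the needle's end */
		i += skip[(unsigned char) haystack[i + nlen - 1]];
	}
	return NULL;
}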
@@ -1344,7 +1344,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
/*
* Unfortunately, there is no strncoll(), so in the non-C locale case we
- * have to do some memory copying. This turns out to be significantly
+ * have to do some memory copying. This turns out to be significantly
* slower, so we optimize the case where LC_COLLATE is C. We also try to
* optimize relatively-short strings by avoiding palloc/pfree overhead.
*/
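
The memory copying in question is just making NUL-terminated copies so strcoll() can be used (a sketch with an assumed stack-buffer size; the real code keeps two such buffers and several platform variants):

	char	a1buf[1024];
	char   *a1p;

	if (len1 >= (int) sizeof(a1buf))
		a1p = (char *) palloc(len1 + 1);
	else
		a1p = a1buf;
	memcpy(a1p, arg1, len1);
	a1p[len1] = '\0';
	/* ...likewise for arg2 into a2p, then... */
	result = strcoll(a1p, a2p);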
@@ -2334,7 +2334,7 @@ textToQualifiedNameList(text *textval)
* SplitIdentifierString --- parse a string containing identifiers
*
* This is the guts of textToQualifiedNameList, and is exported for use in
- * other situations such as parsing GUC variables. In the GUC case, it's
+ * other situations such as parsing GUC variables. In the GUC case, it's
* important to avoid memory leaks, so the API is designed to minimize the
* amount of stuff that needs to be allocated and freed.
*
@@ -2342,7 +2342,7 @@ textToQualifiedNameList(text *textval)
* rawstring: the input string; must be overwritable! On return, it's
* been modified to contain the separated identifiers.
* separator: the separator punctuation expected between identifiers
- * (typically '.' or ','). Whitespace may also appear around
+ * (typically '.' or ','). Whitespace may also appear around
* identifiers.
* Outputs:
* namelist: filled with a palloc'd list of pointers to identifiers within
@@ -2411,7 +2411,7 @@ SplitIdentifierString(char *rawstring, char separator,
*
* XXX because we want to overwrite the input in-place, we cannot
* support a downcasing transformation that increases the string
- * length. This is not a problem given the current implementation
+ * length. This is not a problem given the current implementation
* of downcase_truncate_identifier, but we'll probably have to do
* something about this someday.
*/
@@ -2468,7 +2468,7 @@ SplitIdentifierString(char *rawstring, char separator,
* Inputs:
* rawstring: the input string; must be modifiable!
* separator: the separator punctuation expected between directories
- * (typically ',' or ';'). Whitespace may also appear around
+ * (typically ',' or ';'). Whitespace may also appear around
* directories.
* Outputs:
* namelist: filled with a palloc'd list of directory names.
@@ -2875,7 +2875,7 @@ check_replace_text_has_escape_char(const text *replace_text)
* appendStringInfoRegexpSubstr
*
* Append replace_text to str, substituting regexp back references for
- * \n escapes. start_ptr is the start of the match in the source string,
+ * \n escapes. start_ptr is the start of the match in the source string,
* at logical character position data_pos.
*/
static void
@@ -2958,7 +2958,7 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
if (so != -1 && eo != -1)
{
/*
- * Copy the text that is back reference of regexp. Note so and eo
+ * Copy the text that is back reference of regexp. Note so and eo
* are counted in characters not bytes.
*/
char *chunk_start;
@@ -3836,7 +3836,7 @@ concat_internal(const char *sepstr, int argidx,
/*
* Non-null argument had better be an array. We assume that any call
* context that could let get_fn_expr_variadic return true will have
- * checked that a VARIADIC-labeled parameter actually is an array. So
+ * checked that a VARIADIC-labeled parameter actually is an array. So
* it should be okay to just Assert that it's an array rather than
* doing a full-fledged error check.
*/
@@ -4237,7 +4237,7 @@ text_format(PG_FUNCTION_ARGS)
/*
* Get the appropriate typOutput function, reusing previous one if
- * same type as previous argument. That's particularly useful in the
+ * same type as previous argument. That's particularly useful in the
* variadic-array case, but often saves work even for ordinary calls.
*/
if (typid != prev_type)
@@ -4329,12 +4329,12 @@ text_format_parse_digits(const char **ptr, const char *end_ptr, int *value)
*
* Inputs are start_ptr (the position after '%') and end_ptr (string end + 1).
* Output parameters:
- * argpos: argument position for value to be printed. -1 means unspecified.
- * widthpos: argument position for width. Zero means the argument position
+ * argpos: argument position for value to be printed. -1 means unspecified.
+ * widthpos: argument position for width. Zero means the argument position
* was unspecified (ie, take the next arg) and -1 means no width
* argument (width was omitted or specified as a constant).
* flags: bitmask of flags.
- * width: directly-specified width value. Zero means the width was omitted
+ * width: directly-specified width value. Zero means the width was omitted
* (note it's not necessary to distinguish this case from an explicit
* zero width value).
*
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index 765469c623e..422be69bd6d 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -19,7 +19,7 @@
* fail. For one thing, this avoids having to manage variant catalog
* installations. But it also has nice effects such as that you can
* dump a database containing XML type data even if the server is not
- * linked with libxml. Thus, make sure xml_out() works even if nothing
+ * linked with libxml. Thus, make sure xml_out() works even if nothing
* else does.
*/
@@ -286,7 +286,7 @@ xml_out(PG_FUNCTION_ARGS)
xmltype *x = PG_GETARG_XML_P(0);
/*
- * xml_out removes the encoding property in all cases. This is because we
+ * xml_out removes the encoding property in all cases. This is because we
* cannot control from here whether the datum will be converted to a
* different client encoding, so we'd do more harm than good by including
* it.
@@ -454,7 +454,7 @@ xmlcomment(PG_FUNCTION_ARGS)
/*
* TODO: xmlconcat needs to merge the notations and unparsed entities
- * of the argument values. Not very important in practice, though.
+ * of the argument values. Not very important in practice, though.
*/
xmltype *
xmlconcat(List *args)
@@ -589,7 +589,7 @@ xmlelement(XmlExprState *xmlExpr, ExprContext *econtext)
/*
* We first evaluate all the arguments, then start up libxml and create
- * the result. This avoids issues if one of the arguments involves a call
+ * the result. This avoids issues if one of the arguments involves a call
* to some other function or subsystem that wants to use libxml on its own
* terms.
*/
@@ -926,7 +926,7 @@ pg_xml_init_library(void)
* pg_xml_init --- set up for use of libxml and register an error handler
*
* This should be called by each function that is about to use libxml
- * facilities and requires error handling. It initializes libxml with
+ * facilities and requires error handling. It initializes libxml with
* pg_xml_init_library() and establishes our libxml error handler.
*
* strictness determines which errors are reported and which are ignored.
@@ -972,7 +972,7 @@ pg_xml_init(PgXmlStrictness strictness)
/*
* Verify that xmlSetStructuredErrorFunc set the context variable we
- * expected it to. If not, the error context pointer we just saved is not
+ * expected it to. If not, the error context pointer we just saved is not
* the correct thing to restore, and since that leaves us without a way to
* restore the context in pg_xml_done, we must fail.
*
@@ -1129,7 +1129,7 @@ parse_xml_decl(const xmlChar *str, size_t *lenp,
int utf8len;
/*
- * Only initialize libxml. We don't need error handling here, but we do
+ * Only initialize libxml. We don't need error handling here, but we do
* need to make sure libxml is initialized before calling any of its
* functions. Note that this is safe (and a no-op) if caller has already
* done pg_xml_init().
@@ -1272,7 +1272,7 @@ finished:
/*
* Write an XML declaration. On output, we adjust the XML declaration
- * as follows. (These rules are the moral equivalent of the clause
+ * as follows. (These rules are the moral equivalent of the clause
* "Serialization of an XML value" in the SQL standard.)
*
* We try to avoid generating an XML declaration if possible. This is
@@ -1496,7 +1496,7 @@ xml_pstrdup(const char *string)
/*
* xmlPgEntityLoader --- entity loader callback function
*
- * Silently prevent any external entity URL from being loaded. We don't want
+ * Silently prevent any external entity URL from being loaded. We don't want
* to throw an error, so instead make the entity appear to expand to an empty
* string.
*
@@ -1665,8 +1665,8 @@ xml_errorHandler(void *data, xmlErrorPtr error)
chopStringInfoNewlines(errorBuf);
/*
- * Legacy error handling mode. err_occurred is never set, we just add the
- * message to err_buf. This mode exists because the xml2 contrib module
+ * Legacy error handling mode. err_occurred is never set, we just add the
+ * message to err_buf. This mode exists because the xml2 contrib module
* uses our error-handling infrastructure, but we don't want to change its
* behaviour since it's deprecated anyway. This is also why we don't
* distinguish between notices, warnings and errors here --- the old-style
@@ -1887,7 +1887,7 @@ map_sql_identifier_to_xml_name(char *ident, bool fully_escaped,
static char *
unicode_to_sqlchar(pg_wchar c)
{
- char utf8string[8]; /* need room for trailing zero */
+ char utf8string[8]; /* need room for trailing zero */
char *result;
memset(utf8string, 0, sizeof(utf8string));
@@ -1939,8 +1939,8 @@ map_xml_name_to_sql_identifier(char *name)
*
* When xml_escape_strings is true, then certain characters in string
* values are replaced by entity references (&lt; etc.), as specified
- * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is
- * wanted. The false case is mainly useful when the resulting value
+ * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is
+ * wanted. The false case is mainly useful when the resulting value
* is used with xmlTextWriterWriteAttribute() to write out an
* attribute, because that function does the escaping itself.
*/
@@ -2221,13 +2221,13 @@ _SPI_strdup(const char *s)
*
* There are two kinds of mappings: Mapping SQL data (table contents)
* to XML documents, and mapping SQL structure (the "schema") to XML
- * Schema. And there are functions that do both at the same time.
+ * Schema. And there are functions that do both at the same time.
*
* Then you can map a database, a schema, or a table, each in both
* ways. This breaks down recursively: Mapping a database invokes
* mapping schemas, which invokes mapping tables, which invokes
* mapping rows, which invokes mapping columns, although you can't
- * call the last two from the outside. Because of this, there are a
+ * call the last two from the outside. Because of this, there are a
* number of xyz_internal() functions which are to be called both from
* the function manager wrapper and from some upper layer in a
* recursive call.
@@ -2236,7 +2236,7 @@ _SPI_strdup(const char *s)
* nulls, tableforest, and targetns mean.
*
* Some style guidelines for XML output: Use double quotes for quoting
- * XML attributes. Indent XML elements by two spaces, but remember
+ * XML attributes. Indent XML elements by two spaces, but remember
* that a lot of code is called recursively at different levels, so
* it's better not to indent rather than create output that indents
* and outdents weirdly. Add newlines to make the output look nice.
@@ -2400,12 +2400,12 @@ cursor_to_xml(PG_FUNCTION_ARGS)
* Write the start tag of the root element of a data mapping.
*
* top_level means that this is the very top level of the eventual
- * output. For example, when the user calls table_to_xml, then a call
+ * output. For example, when the user calls table_to_xml, then a call
* with a table name to this function is the top level. When the user
* calls database_to_xml, then a call with a schema name to this
* function is not the top level. If top_level is false, then the XML
* namespace declarations are omitted, because they supposedly already
- * appeared earlier in the output. Repeating them is not wrong, but
+ * appeared earlier in the output. Repeating them is not wrong, but
* it looks ugly.
*/
static void
@@ -2937,7 +2937,7 @@ map_multipart_sql_identifier_to_xml_name(char *a, char *b, char *c, char *d)
if (a)
appendStringInfoString(&result,
- map_sql_identifier_to_xml_name(a, true, true));
+ map_sql_identifier_to_xml_name(a, true, true));
if (b)
appendStringInfo(&result, ".%s",
map_sql_identifier_to_xml_name(b, true, true));
@@ -3348,7 +3348,7 @@ map_sql_typecoll_to_xmlschema_types(List *tupdesc_list)
* SQL/XML:2008 sections 9.5 and 9.6.
*
* (The distinction between 9.5 and 9.6 is basically that 9.6 adds
- * a name attribute, which this function does. The name-less version
+ * a name attribute, which this function does. The name-less version
* 9.5 doesn't appear to be required anywhere.)
*/
static const char *
@@ -3362,11 +3362,11 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
if (typeoid == XMLOID)
{
appendStringInfoString(&result,
- "<xsd:complexType mixed=\"true\">\n"
- " <xsd:sequence>\n"
- " <xsd:any name=\"element\" minOccurs=\"0\" maxOccurs=\"unbounded\" processContents=\"skip\"/>\n"
- " </xsd:sequence>\n"
- "</xsd:complexType>\n");
+ "<xsd:complexType mixed=\"true\">\n"
+ " <xsd:sequence>\n"
+ " <xsd:any name=\"element\" minOccurs=\"0\" maxOccurs=\"unbounded\" processContents=\"skip\"/>\n"
+ " </xsd:sequence>\n"
+ "</xsd:complexType>\n");
}
else
{
@@ -3440,12 +3440,12 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
case FLOAT8OID:
appendStringInfoString(&result,
- " <xsd:restriction base=\"xsd:double\"></xsd:restriction>\n");
+ " <xsd:restriction base=\"xsd:double\"></xsd:restriction>\n");
break;
case BOOLOID:
appendStringInfoString(&result,
- " <xsd:restriction base=\"xsd:boolean\"></xsd:restriction>\n");
+ " <xsd:restriction base=\"xsd:boolean\"></xsd:restriction>\n");
break;
case TIMEOID:
@@ -3496,9 +3496,9 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
case DATEOID:
appendStringInfoString(&result,
- " <xsd:restriction base=\"xsd:date\">\n"
- " <xsd:pattern value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}\"/>\n"
- " </xsd:restriction>\n");
+ " <xsd:restriction base=\"xsd:date\">\n"
+ " <xsd:pattern value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}\"/>\n"
+ " </xsd:restriction>\n");
break;
default:
@@ -3524,7 +3524,7 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
/*
* Map an SQL row to an XML element, taking the row from the active
- * SPI cursor. See also SQL/XML:2008 section 9.10.
+ * SPI cursor. See also SQL/XML:2008 section 9.10.
*/
static void
SPI_sql_row_to_xmlelement(int rownum, StringInfo result, char *tablename,