Diffstat (limited to 'src/backend/utils')
-rw-r--r--  src/backend/utils/adt/acl.c | 21
-rw-r--r--  src/backend/utils/adt/array_selfuncs.c | 20
-rw-r--r--  src/backend/utils/adt/array_typanalyze.c | 14
-rw-r--r--  src/backend/utils/adt/array_userfuncs.c | 4
-rw-r--r--  src/backend/utils/adt/arrayfuncs.c | 27
-rw-r--r--  src/backend/utils/adt/arrayutils.c | 2
-rw-r--r--  src/backend/utils/adt/cash.c | 92
-rw-r--r--  src/backend/utils/adt/char.c | 2
-rw-r--r--  src/backend/utils/adt/date.c | 6
-rw-r--r--  src/backend/utils/adt/datetime.c | 34
-rw-r--r--  src/backend/utils/adt/datum.c | 2
-rw-r--r--  src/backend/utils/adt/dbsize.c | 3
-rw-r--r--  src/backend/utils/adt/domains.c | 8
-rw-r--r--  src/backend/utils/adt/float.c | 6
-rw-r--r--  src/backend/utils/adt/format_type.c | 6
-rw-r--r--  src/backend/utils/adt/formatting.c | 4
-rw-r--r--  src/backend/utils/adt/geo_ops.c | 11
-rw-r--r--  src/backend/utils/adt/geo_selfuncs.c | 4
-rw-r--r--  src/backend/utils/adt/inet_cidr_ntop.c | 2
-rw-r--r--  src/backend/utils/adt/int.c | 30
-rw-r--r--  src/backend/utils/adt/int8.c | 44
-rw-r--r--  src/backend/utils/adt/json.c | 12
-rw-r--r--  src/backend/utils/adt/jsonb.c | 26
-rw-r--r--  src/backend/utils/adt/jsonb_gin.c | 80
-rw-r--r--  src/backend/utils/adt/jsonb_op.c | 13
-rw-r--r--  src/backend/utils/adt/jsonb_util.c | 257
-rw-r--r--  src/backend/utils/adt/jsonfuncs.c | 35
-rw-r--r--  src/backend/utils/adt/like.c | 4
-rw-r--r--  src/backend/utils/adt/misc.c | 14
-rw-r--r--  src/backend/utils/adt/nabstime.c | 36
-rw-r--r--  src/backend/utils/adt/network.c | 12
-rw-r--r--  src/backend/utils/adt/network_gist.c | 10
-rw-r--r--  src/backend/utils/adt/numeric.c | 63
-rw-r--r--  src/backend/utils/adt/oid.c | 2
-rw-r--r--  src/backend/utils/adt/orderedsetaggs.c | 8
-rw-r--r--  src/backend/utils/adt/pg_locale.c | 29
-rw-r--r--  src/backend/utils/adt/pg_lsn.c | 43
-rw-r--r--  src/backend/utils/adt/pg_lzcompress.c | 12
-rw-r--r--  src/backend/utils/adt/pgstatfuncs.c | 2
-rw-r--r--  src/backend/utils/adt/pseudotypes.c | 6
-rw-r--r--  src/backend/utils/adt/rangetypes.c | 8
-rw-r--r--  src/backend/utils/adt/rangetypes_gist.c | 8
-rw-r--r--  src/backend/utils/adt/regexp.c | 6
-rw-r--r--  src/backend/utils/adt/regproc.c | 26
-rw-r--r--  src/backend/utils/adt/ri_triggers.c | 14
-rw-r--r--  src/backend/utils/adt/rowtypes.c | 31
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 98
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 143
-rw-r--r--  src/backend/utils/adt/timestamp.c | 36
-rw-r--r--  src/backend/utils/adt/tsginidx.c | 13
-rw-r--r--  src/backend/utils/adt/varchar.c | 4
-rw-r--r--  src/backend/utils/adt/varlena.c | 42
-rw-r--r--  src/backend/utils/adt/xml.c | 64
-rw-r--r--  src/backend/utils/cache/attoptcache.c | 2
-rw-r--r--  src/backend/utils/cache/catcache.c | 21
-rw-r--r--  src/backend/utils/cache/inval.c | 30
-rw-r--r--  src/backend/utils/cache/lsyscache.c | 10
-rw-r--r--  src/backend/utils/cache/plancache.c | 42
-rw-r--r--  src/backend/utils/cache/relcache.c | 122
-rw-r--r--  src/backend/utils/cache/relfilenodemap.c | 13
-rw-r--r--  src/backend/utils/cache/relmapper.c | 20
-rw-r--r--  src/backend/utils/cache/spccache.c | 6
-rw-r--r--  src/backend/utils/cache/syscache.c | 21
-rw-r--r--  src/backend/utils/cache/typcache.c | 8
-rw-r--r--  src/backend/utils/error/elog.c | 99
-rw-r--r--  src/backend/utils/fmgr/dfmgr.c | 6
-rw-r--r--  src/backend/utils/fmgr/fmgr.c | 18
-rw-r--r--  src/backend/utils/fmgr/funcapi.c | 10
-rw-r--r--  src/backend/utils/hash/dynahash.c | 28
-rw-r--r--  src/backend/utils/init/miscinit.c | 30
-rw-r--r--  src/backend/utils/init/postinit.c | 18
-rw-r--r--  src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c | 2
-rw-r--r--  src/backend/utils/mb/mbutils.c | 18
-rw-r--r--  src/backend/utils/mb/wchar.c | 8
-rw-r--r--  src/backend/utils/mb/wstrcmp.c | 2
-rw-r--r--  src/backend/utils/mb/wstrncmp.c | 2
-rw-r--r--  src/backend/utils/misc/guc.c | 67
-rw-r--r--  src/backend/utils/misc/ps_status.c | 6
-rw-r--r--  src/backend/utils/misc/rbtree.c | 12
-rw-r--r--  src/backend/utils/misc/timeout.c | 22
-rw-r--r--  src/backend/utils/misc/tzparser.c | 4
-rw-r--r--  src/backend/utils/mmgr/aset.c | 16
-rw-r--r--  src/backend/utils/mmgr/mcxt.c | 16
-rw-r--r--  src/backend/utils/mmgr/portalmem.c | 18
-rw-r--r--  src/backend/utils/resowner/resowner.c | 8
-rw-r--r--  src/backend/utils/sort/logtape.c | 30
-rw-r--r--  src/backend/utils/sort/tuplesort.c | 77
-rw-r--r--  src/backend/utils/sort/tuplestore.c | 28
-rw-r--r--  src/backend/utils/time/combocid.c | 6
-rw-r--r--  src/backend/utils/time/snapmgr.c | 38
-rw-r--r--  src/backend/utils/time/tqual.c | 57
91 files changed, 1256 insertions(+), 1184 deletions(-)
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index dfac1243a40..38cd5b89c99 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -123,7 +123,7 @@ static Oid get_role_oid_or_public(const char *rolname);
/*
* getid
* Consumes the first alphanumeric string (identifier) found in string
- * 's', ignoring any leading white space. If it finds a double quote
+ * 's', ignoring any leading white space. If it finds a double quote
* it returns the word inside the quotes.
*
* RETURNS:
@@ -229,7 +229,7 @@ putid(char *p, const char *s)
*
* RETURNS:
* the string position in 's' immediately following the ACL
- * specification. Also:
+ * specification. Also:
* - loads the structure pointed to by 'aip' with the appropriate
* UID/GID, id type identifier and mode type values.
*/
@@ -837,7 +837,7 @@ acldefault(GrantObjectType objtype, Oid ownerId)
/*
- * SQL-accessible version of acldefault(). Hackish mapping from "char" type to
+ * SQL-accessible version of acldefault(). Hackish mapping from "char" type to
* ACL_OBJECT_* values, but it's only used in the information schema, not
* documented for general use.
*/
@@ -1006,7 +1006,7 @@ aclupdate(const Acl *old_acl, const AclItem *mod_aip,
}
/*
- * Remove abandoned privileges (cascading revoke). Currently we can only
+ * Remove abandoned privileges (cascading revoke). Currently we can only
* handle this when the grantee is not PUBLIC.
*/
if ((old_goptions & ~new_goptions) != 0)
@@ -1072,7 +1072,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
/*
* If the old ACL contained any references to the new owner, then we may
- * now have generated an ACL containing duplicate entries. Find them and
+ * now have generated an ACL containing duplicate entries. Find them and
* merge them so that there are not duplicates. (This is relatively
* expensive since we use a stupid O(N^2) algorithm, but it's unlikely to
* be the normal case.)
@@ -1083,7 +1083,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
* remove privilege-free entries, should there be any in the input.) dst
* is the next output slot, targ is the currently considered input slot
* (always >= dst), and src scans entries to the right of targ looking for
- * duplicates. Once an entry has been emitted to dst it is known
+ * duplicates. Once an entry has been emitted to dst it is known
* duplicate-free and need not be considered anymore.
*/
if (newpresent)
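
The dst/targ/src scheme in the comment above is a generic in-place duplicate
merge. A minimal standalone C sketch of the pattern, using a hypothetical
simplified Item struct instead of the real AclItem, might look like this:

#include <stdio.h>

typedef struct
{
    int         grantee;
    unsigned    mode;           /* privilege bits */
} Item;

/* targ scans the input; src looks to the right of targ for entries with
 * the same grantee and ORs their modes into targ, consuming them; targ is
 * then emitted to dst, which only ever holds duplicate-free entries. */
static int
merge_duplicates(Item *a, int n)
{
    int         dst = 0;

    for (int targ = 0; targ < n; targ++)
    {
        if (a[targ].mode == 0)
            continue;           /* already merged into an earlier entry */
        for (int src = targ + 1; src < n; src++)
        {
            if (a[src].grantee == a[targ].grantee)
            {
                a[targ].mode |= a[src].mode;
                a[src].mode = 0;        /* consume the duplicate */
            }
        }
        a[dst++] = a[targ];
    }
    return dst;                 /* new length */
}

int
main(void)
{
    Item        items[] = {{1, 1}, {2, 2}, {1, 4}, {3, 1}, {2, 8}};
    int         n = merge_duplicates(items, 5);

    for (int i = 0; i < n; i++)
        printf("grantee %d mode %u\n", items[i].grantee, items[i].mode);
    return 0;
}

This is O(N^2), matching the comment's own caveat that a smarter algorithm
isn't worth it for the normal case.
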
@@ -2468,7 +2468,7 @@ column_privilege_check(Oid tableoid, AttrNumber attnum,
* existence of the pg_class row before risking calling pg_class_aclcheck.
* Note: it might seem there's a race condition against concurrent DROP,
* but really it's safe because there will be no syscache flush between
- * here and there. So if we see the row in the syscache, so will
+ * here and there. So if we see the row in the syscache, so will
* pg_class_aclcheck.
*/
if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(tableoid)))
@@ -4904,7 +4904,7 @@ is_member_of_role_nosuper(Oid member, Oid role)
/*
- * Is member an admin of role? That is, is member the role itself (subject to
+ * Is member an admin of role? That is, is member the role itself (subject to
* restrictions below), a member (directly or indirectly) WITH ADMIN OPTION,
* or a superuser?
*/
@@ -4919,6 +4919,7 @@ is_admin_of_role(Oid member, Oid role)
return true;
if (member == role)
+
/*
* A role can admin itself when it matches the session user and we're
* outside any security-restricted operation, SECURITY DEFINER or
@@ -5015,14 +5016,14 @@ count_one_bits(AclMode mask)
* The grantor must always be either the object owner or some role that has
* been explicitly granted grant options. This ensures that all granted
* privileges appear to flow from the object owner, and there are never
- * multiple "original sources" of a privilege. Therefore, if the would-be
+ * multiple "original sources" of a privilege. Therefore, if the would-be
* grantor is a member of a role that has the needed grant options, we have
* to do the grant as that role instead.
*
* It is possible that the would-be grantor is a member of several roles
* that have different subsets of the desired grant options, but no one
* role has 'em all. In this case we pick a role with the largest number
- * of desired options. Ties are broken in favor of closer ancestors.
+ * of desired options. Ties are broken in favor of closer ancestors.
*
* roleId: the role attempting to do the GRANT/REVOKE
* privileges: the privileges to be granted/revoked
diff --git a/src/backend/utils/adt/array_selfuncs.c b/src/backend/utils/adt/array_selfuncs.c
index 20eb358a620..170a28a067c 100644
--- a/src/backend/utils/adt/array_selfuncs.c
+++ b/src/backend/utils/adt/array_selfuncs.c
@@ -524,7 +524,7 @@ mcelem_array_selec(ArrayType *array, TypeCacheEntry *typentry,
/*
* Estimate selectivity of "column @> const" and "column && const" based on
- * most common element statistics. This estimation assumes element
+ * most common element statistics. This estimation assumes element
* occurrences are independent.
*
* mcelem (of length nmcelem) and numbers (of length nnumbers) are from
@@ -689,7 +689,7 @@ mcelem_array_contain_overlap_selec(Datum *mcelem, int nmcelem,
* In the "column @> const" and "column && const" cases, we usually have a
* "const" with low number of elements (otherwise we have selectivity close
* to 0 or 1 respectively). That's why the effect of dependence related
- * to distinct element count distribution is negligible there. In the
+ * to distinct element count distribution is negligible there. In the
* "column <@ const" case, number of elements is usually high (otherwise we
* have selectivity close to 0). That's why we should do a correction with
* the array distinct element count distribution here.
@@ -848,7 +848,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
/*
* The presence of many distinct rare elements materially decreases
* selectivity. Use the Poisson distribution to estimate the probability
- * of a column value having zero occurrences of such elements. See above
+ * of a column value having zero occurrences of such elements. See above
* for the definition of "rest".
*/
mult *= exp(-rest);
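
For reference, the Poisson estimate being applied here: if the rare elements
occur independently with total expected count rest, the probability that a
column value contains none of them is

    P(no rare elements present) = e^(-rest)

which is exactly the multiplier folded into mult above.
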
@@ -856,7 +856,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
/*----------
* Using the distinct element count histogram requires
* O(unique_nitems * (nmcelem + unique_nitems))
- * operations. Beyond a certain computational cost threshold, it's
+ * operations. Beyond a certain computational cost threshold, it's
* reasonable to sacrifice accuracy for decreased planning time. We limit
* the number of operations to EFFORT * nmcelem; since nmcelem is limited
* by the column's statistics target, the work done is user-controllable.
@@ -868,7 +868,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
* elements to start with, we'd have to remove any discarded elements'
* frequencies from "mult", but since this is only an approximation
* anyway, we don't bother with that. Therefore it's sufficient to qsort
- * elem_selec[] and take the largest elements. (They will no longer match
+ * elem_selec[] and take the largest elements. (They will no longer match
* up with the elements of array_data[], but we don't care.)
*----------
*/
@@ -878,7 +878,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
unique_nitems > EFFORT * nmcelem / (nmcelem + unique_nitems))
{
/*
- * Use the quadratic formula to solve for largest allowable N. We
+ * Use the quadratic formula to solve for largest allowable N. We
* have A = 1, B = nmcelem, C = - EFFORT * nmcelem.
*/
double b = (double) nmcelem;
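
Spelling out the algebra behind that comment: the operation budget
N * (nmcelem + N) <= EFFORT * nmcelem rearranges to

    N^2 + nmcelem * N - EFFORT * nmcelem <= 0

with A = 1, B = nmcelem, C = -EFFORT * nmcelem as stated, so the largest
allowable N is

    N = (-B + sqrt(B^2 - 4*A*C)) / (2*A)
      = (-nmcelem + sqrt(nmcelem^2 + 4 * EFFORT * nmcelem)) / 2
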
@@ -953,7 +953,7 @@ calc_hist(const float4 *hist, int nhist, int n)
/*
* frac is a probability contribution for each interval between histogram
- * values. We have nhist - 1 intervals, so contribution of each one will
+ * values. We have nhist - 1 intervals, so contribution of each one will
* be 1 / (nhist - 1).
*/
frac = 1.0f / ((float) (nhist - 1));
@@ -1020,8 +1020,8 @@ calc_hist(const float4 *hist, int nhist, int n)
* "rest" is the sum of the probabilities of all low-probability events not
* included in p.
*
- * Imagine matrix M of size (n + 1) x (m + 1). Element M[i,j] denotes the
- * probability that exactly j of first i events occur. Obviously M[0,0] = 1.
+ * Imagine matrix M of size (n + 1) x (m + 1). Element M[i,j] denotes the
+ * probability that exactly j of first i events occur. Obviously M[0,0] = 1.
* For any constant j, each increment of i increases the probability iff the
* event occurs. So, by the law of total probability:
* M[i,j] = M[i - 1, j] * (1 - p[i]) + M[i - 1, j - 1] * p[i]
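
A short C sketch of that recurrence, as a rolling one-row version of the
matrix M (hypothetical function name, not the backend's implementation):

#include <stdio.h>
#include <stdlib.h>

/* row[j] = probability that exactly j of the events seen so far occur.
 * Starts from M[0,0] = 1 and applies
 *     M[i,j] = M[i-1,j] * (1 - p[i]) + M[i-1,j-1] * p[i]
 * in place by sweeping j downward. */
static double *
exact_event_probs(const double *p, int n, int m)
{
    double     *row = calloc(m + 1, sizeof(double));

    row[0] = 1.0;
    for (int i = 1; i <= n; i++)
        for (int j = (i < m ? i : m); j >= 0; j--)
            row[j] = row[j] * (1.0 - p[i - 1]) +
                (j > 0 ? row[j - 1] * p[i - 1] : 0.0);
    return row;                 /* caller frees */
}

int
main(void)
{
    double      p[] = {0.5, 0.5};
    double     *row = exact_event_probs(p, 2, 2);

    printf("%.2f %.2f %.2f\n", row[0], row[1], row[2]);     /* 0.25 0.50 0.25 */
    free(row);
    return 0;
}
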
@@ -1143,7 +1143,7 @@ floor_log2(uint32 n)
/*
* find_next_mcelem binary-searches a most common elements array, starting
- * from *index, for the first member >= value. It saves the position of the
+ * from *index, for the first member >= value. It saves the position of the
* match into *index and returns true if it's an exact match. (Note: we
* assume the mcelem elements are distinct so there can't be more than one
* exact match.)
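
The search described there is an ordinary lower-bound binary search; a
self-contained sketch with plain ints instead of Datums (hypothetical name):

#include <stdio.h>

static int
find_next(const int *arr, int n, int *index, int value)
{
    int         lo = *index,
                hi = n;

    while (lo < hi)
    {
        int         mid = lo + (hi - lo) / 2;

        if (arr[mid] < value)
            lo = mid + 1;
        else
            hi = mid;
    }
    *index = lo;                /* position of first member >= value */
    return lo < n && arr[lo] == value;
}

int
main(void)
{
    int         mcelem[] = {2, 4, 8, 16};
    int         idx = 0;

    printf("%d at %d\n", find_next(mcelem, 4, &idx, 8), idx);   /* 1 at 2 */
    printf("%d at %d\n", find_next(mcelem, 4, &idx, 9), idx);   /* 0 at 3 */
    return 0;
}
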
diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c
index 70aba1b5d8d..4d7e9c311fb 100644
--- a/src/backend/utils/adt/array_typanalyze.c
+++ b/src/backend/utils/adt/array_typanalyze.c
@@ -160,13 +160,13 @@ array_typanalyze(PG_FUNCTION_ARGS)
* compute_array_stats() -- compute statistics for a array column
*
* This function computes statistics useful for determining selectivity of
- * the array operators <@, &&, and @>. It is invoked by ANALYZE via the
+ * the array operators <@, &&, and @>. It is invoked by ANALYZE via the
* compute_stats hook after sample rows have been collected.
*
* We also invoke the standard compute_stats function, which will compute
* "scalar" statistics relevant to the btree-style array comparison operators.
* However, exact duplicates of an entire array may be rare despite many
- * arrays sharing individual elements. This especially afflicts long arrays,
+ * arrays sharing individual elements. This especially afflicts long arrays,
* which are also liable to lack all scalar statistics due to the low
* WIDTH_THRESHOLD used in analyze.c. So, in addition to the standard stats,
* we find the most common array elements and compute a histogram of distinct
@@ -201,7 +201,7 @@ array_typanalyze(PG_FUNCTION_ARGS)
* In the absence of a principled basis for other particular values, we
* follow ts_typanalyze() and use parameters s = 0.07/K, epsilon = s/10.
* But we leave out the correction for stopwords, which do not apply to
- * arrays. These parameters give bucket width w = K/0.007 and maximum
+ * arrays. These parameters give bucket width w = K/0.007 and maximum
* expected hashtable size of about 1000 * K.
*
* Elements may repeat within an array. Since duplicates do not change the
@@ -463,7 +463,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* Construct an array of the interesting hashtable items, that is,
- * those meeting the cutoff frequency (s - epsilon)*N. Also identify
+ * those meeting the cutoff frequency (s - epsilon)*N. Also identify
* the minimum and maximum frequencies among these items.
*
* Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
@@ -498,7 +498,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* If we obtained more elements than we really want, get rid of those
- * with least frequencies. The easiest way is to qsort the array into
+ * with least frequencies. The easiest way is to qsort the array into
* descending frequency order and truncate the array.
*/
if (num_mcelem < track_len)
@@ -532,7 +532,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* We sorted statistics on the element value, but we want to be
* able to find the minimal and maximal frequencies without going
- * through all the values. We also want the frequency of null
+ * through all the values. We also want the frequency of null
* elements. Store these three values at the end of mcelem_freqs.
*/
mcelem_values = (Datum *) palloc(num_mcelem * sizeof(Datum));
@@ -623,7 +623,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
* (compare the histogram-making loop in compute_scalar_stats()).
* But instead of that we have the sorted_count_items[] array,
* which holds unique DEC values with their frequencies (that is,
- * a run-length-compressed version of the full array). So we
+ * a run-length-compressed version of the full array). So we
* control advancing through sorted_count_items[] with the
* variable "frac", which is defined as (x - y) * (num_hist - 1),
* where x is the index in the notional DECs array corresponding
diff --git a/src/backend/utils/adt/array_userfuncs.c b/src/backend/utils/adt/array_userfuncs.c
index c62e3fb1765..831466dec91 100644
--- a/src/backend/utils/adt/array_userfuncs.c
+++ b/src/backend/utils/adt/array_userfuncs.c
@@ -502,7 +502,7 @@ array_agg_transfn(PG_FUNCTION_ARGS)
/*
* The transition type for array_agg() is declared to be "internal", which
- * is a pass-by-value type the same size as a pointer. So we can safely
+ * is a pass-by-value type the same size as a pointer. So we can safely
* pass the ArrayBuildState pointer through nodeAgg.c's machinations.
*/
PG_RETURN_POINTER(state);
@@ -517,7 +517,7 @@ array_agg_finalfn(PG_FUNCTION_ARGS)
int lbs[1];
/*
- * Test for null before Asserting we are in right context. This is to
+ * Test for null before Asserting we are in right context. This is to
* avoid possible Assert failure in 8.4beta installations, where it is
* possible for users to create NULL constants of type internal.
*/
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index 91df1842427..f8e94ec3652 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -694,7 +694,7 @@ ReadArrayStr(char *arrayStr,
/*
* We have to remove " and \ characters to create a clean item value to
- * pass to the datatype input routine. We overwrite each item value
+ * pass to the datatype input routine. We overwrite each item value
* in-place within arrayStr to do this. srcptr is the current scan point,
* and dstptr is where we are copying to.
*
@@ -894,7 +894,7 @@ ReadArrayStr(char *arrayStr,
* referenced by Datums after copying them.
*
* If the input data is of varlena type, the caller must have ensured that
- * the values are not toasted. (Doing it here doesn't work since the
+ * the values are not toasted. (Doing it here doesn't work since the
* caller has already allocated space for the array...)
*/
static void
@@ -1747,6 +1747,7 @@ Datum
array_cardinality(PG_FUNCTION_ARGS)
{
ArrayType *v = PG_GETARG_ARRAYTYPE_P(0);
+
PG_RETURN_INT32(ArrayGetNItems(ARR_NDIM(v), ARR_DIMS(v)));
}
@@ -2002,7 +2003,7 @@ array_get_slice(ArrayType *array,
memcpy(ARR_DIMS(newarray), span, ndim * sizeof(int));
/*
- * Lower bounds of the new array are set to 1. Formerly (before 7.3) we
+ * Lower bounds of the new array are set to 1. Formerly (before 7.3) we
* copied the given lowerIndx values ... but that seems confusing.
*/
newlb = ARR_LBOUND(newarray);
@@ -2634,7 +2635,7 @@ array_set_slice(ArrayType *array,
/*
* array_map()
*
- * Map an array through an arbitrary function. Return a new array with
+ * Map an array through an arbitrary function. Return a new array with
* same dimensions and each source element transformed by fn(). Each
* source element is passed as the first argument to fn(); additional
* arguments to be passed to fn() can be specified by the caller.
@@ -2649,9 +2650,9 @@ array_set_slice(ArrayType *array,
* first argument position initially holds the input array value.
* * inpType: OID of element type of input array. This must be the same as,
* or binary-compatible with, the first argument type of fn().
- * * retType: OID of element type of output array. This must be the same as,
+ * * retType: OID of element type of output array. This must be the same as,
* or binary-compatible with, the result type of fn().
- * * amstate: workspace for array_map. Must be zeroed by caller before
+ * * amstate: workspace for array_map. Must be zeroed by caller before
* first call, and not touched after that.
*
* It is legitimate to pass a freshly-zeroed ArrayMapState on each call,
@@ -3505,7 +3506,7 @@ array_cmp(FunctionCallInfo fcinfo)
/*
* If arrays contain same data (up to end of shorter one), apply
- * additional rules to sort by dimensionality. The relative significance
+ * additional rules to sort by dimensionality. The relative significance
* of the different bits of information is historical; mainly we just care
* that we don't say "equal" for arrays of different dimensionality.
*/
@@ -3767,7 +3768,7 @@ array_contain_compare(ArrayType *array1, ArrayType *array2, Oid collation,
/*
* We assume that the comparison operator is strict, so a NULL can't
- * match anything. XXX this diverges from the "NULL=NULL" behavior of
+ * match anything. XXX this diverges from the "NULL=NULL" behavior of
* array_eq, should we act like that?
*/
if (isnull1)
@@ -4258,7 +4259,7 @@ array_copy(char *destptr, int nitems,
*
* Note: this could certainly be optimized using standard bitblt methods.
* However, it's not clear that the typical Postgres array has enough elements
- * to make it worth worrying too much. For the moment, KISS.
+ * to make it worth worrying too much. For the moment, KISS.
*/
void
array_bitmap_copy(bits8 *destbitmap, int destoffset,
@@ -4455,7 +4456,7 @@ array_extract_slice(ArrayType *newarray,
* Insert a slice into an array.
*
* ndim/dim[]/lb[] are dimensions of the original array. A new array with
- * those same dimensions is to be constructed. destArray must already
+ * those same dimensions is to be constructed. destArray must already
* have been allocated and its header initialized.
*
* st[]/endp[] identify the slice to be replaced. Elements within the slice
@@ -5123,7 +5124,7 @@ array_unnest(PG_FUNCTION_ARGS)
* Get the array value and detoast if needed. We can't do this
* earlier because if we have to detoast, we want the detoasted copy
* to be in multi_call_memory_ctx, so it will go away when we're done
- * and not before. (If no detoast happens, we assume the originally
+ * and not before. (If no detoast happens, we assume the originally
* passed array will stick around till then.)
*/
arr = PG_GETARG_ARRAYTYPE_P(0);
@@ -5199,7 +5200,7 @@ array_unnest(PG_FUNCTION_ARGS)
*
* Find all array entries matching (not distinct from) search/search_isnull,
* and delete them if remove is true, else replace them with
- * replace/replace_isnull. Comparisons are done using the specified
+ * replace/replace_isnull. Comparisons are done using the specified
* collation. fcinfo is passed only for caching purposes.
*/
static ArrayType *
@@ -5271,7 +5272,7 @@ array_replace_internal(ArrayType *array,
typalign = typentry->typalign;
/*
- * Detoast values if they are toasted. The replacement value must be
+ * Detoast values if they are toasted. The replacement value must be
* detoasted for insertion into the result array, while detoasting the
* search value only once saves cycles.
*/
diff --git a/src/backend/utils/adt/arrayutils.c b/src/backend/utils/adt/arrayutils.c
index 5b1afa0d8f2..477ccadfb85 100644
--- a/src/backend/utils/adt/arrayutils.c
+++ b/src/backend/utils/adt/arrayutils.c
@@ -193,7 +193,7 @@ mda_next_tuple(int n, int *curr, const int *span)
/*
* ArrayGetIntegerTypmods: verify that argument is a 1-D cstring array,
- * and get the contents converted to integers. Returns a palloc'd array
+ * and get the contents converted to integers. Returns a palloc'd array
* and places the length at *n.
*/
int32 *
diff --git a/src/backend/utils/adt/cash.c b/src/backend/utils/adt/cash.c
index 015875875be..6aba20de851 100644
--- a/src/backend/utils/adt/cash.c
+++ b/src/backend/utils/adt/cash.c
@@ -382,79 +382,79 @@ cash_out(PG_FUNCTION_ARGS)
case 0:
if (cs_precedes)
result = psprintf("(%s%s%s)",
- csymbol,
- (sep_by_space == 1) ? " " : "",
- bufptr);
+ csymbol,
+ (sep_by_space == 1) ? " " : "",
+ bufptr);
else
result = psprintf("(%s%s%s)",
- bufptr,
- (sep_by_space == 1) ? " " : "",
- csymbol);
+ bufptr,
+ (sep_by_space == 1) ? " " : "",
+ csymbol);
break;
case 1:
default:
if (cs_precedes)
result = psprintf("%s%s%s%s%s",
- signsymbol,
- (sep_by_space == 2) ? " " : "",
- csymbol,
- (sep_by_space == 1) ? " " : "",
- bufptr);
+ signsymbol,
+ (sep_by_space == 2) ? " " : "",
+ csymbol,
+ (sep_by_space == 1) ? " " : "",
+ bufptr);
else
result = psprintf("%s%s%s%s%s",
- signsymbol,
- (sep_by_space == 2) ? " " : "",
- bufptr,
- (sep_by_space == 1) ? " " : "",
- csymbol);
+ signsymbol,
+ (sep_by_space == 2) ? " " : "",
+ bufptr,
+ (sep_by_space == 1) ? " " : "",
+ csymbol);
break;
case 2:
if (cs_precedes)
result = psprintf("%s%s%s%s%s",
- csymbol,
- (sep_by_space == 1) ? " " : "",
- bufptr,
- (sep_by_space == 2) ? " " : "",
- signsymbol);
+ csymbol,
+ (sep_by_space == 1) ? " " : "",
+ bufptr,
+ (sep_by_space == 2) ? " " : "",
+ signsymbol);
else
result = psprintf("%s%s%s%s%s",
- bufptr,
- (sep_by_space == 1) ? " " : "",
- csymbol,
- (sep_by_space == 2) ? " " : "",
- signsymbol);
+ bufptr,
+ (sep_by_space == 1) ? " " : "",
+ csymbol,
+ (sep_by_space == 2) ? " " : "",
+ signsymbol);
break;
case 3:
if (cs_precedes)
result = psprintf("%s%s%s%s%s",
- signsymbol,
- (sep_by_space == 2) ? " " : "",
- csymbol,
- (sep_by_space == 1) ? " " : "",
- bufptr);
+ signsymbol,
+ (sep_by_space == 2) ? " " : "",
+ csymbol,
+ (sep_by_space == 1) ? " " : "",
+ bufptr);
else
result = psprintf("%s%s%s%s%s",
- bufptr,
- (sep_by_space == 1) ? " " : "",
- signsymbol,
- (sep_by_space == 2) ? " " : "",
- csymbol);
+ bufptr,
+ (sep_by_space == 1) ? " " : "",
+ signsymbol,
+ (sep_by_space == 2) ? " " : "",
+ csymbol);
break;
case 4:
if (cs_precedes)
result = psprintf("%s%s%s%s%s",
- csymbol,
- (sep_by_space == 2) ? " " : "",
- signsymbol,
- (sep_by_space == 1) ? " " : "",
- bufptr);
+ csymbol,
+ (sep_by_space == 2) ? " " : "",
+ signsymbol,
+ (sep_by_space == 1) ? " " : "",
+ bufptr);
else
result = psprintf("%s%s%s%s%s",
- bufptr,
- (sep_by_space == 1) ? " " : "",
- csymbol,
- (sep_by_space == 2) ? " " : "",
- signsymbol);
+ bufptr,
+ (sep_by_space == 1) ? " " : "",
+ csymbol,
+ (sep_by_space == 2) ? " " : "",
+ signsymbol);
break;
}
diff --git a/src/backend/utils/adt/char.c b/src/backend/utils/adt/char.c
index 99191e1d90c..e0d974eea5a 100644
--- a/src/backend/utils/adt/char.c
+++ b/src/backend/utils/adt/char.c
@@ -59,7 +59,7 @@ charout(PG_FUNCTION_ARGS)
* charrecv - converts external binary format to char
*
* The external representation is one byte, with no character set
- * conversion. This is somewhat dubious, perhaps, but in many
+ * conversion. This is somewhat dubious, perhaps, but in many
* cases people use char for a 1-byte binary type.
*/
Datum
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index 06cc0cda0f0..073104d4bac 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -1358,7 +1358,7 @@ AdjustTimeForTypmod(TimeADT *time, int32 typmod)
* Note: this round-to-nearest code is not completely consistent about
* rounding values that are exactly halfway between integral values.
* On most platforms, rint() will implement round-to-nearest-even, but
- * the integer code always rounds up (away from zero). Is it worth
+ * the integer code always rounds up (away from zero). Is it worth
* trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
@@ -1706,7 +1706,7 @@ time_interval(PG_FUNCTION_ARGS)
* Convert interval to time data type.
*
* This is defined as producing the fractional-day portion of the interval.
- * Therefore, we can just ignore the months field. It is not real clear
+ * Therefore, we can just ignore the months field. It is not real clear
* what to do with negative intervals, but we choose to subtract the floor,
* so that, say, '-2 hours' becomes '22:00:00'.
*/
@@ -2695,7 +2695,7 @@ timetz_zone(PG_FUNCTION_ARGS)
pg_tz *tzp;
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index d200437e628..7632d1177e6 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -351,7 +351,7 @@ j2date(int jd, int *year, int *month, int *day)
* j2day - convert Julian date to day-of-week (0..6 == Sun..Sat)
*
* Note: various places use the locution j2day(date - 1) to produce a
- * result according to the convention 0..6 = Mon..Sun. This is a bit of
+ * result according to the convention 0..6 = Mon..Sun. This is a bit of
* a crock, but will work as long as the computation here is just a modulo.
*/
int
@@ -819,10 +819,11 @@ DecodeDateTime(char **field, int *ftype, int nf,
switch (ftype[i])
{
case DTK_DATE:
+
/*
- * Integral julian day with attached time zone?
- * All other forms with JD will be separated into
- * distinct fields, so we handle just this case here.
+ * Integral julian day with attached time zone? All other
+ * forms with JD will be separated into distinct fields, so we
+ * handle just this case here.
*/
if (ptype == DTK_JULIAN)
{
@@ -849,6 +850,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
ptype = 0;
break;
}
+
/*
* Already have a date? Then this might be a time zone name
* with embedded punctuation (e.g. "America/New_York") or a
@@ -1158,17 +1160,18 @@ DecodeDateTime(char **field, int *ftype, int nf,
if (dterr < 0)
return dterr;
}
+
/*
* Is this a YMD or HMS specification, or a year number?
* YMD and HMS are required to be six digits or more, so
* if it is 5 digits, it is a year. If it is six or more
* more digits, we assume it is YMD or HMS unless no date
- * and no time values have been specified. This forces
- * 6+ digit years to be at the end of the string, or to use
+ * and no time values have been specified. This forces 6+
+ * digit years to be at the end of the string, or to use
* the ISO date specification.
*/
else if (flen >= 6 && (!(fmask & DTK_DATE_M) ||
- !(fmask & DTK_TIME_M)))
+ !(fmask & DTK_TIME_M)))
{
dterr = DecodeNumberField(flen, field[i], fmask,
&tmask, tm,
@@ -2490,7 +2493,7 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
/*
* Nothing so far; make a decision about what we think the input
- * is. There used to be lots of heuristics here, but the
+ * is. There used to be lots of heuristics here, but the
* consensus now is to be paranoid. It *must* be either
* YYYY-MM-DD (with a more-than-two-digit year field), or the
* field order defined by DateOrder.
@@ -2523,9 +2526,9 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
{
/*
* We are at the first numeric field of a date that included a
- * textual month name. We want to support the variants
+ * textual month name. We want to support the variants
* MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as unambiguous
- * inputs. We will also accept MON-DD-YY or DD-MON-YY in
+ * inputs. We will also accept MON-DD-YY or DD-MON-YY in
* either DMY or MDY modes, as well as YY-MON-DD in YMD mode.
*/
if (flen >= 3 || DateOrder == DATEORDER_YMD)
@@ -2654,6 +2657,7 @@ DecodeNumberField(int len, char *str, int fmask,
if (len >= 6)
{
*tmask = DTK_DATE_M;
+
/*
* Start from end and consider first 2 as Day, next 2 as Month,
* and the rest as Year.
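
A standalone C sketch of that right-to-left split for a run of six or more
digits (hypothetical helper, much simplified from DecodeNumberField):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split "...YYYYMMDD": last two digits are the day, the two before that
 * the month, everything else the year.  Assumes strlen(str) >= 6 and all
 * characters are digits. */
static void
split_ymd(const char *str, int *year, int *month, int *day)
{
    int         len = (int) strlen(str);
    char        buf[16];

    *day = atoi(str + len - 2);

    memcpy(buf, str + len - 4, 2);
    buf[2] = '\0';
    *month = atoi(buf);

    memcpy(buf, str, len - 4);
    buf[len - 4] = '\0';
    *year = atoi(buf);
}

int
main(void)
{
    int         y,
                m,
                d;

    split_ymd("19990108", &y, &m, &d);
    printf("%04d-%02d-%02d\n", y, m, d);    /* 1999-01-08 */
    return 0;
}
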
@@ -2890,7 +2894,7 @@ DecodeInterval(char **field, int *ftype, int nf, int range,
Assert(*field[i] == '-' || *field[i] == '+');
/*
- * Check for signed hh:mm or hh:mm:ss. If so, process exactly
+ * Check for signed hh:mm or hh:mm:ss. If so, process exactly
* like DTK_TIME case above, plus handling the sign.
*/
if (strchr(field[i] + 1, ':') != NULL &&
@@ -2978,8 +2982,8 @@ DecodeInterval(char **field, int *ftype, int nf, int range,
type = DTK_MONTH;
if (*field[i] == '-')
val2 = -val2;
- if (((double)val * MONTHS_PER_YEAR + val2) > INT_MAX ||
- ((double)val * MONTHS_PER_YEAR + val2) < INT_MIN)
+ if (((double) val * MONTHS_PER_YEAR + val2) > INT_MAX ||
+ ((double) val * MONTHS_PER_YEAR + val2) < INT_MIN)
return DTERR_FIELD_OVERFLOW;
val = val * MONTHS_PER_YEAR + val2;
fval = 0;
@@ -3327,7 +3331,7 @@ DecodeISO8601Interval(char *str,
return dterr;
/*
- * Note: we could step off the end of the string here. Code below
+ * Note: we could step off the end of the string here. Code below
* *must* exit the loop if unit == '\0'.
*/
unit = *str++;
@@ -4130,7 +4134,7 @@ EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str)
/*
* We've been burnt by stupid errors in the ordering of the datetkn tables
- * once too often. Arrange to check them during postmaster start.
+ * once too often. Arrange to check them during postmaster start.
*/
static bool
CheckDateTokenTable(const char *tablename, const datetkn *base, int nel)
diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c
index 4b5d65c5ff5..a79d5d587cc 100644
--- a/src/backend/utils/adt/datum.c
+++ b/src/backend/utils/adt/datum.c
@@ -181,7 +181,7 @@ datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen)
/*
* just compare the two datums. NOTE: just comparing "len" bytes will
* not do the work, because we do not know how these bytes are aligned
- * inside the "Datum". We assume instead that any given datatype is
+ * inside the "Datum". We assume instead that any given datatype is
* consistent about how it fills extraneous bits in the Datum.
*/
res = (value1 == value2);
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index 68ab0e19061..8c663379ae7 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -358,6 +358,7 @@ calculate_toast_table_size(Oid toastrelid)
foreach(lc, indexlist)
{
Relation toastIdxRel;
+
toastIdxRel = relation_open(lfirst_oid(lc),
AccessShareLock);
for (forkNum = 0; forkNum <= MAX_FORKNUM; forkNum++)
@@ -689,7 +690,7 @@ pg_size_pretty_numeric(PG_FUNCTION_ARGS)
* This is expected to be used in queries like
* SELECT pg_relation_filenode(oid) FROM pg_class;
* That leads to a couple of choices. We work from the pg_class row alone
- * rather than actually opening each relation, for efficiency. We don't
+ * rather than actually opening each relation, for efficiency. We don't
* fail if we can't find the relation --- some rows might be visible in
* the query's MVCC snapshot even though the relations have been dropped.
* (Note: we could avoid using the catcache, but there's little point
diff --git a/src/backend/utils/adt/domains.c b/src/backend/utils/adt/domains.c
index 515481805a7..bbca5d68baf 100644
--- a/src/backend/utils/adt/domains.c
+++ b/src/backend/utils/adt/domains.c
@@ -12,11 +12,11 @@
* The overhead required for constraint checking can be high, since examining
* the catalogs to discover the constraints for a given domain is not cheap.
* We have three mechanisms for minimizing this cost:
- * 1. In a nest of domains, we flatten the checking of all the levels
+ * 1. In a nest of domains, we flatten the checking of all the levels
* into just one operation.
- * 2. We cache the list of constraint items in the FmgrInfo struct
+ * 2. We cache the list of constraint items in the FmgrInfo struct
* passed by the caller.
- * 3. If there are CHECK constraints, we cache a standalone ExprContext
+ * 3. If there are CHECK constraints, we cache a standalone ExprContext
* to evaluate them in.
*
*
@@ -311,7 +311,7 @@ domain_recv(PG_FUNCTION_ARGS)
/*
* domain_check - check that a datum satisfies the constraints of a
- * domain. extra and mcxt can be passed if they are available from,
+ * domain. extra and mcxt can be passed if they are available from,
* say, a FmgrInfo structure, or they can be NULL, in which case the
* setup is repeated for each call.
*/
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index 774267ed5d2..41b3eaa2135 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -276,7 +276,7 @@ float4in(PG_FUNCTION_ARGS)
/*
* Some platforms return ERANGE for denormalized numbers (those
* that are not zero, but are too close to zero to have full
- * precision). We'd prefer not to throw error for that, so try to
+ * precision). We'd prefer not to throw error for that, so try to
* detect whether it's a "real" out-of-range condition by checking
* to see if the result is zero or huge.
*/
@@ -475,7 +475,7 @@ float8in(PG_FUNCTION_ARGS)
/*
* Some platforms return ERANGE for denormalized numbers (those
* that are not zero, but are too close to zero to have full
- * precision). We'd prefer not to throw error for that, so try to
+ * precision). We'd prefer not to throw error for that, so try to
* detect whether it's a "real" out-of-range condition by checking
* to see if the result is zero or huge.
*/
@@ -2054,7 +2054,7 @@ float8_stddev_samp(PG_FUNCTION_ARGS)
* in that order. Note that Y is the first argument to the aggregates!
*
* It might seem attractive to optimize this by having multiple accumulator
- * functions that only calculate the sums actually needed. But on most
+ * functions that only calculate the sums actually needed. But on most
* modern machines, a couple of extra floating-point multiplies will be
* insignificant compared to the other per-tuple overhead, so I've chosen
* to minimize code space instead.
diff --git a/src/backend/utils/adt/format_type.c b/src/backend/utils/adt/format_type.c
index 5b75d34dcbc..e1763a37642 100644
--- a/src/backend/utils/adt/format_type.c
+++ b/src/backend/utils/adt/format_type.c
@@ -44,14 +44,14 @@ static char *printTypmod(const char *typname, int32 typmod, Oid typmodout);
* double quoted if it contains funny characters or matches a keyword.
*
* If typemod is NULL then we are formatting a type name in a context where
- * no typemod is available, eg a function argument or result type. This
+ * no typemod is available, eg a function argument or result type. This
* yields a slightly different result from specifying typemod = -1 in some
* cases. Given typemod = -1 we feel compelled to produce an output that
* the parser will interpret as having typemod -1, so that pg_dump will
- * produce CREATE TABLE commands that recreate the original state. But
+ * produce CREATE TABLE commands that recreate the original state. But
* given NULL typemod, we assume that the parser's interpretation of
* typemod doesn't matter, and so we are willing to output a slightly
- * "prettier" representation of the same type. For example, type = bpchar
+ * "prettier" representation of the same type. For example, type = bpchar
* and typemod = NULL gets you "character", whereas typemod = -1 gets you
* "bpchar" --- the former will be interpreted as character(1) by the
* parser, which does not yield typemod -1.
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 2099ad0c302..15bcefd0021 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1823,7 +1823,7 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
/*
* Note: we assume that toupper_l()/tolower_l() will not be so broken
- * as to need guard tests. When using the default collation, we apply
+ * as to need guard tests. When using the default collation, we apply
* the traditional Postgres behavior that forces ASCII-style treatment
* of I/i, but in non-default collations you get exactly what the
* collation says.
@@ -3629,7 +3629,7 @@ do_to_timestamp(text *date_txt, text *fmt,
{
/*
* The month and day field have not been set, so we use the
- * day-of-year field to populate them. Depending on the date mode,
+ * day-of-year field to populate them. Depending on the date mode,
* this field may be interpreted as a Gregorian day-of-year, or an ISO
* week date day-of-year.
*/
diff --git a/src/backend/utils/adt/geo_ops.c b/src/backend/utils/adt/geo_ops.c
index 72cb4e991fc..54391fd7aba 100644
--- a/src/backend/utils/adt/geo_ops.c
+++ b/src/backend/utils/adt/geo_ops.c
@@ -32,7 +32,10 @@
* Internal routines
*/
-enum path_delim { PATH_NONE, PATH_OPEN, PATH_CLOSED };
+enum path_delim
+{
+ PATH_NONE, PATH_OPEN, PATH_CLOSED
+};
static int point_inside(Point *p, int npts, Point *plist);
static int lseg_crossing(double x, double y, double px, double py);
@@ -1024,7 +1027,7 @@ line_out(PG_FUNCTION_ARGS)
Datum
line_recv(PG_FUNCTION_ARGS)
{
- StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+ StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
LINE *line;
line = (LINE *) palloc(sizeof(LINE));
@@ -1386,7 +1389,7 @@ path_in(PG_FUNCTION_ARGS)
}
base_size = sizeof(path->p[0]) * npts;
- size = offsetof(PATH, p[0]) + base_size;
+ size = offsetof(PATH, p[0]) +base_size;
/* Check for integer overflow */
if (base_size / npts != sizeof(path->p[0]) || size <= base_size)
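
That two-part test is a common overflow-detection idiom: verify the multiply
by dividing back, and verify the add by checking that the sum grew. A
self-contained sketch, with hypothetical constants standing in for
sizeof(path->p[0]) and offsetof(PATH, p[0]):

#include <stdio.h>

#define ELEMSIZE    16u         /* stands in for sizeof(path->p[0]) */
#define HDRSIZE     32u         /* stands in for offsetof(PATH, p[0]) */

static int
compute_size(unsigned npts, unsigned *size)
{
    unsigned    base_size = ELEMSIZE * npts;

    *size = HDRSIZE + base_size;

    /* multiply overflowed if dividing back loses information; add
     * overflowed if the sum failed to grow (unsigned wrap is defined) */
    if (npts == 0 || base_size / npts != ELEMSIZE || *size <= base_size)
        return 0;
    return 1;
}

int
main(void)
{
    unsigned    size;

    printf("%d\n", compute_size(100, &size));           /* 1: ok */
    printf("%d\n", compute_size(0x20000000, &size));    /* 0: overflow */
    return 0;
}
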
@@ -3448,7 +3451,7 @@ poly_in(PG_FUNCTION_ARGS)
errmsg("invalid input syntax for type polygon: \"%s\"", str)));
base_size = sizeof(poly->p[0]) * npts;
- size = offsetof(POLYGON, p[0]) + base_size;
+ size = offsetof(POLYGON, p[0]) +base_size;
/* Check for integer overflow */
if (base_size / npts != sizeof(poly->p[0]) || size <= base_size)
diff --git a/src/backend/utils/adt/geo_selfuncs.c b/src/backend/utils/adt/geo_selfuncs.c
index 99ca8edbd04..4a2156d4669 100644
--- a/src/backend/utils/adt/geo_selfuncs.c
+++ b/src/backend/utils/adt/geo_selfuncs.c
@@ -22,7 +22,7 @@
/*
- * Selectivity functions for geometric operators. These are bogus -- unless
+ * Selectivity functions for geometric operators. These are bogus -- unless
* we know the actual key distribution in the index, we can't make a good
* prediction of the selectivity of these operators.
*
@@ -34,7 +34,7 @@
* In general, GiST needs to search multiple subtrees in order to guarantee
* that all occurrences of the same key have been found. Because of this,
* the estimated cost for scanning the index ought to be higher than the
- * output selectivity would indicate. gistcostestimate(), over in selfuncs.c,
+ * output selectivity would indicate. gistcostestimate(), over in selfuncs.c,
* ought to be adjusted accordingly --- but until we can generate somewhat
* realistic numbers here, it hardly matters...
*/
diff --git a/src/backend/utils/adt/inet_cidr_ntop.c b/src/backend/utils/adt/inet_cidr_ntop.c
index 5f2a3d361d9..d33534ec173 100644
--- a/src/backend/utils/adt/inet_cidr_ntop.c
+++ b/src/backend/utils/adt/inet_cidr_ntop.c
@@ -196,7 +196,7 @@ inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
}
else
{
- /* Copy src to private buffer. Zero host part. */
+ /* Copy src to private buffer. Zero host part. */
p = (bits + 7) / 8;
memcpy(inbuf, src, p);
memset(inbuf + p, 0, 16 - p);
diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c
index 669355e4540..b8f56e5c2e1 100644
--- a/src/backend/utils/adt/int.c
+++ b/src/backend/utils/adt/int.c
@@ -642,7 +642,7 @@ int4pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
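
The sign-based test described in that comment, as a standalone sketch. The
wrapping add is done through unsigned arithmetic so the sketch itself avoids
undefined behavior; the backend instead compiles with wrapping signed
overflow (e.g. -fwrapv) and adds the ints directly:

#include <limits.h>
#include <stdio.h>

#define SAMESIGN(a,b)   (((a) < 0) == ((b) < 0))

static int
add_overflows(int arg1, int arg2)
{
    int         result = (int) ((unsigned) arg1 + (unsigned) arg2);

    /* different signs can never overflow; same signs must keep the sign */
    return SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1);
}

int
main(void)
{
    printf("%d\n", add_overflows(INT_MAX, 1));  /* 1: overflow */
    printf("%d\n", add_overflows(-5, 3));       /* 0: safe */
    return 0;
}
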
@@ -663,8 +663,8 @@ int4mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -684,7 +684,7 @@ int4mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There are two cases where this fails: arg2 = 0 (which cannot
* overflow) and arg1 = INT_MIN, arg2 = -1 (where the division itself will
* overflow and thus incorrectly match).
@@ -794,7 +794,7 @@ int2pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -815,8 +815,8 @@ int2mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -897,7 +897,7 @@ int24pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -918,8 +918,8 @@ int24mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -939,7 +939,7 @@ int24mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
@@ -985,7 +985,7 @@ int42pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -1006,8 +1006,8 @@ int42mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1027,7 +1027,7 @@ int42mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c
index e78eb2a2022..96146e0fda0 100644
--- a/src/backend/utils/adt/int8.c
+++ b/src/backend/utils/adt/int8.c
@@ -73,7 +73,7 @@ scanint8(const char *str, bool errorOK, int64 *result)
ptr++;
/*
- * Do an explicit check for INT64_MIN. Ugly though this is, it's
+ * Do an explicit check for INT64_MIN. Ugly though this is, it's
* cleaner than trying to get the loop below to handle it portably.
*/
if (strncmp(ptr, "9223372036854775808", 19) == 0)
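
The special case reads oddly but is necessary: |INT64_MIN| is not
representable as a positive int64, so the digits are matched literally
rather than accumulated. A self-contained sketch of just that branch
(hypothetical helper name):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Recognize exactly "-9223372036854775808", i.e. INT64_MIN. */
static int
scan_int64_min(const char *s, int64_t *out)
{
    if (*s == '-' &&
        strncmp(s + 1, "9223372036854775808", 19) == 0 &&
        s[20] == '\0')
    {
        *out = INT64_MIN;
        return 1;
    }
    return 0;
}

int
main(void)
{
    int64_t     v;

    if (scan_int64_min("-9223372036854775808", &v))
        printf("%lld\n", (long long) v);
    return 0;
}
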
@@ -519,7 +519,7 @@ int8pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -540,8 +540,8 @@ int8mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -561,7 +561,7 @@ int8mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There are two cases where this fails: arg2 = 0 (which cannot
* overflow) and arg1 = INT64_MIN, arg2 = -1 (where the division itself
* will overflow and thus incorrectly match).
@@ -764,7 +764,7 @@ int8dec(PG_FUNCTION_ARGS)
/*
* These functions are exactly like int8inc/int8dec but are used for
- * aggregates that count only non-null values. Since the functions are
+ * aggregates that count only non-null values. Since the functions are
* declared strict, the null checks happen before we ever get here, and all we
* need do is increment the state value. We could actually make these pg_proc
* entries point right at int8inc/int8dec, but then the opr_sanity regression
@@ -824,7 +824,7 @@ int84pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -845,8 +845,8 @@ int84mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -866,7 +866,7 @@ int84mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
@@ -933,7 +933,7 @@ int48pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -954,8 +954,8 @@ int48mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -975,7 +975,7 @@ int48mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
@@ -1021,7 +1021,7 @@ int82pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -1042,8 +1042,8 @@ int82mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1063,7 +1063,7 @@ int82mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
@@ -1130,7 +1130,7 @@ int28pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -1151,8 +1151,8 @@ int28mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1172,7 +1172,7 @@ int28mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index c34a1bb50be..16f4eccc06e 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -598,10 +598,10 @@ json_lex(JsonLexContext *lex)
/*
* We're not dealing with a string, number, legal
- * punctuation mark, or end of string. The only legal
+ * punctuation mark, or end of string. The only legal
* tokens we might find here are true, false, and null,
* but for error reporting purposes we scan until we see a
- * non-alphanumeric character. That way, we can report
+ * non-alphanumeric character. That way, we can report
* the whole word as an unexpected token, rather than just
* some unintuitive prefix thereof.
*/
@@ -897,12 +897,12 @@ json_lex_string(JsonLexContext *lex)
* begin with a '0'.
*
* (3) An optional decimal part, consisting of a period ('.') followed by
- * one or more digits. (Note: While this part can be omitted
+ * one or more digits. (Note: While this part can be omitted
* completely, it's not OK to have only the decimal point without
* any digits afterwards.)
*
* (4) An optional exponent part, consisting of 'e' or 'E', optionally
- * followed by '+' or '-', followed by one or more digits. (Note:
+ * followed by '+' or '-', followed by one or more digits. (Note:
* As with the decimal part, if 'e' or 'E' is present, it must be
* followed by at least one digit.)
*
@@ -980,7 +980,7 @@ json_lex_number(JsonLexContext *lex, char *s, bool *num_err)
}
/*
- * Check for trailing garbage. As in json_lex(), any alphanumeric stuff
+ * Check for trailing garbage. As in json_lex(), any alphanumeric stuff
* here should be considered part of the token for error-reporting
* purposes.
*/
@@ -1805,7 +1805,7 @@ json_agg_transfn(PG_FUNCTION_ARGS)
/*
* The transition type for array_agg() is declared to be "internal", which
- * is a pass-by-value type the same size as a pointer. So we can safely
+ * is a pass-by-value type the same size as a pointer. So we can safely
* pass the ArrayBuildState pointer through nodeAgg.c's machinations.
*/
PG_RETURN_POINTER(state);
diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c
index 781ab66ef2c..cf5d6f23264 100644
--- a/src/backend/utils/adt/jsonb.c
+++ b/src/backend/utils/adt/jsonb.c
@@ -22,7 +22,7 @@ typedef struct JsonbInState
{
JsonbParseState *parseState;
JsonbValue *res;
-} JsonbInState;
+} JsonbInState;
static inline Datum jsonb_from_cstring(char *json, int len);
static size_t checkStringLen(size_t len);
@@ -31,9 +31,9 @@ static void jsonb_in_object_end(void *pstate);
static void jsonb_in_array_start(void *pstate);
static void jsonb_in_array_end(void *pstate);
static void jsonb_in_object_field_start(void *pstate, char *fname, bool isnull);
-static void jsonb_put_escaped_value(StringInfo out, JsonbValue * scalarVal);
+static void jsonb_put_escaped_value(StringInfo out, JsonbValue *scalarVal);
static void jsonb_in_scalar(void *pstate, char *token, JsonTokenType tokentype);
-char *JsonbToCString(StringInfo out, char *in, int estimated_len);
+char *JsonbToCString(StringInfo out, char *in, int estimated_len);
/*
* jsonb type input function
@@ -245,7 +245,7 @@ jsonb_in_object_field_start(void *pstate, char *fname, bool isnull)
JsonbInState *_state = (JsonbInState *) pstate;
JsonbValue v;
- Assert (fname != NULL);
+ Assert(fname != NULL);
v.type = jbvString;
v.val.string.len = checkStringLen(strlen(fname));
v.val.string.val = pnstrdup(fname, v.val.string.len);
@@ -255,7 +255,7 @@ jsonb_in_object_field_start(void *pstate, char *fname, bool isnull)
}
static void
-jsonb_put_escaped_value(StringInfo out, JsonbValue * scalarVal)
+jsonb_put_escaped_value(StringInfo out, JsonbValue *scalarVal)
{
switch (scalarVal->type)
{
@@ -267,8 +267,8 @@ jsonb_put_escaped_value(StringInfo out, JsonbValue * scalarVal)
break;
case jbvNumeric:
appendStringInfoString(out,
- DatumGetCString(DirectFunctionCall1(numeric_out,
- PointerGetDatum(scalarVal->val.numeric))));
+ DatumGetCString(DirectFunctionCall1(numeric_out,
+ PointerGetDatum(scalarVal->val.numeric))));
break;
case jbvBool:
if (scalarVal->val.boolean)
@@ -296,21 +296,23 @@ jsonb_in_scalar(void *pstate, char *token, JsonTokenType tokentype)
{
case JSON_TOKEN_STRING:
- Assert (token != NULL);
+ Assert(token != NULL);
v.type = jbvString;
v.val.string.len = checkStringLen(strlen(token));
v.val.string.val = pnstrdup(token, v.val.string.len);
v.estSize += v.val.string.len;
break;
case JSON_TOKEN_NUMBER:
+
/*
- * No need to check size of numeric values, because maximum numeric
- * size is well below the JsonbValue restriction
+ * No need to check size of numeric values, because maximum
+ * numeric size is well below the JsonbValue restriction
*/
- Assert (token != NULL);
+ Assert(token != NULL);
v.type = jbvNumeric;
v.val.numeric = DatumGetNumeric(DirectFunctionCall3(numeric_in, CStringGetDatum(token), 0, -1));
- v.estSize += VARSIZE_ANY(v.val.numeric) + sizeof(JEntry) /* alignment */ ;
+
+ v.estSize += VARSIZE_ANY(v.val.numeric) +sizeof(JEntry) /* alignment */ ;
break;
case JSON_TOKEN_TRUE:
v.type = jbvBool;
diff --git a/src/backend/utils/adt/jsonb_gin.c b/src/backend/utils/adt/jsonb_gin.c
index 62546ebaf28..9f8c178ab10 100644
--- a/src/backend/utils/adt/jsonb_gin.c
+++ b/src/backend/utils/adt/jsonb_gin.c
@@ -22,12 +22,12 @@
typedef struct PathHashStack
{
- uint32 hash;
+ uint32 hash;
struct PathHashStack *parent;
-} PathHashStack;
+} PathHashStack;
static text *make_text_key(const char *str, int len, char flag);
-static text *make_scalar_key(const JsonbValue * scalarVal, char flag);
+static text *make_scalar_key(const JsonbValue *scalarVal, char flag);
/*
*
@@ -97,14 +97,14 @@ gin_extract_jsonb(PG_FUNCTION_ARGS)
* JsonbExistsStrategyNumber. Our definition of existence does not
* allow for checking the existence of a non-jbvString element (just
* like the definition of the underlying operator), because the
- * operator takes a text rhs argument (which is taken as a proxy for an
- * equivalent Jsonb string).
+ * operator takes a text rhs argument (which is taken as a proxy for
+ * an equivalent Jsonb string).
*
* The way existence is represented does not preclude an alternative
* existence operator, that takes as its rhs value an arbitrarily
- * internally-typed Jsonb. The only reason that isn't the case here is
- * that the existence operator is only really intended to determine if
- * an object has a certain key (object pair keys are of course
+ * internally-typed Jsonb. The only reason that isn't the case here
+ * is that the existence operator is only really intended to determine
+ * if an object has a certain key (object pair keys are of course
* invariably strings), which is extended to jsonb arrays. You could
* think of the default Jsonb definition of existence as being
* equivalent to a definition where all types of scalar array elements
@@ -116,11 +116,11 @@ gin_extract_jsonb(PG_FUNCTION_ARGS)
* JsonbExistsStrategyNumber, since we know that keys are strings for
* both objects and arrays, and don't have to further account for type
* mismatch. Not having to set the reset flag makes it less than
- * tempting to tighten up the definition of existence to preclude array
- * elements entirely, which would arguably be a simpler alternative.
- * In any case the infrastructure used to implement the existence
- * operator could trivially support this hypothetical, slightly
- * distinct definition of existence.
+ * tempting to tighten up the definition of existence to preclude
+ * array elements entirely, which would arguably be a simpler
+ * alternative. In any case the infrastructure used to implement the
+ * existence operator could trivially support this hypothetical,
+ * slightly distinct definition of existence.
*/
switch (r)
{
@@ -290,8 +290,10 @@ gin_triconsistent_jsonb(PG_FUNCTION_ARGS)
{
GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
+
/* Jsonb *query = PG_GETARG_JSONB(2); */
int32 nkeys = PG_GETARG_INT32(3);
+
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
GinTernaryValue res = GIN_TRUE;
@@ -299,7 +301,7 @@ gin_triconsistent_jsonb(PG_FUNCTION_ARGS)
if (strategy == JsonbContainsStrategyNumber)
{
- bool has_maybe = false;
+ bool has_maybe = false;
/*
* All extracted keys must be present. Combination of GIN_MAYBE and
@@ -323,8 +325,9 @@ gin_triconsistent_jsonb(PG_FUNCTION_ARGS)
/*
* Index doesn't have information about correspondence of Jsonb keys
* and values (as distinct from GIN keys, which a key/value pair is
- * stored as), so invariably we recheck. This is also reflected in how
- * GIN_MAYBE is given in response to there being no GIN_MAYBE input.
+ * stored as), so invariably we recheck. This is also reflected in
+ * how GIN_MAYBE is given in response to there being no GIN_MAYBE
+ * input.
*/
if (!has_maybe && res == GIN_TRUE)
res = GIN_MAYBE;
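
The combination rule reduces to a small amount of ternary logic; a standalone sketch (the typedef and GIN_* constants are stand-ins for the real access/gin.h definitions):

#include <stdbool.h>

typedef char GinTernaryValue;   /* stand-in for the access/gin.h typedef */
#define GIN_FALSE 0
#define GIN_TRUE  1
#define GIN_MAYBE 2

/*
 * Containment: every extracted key must be present.  Any MAYBE input
 * degrades the answer to MAYBE, and even an all-TRUE answer is reported
 * as MAYBE because the index cannot vouch for key/value correspondence.
 */
static GinTernaryValue
contains_triconsistent(const GinTernaryValue *check, int nkeys)
{
    GinTernaryValue res = GIN_TRUE;
    bool            has_maybe = false;
    int             i;

    for (i = 0; i < nkeys; i++)
    {
        if (check[i] == GIN_FALSE)
            return GIN_FALSE;
        if (check[i] == GIN_MAYBE)
        {
            res = GIN_MAYBE;
            has_maybe = true;
        }
    }

    if (!has_maybe && res == GIN_TRUE)
        res = GIN_MAYBE;        /* force recheck of the heap tuple */

    return res;
}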
@@ -379,8 +382,10 @@ gin_consistent_jsonb_hash(PG_FUNCTION_ARGS)
{
bool *check = (bool *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
+
/* Jsonb *query = PG_GETARG_JSONB(2); */
int32 nkeys = PG_GETARG_INT32(3);
+
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
bool *recheck = (bool *) PG_GETARG_POINTER(5);
bool res = true;
@@ -390,13 +395,13 @@ gin_consistent_jsonb_hash(PG_FUNCTION_ARGS)
elog(ERROR, "unrecognized strategy number: %d", strategy);
/*
- * jsonb_hash_ops index doesn't have information about correspondence
- * of Jsonb keys and values (as distinct from GIN keys, which a
- * key/value pair is stored as), so invariably we recheck. Besides,
- * there are some special rules around the containment of raw scalar
- * arrays and regular arrays that are not represented here. However,
- * if all of the keys are not present, that's sufficient reason to
- * return false and finish immediately.
+ * jsonb_hash_ops index doesn't have information about correspondence of
+ * Jsonb keys and values (as distinct from GIN keys, which a key/value
+ * pair is stored as), so invariably we recheck. Besides, there are some
+ * special rules around the containment of raw scalar arrays and regular
+ * arrays that are not represented here. However, if all of the keys are
+ * not present, that's sufficient reason to return false and finish
+ * immediately.
*/
*recheck = true;
for (i = 0; i < nkeys; i++)
@@ -416,12 +421,14 @@ gin_triconsistent_jsonb_hash(PG_FUNCTION_ARGS)
{
GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
+
/* Jsonb *query = PG_GETARG_JSONB(2); */
int32 nkeys = PG_GETARG_INT32(3);
+
/* Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4); */
GinTernaryValue res = GIN_TRUE;
- int32 i;
- bool has_maybe = false;
+ int32 i;
+ bool has_maybe = false;
if (strategy != JsonbContainsStrategyNumber)
elog(ERROR, "unrecognized strategy number: %d", strategy);
@@ -447,10 +454,10 @@ gin_triconsistent_jsonb_hash(PG_FUNCTION_ARGS)
/*
* jsonb_hash_ops index doesn't have information about correspondence of
- * Jsonb keys and values (as distinct from GIN keys, which for this opclass
- * are a hash of a pair, or a hash of just an element), so invariably we
- * recheck. This is also reflected in how GIN_MAYBE is given in response
- * to there being no GIN_MAYBE input.
+ * Jsonb keys and values (as distinct from GIN keys, which for this
+ * opclass are a hash of a pair, or a hash of just an element), so
+ * invariably we recheck. This is also reflected in how GIN_MAYBE is
+ * given in response to there being no GIN_MAYBE input.
*/
if (!has_maybe && res == GIN_TRUE)
res = GIN_MAYBE;
@@ -488,7 +495,7 @@ gin_extract_jsonb_hash(PG_FUNCTION_ARGS)
while ((r = JsonbIteratorNext(&it, &v, false)) != WJB_DONE)
{
- PathHashStack *tmp;
+ PathHashStack *tmp;
if (i >= total)
{
@@ -513,10 +520,10 @@ gin_extract_jsonb_hash(PG_FUNCTION_ARGS)
/*
* We pass forward hashes from previous container nesting
* levels so that nested arrays with an outermost nested
- * object will have element hashes mixed with the outermost
- * key. It's also somewhat useful to have nested objects
- * innermost values have hashes that are a function of not
- * just their own key, but outer keys too.
+ * object will have element hashes mixed with the
+ * outermost key. It's also somewhat useful to have
+ * nested objects innermost values have hashes that are a
+ * function of not just their own key, but outer keys too.
*/
stack->hash = tmp->hash;
}
@@ -526,7 +533,7 @@ gin_extract_jsonb_hash(PG_FUNCTION_ARGS)
 * At the least-nested level, initialize with a stable container
 * type proxy value
*/
- stack->hash = (r == WJB_BEGIN_ARRAY)? JB_FARRAY:JB_FOBJECT;
+ stack->hash = (r == WJB_BEGIN_ARRAY) ? JB_FARRAY : JB_FOBJECT;
}
stack->parent = tmp;
break;
@@ -607,7 +614,7 @@ make_text_key(const char *str, int len, char flag)
* Create a textual representation of a jsonbValue for GIN storage.
*/
static text *
-make_scalar_key(const JsonbValue * scalarVal, char flag)
+make_scalar_key(const JsonbValue *scalarVal, char flag)
{
text *item;
char *cstr;
@@ -621,6 +628,7 @@ make_scalar_key(const JsonbValue * scalarVal, char flag)
item = make_text_key(scalarVal->val.boolean ? "t" : "f", 1, flag);
break;
case jbvNumeric:
+
/*
 * A normalized textual representation, free of trailing zeroes,
 * is required.
diff --git a/src/backend/utils/adt/jsonb_op.c b/src/backend/utils/adt/jsonb_op.c
index cfddccbbbbf..38bd5676739 100644
--- a/src/backend/utils/adt/jsonb_op.c
+++ b/src/backend/utils/adt/jsonb_op.c
@@ -69,7 +69,7 @@ jsonb_exists_any(PG_FUNCTION_ARGS)
if (findJsonbValueFromSuperHeader(VARDATA(jb),
JB_FOBJECT | JB_FARRAY,
plowbound,
- arrKey->val.array.elems + i) != NULL)
+ arrKey->val.array.elems + i) != NULL)
PG_RETURN_BOOL(true);
}
@@ -103,7 +103,7 @@ jsonb_exists_all(PG_FUNCTION_ARGS)
if (findJsonbValueFromSuperHeader(VARDATA(jb),
JB_FOBJECT | JB_FARRAY,
plowbound,
- arrKey->val.array.elems + i) == NULL)
+ arrKey->val.array.elems + i) == NULL)
PG_RETURN_BOOL(false);
}
@@ -116,7 +116,8 @@ jsonb_contains(PG_FUNCTION_ARGS)
Jsonb *val = PG_GETARG_JSONB(0);
Jsonb *tmpl = PG_GETARG_JSONB(1);
- JsonbIterator *it1, *it2;
+ JsonbIterator *it1,
+ *it2;
if (JB_ROOT_COUNT(val) < JB_ROOT_COUNT(tmpl) ||
JB_ROOT_IS_OBJECT(val) != JB_ROOT_IS_OBJECT(tmpl))
@@ -135,7 +136,8 @@ jsonb_contained(PG_FUNCTION_ARGS)
Jsonb *tmpl = PG_GETARG_JSONB(0);
Jsonb *val = PG_GETARG_JSONB(1);
- JsonbIterator *it1, *it2;
+ JsonbIterator *it1,
+ *it2;
if (JB_ROOT_COUNT(val) < JB_ROOT_COUNT(tmpl) ||
JB_ROOT_IS_OBJECT(val) != JB_ROOT_IS_OBJECT(tmpl))
@@ -209,7 +211,6 @@ jsonb_le(PG_FUNCTION_ARGS)
Datum
jsonb_ge(PG_FUNCTION_ARGS)
{
-
Jsonb *jba = PG_GETARG_JSONB(0);
Jsonb *jbb = PG_GETARG_JSONB(1);
bool res;
@@ -270,7 +271,7 @@ jsonb_hash(PG_FUNCTION_ARGS)
{
switch (r)
{
- /* Rotation is left to JsonbHashScalarValue() */
+ /* Rotation is left to JsonbHashScalarValue() */
case WJB_BEGIN_ARRAY:
hash ^= JB_FARRAY;
break;
diff --git a/src/backend/utils/adt/jsonb_util.c b/src/backend/utils/adt/jsonb_util.c
index 1ac145b1cd9..1caaa4a9cc3 100644
--- a/src/backend/utils/adt/jsonb_util.c
+++ b/src/backend/utils/adt/jsonb_util.c
@@ -45,10 +45,10 @@
*/
typedef struct convertLevel
{
- uint32 i; /* Iterates once per element, or once per pair */
- uint32 *header; /* Pointer to current container header */
- JEntry *meta; /* This level's metadata */
- char *begin; /* Pointer into convertState.buffer */
+ uint32 i; /* Iterates once per element, or once per pair */
+ uint32 *header; /* Pointer to current container header */
+ JEntry *meta; /* This level's metadata */
+ char *begin; /* Pointer into convertState.buffer */
} convertLevel;
/*
@@ -57,41 +57,41 @@ typedef struct convertLevel
typedef struct convertState
{
/* Preallocated buffer in which to form varlena/Jsonb value */
- Jsonb *buffer;
+ Jsonb *buffer;
/* Pointer into buffer */
- char *ptr;
+ char *ptr;
/* State for */
- convertLevel *allState, /* Overall state array */
- *contPtr; /* Cur container pointer (in allState) */
+ convertLevel *allState, /* Overall state array */
+ *contPtr; /* Cur container pointer (in allState) */
/* Current size of buffer containing allState array */
- Size levelSz;
-
-} convertState;
-
-static int compareJsonbScalarValue(JsonbValue * a, JsonbValue * b);
-static int lexicalCompareJsonbStringValue(const void *a, const void *b);
-static Size convertJsonb(JsonbValue * val, Jsonb* buffer);
-static inline short addPaddingInt(convertState * cstate);
-static void walkJsonbValueConversion(JsonbValue * val, convertState * cstate,
- uint32 nestlevel);
-static void putJsonbValueConversion(convertState * cstate, JsonbValue * val,
- uint32 flags, uint32 level);
-static void putScalarConversion(convertState * cstate, JsonbValue * scalarVal,
- uint32 level, uint32 i);
-static void iteratorFromContainerBuf(JsonbIterator * it, char *buffer);
-static bool formIterIsContainer(JsonbIterator ** it, JsonbValue * val,
- JEntry * ent, bool skipNested);
-static JsonbIterator *freeAndGetParent(JsonbIterator * it);
-static JsonbParseState *pushState(JsonbParseState ** pstate);
-static void appendKey(JsonbParseState * pstate, JsonbValue * scalarVal);
-static void appendValue(JsonbParseState * pstate, JsonbValue * scalarVal);
-static void appendElement(JsonbParseState * pstate, JsonbValue * scalarVal);
-static int lengthCompareJsonbStringValue(const void *a, const void *b, void *arg);
-static int lengthCompareJsonbPair(const void *a, const void *b, void *arg);
-static void uniqueifyJsonbObject(JsonbValue * object);
-static void uniqueifyJsonbArray(JsonbValue * array);
+ Size levelSz;
+
+} convertState;
+
+static int compareJsonbScalarValue(JsonbValue *a, JsonbValue *b);
+static int lexicalCompareJsonbStringValue(const void *a, const void *b);
+static Size convertJsonb(JsonbValue *val, Jsonb *buffer);
+static inline short addPaddingInt(convertState *cstate);
+static void walkJsonbValueConversion(JsonbValue *val, convertState *cstate,
+ uint32 nestlevel);
+static void putJsonbValueConversion(convertState *cstate, JsonbValue *val,
+ uint32 flags, uint32 level);
+static void putScalarConversion(convertState *cstate, JsonbValue *scalarVal,
+ uint32 level, uint32 i);
+static void iteratorFromContainerBuf(JsonbIterator *it, char *buffer);
+static bool formIterIsContainer(JsonbIterator **it, JsonbValue *val,
+ JEntry *ent, bool skipNested);
+static JsonbIterator *freeAndGetParent(JsonbIterator *it);
+static JsonbParseState *pushState(JsonbParseState **pstate);
+static void appendKey(JsonbParseState *pstate, JsonbValue *scalarVal);
+static void appendValue(JsonbParseState *pstate, JsonbValue *scalarVal);
+static void appendElement(JsonbParseState *pstate, JsonbValue *scalarVal);
+static int lengthCompareJsonbStringValue(const void *a, const void *b, void *arg);
+static int lengthCompareJsonbPair(const void *a, const void *b, void *arg);
+static void uniqueifyJsonbObject(JsonbValue *object);
+static void uniqueifyJsonbArray(JsonbValue *array);
/*
* Turn an in-memory JsonbValue into a Jsonb for on-disk storage.
@@ -107,7 +107,7 @@ static void uniqueifyJsonbArray(JsonbValue * array);
* inconvenient to deal with a great amount of other state.
*/
Jsonb *
-JsonbValueToJsonb(JsonbValue * val)
+JsonbValueToJsonb(JsonbValue *val)
{
Jsonb *out;
Size sz;
@@ -164,7 +164,7 @@ int
compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b)
{
JsonbIterator *ita,
- *itb;
+ *itb;
int res = 0;
ita = JsonbIteratorInit(a);
@@ -182,9 +182,9 @@ compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b)
/*
* To a limited extent we'll redundantly iterate over an array/object
- * while re-performing the same test without any reasonable expectation
- * of the same container types having differing lengths (as when we
- * process a WJB_BEGIN_OBJECT, and later the corresponding
+ * while re-performing the same test without any reasonable
+ * expectation of the same container types having differing lengths
+ * (as when we process a WJB_BEGIN_OBJECT, and later the corresponding
* WJB_END_OBJECT), but no matter.
*/
if (ra == rb)
@@ -208,9 +208,10 @@ compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b)
res = compareJsonbScalarValue(&va, &vb);
break;
case jbvArray:
+
/*
- * This could be a "raw scalar" pseudo array. That's a
- * special case here though, since we still want the
+ * This could be a "raw scalar" pseudo array. That's
+ * a special case here though, since we still want the
* general type-based comparisons to apply, and as far
* as we're concerned a pseudo array is just a scalar.
*/
@@ -258,12 +259,14 @@ compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b)
while (ita != NULL)
{
JsonbIterator *i = ita->parent;
+
pfree(ita);
ita = i;
}
while (itb != NULL)
{
JsonbIterator *i = itb->parent;
+
pfree(itb);
itb = i;
}
@@ -313,12 +316,12 @@ compareJsonbSuperHeaderValue(JsonbSuperHeader a, JsonbSuperHeader b)
*/
JsonbValue *
findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags,
- uint32 *lowbound, JsonbValue * key)
+ uint32 *lowbound, JsonbValue *key)
{
- uint32 superheader = *(uint32 *) sheader;
- JEntry *array = (JEntry *) (sheader + sizeof(uint32));
- int count = (superheader & JB_CMASK);
- JsonbValue *result = palloc(sizeof(JsonbValue));
+ uint32 superheader = *(uint32 *) sheader;
+ JEntry *array = (JEntry *) (sheader + sizeof(uint32));
+ int count = (superheader & JB_CMASK);
+ JsonbValue *result = palloc(sizeof(JsonbValue));
Assert((flags & ~(JB_FARRAY | JB_FOBJECT)) == 0);
@@ -347,6 +350,7 @@ findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags,
{
result->type = jbvNumeric;
result->val.numeric = (Numeric) (data + INTALIGN(JBE_OFF(*e)));
+
result->estSize = 2 * sizeof(JEntry) +
VARSIZE_ANY(result->val.numeric);
}
@@ -381,8 +385,8 @@ findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags,
JsonbValue candidate;
/*
- * Note how we compensate for the fact that we're iterating through
- * pairs (not entries) throughout.
+ * Note how we compensate for the fact that we're iterating
+ * through pairs (not entries) throughout.
*/
stopMiddle = stopLow + (count - stopLow) / 2;
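
The compensation is simply that pair i's key lives at JEntry slot 2*i; the search itself is an ordinary binary search. A generic sketch, with a hypothetical comparator callback in place of the length-then-memcmp comparison the real code performs:

#include <stdbool.h>

/*
 * Binary-search count sorted pairs; cmp(i) compares the probe key against
 * the key of pair i (stored at entry 2*i in the real layout), returning
 * >0 if the probe sorts after it, <0 if before, 0 on a match.
 */
static bool
find_pair(int count, int lowbound, int (*cmp)(int pair_idx), int *pair_out)
{
    int stopLow = lowbound;
    int stopHigh = count;

    while (stopLow < stopHigh)
    {
        int stopMiddle = stopLow + (stopHigh - stopLow) / 2;
        int difference = cmp(stopMiddle);

        if (difference == 0)
        {
            *pair_out = stopMiddle;
            return true;
        }
        if (difference > 0)
            stopLow = stopMiddle + 1;
        else
            stopHigh = stopMiddle;
    }
    return false;
}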
@@ -419,6 +423,7 @@ findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags,
{
result->type = jbvNumeric;
result->val.numeric = (Numeric) (data + INTALIGN(JBE_OFF(*v)));
+
result->estSize = 2 * sizeof(JEntry) +
VARSIZE_ANY(result->val.numeric);
}
@@ -431,8 +436,8 @@ findJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 flags,
else
{
/*
- * See header comments to understand why this never happens
- * with arrays
+ * See header comments to understand why this never
+ * happens with arrays
*/
result->type = jbvBinary;
result->val.binary.data = data + INTALIGN(JBE_OFF(*v));
@@ -508,6 +513,7 @@ getIthJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 i)
{
result->type = jbvNumeric;
result->val.numeric = (Numeric) (data + INTALIGN(JBE_OFF(*e)));
+
result->estSize = 2 * sizeof(JEntry) + VARSIZE_ANY(result->val.numeric);
}
else if (JBE_ISBOOL(*e))
@@ -541,7 +547,7 @@ getIthJsonbValueFromSuperHeader(JsonbSuperHeader sheader, uint32 i)
* "raw scalar" pseudo array to append that.
*/
JsonbValue *
-pushJsonbValue(JsonbParseState ** pstate, int seq, JsonbValue * scalarVal)
+pushJsonbValue(JsonbParseState **pstate, int seq, JsonbValue *scalarVal)
{
JsonbValue *result = NULL;
@@ -555,7 +561,7 @@ pushJsonbValue(JsonbParseState ** pstate, int seq, JsonbValue * scalarVal)
(*pstate)->contVal.estSize = 3 * sizeof(JEntry);
(*pstate)->contVal.val.array.nElems = 0;
(*pstate)->contVal.val.array.rawScalar = (scalarVal &&
- scalarVal->val.array.rawScalar);
+ scalarVal->val.array.rawScalar);
if (scalarVal && scalarVal->val.array.nElems > 0)
{
/* Assume that this array is still really a scalar */
@@ -567,7 +573,7 @@ pushJsonbValue(JsonbParseState ** pstate, int seq, JsonbValue * scalarVal)
(*pstate)->size = 4;
}
(*pstate)->contVal.val.array.elems = palloc(sizeof(JsonbValue) *
- (*pstate)->size);
+ (*pstate)->size);
break;
case WJB_BEGIN_OBJECT:
Assert(!scalarVal);
@@ -578,7 +584,7 @@ pushJsonbValue(JsonbParseState ** pstate, int seq, JsonbValue * scalarVal)
(*pstate)->contVal.val.object.nPairs = 0;
(*pstate)->size = 4;
(*pstate)->contVal.val.object.pairs = palloc(sizeof(JsonbPair) *
- (*pstate)->size);
+ (*pstate)->size);
break;
case WJB_KEY:
Assert(scalarVal->type == jbvString);
@@ -674,9 +680,9 @@ JsonbIteratorInit(JsonbSuperHeader sheader)
* garbage.
*/
int
-JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
+JsonbIteratorNext(JsonbIterator **it, JsonbValue *val, bool skipNested)
{
- JsonbIterState state;
+ JsonbIterState state;
/* Guard against stack overflow due to overly complex Jsonb */
check_stack_depth();
@@ -694,9 +700,10 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
/* Set v to array on first array call */
val->type = jbvArray;
val->val.array.nElems = (*it)->nElems;
+
/*
- * v->val.array.elems is not actually set, because we aren't doing a
- * full conversion
+ * v->val.array.elems is not actually set, because we aren't doing
+ * a full conversion
*/
val->val.array.rawScalar = (*it)->isScalar;
(*it)->i = 0;
@@ -709,8 +716,8 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
if ((*it)->i >= (*it)->nElems)
{
/*
- * All elements within array already processed. Report this to
- * caller, and give it back original parent iterator (which
+ * All elements within array already processed. Report this
+ * to caller, and give it back original parent iterator (which
* independently tracks iteration progress at its level of
* nesting).
*/
@@ -741,6 +748,7 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
/* Set v to object on first object call */
val->type = jbvObject;
val->val.object.nPairs = (*it)->nElems;
+
/*
* v->val.object.pairs is not actually set, because we aren't
* doing a full conversion
@@ -756,9 +764,9 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
{
/*
* All pairs within object already processed. Report this to
- * caller, and give it back original containing iterator (which
- * independently tracks iteration progress at its level of
- * nesting).
+ * caller, and give it back original containing iterator
+ * (which independently tracks iteration progress at its level
+ * of nesting).
*/
*it = freeAndGetParent(*it);
return WJB_END_OBJECT;
@@ -787,8 +795,8 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
/*
* Value may be a container, in which case we recurse with new,
- * child iterator. If it is, don't bother !skipNested callers with
- * dealing with the jbvBinary representation.
+ * child iterator. If it is, don't bother !skipNested callers
+ * with dealing with the jbvBinary representation.
*/
if (formIterIsContainer(it, val, &(*it)->meta[((*it)->i++) * 2 + 1],
skipNested))
@@ -815,17 +823,18 @@ JsonbIteratorNext(JsonbIterator ** it, JsonbValue * val, bool skipNested)
* We determine if mContained is contained within val.
*/
bool
-JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
+JsonbDeepContains(JsonbIterator **val, JsonbIterator **mContained)
{
uint32 rval,
rcont;
JsonbValue vval,
vcontained;
+
/*
* Guard against stack overflow due to overly complex Jsonb.
*
- * Functions called here independently take this precaution, but that might
- * not be sufficient since this is also a recursive function.
+ * Functions called here independently take this precaution, but that
+ * might not be sufficient since this is also a recursive function.
*/
check_stack_depth();
@@ -898,7 +907,8 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
else
{
/* Nested container value (object or array) */
- JsonbIterator *nestval, *nestContained;
+ JsonbIterator *nestval,
+ *nestContained;
Assert(lhsVal->type == jbvBinary);
Assert(vcontained.type == jbvBinary);
@@ -922,8 +932,9 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
* In other words, the mapping of container nodes in the rhs
* "vcontained" Jsonb to internal nodes on the lhs is
* injective, and parent-child edges on the rhs must be mapped
- * to parent-child edges on the lhs to satisfy the condition of
- * containment (plus of course the mapped nodes must be equal).
+ * to parent-child edges on the lhs to satisfy the condition
+ * of containment (plus of course the mapped nodes must be
+ * equal).
*/
if (!JsonbDeepContains(&nestval, &nestContained))
return false;
@@ -942,10 +953,10 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
* arrays.
*
* A raw scalar may contain another raw scalar, and an array may
- * contain a raw scalar, but a raw scalar may not contain an array. We
- * don't do something like this for the object case, since objects can
- * only contain pairs, never raw scalars (a pair is represented by an
- * rhs object argument with a single contained pair).
+ * contain a raw scalar, but a raw scalar may not contain an array.
+ * We don't do something like this for the object case, since objects
+ * can only contain pairs, never raw scalars (a pair is represented by
+ * an rhs object argument with a single contained pair).
*/
if (vval.val.array.rawScalar && !vcontained.val.array.rawScalar)
return false;
@@ -956,8 +967,9 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
rcont = JsonbIteratorNext(mContained, &vcontained, true);
/*
- * When we get through caller's rhs "is it contained within?" array
- * without failing to find one of its values, it's contained.
+ * When we get through caller's rhs "is it contained within?"
+ * array without failing to find one of its values, it's
+ * contained.
*/
if (rcont == WJB_END_ARRAY)
return true;
@@ -989,7 +1001,7 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
for (i = 0; i < nLhsElems; i++)
{
- /* Store all lhs elements in temp array*/
+ /* Store all lhs elements in temp array */
rcont = JsonbIteratorNext(val, &vval, true);
Assert(rcont == WJB_ELEM);
@@ -1009,8 +1021,9 @@ JsonbDeepContains(JsonbIterator ** val, JsonbIterator ** mContained)
for (i = 0; i < nLhsElems; i++)
{
/* Nested container value (object or array) */
- JsonbIterator *nestval, *nestContained;
- bool contains;
+ JsonbIterator *nestval,
+ *nestContained;
+ bool contains;
nestval = JsonbIteratorInit(lhsConts[i].val.binary.data);
nestContained = JsonbIteratorInit(vcontained.val.binary.data);
@@ -1069,9 +1082,9 @@ arrayToJsonbSortedArray(ArrayType *array)
/*
* A text array uses at least eight bytes per element, so any overflow in
* "key_count * sizeof(JsonbPair)" is small enough for palloc() to catch.
- * However, credible improvements to the array format could invalidate that
- * assumption. Therefore, use an explicit check rather than relying on
- * palloc() to complain.
+ * However, credible improvements to the array format could invalidate
+ * that assumption. Therefore, use an explicit check rather than relying
+ * on palloc() to complain.
*/
if (elem_count > JSONB_MAX_PAIRS)
ereport(ERROR,
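
The guard pattern in miniature, with an assumed cap standing in for JSONB_MAX_PAIRS: validate the element count before forming count * sizeof(elem), rather than hoping the allocator notices a request size that silently wrapped.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Pair
{
    char   *key;
    char   *value;
} Pair;

static Pair *
alloc_pairs(size_t elem_count)
{
    const size_t max_pairs = SIZE_MAX / sizeof(Pair);   /* assumed cap */

    /* Explicit check: the multiplication below is now known not to wrap. */
    if (elem_count > max_pairs)
    {
        fprintf(stderr, "number of pairs (%zu) exceeds the maximum (%zu)\n",
                elem_count, max_pairs);
        exit(EXIT_FAILURE);
    }
    return malloc(elem_count * sizeof(Pair));
}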
@@ -1108,9 +1121,9 @@ arrayToJsonbSortedArray(ArrayType *array)
* flags.
*/
void
-JsonbHashScalarValue(const JsonbValue * scalarVal, uint32 * hash)
+JsonbHashScalarValue(const JsonbValue *scalarVal, uint32 *hash)
{
- int tmp;
+ int tmp;
/*
* Combine hash values of successive keys, values and elements by rotating
@@ -1131,11 +1144,11 @@ JsonbHashScalarValue(const JsonbValue * scalarVal, uint32 * hash)
case jbvNumeric:
/* Must be unaffected by trailing zeroes */
tmp = DatumGetInt32(DirectFunctionCall1(hash_numeric,
- NumericGetDatum(scalarVal->val.numeric)));
+ NumericGetDatum(scalarVal->val.numeric)));
*hash ^= tmp;
return;
case jbvBool:
- *hash ^= scalarVal->val.boolean? 0x02:0x04;
+ *hash ^= scalarVal->val.boolean ? 0x02 : 0x04;
return;
default:
elog(ERROR, "invalid jsonb scalar type");
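
The rotation the comment refers to is a one-bit left rotate of the running hash before each XOR, so identical scalars hash differently depending on position; a sketch:

#include <stdint.h>

/* Fold one component hash into the running hash, order-sensitively. */
static void
hash_combine(uint32_t *hash, uint32_t component)
{
    *hash = (*hash << 1) | (*hash >> 31);   /* rotate left one bit */
    *hash ^= component;
}

Booleans contribute the fixed component values 0x02 and 0x04 seen above, so true and false still land in distinct buckets.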
@@ -1150,7 +1163,7 @@ JsonbHashScalarValue(const JsonbValue * scalarVal, uint32 * hash)
* within a single jsonb.
*/
static int
-compareJsonbScalarValue(JsonbValue * aScalar, JsonbValue * bScalar)
+compareJsonbScalarValue(JsonbValue *aScalar, JsonbValue *bScalar)
{
if (aScalar->type == bScalar->type)
{
@@ -1162,8 +1175,8 @@ compareJsonbScalarValue(JsonbValue * aScalar, JsonbValue * bScalar)
return lengthCompareJsonbStringValue(aScalar, bScalar, NULL);
case jbvNumeric:
return DatumGetInt32(DirectFunctionCall2(numeric_cmp,
- PointerGetDatum(aScalar->val.numeric),
- PointerGetDatum(bScalar->val.numeric)));
+ PointerGetDatum(aScalar->val.numeric),
+ PointerGetDatum(bScalar->val.numeric)));
case jbvBool:
if (aScalar->val.boolean != bScalar->val.boolean)
return (aScalar->val.boolean > bScalar->val.boolean) ? 1 : -1;
@@ -1201,10 +1214,10 @@ lexicalCompareJsonbStringValue(const void *a, const void *b)
* sufficiently large to fit the value
*/
static Size
-convertJsonb(JsonbValue * val, Jsonb *buffer)
+convertJsonb(JsonbValue *val, Jsonb *buffer)
{
- convertState state;
- Size len;
+ convertState state;
+ Size len;
/* Should not already have binary representation */
Assert(val->type != jbvBinary);
@@ -1232,7 +1245,7 @@ convertJsonb(JsonbValue * val, Jsonb *buffer)
* token (in a manner similar to generic iteration).
*/
static void
-walkJsonbValueConversion(JsonbValue * val, convertState * cstate,
+walkJsonbValueConversion(JsonbValue *val, convertState *cstate,
uint32 nestlevel)
{
int i;
@@ -1290,9 +1303,11 @@ walkJsonbValueConversion(JsonbValue * val, convertState * cstate,
* access to conversion buffer.
*/
static inline
-short addPaddingInt(convertState * cstate)
+short
+addPaddingInt(convertState *cstate)
{
- short padlen, p;
+ short padlen,
+ p;
padlen = INTALIGN(cstate->ptr - VARDATA(cstate->buffer)) -
(cstate->ptr - VARDATA(cstate->buffer));
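
The padding computation can be sketched outside the conversion machinery, assuming 4-byte int alignment in place of the backend's INTALIGN macro:

#include <stddef.h>

#define INT_ALIGNMENT   4   /* assumed value of the platform's int alignment */

/* Bytes of zero padding needed to bring offset up to int alignment. */
static short
padding_to_int_alignment(size_t offset)
{
    size_t aligned = (offset + INT_ALIGNMENT - 1) & ~((size_t) INT_ALIGNMENT - 1);

    return (short) (aligned - offset);  /* 0 through 3 */
}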
@@ -1320,14 +1335,14 @@ short addPaddingInt(convertState * cstate)
* and the end (i.e. there is one call per sequential processing WJB_* token).
*/
static void
-putJsonbValueConversion(convertState * cstate, JsonbValue * val, uint32 flags,
+putJsonbValueConversion(convertState *cstate, JsonbValue *val, uint32 flags,
uint32 level)
{
if (level == cstate->levelSz)
{
cstate->levelSz *= 2;
cstate->allState = repalloc(cstate->allState,
- sizeof(convertLevel) * cstate->levelSz);
+ sizeof(convertLevel) * cstate->levelSz);
}
cstate->contPtr = cstate->allState + level;
@@ -1385,9 +1400,9 @@ putJsonbValueConversion(convertState * cstate, JsonbValue * val, uint32 flags,
}
else if (flags & (WJB_END_ARRAY | WJB_END_OBJECT))
{
- convertLevel *prevPtr; /* Prev container pointer */
- uint32 len,
- i;
+ convertLevel *prevPtr; /* Prev container pointer */
+ uint32 len,
+ i;
Assert(((flags & WJB_END_ARRAY) && val->type == jbvArray) ||
((flags & WJB_END_OBJECT) && val->type == jbvObject));
@@ -1443,10 +1458,10 @@ putJsonbValueConversion(convertState * cstate, JsonbValue * val, uint32 flags,
* metadata peculiar to each scalar type.
*/
static void
-putScalarConversion(convertState * cstate, JsonbValue * scalarVal, uint32 level,
+putScalarConversion(convertState *cstate, JsonbValue *scalarVal, uint32 level,
uint32 i)
{
- int numlen;
+ int numlen;
short padlen;
cstate->contPtr = cstate->allState + level;
@@ -1509,7 +1524,7 @@ putScalarConversion(convertState * cstate, JsonbValue * scalarVal, uint32 level,
* container type.
*/
static void
-iteratorFromContainerBuf(JsonbIterator * it, JsonbSuperHeader sheader)
+iteratorFromContainerBuf(JsonbIterator *it, JsonbSuperHeader sheader)
{
uint32 superheader = *(uint32 *) sheader;
@@ -1531,6 +1546,7 @@ iteratorFromContainerBuf(JsonbIterator * it, JsonbSuperHeader sheader)
Assert(!it->isScalar || it->nElems == 1);
break;
case JB_FOBJECT:
+
/*
* Offset reflects that nElems indicates JsonbPairs in an object.
* Each key and each value contain Jentry metadata just the same.
@@ -1562,7 +1578,7 @@ iteratorFromContainerBuf(JsonbIterator * it, JsonbSuperHeader sheader)
* anywhere).
*/
static bool
-formIterIsContainer(JsonbIterator ** it, JsonbValue * val, JEntry * ent,
+formIterIsContainer(JsonbIterator **it, JsonbValue *val, JEntry *ent,
bool skipNested)
{
if (JBE_ISNULL(*ent))
@@ -1585,6 +1601,7 @@ formIterIsContainer(JsonbIterator ** it, JsonbValue * val, JEntry * ent,
{
val->type = jbvNumeric;
val->val.numeric = (Numeric) ((*it)->dataProper + INTALIGN(JBE_OFF(*ent)));
+
val->estSize = 2 * sizeof(JEntry) + VARSIZE_ANY(val->val.numeric);
return false;
@@ -1609,8 +1626,8 @@ formIterIsContainer(JsonbIterator ** it, JsonbValue * val, JEntry * ent,
else
{
/*
- * Must be container type, so setup caller's iterator to point to that,
- * and return indication of that.
+ * Must be container type, so setup caller's iterator to point to
+ * that, and return indication of that.
*
* Get child iterator.
*/
@@ -1627,11 +1644,11 @@ formIterIsContainer(JsonbIterator ** it, JsonbValue * val, JEntry * ent,
}
/*
- * JsonbIteratorNext() worker: Return parent, while freeing memory for current
+ * JsonbIteratorNext() worker: Return parent, while freeing memory for current
* iterator
*/
static JsonbIterator *
-freeAndGetParent(JsonbIterator * it)
+freeAndGetParent(JsonbIterator *it)
{
JsonbIterator *v = it->parent;
@@ -1643,7 +1660,7 @@ freeAndGetParent(JsonbIterator * it)
* pushJsonbValue() worker: Iteration-like forming of Jsonb
*/
static JsonbParseState *
-pushState(JsonbParseState ** pstate)
+pushState(JsonbParseState **pstate)
{
JsonbParseState *ns = palloc(sizeof(JsonbParseState));
@@ -1655,7 +1672,7 @@ pushState(JsonbParseState ** pstate)
* pushJsonbValue() worker: Append a pair key to state when generating a Jsonb
*/
static void
-appendKey(JsonbParseState * pstate, JsonbValue * string)
+appendKey(JsonbParseState *pstate, JsonbValue *string)
{
JsonbValue *object = &pstate->contVal;
@@ -1672,7 +1689,7 @@ appendKey(JsonbParseState * pstate, JsonbValue * string)
{
pstate->size *= 2;
object->val.object.pairs = repalloc(object->val.object.pairs,
- sizeof(JsonbPair) * pstate->size);
+ sizeof(JsonbPair) * pstate->size);
}
object->val.object.pairs[object->val.object.nPairs].key = *string;
@@ -1686,7 +1703,7 @@ appendKey(JsonbParseState * pstate, JsonbValue * string)
* Jsonb
*/
static void
-appendValue(JsonbParseState * pstate, JsonbValue * scalarVal)
+appendValue(JsonbParseState *pstate, JsonbValue *scalarVal)
{
JsonbValue *object = &pstate->contVal;
@@ -1700,7 +1717,7 @@ appendValue(JsonbParseState * pstate, JsonbValue * scalarVal)
* pushJsonbValue() worker: Append an element to state when generating a Jsonb
*/
static void
-appendElement(JsonbParseState * pstate, JsonbValue * scalarVal)
+appendElement(JsonbParseState *pstate, JsonbValue *scalarVal)
{
JsonbValue *array = &pstate->contVal;
@@ -1716,7 +1733,7 @@ appendElement(JsonbParseState * pstate, JsonbValue * scalarVal)
{
pstate->size *= 2;
array->val.array.elems = repalloc(array->val.array.elems,
- sizeof(JsonbValue) * pstate->size);
+ sizeof(JsonbValue) * pstate->size);
}
array->val.array.elems[array->val.array.nElems++] = *scalarVal;
@@ -1797,7 +1814,7 @@ lengthCompareJsonbPair(const void *a, const void *b, void *binequal)
* Sort and unique-ify pairs in JsonbValue object
*/
static void
-uniqueifyJsonbObject(JsonbValue * object)
+uniqueifyJsonbObject(JsonbValue *object)
{
bool hasNonUniq = false;
@@ -1838,15 +1855,15 @@ uniqueifyJsonbObject(JsonbValue * object)
* Sorting uses internal ordering.
*/
static void
-uniqueifyJsonbArray(JsonbValue * array)
+uniqueifyJsonbArray(JsonbValue *array)
{
- bool hasNonUniq = false;
+ bool hasNonUniq = false;
Assert(array->type == jbvArray);
/*
- * Actually sort values, determining if any were equal on the basis of full
- * binary equality (rather than just having the same string length).
+ * Actually sort values, determining if any were equal on the basis of
+ * full binary equality (rather than just having the same string length).
*/
if (array->val.array.nElems > 1)
qsort_arg(array->val.array.elems, array->val.array.nElems,
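
The same sort-then-squeeze shape on plain C strings (a sketch; the real code keys its hasNonUniq flag off full binary equality observed inside the qsort_arg comparator):

#include <stdlib.h>
#include <string.h>

static int
cmp_str(const void *a, const void *b)
{
    return strcmp(*(const char *const *) a, *(const char *const *) b);
}

/* Sort elems, drop adjacent duplicates, and return the surviving count. */
static size_t
sort_and_unique(const char **elems, size_t n)
{
    size_t in, out;

    if (n <= 1)
        return n;

    qsort(elems, n, sizeof(*elems), cmp_str);

    for (in = 1, out = 0; in < n; in++)
    {
        if (strcmp(elems[out], elems[in]) != 0)
            elems[++out] = elems[in];
    }
    return out + 1;
}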
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index 2423b737c9e..6b1ce9b3a9f 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -104,11 +104,12 @@ static void populate_recordset_array_element_start(void *state, bool isnull);
/* worker function for populate_recordset and to_recordset */
static inline Datum populate_recordset_worker(FunctionCallInfo fcinfo,
bool have_record_arg);
+
/* Worker that takes care of common setup for us */
static JsonbValue *findJsonbValueFromSuperHeaderLen(JsonbSuperHeader sheader,
- uint32 flags,
- char *key,
- uint32 keylen);
+ uint32 flags,
+ char *key,
+ uint32 keylen);
/* search type classification for json_get* functions */
typedef enum
@@ -235,8 +236,8 @@ typedef struct PopulateRecordsetState
} PopulateRecordsetState;
/* Turn a jsonb object into a record */
-static void make_row_from_rec_and_jsonb(Jsonb * element,
- PopulateRecordsetState *state);
+static void make_row_from_rec_and_jsonb(Jsonb *element,
+ PopulateRecordsetState *state);
/*
* SQL function json_object_keys
@@ -791,7 +792,7 @@ get_path_all(FunctionCallInfo fcinfo, bool as_text)
result = get_worker(json, NULL, -1, tpath, ipath, npath, as_text);
if (result != NULL)
- PG_RETURN_TEXT_P(result);
+ PG_RETURN_TEXT_P(result);
else
/* null is NULL, regardless */
PG_RETURN_NULL();
@@ -1178,7 +1179,7 @@ get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text)
jbvp = findJsonbValueFromSuperHeaderLen(superHeader,
JB_FOBJECT,
VARDATA_ANY(pathtext[i]),
- VARSIZE_ANY_EXHDR(pathtext[i]));
+ VARSIZE_ANY_EXHDR(pathtext[i]));
}
else if (have_array)
{
@@ -1209,8 +1210,8 @@ get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text)
if (jbvp->type == jbvBinary)
{
- JsonbIterator *it = JsonbIteratorInit(jbvp->val.binary.data);
- int r;
+ JsonbIterator *it = JsonbIteratorInit(jbvp->val.binary.data);
+ int r;
r = JsonbIteratorNext(&it, &tv, true);
superHeader = (JsonbSuperHeader) jbvp->val.binary.data;
@@ -1932,7 +1933,7 @@ elements_array_element_end(void *state, bool isnull)
text *val;
HeapTuple tuple;
Datum values[1];
- bool nulls[1] = {false};
+ bool nulls[1] = {false};
/* skip over nested objects */
if (_state->lex->lex_level != 1)
@@ -2035,7 +2036,7 @@ json_to_record(PG_FUNCTION_ARGS)
static inline Datum
populate_record_worker(FunctionCallInfo fcinfo, bool have_record_arg)
{
- int json_arg_num = have_record_arg ? 1 : 0;
+ int json_arg_num = have_record_arg ? 1 : 0;
Oid jtype = get_fn_expr_argtype(fcinfo->flinfo, json_arg_num);
text *json;
Jsonb *jb = NULL;
@@ -2060,7 +2061,7 @@ populate_record_worker(FunctionCallInfo fcinfo, bool have_record_arg)
if (have_record_arg)
{
- Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
+ Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
if (!type_is_rowtype(argtype))
ereport(ERROR,
@@ -2275,7 +2276,7 @@ populate_record_worker(FunctionCallInfo fcinfo, bool have_record_arg)
s = pnstrdup((v->val.boolean) ? "t" : "f", 1);
else if (v->type == jbvNumeric)
s = DatumGetCString(DirectFunctionCall1(numeric_out,
- PointerGetDatum(v->val.numeric)));
+ PointerGetDatum(v->val.numeric)));
else if (!use_json_as_text)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -2476,7 +2477,7 @@ json_to_recordset(PG_FUNCTION_ARGS)
}
static void
-make_row_from_rec_and_jsonb(Jsonb * element, PopulateRecordsetState *state)
+make_row_from_rec_and_jsonb(Jsonb *element, PopulateRecordsetState *state)
{
Datum *values;
bool *nulls;
@@ -2575,7 +2576,7 @@ make_row_from_rec_and_jsonb(Jsonb * element, PopulateRecordsetState *state)
s = pnstrdup((v->val.boolean) ? "t" : "f", 1);
else if (v->type == jbvNumeric)
s = DatumGetCString(DirectFunctionCall1(numeric_out,
- PointerGetDatum(v->val.numeric)));
+ PointerGetDatum(v->val.numeric)));
else if (!state->use_json_as_text)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -2603,7 +2604,7 @@ make_row_from_rec_and_jsonb(Jsonb * element, PopulateRecordsetState *state)
static inline Datum
populate_recordset_worker(FunctionCallInfo fcinfo, bool have_record_arg)
{
- int json_arg_num = have_record_arg ? 1 : 0;
+ int json_arg_num = have_record_arg ? 1 : 0;
Oid jtype = get_fn_expr_argtype(fcinfo->flinfo, json_arg_num);
bool use_json_as_text;
ReturnSetInfo *rsi;
@@ -2620,7 +2621,7 @@ populate_recordset_worker(FunctionCallInfo fcinfo, bool have_record_arg)
if (have_record_arg)
{
- Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
+ Oid argtype = get_fn_expr_argtype(fcinfo->flinfo, 0);
if (!type_is_rowtype(argtype))
ereport(ERROR,
diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c
index 3d5f3d538b6..bcd9e2182d0 100644
--- a/src/backend/utils/adt/like.c
+++ b/src/backend/utils/adt/like.c
@@ -76,12 +76,12 @@ wchareq(char *p1, char *p2)
/*
* Formerly we had a routine iwchareq() here that tried to do case-insensitive
- * comparison of multibyte characters. It did not work at all, however,
+ * comparison of multibyte characters. It did not work at all, however,
* because it relied on tolower() which has a single-byte API ... and
* towlower() wouldn't be much better since we have no suitably cheap way
* of getting a single character transformed to the system's wchar_t format.
* So now, we just downcase the strings using lower() and apply regular LIKE
- * comparison. This should be revisited when we install better locale support.
+ * comparison. This should be revisited when we install better locale support.
*/
/*
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index 241f738d608..4eeb6314fae 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -96,7 +96,7 @@ pg_signal_backend(int pid, int sig)
/*
* BackendPidGetProc returns NULL if the pid isn't valid; but by the time
* we reach kill(), a process for which we get a valid proc here might
- * have terminated on its own. There's no way to acquire a lock on an
+ * have terminated on its own. There's no way to acquire a lock on an
* arbitrary process to prevent that. But since so far all the callers of
* this mechanism involve some request for ending the process anyway, that
* it might end on its own first is not a problem.
@@ -120,7 +120,7 @@ pg_signal_backend(int pid, int sig)
* recycled for a new process, before reaching here? Then we'd be trying
* to kill the wrong thing. Seems near impossible when sequential pid
* assignment and wraparound is used. Perhaps it could happen on a system
- * where pid re-use is randomized. That race condition possibility seems
+ * where pid re-use is randomized. That race condition possibility seems
* too unlikely to worry about.
*/
@@ -140,7 +140,7 @@ pg_signal_backend(int pid, int sig)
}
/*
- * Signal to cancel a backend process. This is allowed if you are superuser or
+ * Signal to cancel a backend process. This is allowed if you are superuser or
* have the same role as the process being canceled.
*/
Datum
@@ -254,7 +254,7 @@ pg_tablespace_databases(PG_FUNCTION_ARGS)
fctx->location = psprintf("base");
else
fctx->location = psprintf("pg_tblspc/%u/%s", tablespaceOid,
- TABLESPACE_VERSION_DIRECTORY);
+ TABLESPACE_VERSION_DIRECTORY);
fctx->dirdesc = AllocateDir(fctx->location);
@@ -326,7 +326,7 @@ pg_tablespace_location(PG_FUNCTION_ARGS)
/*
* It's useful to apply this function to pg_class.reltablespace, wherein
- * zero means "the database's default tablespace". So, rather than
+ * zero means "the database's default tablespace". So, rather than
* throwing an error for zero, we choose to assume that's what is meant.
*/
if (tablespaceOid == InvalidOid)
@@ -384,7 +384,7 @@ pg_sleep(PG_FUNCTION_ARGS)
* loop.
*
* By computing the intended stop time initially, we avoid accumulation of
- * extra delay across multiple sleeps. This also ensures we won't delay
+ * extra delay across multiple sleeps. This also ensures we won't delay
* less than the specified time when WaitLatch is terminated early by a
* non-query-cancelling signal such as SIGHUP.
*/
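
The fixed-deadline idiom looks like this outside the backend, using POSIX clock_gettime()/nanosleep() in place of the WaitLatch loop (a sketch under those assumptions):

#include <time.h>

/*
 * Sleep for the given number of seconds without drift: compute the stop
 * time once, then nap and re-check until it has passed, so early wakeups
 * (e.g. from signals) never stretch or shrink the total delay.
 */
static void
sleep_seconds(double seconds)
{
    struct timespec deadline, now, nap = {0, 50 * 1000000L};    /* 50 ms naps */

    clock_gettime(CLOCK_MONOTONIC, &deadline);
    deadline.tv_sec += (time_t) seconds;
    deadline.tv_nsec += (long) ((seconds - (double) (time_t) seconds) * 1e9);
    if (deadline.tv_nsec >= 1000000000L)
    {
        deadline.tv_sec++;
        deadline.tv_nsec -= 1000000000L;
    }

    for (;;)
    {
        clock_gettime(CLOCK_MONOTONIC, &now);
        if (now.tv_sec > deadline.tv_sec ||
            (now.tv_sec == deadline.tv_sec && now.tv_nsec >= deadline.tv_nsec))
            break;
        nanosleep(&nap, NULL);      /* may return early; the loop re-checks */
    }
}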
@@ -547,7 +547,7 @@ pg_relation_is_updatable(PG_FUNCTION_ARGS)
* pg_column_is_updatable - determine whether a column is updatable
*
* This function encapsulates the decision about just what
- * information_schema.columns.is_updatable actually means. It's not clear
+ * information_schema.columns.is_updatable actually means. It's not clear
* whether deletability of the column's relation should be required, so
* we want that decision in C code where we could change it without initdb.
*/
diff --git a/src/backend/utils/adt/nabstime.c b/src/backend/utils/adt/nabstime.c
index 74d24aa0651..a6d30851df9 100644
--- a/src/backend/utils/adt/nabstime.c
+++ b/src/backend/utils/adt/nabstime.c
@@ -118,26 +118,24 @@ abstime2tm(AbsoluteTime _time, int *tzp, struct pg_tm * tm, char **tzn)
if (tzp != NULL)
{
- *tzp = -tm->tm_gmtoff; /* tm_gmtoff is Sun/DEC-ism */
+ *tzp = -tm->tm_gmtoff; /* tm_gmtoff is Sun/DEC-ism */
+ /*
+ * XXX FreeBSD man pages indicate that this should work - tgl 97/04/23
+ */
+ if (tzn != NULL)
+ {
/*
- * XXX FreeBSD man pages indicate that this should work - tgl
- * 97/04/23
+ * Copy no more than MAXTZLEN bytes of timezone to tzn, in case it
+ * contains an error message, which doesn't fit in the buffer
*/
- if (tzn != NULL)
- {
- /*
- * Copy no more than MAXTZLEN bytes of timezone to tzn, in
- * case it contains an error message, which doesn't fit in the
- * buffer
- */
- StrNCpy(*tzn, tm->tm_zone, MAXTZLEN + 1);
- if (strlen(tm->tm_zone) > MAXTZLEN)
- ereport(WARNING,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid time zone name: \"%s\"",
- tm->tm_zone)));
- }
+ StrNCpy(*tzn, tm->tm_zone, MAXTZLEN + 1);
+ if (strlen(tm->tm_zone) > MAXTZLEN)
+ ereport(WARNING,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("invalid time zone name: \"%s\"",
+ tm->tm_zone)));
+ }
}
else
tm->tm_isdst = -1;
@@ -175,7 +173,7 @@ tm2abstime(struct pg_tm * tm, int tz)
sec = tm->tm_sec + tz + (tm->tm_min + (day * HOURS_PER_DAY + tm->tm_hour) * MINS_PER_HOUR) * SECS_PER_MINUTE;
/*
- * check for overflow. We need a little slop here because the H/M/S plus
+ * check for overflow. We need a little slop here because the H/M/S plus
* TZ offset could add up to more than 1 day.
*/
if ((day >= MAX_DAYNUM - 10 && sec < 0) ||
@@ -1140,7 +1138,7 @@ tintervalsame(PG_FUNCTION_ARGS)
* 1. The interval length computations overflow at 2^31 seconds, causing
* intervals longer than that to sort oddly compared to those shorter.
* 2. infinity and minus infinity (NOEND_ABSTIME and NOSTART_ABSTIME) are
- * just ordinary integers. Since this code doesn't handle them specially,
+ * just ordinary integers. Since this code doesn't handle them specially,
* it's possible for [a b] to be considered longer than [c infinity] for
* finite abstimes a, b, c. In combination with the previous point, the
* interval [-infinity infinity] is treated as being shorter than many finite
diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c
index 8bdf5778d89..69c7ac182f0 100644
--- a/src/backend/utils/adt/network.c
+++ b/src/backend/utils/adt/network.c
@@ -39,7 +39,7 @@ network_in(char *src, bool is_cidr)
dst = (inet *) palloc0(sizeof(inet));
/*
- * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses
+ * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses
* will have a : somewhere in them (several, in fact) so if there is one
* present, assume it's V6, otherwise assume it's V4.
*/
@@ -144,7 +144,7 @@ cidr_out(PG_FUNCTION_ARGS)
* family, bits, is_cidr, address length, address in network byte order.
*
* Presence of is_cidr is largely for historical reasons, though it might
- * allow some code-sharing on the client side. We send it correctly on
+ * allow some code-sharing on the client side. We send it correctly on
* output, but ignore the value on input.
*/
static inet *
@@ -1401,7 +1401,7 @@ inetmi(PG_FUNCTION_ARGS)
/*
* We form the difference using the traditional complement, increment,
* and add rule, with the increment part being handled by starting the
- * carry off at 1. If you don't think integer arithmetic is done in
+ * carry off at 1. If you don't think integer arithmetic is done in
* two's complement, too bad.
*/
int nb = ip_addrsize(ip);
@@ -1423,7 +1423,7 @@ inetmi(PG_FUNCTION_ARGS)
else
{
/*
- * Input wider than int64: check for overflow. All bytes to
+ * Input wider than int64: check for overflow. All bytes to
* the left of what will fit should be 0 or 0xFF, depending on
* sign of the now-complete result.
*/
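
The complement-increment-add rule in isolation, over big-endian byte arrays as inet addresses are stored (a sketch):

#include <stddef.h>
#include <stdint.h>

/*
 * diff = a - b over nb-byte big-endian values, formed as a + ~b + 1 with
 * the "+ 1" supplied by starting the carry at 1, walking from the least
 * significant byte upward.
 */
static void
byte_subtract(const uint8_t *a, const uint8_t *b, uint8_t *diff, size_t nb)
{
    int carry = 1;                  /* the increment of "complement and add" */

    while (nb-- > 0)
    {
        int s = a[nb] + (uint8_t) ~b[nb] + carry;

        diff[nb] = (uint8_t) s;
        carry = s >> 8;
    }
}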
@@ -1454,9 +1454,9 @@ inetmi(PG_FUNCTION_ARGS)
* XXX This should go away someday!
*
* This is a kluge needed because we don't yet support zones in stored inet
- * values. Since the result of getnameinfo() might include a zone spec,
+ * values. Since the result of getnameinfo() might include a zone spec,
* call this to remove it anywhere we want to feed getnameinfo's output to
- * network_in. Beats failing entirely.
+ * network_in. Beats failing entirely.
*
* An alternative approach would be to let network_in ignore %-parts for
* itself, but that would mean we'd silently drop zone specs in user input,
diff --git a/src/backend/utils/adt/network_gist.c b/src/backend/utils/adt/network_gist.c
index 0a826ae90a2..69b9d104749 100644
--- a/src/backend/utils/adt/network_gist.c
+++ b/src/backend/utils/adt/network_gist.c
@@ -7,7 +7,7 @@
* "union" of a set of INET/CIDR values. It works like this:
* 1. If the values are not all of the same IP address family, the "union"
* is a dummy value with family number zero, minbits zero, commonbits zero,
- * address all zeroes. Otherwise:
+ * address all zeroes. Otherwise:
* 2. The union has the common IP address family number.
* 3. The union's minbits value is the smallest netmask length ("ip_bits")
* of all the input values.
@@ -202,8 +202,8 @@ inet_gist_consistent(PG_FUNCTION_ARGS)
*
* Compare available common prefix bits to the query, but not beyond
* either the query's netmask or the minimum netmask among the represented
- * values. If these bits don't match the query, we have our answer (and
- * may or may not need to descend, depending on the operator). If they do
+ * values. If these bits don't match the query, we have our answer (and
+ * may or may not need to descend, depending on the operator). If they do
* match, and we are not at a leaf, we descend in all cases.
*
* Note this is the final check for operators that only consider the
@@ -682,7 +682,7 @@ inet_gist_picksplit(PG_FUNCTION_ARGS)
{
/*
* If there's more than 2 families, all but maxfamily go into the
- * left union. This could only happen if the inputs include some
+ * left union. This could only happen if the inputs include some
* IPv4, some IPv6, and some already-multiple-family unions.
*/
tmp = DatumGetInetKeyP(ent[i].key);
@@ -741,7 +741,7 @@ inet_gist_picksplit(PG_FUNCTION_ARGS)
}
/*
- * Compute the union value for each side from scratch. In most cases we
+ * Compute the union value for each side from scratch. In most cases we
* could approximate the union values with what we already know, but this
* ensures that each side has minbits and commonbits set as high as
* possible.
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index bf4f29d14d7..19d0bdcbb98 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -50,7 +50,7 @@
* Numeric values are represented in a base-NBASE floating point format.
* Each "digit" ranges from 0 to NBASE-1. The type NumericDigit is signed
* and wide enough to store a digit. We assume that NBASE*NBASE can fit in
- * an int. Although the purely calculational routines could handle any even
+ * an int. Although the purely calculational routines could handle any even
* NBASE that's less than sqrt(INT_MAX), in practice we are only interested
* in NBASE a power of ten, so that I/O conversions and decimal rounding
* are easy. Also, it's actually more efficient if NBASE is rather less than
@@ -95,11 +95,11 @@ typedef int16 NumericDigit;
* If the high bits of the first word of a NumericChoice (n_header, or
* n_short.n_header, or n_long.n_sign_dscale) are NUMERIC_SHORT, then the
* numeric follows the NumericShort format; if they are NUMERIC_POS or
- * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN,
+ * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN,
* it is a NaN. We currently always store a NaN using just two bytes (i.e.
* only n_header), but previous releases used only the NumericLong format,
* so we might find 4-byte NaNs on disk if a database has been migrated using
- * pg_upgrade. In either case, when the high bits indicate a NaN, the
+ * pg_upgrade. In either case, when the high bits indicate a NaN, the
* remaining bits are never examined. Currently, we always initialize these
* to zero, but it might be possible to use them for some other purpose in
* the future.
@@ -207,19 +207,19 @@ struct NumericData
: ((n)->choice.n_long.n_weight))
/* ----------
- * NumericVar is the format we use for arithmetic. The digit-array part
+ * NumericVar is the format we use for arithmetic. The digit-array part
* is the same as the NumericData storage format, but the header is more
* complex.
*
* The value represented by a NumericVar is determined by the sign, weight,
* ndigits, and digits[] array.
* Note: the first digit of a NumericVar's value is assumed to be multiplied
- * by NBASE ** weight. Another way to say it is that there are weight+1
+ * by NBASE ** weight. Another way to say it is that there are weight+1
* digits before the decimal point. It is possible to have weight < 0.
*
* buf points at the physical start of the palloc'd digit buffer for the
- * NumericVar. digits points at the first digit in actual use (the one
- * with the specified weight). We normally leave an unused digit or two
+ * NumericVar. digits points at the first digit in actual use (the one
+ * with the specified weight). We normally leave an unused digit or two
* (preset to zeroes) between buf and digits, so that there is room to store
* a carry out of the top digit without reallocating space. We just need to
* decrement digits (and increment weight) to make room for the carry digit.
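
A worked instance of the weight rule, using NBASE = 10000: digits[] = {12, 3456} with weight = 1 represents 12 * NBASE^1 + 3456 = 123456, with weight+1 = 2 digits before the decimal point (a sketch that ignores sign, dscale, and fractional digits):

#include <stdint.h>
#include <stdio.h>

#define NBASE 10000

int
main(void)
{
    int16_t digits[] = {12, 3456};  /* base-NBASE "digits" */
    int     weight = 1;             /* first digit is multiplied by NBASE^1 */
    long    value = 0;
    int     i;

    for (i = 0; i <= weight; i++)
        value = value * NBASE + digits[i];

    printf("%ld\n", value);         /* prints 123456 */
    return 0;
}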
@@ -596,7 +596,7 @@ numeric_maximum_size(int32 typmod)
* In most cases, the size of a numeric will be smaller than the value
* computed below, because the varlena header will typically get toasted
* down to a single byte before being stored on disk, and it may also be
- * possible to use a short numeric header. But our job here is to compute
+ * possible to use a short numeric header. But our job here is to compute
* the worst case.
*/
return NUMERIC_HDRSZ + (numeric_digits * sizeof(NumericDigit));
@@ -636,7 +636,8 @@ numeric_normalize(Numeric num)
{
NumericVar x;
char *str;
- int orig, last;
+ int orig,
+ last;
/*
* Handle NaN
@@ -754,7 +755,7 @@ numeric_send(PG_FUNCTION_ARGS)
*
* Flatten calls to numeric's length coercion function that solely represent
* increases in allowable precision. Scale changes mutate every datum, so
- * they are unoptimizable. Some values, e.g. 1E-1001, can only fit into an
+ * they are unoptimizable. Some values, e.g. 1E-1001, can only fit into an
* unconstrained numeric, so a change from an unconstrained numeric to any
* constrained numeric is also unoptimizable.
*/
@@ -784,7 +785,7 @@ numeric_transform(PG_FUNCTION_ARGS)
* If new_typmod < VARHDRSZ, the destination is unconstrained; that's
* always OK. If old_typmod >= VARHDRSZ, the source is constrained,
* and we're OK if the scale is unchanged and the precision is not
- * decreasing. See further notes in function header comment.
+ * decreasing. See further notes in function header comment.
*/
if (new_typmod < (int32) VARHDRSZ ||
(old_typmod >= (int32) VARHDRSZ &&
@@ -996,7 +997,7 @@ numeric_uminus(PG_FUNCTION_ARGS)
/*
* The packed format is known to be totally zero digit trimmed always. So
- * we can identify a ZERO by the fact that there are no digits at all. Do
+ * we can identify a ZERO by the fact that there are no digits at all. Do
* nothing to a zero.
*/
if (NUMERIC_NDIGITS(num) != 0)
@@ -1972,7 +1973,7 @@ numeric_sqrt(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a scale
+ * Unpack the argument and determine the result scale. We choose a scale
* to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
* case not less than the input's dscale.
*/
@@ -2023,7 +2024,7 @@ numeric_exp(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a scale
+ * Unpack the argument and determine the result scale. We choose a scale
* to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
* case not less than the input's dscale.
*/
@@ -2517,7 +2518,7 @@ typedef struct NumericAggState
NumericVar sumX; /* sum of processed numbers */
NumericVar sumX2; /* sum of squares of processed numbers */
int maxScale; /* maximum scale seen so far */
- int64 maxScaleCount; /* number of values seen with maximum scale */
+ int64 maxScaleCount; /* number of values seen with maximum scale */
int64 NaNcount; /* count of NaN values (not included in N!) */
} NumericAggState;
@@ -2652,8 +2653,8 @@ do_numeric_discard(NumericAggState *state, Numeric newval)
if (state->maxScaleCount > 1 || state->maxScale == 0)
{
/*
- * Some remaining inputs have same dscale, or dscale hasn't
- * gotten above zero anyway
+ * Some remaining inputs have same dscale, or dscale hasn't gotten
+ * above zero anyway
*/
state->maxScaleCount--;
}
@@ -2767,9 +2768,9 @@ numeric_accum_inv(PG_FUNCTION_ARGS)
/*
* Integer data types all use Numeric accumulators to share code and
- * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation
+ * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation
* is overkill for the N and sum(X) values, but definitely not overkill
- * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only
+ * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only
* for stddev/variance --- there are faster special-purpose accumulator
* routines for SUM and AVG of these datatypes.
*/
@@ -2965,7 +2966,7 @@ numeric_avg(PG_FUNCTION_ARGS)
if (state == NULL || (state->N + state->NaNcount) == 0)
PG_RETURN_NULL();
- if (state->NaNcount > 0) /* there was at least one NaN input */
+ if (state->NaNcount > 0) /* there was at least one NaN input */
PG_RETURN_NUMERIC(make_result(&const_nan));
N_datum = DirectFunctionCall1(int8_numeric, Int64GetDatum(state->N));
@@ -2985,7 +2986,7 @@ numeric_sum(PG_FUNCTION_ARGS)
if (state == NULL || (state->N + state->NaNcount) == 0)
PG_RETURN_NULL();
- if (state->NaNcount > 0) /* there was at least one NaN input */
+ if (state->NaNcount > 0) /* there was at least one NaN input */
PG_RETURN_NUMERIC(make_result(&const_nan));
PG_RETURN_NUMERIC(make_result(&(state->sumX)));
@@ -3167,7 +3168,7 @@ numeric_stddev_pop(PG_FUNCTION_ARGS)
* the initial condition of the transition data value needs to be NULL. This
* means we can't rely on ExecAgg to automatically insert the first non-null
* data value into the transition data: it doesn't know how to do the type
- * conversion. The upshot is that these routines have to be marked non-strict
+ * conversion. The upshot is that these routines have to be marked non-strict
* and handle substitution of the first non-null input themselves.
*
* Note: these functions are used only in plain aggregation mode.
@@ -3653,7 +3654,7 @@ set_var_from_str(const char *str, const char *cp, NumericVar *dest)
/*
* We first parse the string to extract decimal digits and determine the
- * correct decimal weight. Then convert to NBASE representation.
+ * correct decimal weight. Then convert to NBASE representation.
*/
switch (*cp)
{
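(A worked instance of that two-step parse, with illustrative numbers and assuming DEC_DIGITS = 4 as defined earlier in the file: the input "1234.5" is first scanned as the decimal digits 1 2 3 4 5 with four of them before the point; regrouping into base-NBASE then gives digits {1234, 5000} with weight 0 and dscale 1, since 1234 * 10000^0 + 5000 * 10000^-1 = 1234.5.)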
@@ -4261,7 +4262,7 @@ apply_typmod(NumericVar *var, int32 typmod)
/*
* Convert numeric to int8, rounding if needed.
*
- * If overflow, return FALSE (no error is raised). Return TRUE if okay.
+ * If overflow, return FALSE (no error is raised). Return TRUE if okay.
*/
static bool
numericvar_to_int8(NumericVar *var, int64 *result)
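(The same error-reporting convention in miniature, as a hedged sketch; checked_add is an illustrative helper, not part of numeric.c. Overflow is reported through the return value, and deciding whether that is an error is left to the caller.)

#include <stdbool.h>
#include <stdint.h>

static bool
checked_add(int64_t a, int64_t b, int64_t *result)
{
	/* detect overflow before it happens; no error is raised here */
	if ((b > 0 && a > INT64_MAX - b) ||
		(b < 0 && a < INT64_MIN - b))
		return false;
	*result = a + b;
	return true;				/* okay */
}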
@@ -4732,7 +4733,7 @@ sub_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
* mul_var() -
*
* Multiplication on variable level. Product of var1 * var2 is stored
- * in result. Result is rounded to no more than rscale fractional digits.
+ * in result. Result is rounded to no more than rscale fractional digits.
*/
static void
mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
@@ -4776,7 +4777,7 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* Determine number of result digits to compute. If the exact result
* would have more than rscale fractional digits, truncate the computation
- * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one
+ * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one
* or both inputs have fewer digits than they really do.
*/
res_ndigits = var1ndigits + var2ndigits + 1;
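(In concrete terms, hedged since the constants are defined earlier in numeric.c: each base-NBASE digit carries DEC_DIGITS = 4 decimal digits, so computing MUL_GUARD_DIGITS extra base-NBASE digits past the requested rscale leaves several decimal digits of slack for truncation error to accumulate in before the final rounding step.)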
@@ -5019,7 +5020,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
*
* We need the first divisor digit to be >= NBASE/2. If it isn't,
* make it so by scaling up both the divisor and dividend by the
- * factor "d". (The reason for allocating dividend[0] above is to
+ * factor "d". (The reason for allocating dividend[0] above is to
* leave room for possible carry here.)
*/
if (divisor[1] < HALF_NBASE)
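(A base-10 illustration of that scaling, not taken from the diff; the code uses the usual Knuth Algorithm D factor d = NBASE / (divisor[1] + 1). Dividing 1234 by 13, the leading divisor digit 1 is below 10/2, so both operands are multiplied by d = 10 / (1 + 1) = 5, giving 6170 / 65. The quotient is unchanged, and the leading divisor digit is now 6 >= 5, as the quotient-digit estimation step requires.)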
@@ -5063,7 +5064,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* If next2digits are 0, then quotient digit must be 0 and there's
- * no need to adjust the working dividend. It's worth testing
+ * no need to adjust the working dividend. It's worth testing
* here to fall out ASAP when processing trailing zeroes in a
* dividend.
*/
@@ -5081,7 +5082,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* Adjust quotient digit if it's too large. Knuth proves that
* after this step, the quotient digit will be either correct or
- * just one too large. (Note: it's OK to use dividend[j+2] here
+ * just one too large. (Note: it's OK to use dividend[j+2] here
* because we know the divisor length is at least 2.)
*/
while (divisor2 * qhat >
@@ -5256,7 +5257,7 @@ div_var_fast(NumericVar *var1, NumericVar *var2, NumericVar *result,
* dividend's digits (plus appended zeroes to reach the desired precision
* including guard digits). Each step of the main loop computes an
* (approximate) quotient digit and stores it into div[], removing one
- * position of dividend space. A final pass of carry propagation takes
+ * position of dividend space. A final pass of carry propagation takes
* care of any mistaken quotient digits.
*/
div = (int *) palloc0((div_ndigits + 1) * sizeof(int));
@@ -6106,7 +6107,7 @@ power_var_int(NumericVar *base, int exp, NumericVar *result, int rscale)
/*
* The general case repeatedly multiplies base according to the bit
- * pattern of exp. We do the multiplications with some extra precision.
+ * pattern of exp. We do the multiplications with some extra precision.
*/
neg = (exp < 0);
exp = Abs(exp);
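(The bit-pattern walk the comment refers to is ordinary binary exponentiation. A minimal integer version for illustration follows; ipow64 is a made-up name, and power_var_int operates on NumericVars with the extra precision this sketch omits.)

#include <stdint.h>

static int64_t
ipow64(int64_t base, unsigned int exp)
{
	int64_t		result = 1;

	while (exp > 0)
	{
		if (exp & 1)			/* this bit of exp is set: take a factor */
			result *= base;
		base *= base;			/* square the base for the next bit */
		exp >>= 1;
	}
	return result;				/* overflow is ignored in this sketch */
}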
diff --git a/src/backend/utils/adt/oid.c b/src/backend/utils/adt/oid.c
index 8945ef43f01..2badb558f03 100644
--- a/src/backend/utils/adt/oid.c
+++ b/src/backend/utils/adt/oid.c
@@ -318,7 +318,7 @@ oidparse(Node *node)
/*
* Values too large for int4 will be represented as Float
- * constants by the lexer. Accept these if they are valid OID
+ * constants by the lexer. Accept these if they are valid OID
* strings.
*/
return oidin_subr(strVal(node), NULL);
diff --git a/src/backend/utils/adt/orderedsetaggs.c b/src/backend/utils/adt/orderedsetaggs.c
index 99577a549e6..efb0411c228 100644
--- a/src/backend/utils/adt/orderedsetaggs.c
+++ b/src/backend/utils/adt/orderedsetaggs.c
@@ -462,7 +462,7 @@ percentile_disc_final(PG_FUNCTION_ARGS)
/*
* Note: we *cannot* clean up the tuplesort object here, because the value
- * to be returned is allocated inside its sortcontext. We could use
+ * to be returned is allocated inside its sortcontext. We could use
* datumCopy to copy it out of there, but it doesn't seem worth the
* trouble, since the cleanup callback will clear the tuplesort later.
*/
@@ -580,7 +580,7 @@ percentile_cont_final_common(FunctionCallInfo fcinfo,
/*
* Note: we *cannot* clean up the tuplesort object here, because the value
- * to be returned may be allocated inside its sortcontext. We could use
+ * to be returned may be allocated inside its sortcontext. We could use
* datumCopy to copy it out of there, but it doesn't seem worth the
* trouble, since the cleanup callback will clear the tuplesort later.
*/
@@ -1086,7 +1086,7 @@ mode_final(PG_FUNCTION_ARGS)
/*
* Note: we *cannot* clean up the tuplesort object here, because the value
- * to be returned is allocated inside its sortcontext. We could use
+ * to be returned is allocated inside its sortcontext. We could use
* datumCopy to copy it out of there, but it doesn't seem worth the
* trouble, since the cleanup callback will clear the tuplesort later.
*/
@@ -1331,7 +1331,7 @@ hypothetical_dense_rank_final(PG_FUNCTION_ARGS)
/*
* We alternate fetching into tupslot and extraslot so that we have the
- * previous row available for comparisons. This is accomplished by
+ * previous row available for comparisons. This is accomplished by
* swapping the slot pointer variables after each row.
*/
extraslot = MakeSingleTupleTableSlot(osastate->qstate->tupdesc);
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 0c8474df54a..94bb5a47bb7 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -20,12 +20,12 @@
*
* The other categories, LC_MONETARY, LC_NUMERIC, and LC_TIME are also
* settable at run-time. However, we don't actually set those locale
- * categories permanently. This would have bizarre effects like no
+ * categories permanently. This would have bizarre effects like no
* longer accepting standard floating-point literals in some locales.
* Instead, we only set the locales briefly when needed, cache the
* required information obtained from localeconv(), and set them back.
* The cached information is only used by the formatting functions
- * (to_char, etc.) and the money type. For the user, this should all be
+ * (to_char, etc.) and the money type. For the user, this should all be
* transparent.
*
* !!! NOW HEAR THIS !!!
@@ -39,7 +39,7 @@
* fail = true;
* setlocale(category, save);
* DOES NOT WORK RELIABLY: on some platforms the second setlocale() call
- * will change the memory save is pointing at. To do this sort of thing
+ * will change the memory save is pointing at. To do this sort of thing
* safely, you *must* pstrdup what setlocale returns the first time.
*
* FYI, The Open Group locale standard is defined here:
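(Regarding the DOES NOT WORK warning above, the reliable pattern looks roughly like this outside PostgreSQL; a hedged sketch in which strdup stands in for pstrdup and null checks are trimmed. The point is to copy setlocale()'s result before calling setlocale() again.)

#include <locale.h>
#include <stdlib.h>
#include <string.h>

static void
with_c_numeric_locale(void (*fn) (void))
{
	char	   *save = setlocale(LC_NUMERIC, NULL);	/* points into libc data */

	save = strdup(save);		/* copy first: the next call may clobber it */
	setlocale(LC_NUMERIC, "C");
	fn();
	setlocale(LC_NUMERIC, save);	/* safe: we own this copy */
	free(save);
}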
@@ -243,7 +243,7 @@ pg_perm_setlocale(int category, const char *locale)
* Is the locale name valid for the locale category?
*
* If successful, and canonname isn't NULL, a palloc'd copy of the locale's
- * canonical name is stored there. This is especially useful for figuring out
+ * canonical name is stored there. This is especially useful for figuring out
* what locale name "" means (ie, the server environment value). (Actually,
* it seems that on most implementations that's the only thing it's good for;
* we could wish that setlocale gave back a canonically spelled version of
@@ -286,7 +286,7 @@ check_locale(int category, const char *locale, char **canonname)
*
* For most locale categories, the assign hook doesn't actually set the locale
* permanently, just reset flags so that the next use will cache the
- * appropriate values. (See explanation at the top of this file.)
+ * appropriate values. (See explanation at the top of this file.)
*
* Note: we accept value = "" as selecting the postmaster's environment
* value, whatever it was (so long as the environment setting is legal).
@@ -463,6 +463,7 @@ PGLC_localeconv(void)
save_lc_numeric = pstrdup(save_lc_numeric);
#ifdef WIN32
+
/*
* Ideally, monetary and numeric local symbols could be returned in any
* server encoding. Unfortunately, the WIN32 API does not allow
@@ -644,6 +645,7 @@ cache_locale_time(void)
save_lc_time = pstrdup(save_lc_time);
#ifdef WIN32
+
/*
* On WIN32, there is no way to get locale-specific time values in a
* specified locale, like we do for monetary/numeric. We can only get
@@ -729,13 +731,13 @@ cache_locale_time(void)
* Convert a Windows setlocale() argument to a Unix-style one.
*
* Regardless of platform, we install message catalogs under a Unix-style
- * LL[_CC][.ENCODING][@VARIANT] naming convention. Only LC_MESSAGES settings
+ * LL[_CC][.ENCODING][@VARIANT] naming convention. Only LC_MESSAGES settings
* following that style will elicit localized interface strings.
*
* Before Visual Studio 2012 (msvcr110.dll), Windows setlocale() accepted "C"
* (but not "c") and strings of the form <Language>[_<Country>][.<CodePage>],
* case-insensitive. setlocale() returns the fully-qualified form; for
- * example, setlocale("thaI") returns "Thai_Thailand.874". Internally,
+ * example, setlocale("thaI") returns "Thai_Thailand.874". Internally,
* setlocale() and _create_locale() select a "locale identifier"[1] and store
* it in an undocumented _locale_t field. From that LCID, we can retrieve the
* ISO 639 language and the ISO 3166 country. Character encoding does not
@@ -746,12 +748,12 @@ cache_locale_time(void)
* Studio 2012, setlocale() accepts locale names in addition to the strings it
* accepted historically. It does not standardize them; setlocale("Th-tH")
* returns "Th-tH". setlocale(category, "") still returns a traditional
- * string. Furthermore, msvcr110.dll changed the undocumented _locale_t
+ * string. Furthermore, msvcr110.dll changed the undocumented _locale_t
* content to carry locale names instead of locale identifiers.
*
* MinGW headers declare _create_locale(), but msvcrt.dll lacks that symbol.
* IsoLocaleName() always fails in a MinGW-built postgres.exe, so only
- * Unix-style values of the lc_messages GUC can elicit localized messages. In
+ * Unix-style values of the lc_messages GUC can elicit localized messages. In
* particular, every lc_messages setting that initdb can select automatically
* will yield only C-locale messages. XXX This could be fixed by running the
* fully-qualified locale name through a lookup table.
@@ -795,7 +797,7 @@ IsoLocaleName(const char *winlocname)
* need not standardize letter case here. So long as we do not ship
* message catalogs for which it would matter, we also need not
* translate the script/variant portion, e.g. uz-Cyrl-UZ to
- * uz_UZ@cyrillic. Simply replace the hyphen with an underscore.
+ * uz_UZ@cyrillic. Simply replace the hyphen with an underscore.
*
* Note that the locale name can be less-specific than the value we
* would derive under earlier Visual Studio releases. For example,
@@ -850,7 +852,7 @@ IsoLocaleName(const char *winlocname)
* could fail if the locale is C, so str_tolower() shouldn't call it
* in that case.
*
- * Note that we currently lack any way to flush the cache. Since we don't
+ * Note that we currently lack any way to flush the cache. Since we don't
* support ALTER COLLATION, this is OK. The worst case is that someone
* drops a collation, and a useless cache entry hangs around in existing
* backends.
@@ -1044,7 +1046,7 @@ report_newlocale_failure(const char *localename)
/*
- * Create a locale_t from a collation OID. Results are cached for the
+ * Create a locale_t from a collation OID. Results are cached for the
* lifetime of the backend. Thus, do not free the result with freelocale().
*
* As a special optimization, the default/database collation returns 0.
@@ -1170,6 +1172,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale)
return 0;
#ifdef WIN32
+
/*
* On Windows, the "Unicode" locales assume UTF16 not UTF8 encoding, and
* for some reason mbstowcs and wcstombs won't do this for us, so we use
@@ -1226,7 +1229,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale)
* This has almost the API of mbstowcs_l(), except that *from need not be
* null-terminated; instead, the number of input bytes is specified as
* fromlen. Also, we ereport() rather than returning -1 for invalid
- * input encoding. tolen is the maximum number of wchar_t's to store at *to.
+ * input encoding. tolen is the maximum number of wchar_t's to store at *to.
* The output will be zero-terminated iff there is room.
*/
size_t
diff --git a/src/backend/utils/adt/pg_lsn.c b/src/backend/utils/adt/pg_lsn.c
index e2b528a2435..d1448aee7bd 100644
--- a/src/backend/utils/adt/pg_lsn.c
+++ b/src/backend/utils/adt/pg_lsn.c
@@ -29,8 +29,10 @@ Datum
pg_lsn_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
- int len1, len2;
- uint32 id, off;
+ int len1,
+ len2;
+ uint32 id,
+ off;
XLogRecPtr result;
/* Sanity check input format. */
@@ -38,12 +40,12 @@ pg_lsn_in(PG_FUNCTION_ARGS)
if (len1 < 1 || len1 > MAXPG_LSNCOMPONENT || str[len1] != '/')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type pg_lsn: \"%s\"", str)));
+ errmsg("invalid input syntax for type pg_lsn: \"%s\"", str)));
len2 = strspn(str + len1 + 1, "0123456789abcdefABCDEF");
if (len2 < 1 || len2 > MAXPG_LSNCOMPONENT || str[len1 + 1 + len2] != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type pg_lsn: \"%s\"", str)));
+ errmsg("invalid input syntax for type pg_lsn: \"%s\"", str)));
/* Decode result. */
id = (uint32) strtoul(str, NULL, 16);
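(What the two strtoul() calls accomplish, as a self-contained sketch; parse_lsn and its weaker validation are illustrative, since pg_lsn_in itself performs the strspn-based syntax checks shown above before decoding.)

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static int
parse_lsn(const char *str, uint64_t *result)
{
	uint32_t	id,
				off;
	char	   *slash;

	id = (uint32_t) strtoul(str, &slash, 16);
	if (*slash != '/')
		return -1;				/* missing separator: invalid syntax */
	off = (uint32_t) strtoul(slash + 1, NULL, 16);
	*result = ((uint64_t) id << 32) | off;
	return 0;
}

int
main(void)
{
	uint64_t	lsn;

	if (parse_lsn("16/B374D848", &lsn) == 0)
		printf("%llX\n", (unsigned long long) lsn);	/* 16B374D848 */
	return 0;
}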
@@ -59,7 +61,8 @@ pg_lsn_out(PG_FUNCTION_ARGS)
XLogRecPtr lsn = PG_GETARG_LSN(0);
char buf[MAXPG_LSNLEN + 1];
char *result;
- uint32 id, off;
+ uint32 id,
+ off;
/* Decode ID and offset */
id = (uint32) (lsn >> 32);
@@ -83,7 +86,7 @@ pg_lsn_recv(PG_FUNCTION_ARGS)
Datum
pg_lsn_send(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn = PG_GETARG_LSN(0);
+ XLogRecPtr lsn = PG_GETARG_LSN(0);
StringInfoData buf;
pq_begintypsend(&buf);
@@ -99,8 +102,8 @@ pg_lsn_send(PG_FUNCTION_ARGS)
Datum
pg_lsn_eq(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 == lsn2);
}
@@ -108,8 +111,8 @@ pg_lsn_eq(PG_FUNCTION_ARGS)
Datum
pg_lsn_ne(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 != lsn2);
}
@@ -117,8 +120,8 @@ pg_lsn_ne(PG_FUNCTION_ARGS)
Datum
pg_lsn_lt(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 < lsn2);
}
@@ -126,8 +129,8 @@ pg_lsn_lt(PG_FUNCTION_ARGS)
Datum
pg_lsn_gt(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 > lsn2);
}
@@ -135,8 +138,8 @@ pg_lsn_gt(PG_FUNCTION_ARGS)
Datum
pg_lsn_le(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 <= lsn2);
}
@@ -144,8 +147,8 @@ pg_lsn_le(PG_FUNCTION_ARGS)
Datum
pg_lsn_ge(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
PG_RETURN_BOOL(lsn1 >= lsn2);
}
@@ -158,8 +161,8 @@ pg_lsn_ge(PG_FUNCTION_ARGS)
Datum
pg_lsn_mi(PG_FUNCTION_ARGS)
{
- XLogRecPtr lsn1 = PG_GETARG_LSN(0);
- XLogRecPtr lsn2 = PG_GETARG_LSN(1);
+ XLogRecPtr lsn1 = PG_GETARG_LSN(0);
+ XLogRecPtr lsn2 = PG_GETARG_LSN(1);
char buf[256];
Datum result;
diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c
index 30f1c0ab1fe..fe088901f03 100644
--- a/src/backend/utils/adt/pg_lzcompress.c
+++ b/src/backend/utils/adt/pg_lzcompress.c
@@ -576,9 +576,9 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
/*
* Experiments suggest that these hash sizes work pretty well. A large
- * hash table minimizes collision, but has a higher startup cost. For
- * a small input, the startup cost dominates. The table size must be
- * a power of two.
+ * hash table minimizes collision, but has a higher startup cost. For a
+ * small input, the startup cost dominates. The table size must be a power
+ * of two.
*/
if (slen < 128)
hashsz = 512;
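(The power-of-two requirement exists so the compressor's inner loop can reduce a hash value with a mask instead of a modulo, roughly as below; hash_slot is an illustrative name.)

static unsigned int
hash_slot(unsigned int hashval, unsigned int hashsz)
{
	/* with hashsz = 2^k, hashsz - 1 is a k-bit all-ones mask */
	return hashval & (hashsz - 1);	/* same as hashval % hashsz */
}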
@@ -615,7 +615,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
/*
* If we've emitted more than first_success_by bytes without finding
- * anything compressible at all, fail. This lets us fall out
+ * anything compressible at all, fail. This lets us fall out
* reasonably quickly when looking at incompressible input (such as
* pre-compressed data).
*/
@@ -639,7 +639,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
hist_next, hist_recycle,
dp, dend, mask);
dp++; /* Do not do this ++ in the line above! */
- /* The macro would do it four times - Jan. */
+ /* The macro would do it four times - Jan. */
}
found_match = true;
}
@@ -653,7 +653,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
hist_next, hist_recycle,
dp, dend, mask);
dp++; /* Do not do this ++ in the line above! */
- /* The macro would do it four times - Jan. */
+ /* The macro would do it four times - Jan. */
}
}
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index bf3084fce67..44ccd37e998 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -1797,5 +1797,5 @@ pg_stat_get_archiver(PG_FUNCTION_ARGS)
/* Returns the record as Datum */
PG_RETURN_DATUM(HeapTupleGetDatum(
- heap_form_tuple(tupdesc, values, nulls)));
+ heap_form_tuple(tupdesc, values, nulls)));
}
diff --git a/src/backend/utils/adt/pseudotypes.c b/src/backend/utils/adt/pseudotypes.c
index a553c1abf1c..475ce13abf4 100644
--- a/src/backend/utils/adt/pseudotypes.c
+++ b/src/backend/utils/adt/pseudotypes.c
@@ -6,7 +6,7 @@
* A pseudo-type isn't really a type and never has any operations, but
* we do need to supply input and output functions to satisfy the links
* in the pseudo-type's entry in pg_type. In most cases the functions
- * just throw an error if invoked. (XXX the error messages here cover
+ * just throw an error if invoked. (XXX the error messages here cover
* the most common case, but might be confusing in some contexts. Can
* we do better?)
*
@@ -139,7 +139,7 @@ anyarray_out(PG_FUNCTION_ARGS)
* anyarray_recv - binary input routine for pseudo-type ANYARRAY.
*
* XXX this could actually be made to work, since the incoming array
- * data will contain the element type OID. Need to think through
+ * data will contain the element type OID. Need to think through
* type-safety issues before allowing it, however.
*/
Datum
@@ -216,7 +216,7 @@ anyrange_out(PG_FUNCTION_ARGS)
* void_in - input routine for pseudo-type VOID.
*
* We allow this so that PL functions can return VOID without any special
- * hack in the PL handler. Whatever value the PL thinks it's returning
+ * hack in the PL handler. Whatever value the PL thinks it's returning
* will just be ignored.
*/
Datum
diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c
index 38b51035aec..bc8a480ed3e 100644
--- a/src/backend/utils/adt/rangetypes.c
+++ b/src/backend/utils/adt/rangetypes.c
@@ -1441,7 +1441,7 @@ tstzrange_subdiff(PG_FUNCTION_ARGS)
*
* This is for use by range-related functions that follow the convention
* of using the fn_extra field as a pointer to the type cache entry for
- * the range type. Functions that need to cache more information than
+ * the range type. Functions that need to cache more information than
* that must fend for themselves.
*/
TypeCacheEntry *
@@ -1465,7 +1465,7 @@ range_get_typcache(FunctionCallInfo fcinfo, Oid rngtypid)
* range_serialize: construct a range value from bounds and empty-flag
*
* This does not force canonicalization of the range value. In most cases,
- * external callers should only be canonicalization functions. Note that
+ * external callers should only be canonicalization functions. Note that
* we perform some datatype-independent canonicalization checks anyway.
*/
RangeType *
@@ -1802,7 +1802,7 @@ range_cmp_bounds(TypeCacheEntry *typcache, RangeBound *b1, RangeBound *b2)
* Compare two range boundary point values, returning <0, 0, or >0 according
* to whether b1 is less than, equal to, or greater than b2.
*
- * This is similar to but simpler than range_cmp_bounds(). We just compare
+ * This is similar to but simpler than range_cmp_bounds(). We just compare
* the values held in b1 and b2, ignoring inclusive/exclusive flags. The
* lower/upper flags only matter for infinities, where they tell us if the
* infinity is plus or minus.
@@ -2283,7 +2283,7 @@ range_contains_elem_internal(TypeCacheEntry *typcache, RangeType *r, Datum val)
/*
* datum_compute_size() and datum_write() are used to insert the bound
- * values into a range object. They are modeled after heaptuple.c's
+ * values into a range object. They are modeled after heaptuple.c's
* heap_compute_data_size() and heap_fill_tuple(), but we need not handle
* null values here. TYPE_IS_PACKABLE must test the same conditions as
* heaptuple.c's ATT_IS_PACKABLE macro.
diff --git a/src/backend/utils/adt/rangetypes_gist.c b/src/backend/utils/adt/rangetypes_gist.c
index 13c87ea4a34..2bd28f50389 100644
--- a/src/backend/utils/adt/rangetypes_gist.c
+++ b/src/backend/utils/adt/rangetypes_gist.c
@@ -300,7 +300,7 @@ range_gist_penalty(PG_FUNCTION_ARGS)
else if (orig_lower.infinite && orig_upper.infinite)
{
/*
- * Original range requires broadening. (-inf; +inf) is most far
+ * Original range requires broadening. (-inf; +inf) is most far
* from normal range in this case.
*/
*penalty = 2 * CONTAIN_EMPTY_PENALTY;
@@ -497,7 +497,7 @@ range_gist_penalty(PG_FUNCTION_ARGS)
/*
* The GiST PickSplit method for ranges
*
- * Primarily, we try to segregate ranges of different classes. If splitting
+ * Primarily, we try to segregate ranges of different classes. If splitting
* ranges of the same class, use the appropriate split method for that class.
*/
Datum
@@ -668,7 +668,7 @@ range_gist_same(PG_FUNCTION_ARGS)
/*
* range_eq will ignore the RANGE_CONTAIN_EMPTY flag, so we have to check
- * that for ourselves. More generally, if the entries have been properly
+ * that for ourselves. More generally, if the entries have been properly
* normalized, then unequal flags bytes must mean unequal ranges ... so
* let's just test all the flag bits at once.
*/
@@ -816,7 +816,7 @@ range_gist_consistent_int(TypeCacheEntry *typcache, StrategyNumber strategy,
/*
* Empty ranges are contained by anything, so if key is or
- * contains any empty ranges, we must descend into it. Otherwise,
+ * contains any empty ranges, we must descend into it. Otherwise,
* descend only if key overlaps the query.
*/
if (RangeIsOrContainsEmpty(key))
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index 7c5b0d53bcf..caf45ef85f9 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -142,7 +142,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation)
char errMsg[100];
/*
- * Look for a match among previously compiled REs. Since the data
+ * Look for a match among previously compiled REs. Since the data
* structure is self-organizing with most-used entries at the front, our
* search strategy can just be to scan from the front.
*/
@@ -192,7 +192,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation)
/*
* Here and in other places in this file, do CHECK_FOR_INTERRUPTS
- * before reporting a regex error. This is so that if the regex
+ * before reporting a regex error. This is so that if the regex
* library aborts and returns REG_CANCEL, we don't print an error
* message that implies the regex was invalid.
*/
@@ -298,7 +298,7 @@ RE_wchar_execute(regex_t *re, pg_wchar *data, int data_len,
* dat_len --- the length of the data string
* nmatch, pmatch --- optional return area for match details
*
- * Data is given in the database encoding. We internally
+ * Data is given in the database encoding. We internally
* convert to array of pg_wchar which is what Spencer's regex package wants.
*/
static bool
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index 6210f45a195..c0314ee5322 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -85,7 +85,7 @@ regprocin(PG_FUNCTION_ARGS)
/*
* In bootstrap mode we assume the given name is not schema-qualified, and
- * just search pg_proc for a unique match. This is needed for
+ * just search pg_proc for a unique match. This is needed for
* initializing other system catalogs (pg_namespace may not exist yet, and
* certainly there are no schemas other than pg_catalog).
*/
@@ -165,8 +165,8 @@ to_regproc(PG_FUNCTION_ARGS)
FuncCandidateList clist;
/*
- * Parse the name into components and see if it matches any pg_proc entries
- * in the current search path.
+ * Parse the name into components and see if it matches any pg_proc
+ * entries in the current search path.
*/
names = stringToQualifiedNameList(pro_name);
clist = FuncnameGetCandidates(names, -1, NIL, false, false, true);
@@ -295,7 +295,7 @@ regprocedurein(PG_FUNCTION_ARGS)
/*
* Else it's a name and arguments. Parse the name and arguments, look up
* potential matches in the current namespace search list, and scan to see
- * which one exactly matches the given argument types. (There will not be
+ * which one exactly matches the given argument types. (There will not be
* more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
@@ -339,7 +339,7 @@ to_regprocedure(PG_FUNCTION_ARGS)
/*
* Parse the name and arguments, look up potential matches in the current
* namespace search list, and scan to see which one exactly matches the
- * given argument types. (There will not be more than one match.)
+ * given argument types. (There will not be more than one match.)
*/
parseNameAndArgTypes(pro_name, false, &names, &nargs, argtypes);
@@ -376,7 +376,7 @@ format_procedure_qualified(Oid procedure_oid)
* Routine to produce regprocedure names; see format_procedure above.
*
* force_qualify says whether to schema-qualify; if true, the name is always
- * qualified regardless of search_path visibility. Otherwise the name is only
+ * qualified regardless of search_path visibility. Otherwise the name is only
* qualified if the function is not in path.
*/
static char *
@@ -510,7 +510,7 @@ regoperin(PG_FUNCTION_ARGS)
/*
* In bootstrap mode we assume the given name is not schema-qualified, and
- * just search pg_operator for a unique match. This is needed for
+ * just search pg_operator for a unique match. This is needed for
* initializing other system catalogs (pg_namespace may not exist yet, and
* certainly there are no schemas other than pg_catalog).
*/
@@ -724,7 +724,7 @@ regoperatorin(PG_FUNCTION_ARGS)
/*
* Else it's a name and arguments. Parse the name and arguments, look up
* potential matches in the current namespace search list, and scan to see
- * which one exactly matches the given argument types. (There will not be
+ * which one exactly matches the given argument types. (There will not be
* more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
@@ -770,7 +770,7 @@ to_regoperator(PG_FUNCTION_ARGS)
/*
* Parse the name and arguments, look up potential matches in the current
* namespace search list, and scan to see which one exactly matches the
- * given argument types. (There will not be more than one match.)
+ * given argument types. (There will not be more than one match.)
*/
parseNameAndArgTypes(opr_name_or_oid, true, &names, &nargs, argtypes);
if (nargs == 1)
@@ -1006,8 +1006,8 @@ to_regclass(PG_FUNCTION_ARGS)
List *names;
/*
- * Parse the name into components and see if it matches any pg_class entries
- * in the current search path.
+ * Parse the name into components and see if it matches any pg_class
+ * entries in the current search path.
*/
names = stringToQualifiedNameList(class_name);
@@ -1045,7 +1045,7 @@ regclassout(PG_FUNCTION_ARGS)
/*
* In bootstrap mode, skip the fancy namespace stuff and just return
- * the class name. (This path is only needed for debugging output
+ * the class name. (This path is only needed for debugging output
* anyway.)
*/
if (IsBootstrapProcessingMode())
@@ -1560,7 +1560,7 @@ stringToQualifiedNameList(const char *string)
/*
* Given a C string, parse it into a qualified function or operator name
- * followed by a parenthesized list of type names. Reduce the
+ * followed by a parenthesized list of type names. Reduce the
* type names to an array of OIDs (returned into *nargs and *argtypes;
* the argtypes array should be of size FUNC_MAX_ARGS). The function or
* operator name is returned to *names as a List of Strings.
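(For a concrete instance, with illustrative values: given the string "pg_catalog.length(text)", the routine would return names as the two-element list pg_catalog, length, set *nargs to 1, and store the OID of type text in argtypes[0].)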
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 1e1e616fa48..d30847b34e6 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -698,7 +698,7 @@ ri_restrict_del(TriggerData *trigdata, bool is_no_action)
/*
* If another PK row now exists providing the old key values, we
- * should not do anything. However, this check should only be
+ * should not do anything. However, this check should only be
* made in the NO ACTION case; in RESTRICT cases we don't wish to
* allow another row to be substituted.
*/
@@ -922,7 +922,7 @@ ri_restrict_upd(TriggerData *trigdata, bool is_no_action)
/*
* If another PK row now exists providing the old key values, we
- * should not do anything. However, this check should only be
+ * should not do anything. However, this check should only be
* made in the NO ACTION case; in RESTRICT cases we don't wish to
* allow another row to be substituted.
*/
@@ -1850,7 +1850,7 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
* believe no check is necessary. So we need to do another lookup
* now and in case a reference still exists, abort the operation.
* That is already implemented in the NO ACTION trigger, so just
- * run it. (This recheck is only needed in the SET DEFAULT case,
+ * run it. (This recheck is only needed in the SET DEFAULT case,
* since CASCADE would remove such rows, while SET NULL is certain
* to result in rows that satisfy the FK constraint.)
*/
@@ -2041,7 +2041,7 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
* believe no check is necessary. So we need to do another lookup
* now and in case a reference still exists, abort the operation.
* That is already implemented in the NO ACTION trigger, so just
- * run it. (This recheck is only needed in the SET DEFAULT case,
+ * run it. (This recheck is only needed in the SET DEFAULT case,
* since CASCADE must change the FK key values, while SET NULL is
* certain to result in rows that satisfy the FK constraint.)
*/
@@ -2397,7 +2397,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
* Temporarily increase work_mem so that the check query can be executed
* more efficiently. It seems okay to do this because the query is simple
* enough to not use a multiple of work_mem, and one typically would not
- * have many large foreign-key validations happening concurrently. So
+ * have many large foreign-key validations happening concurrently. So
* this seems to meet the criteria for being considered a "maintenance"
* operation, and accordingly we use maintenance_work_mem.
*
@@ -2451,7 +2451,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
/*
* The columns to look at in the result tuple are 1..N, not whatever
- * they are in the fk_rel. Hack up riinfo so that the subroutines
+ * they are in the fk_rel. Hack up riinfo so that the subroutines
* called here will behave properly.
*
* In addition to this, we have to pass the correct tupdesc to
@@ -3180,7 +3180,7 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo,
errhint("This is most likely due to a rule having rewritten the query.")));
/*
- * Determine which relation to complain about. If tupdesc wasn't passed
+ * Determine which relation to complain about. If tupdesc wasn't passed
* by caller, assume the violator tuple came from there.
*/
onfk = (queryno == RI_PLAN_CHECK_LOOKUPPK);
diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c
index 521c3daea7e..9543d01d492 100644
--- a/src/backend/utils/adt/rowtypes.c
+++ b/src/backend/utils/adt/rowtypes.c
@@ -279,7 +279,7 @@ record_in(PG_FUNCTION_ARGS)
/*
* We cannot return tuple->t_data because heap_form_tuple allocates it as
* part of a larger chunk, and our caller may expect to be able to pfree
- * our result. So must copy the info into a new palloc chunk.
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
@@ -623,7 +623,7 @@ record_recv(PG_FUNCTION_ARGS)
/*
* We cannot return tuple->t_data because heap_form_tuple allocates it as
* part of a larger chunk, and our caller may expect to be able to pfree
- * our result. So must copy the info into a new palloc chunk.
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
@@ -861,7 +861,7 @@ record_cmp(FunctionCallInfo fcinfo)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
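(The shape of that scan, as a hedged standalone sketch; paired_columns and its boolean arrays are illustrative, while the real code consults each tupdesc's attisdropped flags.)

#include <stdbool.h>

static int
paired_columns(const bool *dropped1, int ncols1,
			   const bool *dropped2, int ncols2)
{
	int			i1 = 0,
				i2 = 0,
				j = 0;

	while (i1 < ncols1 && i2 < ncols2)
	{
		while (i1 < ncols1 && dropped1[i1])
			i1++;				/* physical skip in row 1 */
		while (i2 < ncols2 && dropped2[i2])
			i2++;				/* physical skip in row 2 */
		if (i1 >= ncols1 || i2 >= ncols2)
			break;
		/* columns i1 and i2 pair up as logical column j here */
		j++;
		i1++;
		i2++;
	}
	return j;					/* number of comparable logical columns */
}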
@@ -1097,7 +1097,7 @@ record_eq(PG_FUNCTION_ARGS)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
@@ -1356,7 +1356,7 @@ record_image_cmp(FunctionCallInfo fcinfo)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
@@ -1390,11 +1390,12 @@ record_image_cmp(FunctionCallInfo fcinfo)
format_type_be(tupdesc2->attrs[i2]->atttypid),
j + 1)));
- /*
- * The same type should have the same length (or both should be variable).
- */
- Assert(tupdesc1->attrs[i1]->attlen ==
- tupdesc2->attrs[i2]->attlen);
+ /*
+ * The same type should have the same length (or both should be
+ * variable).
+ */
+ Assert(tupdesc1->attrs[i1]->attlen ==
+ tupdesc2->attrs[i2]->attlen);
/*
* We consider two NULLs equal; NULL > not-NULL.
@@ -1421,8 +1422,8 @@ record_image_cmp(FunctionCallInfo fcinfo)
{
Size len1,
len2;
- struct varlena *arg1val;
- struct varlena *arg2val;
+ struct varlena *arg1val;
+ struct varlena *arg2val;
len1 = toast_raw_datum_size(values1[i1]);
len2 = toast_raw_datum_size(values2[i2]);
@@ -1632,7 +1633,7 @@ record_image_eq(PG_FUNCTION_ARGS)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
@@ -1690,8 +1691,8 @@ record_image_eq(PG_FUNCTION_ARGS)
result = false;
else
{
- struct varlena *arg1val;
- struct varlena *arg2val;
+ struct varlena *arg1val;
+ struct varlena *arg2val;
arg1val = PG_DETOAST_DATUM_PACKED(values1[i1]);
arg2val = PG_DETOAST_DATUM_PACKED(values2[i2]);
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 36d9953108b..a30d8febf85 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -155,11 +155,11 @@ typedef struct
*
* Selecting aliases is unreasonably complicated because of the need to dump
* rules/views whose underlying tables may have had columns added, deleted, or
- * renamed since the query was parsed. We must nonetheless print the rule/view
+ * renamed since the query was parsed. We must nonetheless print the rule/view
* in a form that can be reloaded and will produce the same results as before.
*
* For each RTE used in the query, we must assign column aliases that are
- * unique within that RTE. SQL does not require this of the original query,
+ * unique within that RTE. SQL does not require this of the original query,
* but due to factors such as *-expansion we need to be able to uniquely
* reference every column in a decompiled query. As long as we qualify all
* column references, per-RTE uniqueness is sufficient for that.
@@ -214,8 +214,8 @@ typedef struct
/*
* new_colnames is an array containing column aliases to use for columns
* that would exist if the query was re-parsed against the current
- * definitions of its base tables. This is what to print as the column
- * alias list for the RTE. This array does not include dropped columns,
+ * definitions of its base tables. This is what to print as the column
+ * alias list for the RTE. This array does not include dropped columns,
* but it will include columns added since original parsing. Indexes in
* it therefore have little to do with current varattno values. As above,
* entries are unique unless this is for an unnamed JOIN RTE. (In such an
@@ -1077,7 +1077,7 @@ pg_get_indexdef_worker(Oid indexrelid, int colno,
context = deparse_context_for(get_relation_name(indrelid), indrelid);
/*
- * Start the index definition. Note that the index's name should never be
+ * Start the index definition. Note that the index's name should never be
* schema-qualified, but the indexed rel's name may be.
*/
initStringInfo(&buf);
@@ -1304,9 +1304,9 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
HeapTuple tup;
Form_pg_constraint conForm;
StringInfoData buf;
- SysScanDesc scandesc;
+ SysScanDesc scandesc;
ScanKeyData scankey[1];
- Snapshot snapshot = RegisterSnapshot(GetTransactionSnapshot());
+ Snapshot snapshot = RegisterSnapshot(GetTransactionSnapshot());
Relation relation = heap_open(ConstraintRelationId, AccessShareLock);
ScanKeyInit(&scankey[0],
@@ -1315,15 +1315,15 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
ObjectIdGetDatum(constraintId));
scandesc = systable_beginscan(relation,
- ConstraintOidIndexId,
- true,
- snapshot,
- 1,
- scankey);
+ ConstraintOidIndexId,
+ true,
+ snapshot,
+ 1,
+ scankey);
/*
- * We later use the tuple with SysCacheGetAttr() as if we
- * had obtained it via SearchSysCache, which works fine.
+ * We later use the tuple with SysCacheGetAttr() as if we had obtained it
+ * via SearchSysCache, which works fine.
*/
tup = systable_getnext(scandesc);
@@ -1806,7 +1806,7 @@ pg_get_serial_sequence(PG_FUNCTION_ARGS)
SysScanDesc scan;
HeapTuple tup;
- /* Look up table name. Can't lock it - we might not have privileges. */
+ /* Look up table name. Can't lock it - we might not have privileges. */
tablerv = makeRangeVarFromNameList(textToQualifiedNameList(tablename));
tableOid = RangeVarGetRelid(tablerv, NoLock, false);
@@ -2406,8 +2406,10 @@ pg_get_function_arg_default(PG_FUNCTION_ARGS)
proc = (Form_pg_proc) GETSTRUCT(proctup);
- /* Calculate index into proargdefaults: proargdefaults corresponds to the
- * last N input arguments, where N = pronargdefaults. */
+ /*
+ * Calculate index into proargdefaults: proargdefaults corresponds to the
+ * last N input arguments, where N = pronargdefaults.
+ */
nth_default = nth_inputarg - 1 - (proc->pronargs - proc->pronargdefaults);
if (nth_default < 0 || nth_default >= list_length(argdefaults))
@@ -2444,7 +2446,7 @@ deparse_expression(Node *expr, List *dpcontext,
* tree (ie, not the raw output of gram.y).
*
* dpcontext is a list of deparse_namespace nodes representing the context
- * for interpreting Vars in the node tree. It can be NIL if no Vars are
+ * for interpreting Vars in the node tree. It can be NIL if no Vars are
* expected.
*
* forceprefix is TRUE to force all Vars to be prefixed with their table names.
@@ -2484,7 +2486,7 @@ deparse_expression_pretty(Node *expr, List *dpcontext,
*
* Given the reference name (alias) and OID of a relation, build deparsing
* context for an expression referencing only that relation (as varno 1,
- * varlevelsup 0). This is sufficient for many uses of deparse_expression.
+ * varlevelsup 0). This is sufficient for many uses of deparse_expression.
* ----------
*/
List *
@@ -2555,7 +2557,7 @@ deparse_context_for_planstate(Node *planstate, List *ancestors,
dpns->ctes = NIL;
/*
- * Set up column name aliases. We will get rather bogus results for join
+ * Set up column name aliases. We will get rather bogus results for join
* RTEs, but that doesn't matter because plan trees don't contain any join
* alias Vars.
*/
@@ -3113,7 +3115,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
/*
* Scan the columns, select a unique alias for each one, and store it in
* colinfo->colnames and colinfo->new_colnames. The former array has NULL
- * entries for dropped columns, the latter omits them. Also mark
+ * entries for dropped columns, the latter omits them. Also mark
* new_colnames entries as to whether they are new since parse time; this
* is the case for entries beyond the length of rte->eref->colnames.
*/
@@ -3168,7 +3170,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
/*
* For a relation RTE, we need only print the alias column names if any
- * are different from the underlying "real" names. For a function RTE,
+ * are different from the underlying "real" names. For a function RTE,
* always emit a complete column alias list; this is to protect against
* possible instability of the default column names (eg, from altering
* parameter names). For other RTE types, print if we changed anything OR
@@ -3631,7 +3633,7 @@ identify_join_columns(JoinExpr *j, RangeTblEntry *jrte,
/*
* If there's a USING clause, deconstruct the join quals to identify the
- * merged columns. This is a tad painful but if we cannot rely on the
+ * merged columns. This is a tad painful but if we cannot rely on the
* column names, there is no other representation of which columns were
* joined by USING. (Unless the join type is FULL, we can't tell from the
* joinaliasvars list which columns are merged.) Note: we assume that the
@@ -3765,7 +3767,7 @@ set_deparse_planstate(deparse_namespace *dpns, PlanState *ps)
* We special-case Append and MergeAppend to pretend that the first child
* plan is the OUTER referent; we have to interpret OUTER Vars in their
* tlists according to one of the children, and the first one is the most
- * natural choice. Likewise special-case ModifyTable to pretend that the
+ * natural choice. Likewise special-case ModifyTable to pretend that the
* first child plan is the OUTER referent; this is to support RETURNING
* lists containing references to non-target relations.
*/
@@ -4167,8 +4169,8 @@ get_query_def(Query *query, StringInfo buf, List *parentnamespace,
/*
* Before we begin to examine the query, acquire locks on referenced
- * relations, and fix up deleted columns in JOIN RTEs. This ensures
- * consistent results. Note we assume it's OK to scribble on the passed
+ * relations, and fix up deleted columns in JOIN RTEs. This ensures
+ * consistent results. Note we assume it's OK to scribble on the passed
* querytree!
*
* We are only deparsing the query (we are not about to execute it), so we
@@ -4641,7 +4643,7 @@ get_target_list(List *targetList, deparse_context *context,
}
/*
- * Figure out what the result column should be called. In the context
+ * Figure out what the result column should be called. In the context
* of a view, use the view's tuple descriptor (so as to pick up the
* effects of any column RENAME that's been done on the view).
* Otherwise, just use what we can find in the TLE.
@@ -4863,7 +4865,7 @@ get_rule_sortgroupclause(SortGroupClause *srt, List *tlist, bool force_colno,
* expression is a constant, force it to be dumped with an explicit cast
* as decoration --- this is because a simple integer constant is
* ambiguous (and will be misinterpreted by findTargetlistEntry()) if we
- * dump it without any decoration. Otherwise, just dump the expression
+ * dump it without any decoration. Otherwise, just dump the expression
* normally.
*/
if (force_colno)
@@ -5558,8 +5560,8 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
* If it's an unnamed join, look at the expansion of the alias variable.
* If it's a simple reference to one of the input vars, then recursively
- * print the name of that var instead. When it's not a simple reference,
- * we have to just print the unqualified join column name. (This can only
+ * print the name of that var instead. When it's not a simple reference,
+ * we have to just print the unqualified join column name. (This can only
* happen with "dangerous" merged columns in a JOIN USING; we took pains
* previously to make the unqualified column name unique in such cases.)
*
@@ -5587,7 +5589,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
* Unnamed join has no refname. (Note: since it's unnamed, there is
* no way the user could have referenced it to create a whole-row Var
- * for it. So we don't have to cover that case below.)
+ * for it. So we don't have to cover that case below.)
*/
Assert(refname == NULL);
}
@@ -5628,7 +5630,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
- * Get the name of a field of an expression of composite type. The
+ * Get the name of a field of an expression of composite type. The
* expression is usually a Var, but we handle other cases too.
*
* levelsup is an extra offset to interpret the Var's varlevelsup correctly.
@@ -5638,7 +5640,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
* could also be RECORD. Since no actual table or view column is allowed to
* have type RECORD, a Var of type RECORD must refer to a JOIN or FUNCTION RTE
* or to a subquery output. We drill down to find the ultimate defining
- * expression and attempt to infer the field name from it. We ereport if we
+ * expression and attempt to infer the field name from it. We ereport if we
* can't determine the name.
*
* Similarly, a PARAM of type RECORD has to refer to some expression of
@@ -6003,7 +6005,7 @@ get_name_for_var_field(Var *var, int fieldno,
/*
* We now have an expression we can't expand any more, so see if
- * get_expr_result_type() can do anything with it. If not, pass to
+ * get_expr_result_type() can do anything with it. If not, pass to
* lookup_rowtype_tupdesc() which will probably fail, but will give an
* appropriate error message while failing.
*/
@@ -6021,7 +6023,7 @@ get_name_for_var_field(Var *var, int fieldno,
* reference a parameter supplied by an upper NestLoop or SubPlan plan node.
*
* If successful, return the expression and set *dpns_p and *ancestor_cell_p
- * appropriately for calling push_ancestor_plan(). If no referent can be
+ * appropriately for calling push_ancestor_plan(). If no referent can be
* found, return NULL.
*/
static Node *
@@ -6153,7 +6155,7 @@ get_parameter(Param *param, deparse_context *context)
/*
* If it's a PARAM_EXEC parameter, try to locate the expression from which
- * the parameter was computed. Note that failing to find a referent isn't
+ * the parameter was computed. Note that failing to find a referent isn't
* an error, since the Param might well be a subplan output rather than an
* input.
*/
@@ -6631,10 +6633,10 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* If there's a refassgnexpr, we want to print the node in the
- * format "array[subscripts] := refassgnexpr". This is not
+ * format "array[subscripts] := refassgnexpr". This is not
* legal SQL, so decompilation of INSERT or UPDATE statements
* should always use processIndirection as part of the
- * statement-level syntax. We should only see this when
+ * statement-level syntax. We should only see this when
* EXPLAIN tries to print the targetlist of a plan resulting
* from such a statement.
*/
@@ -6793,7 +6795,7 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* We cannot see an already-planned subplan in rule deparsing,
- * only while EXPLAINing a query plan. We don't try to
+ * only while EXPLAINing a query plan. We don't try to
* reconstruct the original SQL, just reference the subplan
* that appears elsewhere in EXPLAIN's result.
*/
@@ -6866,14 +6868,14 @@ get_rule_expr(Node *node, deparse_context *context,
* There is no good way to represent a FieldStore as real SQL,
* so decompilation of INSERT or UPDATE statements should
* always use processIndirection as part of the
- * statement-level syntax. We should only get here when
+ * statement-level syntax. We should only get here when
* EXPLAIN tries to print the targetlist of a plan resulting
* from such a statement. The plan case is even harder than
* ordinary rules would be, because the planner tries to
* collapse multiple assignments to the same field or subfield
* into one FieldStore; so we can see a list of target fields
* not just one, and the arguments could be FieldStores
- * themselves. We don't bother to try to print the target
+ * themselves. We don't bother to try to print the target
* field names; we just print the source arguments, with a
* ROW() around them if there's more than one. This isn't
* terribly complete, but it's probably good enough for
@@ -7668,7 +7670,7 @@ get_agg_expr(Aggref *aggref, deparse_context *context)
{
/*
* Ordered-set aggregates do not use "*" syntax. Also, we needn't
- * worry about inserting VARIADIC. So we can just dump the direct
+ * worry about inserting VARIADIC. So we can just dump the direct
* args as-is.
*/
Assert(!aggref->aggvariadic);
@@ -7810,7 +7812,7 @@ get_coercion_expr(Node *arg, deparse_context *context,
* Since parse_coerce.c doesn't immediately collapse application of
* length-coercion functions to constants, what we'll typically see in
* such cases is a Const with typmod -1 and a length-coercion function
- * right above it. Avoid generating redundant output. However, beware of
+ * right above it. Avoid generating redundant output. However, beware of
* suppressing casts when the user actually wrote something like
* 'foo'::text::char(3).
*/
@@ -7892,7 +7894,7 @@ get_const_expr(Const *constval, deparse_context *context, int showtype)
/*
* These types are printed without quotes unless they contain
* values that aren't accepted by the scanner unquoted (e.g.,
- * 'NaN'). Note that strtod() and friends might accept NaN,
+ * 'NaN'). Note that strtod() and friends might accept NaN,
* so we can't use that to test.
*
* In reality we only need to defend against infinity and NaN,
@@ -8416,7 +8418,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
else if (rte->rtekind == RTE_FUNCTION)
{
/*
- * For a function RTE, always print alias. This covers possible
+ * For a function RTE, always print alias. This covers possible
* renaming of the function and/or instability of the
* FigureColname rules for things that aren't simple functions.
* Note we'd need to force it anyway for the columndef list case.
@@ -8672,7 +8674,7 @@ get_opclass_name(Oid opclass, Oid actual_datatype,
if (!OidIsValid(actual_datatype) ||
GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass)
{
- /* Okay, we need the opclass name. Do we need to qualify it? */
+ /* Okay, we need the opclass name. Do we need to qualify it? */
opcname = NameStr(opcrec->opcname);
if (OpclassIsVisible(opclass))
appendStringInfo(buf, " %s", quote_identifier(opcname));
@@ -8967,13 +8969,13 @@ generate_relation_name(Oid relid, List *namespaces)
* generate_function_name
* Compute the name to display for a function specified by OID,
* given that it is being called with the specified actual arg names and
- * types. (Those matter because of ambiguous-function resolution rules.)
+ * types. (Those matter because of ambiguous-function resolution rules.)
*
* If we're dealing with a potentially variadic function (in practice, this
* means a FuncExpr or Aggref, not some other way of calling a function), then
* has_variadic must specify whether variadic arguments have been merged,
* and *use_variadic_p will be set to indicate whether to print VARIADIC in
- * the output. For non-FuncExpr cases, has_variadic should be FALSE and
+ * the output. For non-FuncExpr cases, has_variadic should be FALSE and
* use_variadic_p can be NULL.
*
* The result includes all necessary quoting and schema-prefixing.
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 1ffc0160b77..e932ccf0da5 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -72,7 +72,7 @@
* float8 oprjoin (internal, oid, internal, int2, internal);
*
* (Before Postgres 8.4, join estimators had only the first four of these
- * parameters. That signature is still allowed, but deprecated.) The
+ * parameters. That signature is still allowed, but deprecated.) The
* relationship between jointype and sjinfo is explained in the comments for
* clause_selectivity() --- the short version is that jointype is usually
* best ignored in favor of examining sjinfo.
@@ -209,7 +209,7 @@ static List *add_predicate_to_quals(IndexOptInfo *index, List *indexQuals);
*
* Note: this routine is also used to estimate selectivity for some
* operators that are not "=" but have comparable selectivity behavior,
- * such as "~=" (geometric approximate-match). Even for "=", we must
+ * such as "~=" (geometric approximate-match). Even for "=", we must
* keep in mind that the left and right datatypes may differ.
*/
Datum
@@ -273,7 +273,7 @@ var_eq_const(VariableStatData *vardata, Oid operator,
/*
* If we matched the var to a unique index or DISTINCT clause, assume
- * there is exactly one match regardless of anything else. (This is
+ * there is exactly one match regardless of anything else. (This is
* slightly bogus, since the index or clause's equality operator might be
* different from ours, but it's much more likely to be right than
* ignoring the information.)
@@ -296,7 +296,7 @@ var_eq_const(VariableStatData *vardata, Oid operator,
/*
* Is the constant "=" to any of the column's most common values?
* (Although the given operator may not really be "=", we will assume
- * that seeing whether it returns TRUE is an appropriate test. If you
+ * that seeing whether it returns TRUE is an appropriate test. If you
* don't like this, maybe you shouldn't be using eqsel for your
* operator...)
*/
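
[The MCV check described in this hunk reduces to a simple scan: apply the operator to each most-common value, and on a hit take that entry's frequency as the whole answer. A minimal sketch of that logic, with a hypothetical op_eq() standing in for the fmgr operator call and PostgreSQL-style types assumed:

    /* Sketch only: selectivity of "var = const" from MCV statistics. */
    static double
    sketch_eq_selectivity(Datum constval,
                          Datum *mcv_values, float4 *mcv_freqs, int nmcv,
                          double nullfrac, double ndistinct,
                          bool (*op_eq) (Datum, Datum))
    {
        double      sumcommon = 0.0;

        for (int i = 0; i < nmcv; i++)
        {
            if (op_eq(constval, mcv_values[i]))
                return mcv_freqs[i];    /* matched an MCV: use its frequency */
            sumcommon += mcv_freqs[i];
        }

        /* not an MCV: spread the leftover mass over the other distinct values */
        if (ndistinct > nmcv)
            return (1.0 - sumcommon - nullfrac) / (ndistinct - nmcv);
        return 0.0;
    }
]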
@@ -408,7 +408,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
/*
* If we matched the var to a unique index or DISTINCT clause, assume
- * there is exactly one match regardless of anything else. (This is
+ * there is exactly one match regardless of anything else. (This is
* slightly bogus, since the index or clause's equality operator might be
* different from ours, but it's much more likely to be right than
* ignoring the information.)
@@ -432,7 +432,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
* result averaged over all possible values whether common or
* uncommon. (Essentially, we are assuming that the not-yet-known
* comparison value is equally likely to be any of the possible
- * values, regardless of their frequency in the table. Is that a good
+ * values, regardless of their frequency in the table. Is that a good
* idea?)
*/
selec = 1.0 - stats->stanullfrac;
@@ -655,7 +655,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
* essentially using the histogram just as a representative sample. However,
* small histograms are unlikely to be all that representative, so the caller
* should be prepared to fall back on some other estimation approach when the
- * histogram is missing or very small. It may also be prudent to combine this
+ * histogram is missing or very small. It may also be prudent to combine this
* approach with another one when the histogram is small.
*
* If the actual histogram size is not at least min_hist_size, we won't bother
@@ -673,7 +673,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
*
* Note that the result disregards both the most-common-values (if any) and
* null entries. The caller is expected to combine this result with
- * statistics for those portions of the column population. It may also be
+ * statistics for those portions of the column population. It may also be
* prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
*/
double
@@ -786,7 +786,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
*
* If the binary search accesses the first or last histogram
* entry, we try to replace that endpoint with the true column min
- * or max as found by get_actual_variable_range(). This
+ * or max as found by get_actual_variable_range(). This
* ameliorates misestimates when the min or max is moving as a
* result of changes since the last ANALYZE. Note that this could
* result in effectively including MCVs into the histogram that
@@ -890,7 +890,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
/*
* Watch out for the possibility that we got a NaN or
- * Infinity from the division. This can happen
+ * Infinity from the division. This can happen
* despite the previous checks, if for example "low"
* is -Infinity.
*/
@@ -905,7 +905,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
* Ideally we'd produce an error here, on the grounds that
* the given operator shouldn't have scalarXXsel
* registered as its selectivity func unless we can deal
- * with its operand types. But currently, all manner of
+ * with its operand types. But currently, all manner of
* stuff is invoking scalarXXsel, so give a default
* estimate until that can be fixed.
*/
@@ -931,7 +931,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
/*
* The histogram boundaries are only approximate to begin with,
- * and may well be out of date anyway. Therefore, don't believe
+ * and may well be out of date anyway. Therefore, don't believe
* extremely small or large selectivity estimates --- unless we
* got actual current endpoint values from the table.
*/
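
[Taken together, these hunks describe a two-step estimate: locate the constant in the histogram by binary search, interpolate within its bucket, then refuse to return an extreme answer unless the endpoints are known-current. A hedged sketch; the 0.0001/0.9999 cutoffs are illustrative, not necessarily the exact constants used:

    /* Sketch: fraction of the histogram below "const".  bucket is the
     * 0-based bucket containing const, frac its position within that
     * bucket (0..1), nbounds the number of histogram boundary values. */
    static double
    sketch_hist_fraction(int bucket, double frac, int nbounds,
                         bool have_true_endpoints)
    {
        double      histfrac = (bucket + frac) / (double) (nbounds - 1);

        /* don't believe extreme results from stale, approximate bounds */
        if (!have_true_endpoints)
        {
            if (histfrac < 0.0001)
                histfrac = 0.0001;
            if (histfrac > 0.9999)
                histfrac = 0.9999;
        }
        return histfrac;
    }
]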
@@ -1128,7 +1128,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* If this is for a NOT LIKE or similar operator, get the corresponding
- * positive-match operator and work with that. Set result to the correct
+ * positive-match operator and work with that. Set result to the correct
* default estimate, too.
*/
if (negate)
@@ -1214,7 +1214,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* Pull out any fixed prefix implied by the pattern, and estimate the
- * fractional selectivity of the remainder of the pattern. Unlike many of
+ * fractional selectivity of the remainder of the pattern. Unlike many of
* the other functions in this file, we use the pattern operator's actual
* collation for this step. This is not because we expect the collation
* to make a big difference in the selectivity estimate (it seldom would),
@@ -1332,7 +1332,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* If we have most-common-values info, add up the fractions of the MCV
* entries that satisfy MCV OP PATTERN. These fractions contribute
- * directly to the result selectivity. Also add up the total fraction
+ * directly to the result selectivity. Also add up the total fraction
* represented by MCV entries.
*/
mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true,
@@ -1838,7 +1838,7 @@ scalararraysel(PlannerInfo *root,
/*
* For generic operators, we assume the probability of success is
- * independent for each array element. But for "= ANY" or "<> ALL",
+ * independent for each array element. But for "= ANY" or "<> ALL",
* if the array elements are distinct (which'd typically be the case)
* then the probabilities are disjoint, and we should just sum them.
*
@@ -2253,9 +2253,9 @@ eqjoinsel_inner(Oid operator,
if (have_mcvs1 && have_mcvs2)
{
/*
- * We have most-common-value lists for both relations. Run through
+ * We have most-common-value lists for both relations. Run through
* the lists to see which MCVs actually join to each other with the
- * given operator. This allows us to determine the exact join
+ * given operator. This allows us to determine the exact join
* selectivity for the portion of the relations represented by the MCV
* lists. We still have to estimate for the remaining population, but
* in a skewed distribution this gives us a big leg up in accuracy.
@@ -2287,7 +2287,7 @@ eqjoinsel_inner(Oid operator,
/*
* Note we assume that each MCV will match at most one member of the
- * other MCV list. If the operator isn't really equality, there could
+ * other MCV list. If the operator isn't really equality, there could
* be multiple matches --- but we don't look for them, both for speed
* and because the math wouldn't add up...
*/
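
[The MCV-vs-MCV pass amounts to a nested loop that sums the joint frequencies of matching pairs, stopping at the first match per entry on the at-most-one-match assumption stated above. A sketch under those assumptions, with op_eq() again standing in for the real operator invocation:

    /* Sketch: join selectivity contributed by two MCV lists. */
    static double
    sketch_mcv_join_selec(Datum *vals1, float4 *freqs1, int n1,
                          Datum *vals2, float4 *freqs2, int n2,
                          bool (*op_eq) (Datum, Datum))
    {
        double      matchfreq = 0.0;

        for (int i = 0; i < n1; i++)
        {
            for (int j = 0; j < n2; j++)
            {
                if (op_eq(vals1[i], vals2[j]))
                {
                    matchfreq += (double) freqs1[i] * freqs2[j];
                    break;      /* assume at most one match per MCV */
                }
            }
        }
        return matchfreq;   /* caller still estimates the non-MCV remainder */
    }
]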
@@ -2452,7 +2452,7 @@ eqjoinsel_semi(Oid operator,
/*
* We clamp nd2 to be not more than what we estimate the inner relation's
- * size to be. This is intuitively somewhat reasonable since obviously
+ * size to be. This is intuitively somewhat reasonable since obviously
* there can't be more than that many distinct values coming from the
* inner rel. The reason for the asymmetry (ie, that we don't clamp nd1
* likewise) is that this is the only pathway by which restriction clauses
@@ -2497,9 +2497,9 @@ eqjoinsel_semi(Oid operator,
if (have_mcvs1 && have_mcvs2 && OidIsValid(operator))
{
/*
- * We have most-common-value lists for both relations. Run through
+ * We have most-common-value lists for both relations. Run through
* the lists to see which MCVs actually join to each other with the
- * given operator. This allows us to determine the exact join
+ * given operator. This allows us to determine the exact join
* selectivity for the portion of the relations represented by the MCV
* lists. We still have to estimate for the remaining population, but
* in a skewed distribution this gives us a big leg up in accuracy.
@@ -2530,7 +2530,7 @@ eqjoinsel_semi(Oid operator,
/*
* Note we assume that each MCV will match at most one member of the
- * other MCV list. If the operator isn't really equality, there could
+ * other MCV list. If the operator isn't really equality, there could
* be multiple matches --- but we don't look for them, both for speed
* and because the math wouldn't add up...
*/
@@ -2567,7 +2567,7 @@ eqjoinsel_semi(Oid operator,
/*
* Now we need to estimate the fraction of relation 1 that has at
- * least one join partner. We know for certain that the matched MCVs
+ * least one join partner. We know for certain that the matched MCVs
* do, so that gives us a lower bound, but we're really in the dark
* about everything else. Our crude approach is: if nd1 <= nd2 then
* assume all non-null rel1 rows have join partners, else assume for
@@ -3165,11 +3165,11 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* case (all possible cross-product terms actually appear as groups) since
* very often the grouped-by Vars are highly correlated. Our current approach
* is as follows:
- * 1. Expressions yielding boolean are assumed to contribute two groups,
+ * 1. Expressions yielding boolean are assumed to contribute two groups,
* independently of their content, and are ignored in the subsequent
- * steps. This is mainly because tests like "col IS NULL" break the
+ * steps. This is mainly because tests like "col IS NULL" break the
* heuristic used in step 2 especially badly.
- * 2. Reduce the given expressions to a list of unique Vars used. For
+ * 2. Reduce the given expressions to a list of unique Vars used. For
* example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
* It is clearly correct not to count the same Var more than once.
* It is also reasonable to treat f(x) the same as x: f() cannot
@@ -3179,14 +3179,14 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* As a special case, if a GROUP BY expression can be matched to an
* expressional index for which we have statistics, then we treat the
* whole expression as though it were just a Var.
- * 3. If the list contains Vars of different relations that are known equal
+ * 3. If the list contains Vars of different relations that are known equal
* due to equivalence classes, then drop all but one of the Vars from each
* known-equal set, keeping the one with smallest estimated # of values
* (since the extra values of the others can't appear in joined rows).
* Note the reason we only consider Vars of different relations is that
* if we considered ones of the same rel, we'd be double-counting the
* restriction selectivity of the equality in the next step.
- * 4. For Vars within a single source rel, we multiply together the numbers
+ * 4. For Vars within a single source rel, we multiply together the numbers
* of values, clamp to the number of rows in the rel (divided by 10 if
* more than one Var), and then multiply by the selectivity of the
* restriction clauses for that rel. When there's more than one Var,
@@ -3197,7 +3197,7 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* by the restriction selectivity is effectively assuming that the
* restriction clauses are independent of the grouping, which is a crummy
* assumption, but it's hard to do better.
- * 5. If there are Vars from multiple rels, we repeat step 4 for each such
+ * 5. If there are Vars from multiple rels, we repeat step 4 for each such
* rel, and multiply the results together.
* Note that rels not containing grouped Vars are ignored completely, as are
* join clauses. Such rels cannot increase the number of groups, and we
@@ -3228,7 +3228,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
return 1.0;
/*
- * Count groups derived from boolean grouping expressions. For other
+ * Count groups derived from boolean grouping expressions. For other
* expressions, find the unique Vars used, treating an expression as a Var
* if we can find stats for it. For each one, record the statistical
* estimate of number of distinct values (total in its table, without
@@ -3317,7 +3317,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
* Group Vars by relation and estimate total numdistinct.
*
* For each iteration of the outer loop, we process the frontmost Var in
- * varinfos, plus all other Vars in the same relation. We remove these
+ * varinfos, plus all other Vars in the same relation. We remove these
* Vars from the newvarinfos list for the next iteration. This is the
* easiest way to group Vars of same rel together.
*/
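
[Step 4 of the numbered heuristic above boils down to a product of per-Var ndistinct estimates, clamped and then scaled by the rel's restriction selectivity. A sketch of that arithmetic, with illustrative names:

    /* Sketch: per-relation group-count estimate (step 4 above). */
    static double
    sketch_groups_for_rel(double *ndistincts, int nvars,
                          double reltuples, double restrict_selec)
    {
        double      clamp = reltuples;
        double      estimate = 1.0;

        if (nvars > 1)
            clamp /= 10.0;      /* full cross-product is surely too high */

        for (int i = 0; i < nvars; i++)
            estimate *= ndistincts[i];

        if (estimate > clamp)
            estimate = clamp;

        /* step 5: the caller multiplies these results across rels */
        return estimate * restrict_selec;
    }
]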
@@ -3418,11 +3418,11 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
* distribution, so this will have to do for now.
*
* We are passed the number of buckets the executor will use for the given
- * input relation. If the data were perfectly distributed, with the same
+ * input relation. If the data were perfectly distributed, with the same
* number of tuples going into each available bucket, then the bucketsize
* fraction would be 1/nbuckets. But this happy state of affairs will occur
* only if (a) there are at least nbuckets distinct data values, and (b)
- * we have a not-too-skewed data distribution. Otherwise the buckets will
+ * we have a not-too-skewed data distribution. Otherwise the buckets will
* be nonuniformly occupied. If the other relation in the join has a key
* distribution similar to this one's, then the most-loaded buckets are
* exactly those that will be probed most often. Therefore, the "average"
@@ -3595,7 +3595,7 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
* operators to estimate selectivity for the other's. This is outright
* wrong in some cases --- in particular signed versus unsigned
* interpretation could trip us up. But it's useful enough in the
- * majority of cases that we do it anyway. Should think about more
+ * majority of cases that we do it anyway. Should think about more
* rigorous ways to do it.
*/
switch (valuetypid)
@@ -3950,6 +3950,7 @@ convert_string_datum(Datum value, Oid typid)
xfrmlen = strxfrm(NULL, val, 0);
#endif
#ifdef WIN32
+
/*
* On Windows, strxfrm returns INT_MAX when an error occurs. Instead
* of trying to allocate this much memory (and fail), just return the
@@ -4178,7 +4179,7 @@ get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
right = (Node *) lsecond(args);
/*
- * Examine both sides. Note that when varRelid is nonzero, Vars of other
+ * Examine both sides. Note that when varRelid is nonzero, Vars of other
* relations will be treated as pseudoconstants.
*/
examine_variable(root, left, varRelid, vardata);
@@ -4323,7 +4324,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
/*
* Okay, it's a more complicated expression. Determine variable
- * membership. Note that when varRelid isn't zero, only vars of that
+ * membership. Note that when varRelid isn't zero, only vars of that
* relation are considered "real" vars.
*/
varnos = pull_varnos(basenode);
@@ -4372,13 +4373,13 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
if (onerel)
{
/*
- * We have an expression in vars of a single relation. Try to match
+ * We have an expression in vars of a single relation. Try to match
* it to expressional index columns, in hopes of finding some
* statistics.
*
* XXX it's conceivable that there are multiple matches with different
* index opfamilies; if so, we need to pick one that matches the
- * operator we are estimating for. FIXME later.
+ * operator we are estimating for. FIXME later.
*/
ListCell *ilist;
@@ -4580,7 +4581,7 @@ examine_simple_variable(PlannerInfo *root, Var *var,
*
* This is probably a harsher restriction than necessary; it's
* certainly OK for the selectivity estimator (which is a C function,
- * and therefore omnipotent anyway) to look at the statistics. But
+ * and therefore omnipotent anyway) to look at the statistics. But
* many selectivity estimators will happily *invoke the operator
* function* to try to work out a good estimate - and that's not OK.
* So for now, don't dig down for stats.
@@ -4633,7 +4634,7 @@ get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
*isdefault = false;
/*
- * Determine the stadistinct value to use. There are cases where we can
+ * Determine the stadistinct value to use. There are cases where we can
* get an estimate even without a pg_statistic entry, or can get a better
* value than is in pg_statistic.
*/
@@ -4757,7 +4758,7 @@ get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop,
/*
* XXX It's very tempting to try to use the actual column min and max, if
- * we can get them relatively-cheaply with an index probe. However, since
+ * we can get them relatively-cheaply with an index probe. However, since
* this function is called many times during join planning, that could
* have unpleasant effects on planning speed. Need more investigation
* before enabling this.
@@ -5008,7 +5009,7 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
* and it can be very expensive if a lot of uncommitted rows
* exist at the end of the index (because we'll laboriously
* fetch each one and reject it). What seems like a good
- * compromise is to use SnapshotDirty. That will accept
+ * compromise is to use SnapshotDirty. That will accept
* uncommitted rows, and thus avoid fetching multiple heap
* tuples in this scenario. On the other hand, it will reject
* known-dead rows, and thus not give a bogus answer when the
@@ -5147,7 +5148,7 @@ find_join_input_rel(PlannerInfo *root, Relids relids)
* Check whether char is a letter (and, hence, subject to case-folding)
*
* In multibyte character sets, we can't use isalpha, and it does not seem
- * worth trying to convert to wchar_t to use iswalpha. Instead, just assume
+ * worth trying to convert to wchar_t to use iswalpha. Instead, just assume
* any multibyte char is potentially case-varying.
*/
static int
@@ -5399,7 +5400,7 @@ pattern_fixed_prefix(Const *patt, Pattern_Type ptype, Oid collation,
* together with info about MCVs and NULLs.
*
* We use the >= and < operators from the specified btree opfamily to do the
- * estimation. The given variable and Const must be of the associated
+ * estimation. The given variable and Const must be of the associated
* datatype.
*
* XXX Note: we make use of the upper bound to estimate operator selectivity
@@ -5458,7 +5459,7 @@ prefix_selectivity(PlannerInfo *root, VariableStatData *vardata,
/*
* Merge the two selectivities in the same way as for a range query
- * (see clauselist_selectivity()). Note that we don't need to worry
+ * (see clauselist_selectivity()). Note that we don't need to worry
* about double-exclusion of nulls, since ineq_histogram_selectivity
* doesn't count those anyway.
*/
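
[The range-style merge referenced here is the standard trick from clauselist_selectivity: for "var >= prefix AND var < limit", add the two one-sided selectivities and subtract 1 to get the overlap. Sketched, with an illustrative floor (not necessarily PG's exact constant) to keep roundoff from producing a nonpositive result:

    /* Sketch: merging >= and < selectivities as a range query. */
    static double
    sketch_prefix_selec(double ge_sel, double lt_sel)
    {
        double      sel = ge_sel + lt_sel - 1.0;

        if (sel < 1.0e-10)      /* illustrative floor against roundoff */
            sel = 1.0e-10;
        return sel;
    }
]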
@@ -5695,7 +5696,7 @@ byte_increment(unsigned char *ptr, int len)
* that is not a bulletproof guarantee that an extension of the string might
* not sort after it; an example is that "foo " is less than "foo!", but it
* is not clear that a "dictionary" sort ordering will consider "foo!" less
- * than "foo bar". CAUTION: Therefore, this function should be used only for
+ * than "foo bar". CAUTION: Therefore, this function should be used only for
* estimation purposes when working in a non-C collation.
*
* To try to catch most cases where an extended string might otherwise sort
@@ -5952,7 +5953,7 @@ string_to_bytea_const(const char *str, size_t str_len)
* genericcostestimate is a general-purpose estimator that can be used for
* most index types. In some cases we use genericcostestimate as the base
* code and then incorporate additional index-type-specific knowledge in
- * the type-specific calling function. To avoid code duplication, we make
+ * the type-specific calling function. To avoid code duplication, we make
* genericcostestimate return a number of intermediate values as well as
* its preliminary estimates of the output cost values. The GenericCosts
* struct includes all these values.
@@ -6072,7 +6073,7 @@ genericcostestimate(PlannerInfo *root,
*
* In practice access to upper index levels is often nearly free because
* those tend to stay in cache under load; moreover, the cost involved is
- * highly dependent on index type. We therefore ignore such costs here
+ * highly dependent on index type. We therefore ignore such costs here
* and leave it to the caller to add a suitable charge if needed.
*/
if (index->pages > 1 && index->tuples > 1)
@@ -6091,9 +6092,9 @@ genericcostestimate(PlannerInfo *root,
* The above calculations are all per-index-scan. However, if we are in a
* nestloop inner scan, we can expect the scan to be repeated (with
* different search keys) for each row of the outer relation. Likewise,
- * ScalarArrayOpExpr quals result in multiple index scans. This creates
+ * ScalarArrayOpExpr quals result in multiple index scans. This creates
* the potential for cache effects to reduce the number of disk page
- * fetches needed. We want to estimate the average per-scan I/O cost in
+ * fetches needed. We want to estimate the average per-scan I/O cost in
* the presence of caching.
*
* We use the Mackert-Lohman formula (see costsize.c for details) to
@@ -6140,7 +6141,7 @@ genericcostestimate(PlannerInfo *root,
* evaluated once at the start of the scan to reduce them to runtime keys
* to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
* CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
- * indexqual operator. Because we have numIndexTuples as a per-scan
+ * indexqual operator. Because we have numIndexTuples as a per-scan
* number, we have to multiply by num_sa_scans to get the correct result
* for ScalarArrayOpExpr cases. Similarly add in costs for any index
* ORDER BY expressions.
@@ -6187,16 +6188,16 @@ genericcostestimate(PlannerInfo *root,
* ANDing the index predicate with the explicitly given indexquals produces
* a more accurate idea of the index's selectivity. However, we need to be
* careful not to insert redundant clauses, because clauselist_selectivity()
- * is easily fooled into computing a too-low selectivity estimate. Our
+ * is easily fooled into computing a too-low selectivity estimate. Our
* approach is to add only the predicate clause(s) that cannot be proven to
- * be implied by the given indexquals. This successfully handles cases such
+ * be implied by the given indexquals. This successfully handles cases such
* as a qual "x = 42" used with a partial index "WHERE x >= 40 AND x < 50".
* There are many other cases where we won't detect redundancy, leading to a
* too-low selectivity estimate, which will bias the system in favor of using
- * partial indexes where possible. That is not necessarily bad though.
+ * partial indexes where possible. That is not necessarily bad though.
*
* Note that indexQuals contains RestrictInfo nodes while the indpred
- * does not, so the output list will be mixed. This is OK for both
+ * does not, so the output list will be mixed. This is OK for both
* predicate_implied_by() and clauselist_selectivity(), but might be
* problematic if the result were passed to other things.
*/
@@ -6255,7 +6256,7 @@ btcostestimate(PG_FUNCTION_ARGS)
* the index scan). Additional quals can suppress visits to the heap, so
* it's OK to count them in indexSelectivity, but they should not count
* for estimating numIndexTuples. So we must examine the given indexquals
- * to find out which ones count as boundary quals. We rely on the
+ * to find out which ones count as boundary quals. We rely on the
* knowledge that they are given in index column order.
*
* For a RowCompareExpr, we consider only the first column, just as
@@ -6594,7 +6595,7 @@ hashcostestimate(PG_FUNCTION_ARGS)
* because the hash AM makes sure that's always one page.
*
* Likewise, we could consider charging some CPU for each index tuple in
- * the bucket, if we knew how many there were. But the per-tuple cost is
+ * the bucket, if we knew how many there were. But the per-tuple cost is
* just a hash value comparison, not a general datatype-dependent
* comparison, so any such charge ought to be quite a bit less than
* cpu_operator_cost; which makes it probably not worth worrying about.
@@ -6652,7 +6653,7 @@ gistcostestimate(PG_FUNCTION_ARGS)
/*
* Add a CPU-cost component to represent the costs of initial descent. We
* just use log(N) here not log2(N) since the branching factor isn't
- * necessarily two anyway. As for btree, charge once per SA scan.
+ * necessarily two anyway. As for btree, charge once per SA scan.
*/
if (index->tuples > 1) /* avoid computing log(0) */
{
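
[The body of this if-block charges roughly one comparison per tree level, using the natural log as discussed, and repeats the charge for each ScalarArrayOpExpr scan. Approximately, with variable names as in the surrounding function:

    descentCost = ceil(log(index->tuples)) * cpu_operator_cost;
    costs.indexStartupCost += descentCost;
    costs.indexTotalCost += costs.num_sa_scans * descentCost;
]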
@@ -6714,7 +6715,7 @@ spgcostestimate(PG_FUNCTION_ARGS)
/*
* Add a CPU-cost component to represent the costs of initial descent. We
* just use log(N) here not log2(N) since the branching factor isn't
- * necessarily two anyway. As for btree, charge once per SA scan.
+ * necessarily two anyway. As for btree, charge once per SA scan.
*/
if (index->tuples > 1) /* avoid computing log(0) */
{
@@ -6791,7 +6792,7 @@ gincost_pattern(IndexOptInfo *index, int indexcol,
/*
* Get the operator's strategy number and declared input data types within
- * the index opfamily. (We don't need the latter, but we use
+ * the index opfamily. (We don't need the latter, but we use
* get_op_opfamily_properties because it will throw error if it fails to
* find a matching pg_amop entry.)
*/
@@ -6937,7 +6938,7 @@ gincost_opexpr(PlannerInfo *root, IndexOptInfo *index, OpExpr *clause,
* each of which involves one value from the RHS array, plus all the
* non-array quals (if any). To model this, we average the counts across
* the RHS elements, and add the averages to the counts in *counts (which
- * correspond to per-indexscan costs). We also multiply counts->arrayScans
+ * correspond to per-indexscan costs). We also multiply counts->arrayScans
* by N, causing gincostestimate to scale up its estimates accordingly.
*/
static bool
@@ -7107,7 +7108,7 @@ gincostestimate(PG_FUNCTION_ARGS)
/*
* nPendingPages can be trusted, but the other fields are as of the last
- * VACUUM. Scale them by the ratio numPages / nTotalPages to account for
+ * VACUUM. Scale them by the ratio numPages / nTotalPages to account for
* growth since then. If the fields are zero (implying no VACUUM at all,
* and an index created pre-9.1), assume all pages are entry pages.
*/
@@ -7252,7 +7253,7 @@ gincostestimate(PG_FUNCTION_ARGS)
/*
* Add an estimate of entry pages read by partial match algorithm. It's a
- * scan over leaf pages in entry tree. We haven't any useful stats here,
+ * scan over leaf pages in entry tree. We haven't any useful stats here,
* so estimate it as proportion.
*/
entryPagesFetched += ceil(numEntryPages * counts.partialEntries / numEntries);
@@ -7294,17 +7295,17 @@ gincostestimate(PG_FUNCTION_ARGS)
*
* We assume every entry to have the same number of items, and that there
* is no overlap between them. (XXX: tsvector and array opclasses collect
- * statistics on the frequency of individual keys; it would be nice to
- * use those here.)
+ * statistics on the frequency of individual keys; it would be nice to use
+ * those here.)
*/
dataPagesFetched = ceil(numDataPages * counts.exactEntries / numEntries);
/*
- * If there is a lot of overlap among the entries, in particular if one
- * of the entries is very frequent, the above calculation can grossly
- * under-estimate. As a simple cross-check, calculate a lower bound
- * based on the overall selectivity of the quals. At a minimum, we must
- * read one item pointer for each matching entry.
+ * If there is a lot of overlap among the entries, in particular if one of
+ * the entries is very frequent, the above calculation can grossly
+ * under-estimate. As a simple cross-check, calculate a lower bound based
+ * on the overall selectivity of the quals. At a minimum, we must read
+ * one item pointer for each matching entry.
*
* The width of each item pointer varies, based on the level of
* compression. We don't have statistics on that, but an average of
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index efc1e9b9925..11007c6d894 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -41,7 +41,7 @@
#error -ffast-math is known to break this code
#endif
-#define SAMESIGN(a,b) (((a) < 0) == ((b) < 0))
+#define SAMESIGN(a,b) (((a) < 0) == ((b) < 0))
#ifndef INT64_MAX
#define INT64_MAX INT64CONST(0x7FFFFFFFFFFFFFFF)
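
[SAMESIGN exists to support the classic post-operation overflow test: if two addends share a sign but the sum does not, the addition wrapped. A sketch of the pattern; note it leans on -fwrapv wraparound semantics, since signed overflow is otherwise undefined in C:

    /* Sketch: overflow-checked addition using SAMESIGN. */
    static int64
    sketch_add_checked(int64 a, int64 b)
    {
        int64       result = a + b;    /* relies on -fwrapv wraparound */

        if (SAMESIGN(a, b) && !SAMESIGN(result, a))
            ereport(ERROR,
                    (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
                     errmsg("timestamp out of range")));
        return result;
    }
]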
@@ -391,7 +391,7 @@ AdjustTimestampForTypmod(Timestamp *time, int32 typmod)
* Note: this round-to-nearest code is not completely consistent about
* rounding values that are exactly halfway between integral values.
* On most platforms, rint() will implement round-to-nearest-even, but
- * the integer code always rounds up (away from zero). Is it worth
+ * the integer code always rounds up (away from zero). Is it worth
* trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
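
[To make the halfway inconsistency concrete (rounding unit of 10, input of 25, default round-to-even FP mode assumed):

    /* Sketch: the two rounding behaviours disagree exactly at .5 */
    double  f = rint(25.0 / 10.0) * 10.0;   /* 20: round-half-to-even */
    int64   i = ((25 + 10 / 2) / 10) * 10;  /* 30: rounds away from zero */
]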
@@ -488,7 +488,7 @@ timestamptz_in(PG_FUNCTION_ARGS)
* if it's acceptable. Otherwise, an error is thrown.
*/
static int
-parse_sane_timezone(struct pg_tm *tm, text *zone)
+parse_sane_timezone(struct pg_tm * tm, text *zone)
{
char tzname[TZ_STRLEN_MAX + 1];
int rt;
@@ -497,7 +497,7 @@ parse_sane_timezone(struct pg_tm *tm, text *zone)
text_to_cstring_buffer(zone, tzname, sizeof(tzname));
/*
- * Look up the requested timezone. First we try to interpret it as a
+ * Look up the requested timezone. First we try to interpret it as a
* numeric timezone specification; if DecodeTimezone decides it doesn't
* like the format, we look in the date token table (to handle cases like
* "EST"), and if that also fails, we look in the timezone database (to
@@ -507,7 +507,7 @@ parse_sane_timezone(struct pg_tm *tm, text *zone)
* offset abbreviations.)
*
* Note pg_tzset happily parses numeric input that DecodeTimezone would
- * reject. To avoid having it accept input that would otherwise be seen
+ * reject. To avoid having it accept input that would otherwise be seen
* as invalid, it's enough to disallow having a digit in the first
* position of our input string.
*/
@@ -528,7 +528,7 @@ parse_sane_timezone(struct pg_tm *tm, text *zone)
if (rt == DTERR_TZDISP_OVERFLOW)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("numeric time zone \"%s\" out of range", tzname)));
+ errmsg("numeric time zone \"%s\" out of range", tzname)));
else if (rt != DTERR_BAD_FORMAT)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -997,7 +997,7 @@ interval_send(PG_FUNCTION_ARGS)
/*
* The interval typmod stores a "range" in its high 16 bits and a "precision"
- * in its low 16 bits. Both contribute to defining the resolution of the
+ * in its low 16 bits. Both contribute to defining the resolution of the
* type. Range addresses resolution granules larger than one second, and
* precision specifies resolution below one second. This representation can
* express all SQL standard resolutions, but we implement them all in terms of
@@ -1205,7 +1205,7 @@ interval_transform(PG_FUNCTION_ARGS)
/*
* Temporally-smaller fields occupy higher positions in the range
- * bitmap. Since only the temporally-smallest bit matters for length
+ * bitmap. Since only the temporally-smallest bit matters for length
* coercion purposes, we compare the last-set bits in the ranges.
* Precision, which is to say, sub-second precision, only affects
* ranges that include SECOND.
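
[The packing itself mirrors the INTERVAL_TYPMOD/INTERVAL_RANGE/INTERVAL_PRECISION macros: range in the high half-word, precision in the low one. A sketch of the layout:

    /* Sketch: interval typmod, high 16 bits = range, low 16 = precision */
    #define SK_TYPMOD(prec, range)  ((((range) & 0x7FFF) << 16) | ((prec) & 0xFFFF))
    #define SK_RANGE(t)             (((t) >> 16) & 0x7FFF)
    #define SK_PRECISION(t)         ((t) & 0xFFFF)
]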
@@ -1294,7 +1294,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
* that fields to the right of the last one specified are zeroed out,
* but those to the left of it remain valid. Thus for example there
* is no operational difference between INTERVAL YEAR TO MONTH and
- * INTERVAL MONTH. In some cases we could meaningfully enforce that
+ * INTERVAL MONTH. In some cases we could meaningfully enforce that
* higher-order fields are zero; for example INTERVAL DAY could reject
* nonzero "month" field. However that seems a bit pointless when we
* can't do it consistently. (We cannot enforce a range limit on the
@@ -1304,9 +1304,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
*
* Note: before PG 8.4 we interpreted a limited set of fields as
* actually causing a "modulo" operation on a given value, potentially
- * losing high-order as well as low-order information. But there is
+ * losing high-order as well as low-order information. But there is
* no support for such behavior in the standard, and it seems fairly
- * undesirable on data consistency grounds anyway. Now we only
+ * undesirable on data consistency grounds anyway. Now we only
* perform truncation or rounding of low-order fields.
*/
if (range == INTERVAL_FULL_RANGE)
@@ -1426,7 +1426,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
/*
* Note: this round-to-nearest code is not completely consistent
* about rounding values that are exactly halfway between integral
- * values. On most platforms, rint() will implement
+ * values. On most platforms, rint() will implement
* round-to-nearest-even, but the integer code always rounds up
* (away from zero). Is it worth trying to be consistent?
*/
@@ -1470,7 +1470,7 @@ make_interval(PG_FUNCTION_ARGS)
Interval *result;
/*
- * Reject out-of-range inputs. We really ought to check the integer
+ * Reject out-of-range inputs. We really ought to check the integer
* inputs as well, but it's not entirely clear what limits to apply.
*/
if (isinf(secs) || isnan(secs))
@@ -1718,7 +1718,7 @@ timestamptz_to_time_t(TimestampTz t)
* Produce a C-string representation of a TimestampTz.
*
* This is mostly for use in emitting messages. The primary difference
- * from timestamptz_out is that we force the output format to ISO. Note
+ * from timestamptz_out is that we force the output format to ISO. Note
* also that the result is in a static buffer, not pstrdup'd.
*/
const char *
@@ -1862,7 +1862,7 @@ recalc_t:
*
* First, convert to an integral timestamp, avoiding possibly
* platform-specific roundoff-in-wrong-direction errors, and adjust to
- * Unix epoch. Then see if we can convert to pg_time_t without loss. This
+ * Unix epoch. Then see if we can convert to pg_time_t without loss. This
* coding avoids hardwiring any assumptions about the width of pg_time_t,
* so it should behave sanely on machines without int64.
*/
@@ -2010,7 +2010,7 @@ recalc:
int
tm2interval(struct pg_tm * tm, fsec_t fsec, Interval *span)
{
- double total_months = (double)tm->tm_year * MONTHS_PER_YEAR + tm->tm_mon;
+ double total_months = (double) tm->tm_year * MONTHS_PER_YEAR + tm->tm_mon;
if (total_months > INT_MAX || total_months < INT_MIN)
return -1;
@@ -4888,7 +4888,7 @@ timestamp_zone(PG_FUNCTION_ARGS)
PG_RETURN_TIMESTAMPTZ(timestamp);
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
@@ -5061,7 +5061,7 @@ timestamptz_zone(PG_FUNCTION_ARGS)
PG_RETURN_TIMESTAMP(timestamp);
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
diff --git a/src/backend/utils/adt/tsginidx.c b/src/backend/utils/adt/tsginidx.c
index df47105d0b2..bdef47f093c 100644
--- a/src/backend/utils/adt/tsginidx.c
+++ b/src/backend/utils/adt/tsginidx.c
@@ -204,9 +204,12 @@ checkcondition_gin(void *checkval, QueryOperand *val)
*/
static GinTernaryValue
TS_execute_ternary(QueryItem *curitem, void *checkval,
- GinTernaryValue (*chkcond) (void *checkval, QueryOperand *val))
+ GinTernaryValue (*chkcond) (void *checkval, QueryOperand *val))
{
- GinTernaryValue val1, val2, result;
+ GinTernaryValue val1,
+ val2,
+ result;
+
/* since this function recurses, it could be driven to stack overflow */
check_stack_depth();
@@ -223,7 +226,7 @@ TS_execute_ternary(QueryItem *curitem, void *checkval,
case OP_AND:
val1 = TS_execute_ternary(curitem + curitem->qoperator.left,
- checkval, chkcond);
+ checkval, chkcond);
if (val1 == GIN_FALSE)
return GIN_FALSE;
val2 = TS_execute_ternary(curitem + 1, checkval, chkcond);
@@ -236,7 +239,7 @@ TS_execute_ternary(QueryItem *curitem, void *checkval,
case OP_OR:
val1 = TS_execute_ternary(curitem + curitem->qoperator.left,
- checkval, chkcond);
+ checkval, chkcond);
if (val1 == GIN_TRUE)
return GIN_TRUE;
val2 = TS_execute_ternary(curitem + 1, checkval, chkcond);
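
[Both operator cases implement ordinary three-valued logic over GIN_TRUE/GIN_FALSE/GIN_MAYBE, with the early returns above short-circuiting on the dominant operand. The combination rules, sketched using the constants from access/gin.h:

    /* Sketch: the ternary AND/OR combinations used by TS_execute_ternary. */
    static GinTernaryValue
    ternary_and(GinTernaryValue a, GinTernaryValue b)
    {
        if (a == GIN_FALSE || b == GIN_FALSE)
            return GIN_FALSE;
        if (a == GIN_MAYBE || b == GIN_MAYBE)
            return GIN_MAYBE;
        return GIN_TRUE;
    }

    static GinTernaryValue
    ternary_or(GinTernaryValue a, GinTernaryValue b)
    {
        if (a == GIN_TRUE || b == GIN_TRUE)
            return GIN_TRUE;
        if (a == GIN_MAYBE || b == GIN_MAYBE)
            return GIN_MAYBE;
        return GIN_FALSE;
    }
]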
@@ -339,7 +342,7 @@ gin_tsquery_triconsistent(PG_FUNCTION_ARGS)
* Formerly, gin_extract_tsvector had only two arguments. Now it has three,
* but we still need a pg_proc entry with two args to support reloading
* pre-9.1 contrib/tsearch2 opclass declarations. This compatibility
- * function should go away eventually. (Note: you might say "hey, but the
+ * function should go away eventually. (Note: you might say "hey, but the
* code above is only *using* two args, so let's just declare it that way".
* If you try that you'll find the opr_sanity regression test complains.)
*/
diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c
index 502ca44e04a..72b9f99dbc9 100644
--- a/src/backend/utils/adt/varchar.c
+++ b/src/backend/utils/adt/varchar.c
@@ -257,7 +257,7 @@ bpcharsend(PG_FUNCTION_ARGS)
*
* Truncation rules: for an explicit cast, silently truncate to the given
* length; for an implicit cast, raise error unless extra characters are
- * all spaces. (This is sort-of per SQL: the spec would actually have us
+ * all spaces. (This is sort-of per SQL: the spec would actually have us
* raise a "completion condition" for the explicit cast case, but Postgres
* hasn't got such a concept.)
*/
@@ -584,7 +584,7 @@ varchar_transform(PG_FUNCTION_ARGS)
*
* Truncation rules: for an explicit cast, silently truncate to the given
* length; for an implicit cast, raise error unless extra characters are
- * all spaces. (This is sort-of per SQL: the spec would actually have us
+ * all spaces. (This is sort-of per SQL: the spec would actually have us
* raise a "completion condition" for the explicit cast case, but Postgres
* hasn't got such a concept.)
*/
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index aab4897f618..f8d9fec34e4 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -591,7 +591,7 @@ textlen(PG_FUNCTION_ARGS)
* Does the real work for textlen()
*
* This is broken out so it can be called directly by other string processing
- * functions. Note that the argument is passed as a Datum, to indicate that
+ * functions. Note that the argument is passed as a Datum, to indicate that
* it may still be in compressed form. We can avoid decompressing it at all
* in some cases.
*/
@@ -763,7 +763,7 @@ text_substr_no_len(PG_FUNCTION_ARGS)
* Does the real work for text_substr() and text_substr_no_len()
*
* This is broken out so it can be called directly by other string processing
- * functions. Note that the argument is passed as a Datum, to indicate that
+ * functions. Note that the argument is passed as a Datum, to indicate that
* it may still be in compressed/toasted form. We can avoid detoasting all
* of it in some cases.
*
@@ -1113,7 +1113,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
* searched (t1) and the "needle" is the pattern being sought (t2).
*
* If the needle is empty or bigger than the haystack then there is no
- * point in wasting cycles initializing the table. We also choose not to
+ * point in wasting cycles initializing the table. We also choose not to
* use B-M-H for needles of length 1, since the skip table can't possibly
* save anything in that case.
*/
@@ -1129,7 +1129,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
* declaration of TextPositionState allows up to 256 elements, but for
* short search problems we don't really want to have to initialize so
* many elements --- it would take too long in comparison to the
- * actual search time. So we choose a useful skip table size based on
+ * actual search time. So we choose a useful skip table size based on
* the haystack length minus the needle length. The closer the needle
* length is to the haystack length the less useful skipping becomes.
*
@@ -1161,7 +1161,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
state->skiptable[i] = len2;
/*
- * Now examine the needle. For each character except the last one,
+ * Now examine the needle. For each character except the last one,
* set the corresponding table element to the appropriate skip
* distance. Note that when two characters share the same skip table
* entry, the one later in the needle must determine the skip
@@ -1249,11 +1249,11 @@ text_position_next(int start_pos, TextPositionState *state)
/*
* No match, so use the haystack char at hptr to decide how
- * far to advance. If the needle had any occurrence of that
+ * far to advance. If the needle had any occurrence of that
* character (or more precisely, one sharing the same
* skiptable entry) before its last character, then we advance
* far enough to align the last such needle character with
- * that haystack position. Otherwise we can advance by the
+ * that haystack position. Otherwise we can advance by the
* whole needle length.
*/
hptr += state->skiptable[(unsigned char) *hptr & skiptablemask];
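
[The skip table driving this advance is the usual byte-wise Boyer-Moore-Horspool construction, with the table size chosen as described in the setup comments above. A sketch of how it is filled:

    /* Sketch: B-M-H skip table init for a needle of len2 bytes;
     * skiptablemask is one less than a power-of-2 table size. */
    static void
    sketch_bmh_init(const char *needle, int len2,
                    int *skiptable, int skiptablemask)
    {
        /* default: a byte absent from the needle permits a full skip */
        for (int i = 0; i <= skiptablemask; i++)
            skiptable[i] = len2;

        /* every needle byte but the last records its distance from the end */
        for (int i = 0; i < len2 - 1; i++)
            skiptable[(unsigned char) needle[i] & skiptablemask] = len2 - 1 - i;
    }
]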
@@ -1305,11 +1305,11 @@ text_position_next(int start_pos, TextPositionState *state)
/*
* No match, so use the haystack char at hptr to decide how
- * far to advance. If the needle had any occurrence of that
+ * far to advance. If the needle had any occurrence of that
* character (or more precisely, one sharing the same
* skiptable entry) before its last character, then we advance
* far enough to align the last such needle character with
- * that haystack position. Otherwise we can advance by the
+ * that haystack position. Otherwise we can advance by the
* whole needle length.
*/
hptr += state->skiptable[*hptr & skiptablemask];
@@ -1344,7 +1344,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
/*
* Unfortunately, there is no strncoll(), so in the non-C locale case we
- * have to do some memory copying. This turns out to be significantly
+ * have to do some memory copying. This turns out to be significantly
* slower, so we optimize the case where LC_COLLATE is C. We also try to
* optimize relatively-short strings by avoiding palloc/pfree overhead.
*/
@@ -2334,7 +2334,7 @@ textToQualifiedNameList(text *textval)
* SplitIdentifierString --- parse a string containing identifiers
*
* This is the guts of textToQualifiedNameList, and is exported for use in
- * other situations such as parsing GUC variables. In the GUC case, it's
+ * other situations such as parsing GUC variables. In the GUC case, it's
* important to avoid memory leaks, so the API is designed to minimize the
* amount of stuff that needs to be allocated and freed.
*
@@ -2342,7 +2342,7 @@ textToQualifiedNameList(text *textval)
* rawstring: the input string; must be overwritable! On return, it's
* been modified to contain the separated identifiers.
* separator: the separator punctuation expected between identifiers
- * (typically '.' or ','). Whitespace may also appear around
+ * (typically '.' or ','). Whitespace may also appear around
* identifiers.
* Outputs:
* namelist: filled with a palloc'd list of pointers to identifiers within
@@ -2411,7 +2411,7 @@ SplitIdentifierString(char *rawstring, char separator,
*
* XXX because we want to overwrite the input in-place, we cannot
* support a downcasing transformation that increases the string
- * length. This is not a problem given the current implementation
+ * length. This is not a problem given the current implementation
* of downcase_truncate_identifier, but we'll probably have to do
* something about this someday.
*/
@@ -2468,7 +2468,7 @@ SplitIdentifierString(char *rawstring, char separator,
* Inputs:
* rawstring: the input string; must be modifiable!
* separator: the separator punctuation expected between directories
- * (typically ',' or ';'). Whitespace may also appear around
+ * (typically ',' or ';'). Whitespace may also appear around
* directories.
* Outputs:
* namelist: filled with a palloc'd list of directory names.
@@ -2875,7 +2875,7 @@ check_replace_text_has_escape_char(const text *replace_text)
* appendStringInfoRegexpSubstr
*
* Append replace_text to str, substituting regexp back references for
- * \n escapes. start_ptr is the start of the match in the source string,
+ * \n escapes. start_ptr is the start of the match in the source string,
* at logical character position data_pos.
*/
static void
@@ -2958,7 +2958,7 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
if (so != -1 && eo != -1)
{
/*
- * Copy the text that is back reference of regexp. Note so and eo
+ * Copy the text that is back reference of regexp. Note so and eo
* are counted in characters not bytes.
*/
char *chunk_start;
@@ -3836,7 +3836,7 @@ concat_internal(const char *sepstr, int argidx,
/*
* Non-null argument had better be an array. We assume that any call
* context that could let get_fn_expr_variadic return true will have
- * checked that a VARIADIC-labeled parameter actually is an array. So
+ * checked that a VARIADIC-labeled parameter actually is an array. So
* it should be okay to just Assert that it's an array rather than
* doing a full-fledged error check.
*/
@@ -4237,7 +4237,7 @@ text_format(PG_FUNCTION_ARGS)
/*
* Get the appropriate typOutput function, reusing previous one if
- * same type as previous argument. That's particularly useful in the
+ * same type as previous argument. That's particularly useful in the
* variadic-array case, but often saves work even for ordinary calls.
*/
if (typid != prev_type)
@@ -4329,12 +4329,12 @@ text_format_parse_digits(const char **ptr, const char *end_ptr, int *value)
*
* Inputs are start_ptr (the position after '%') and end_ptr (string end + 1).
* Output parameters:
- * argpos: argument position for value to be printed. -1 means unspecified.
- * widthpos: argument position for width. Zero means the argument position
+ * argpos: argument position for value to be printed. -1 means unspecified.
+ * widthpos: argument position for width. Zero means the argument position
* was unspecified (ie, take the next arg) and -1 means no width
* argument (width was omitted or specified as a constant).
* flags: bitmask of flags.
- * width: directly-specified width value. Zero means the width was omitted
+ * width: directly-specified width value. Zero means the width was omitted
* (note it's not necessary to distinguish this case from an explicit
* zero width value).
*
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index 765469c623e..422be69bd6d 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -19,7 +19,7 @@
* fail. For one thing, this avoids having to manage variant catalog
* installations. But it also has nice effects such as that you can
* dump a database containing XML type data even if the server is not
- * linked with libxml. Thus, make sure xml_out() works even if nothing
+ * linked with libxml. Thus, make sure xml_out() works even if nothing
* else does.
*/
@@ -286,7 +286,7 @@ xml_out(PG_FUNCTION_ARGS)
xmltype *x = PG_GETARG_XML_P(0);
/*
- * xml_out removes the encoding property in all cases. This is because we
+ * xml_out removes the encoding property in all cases. This is because we
* cannot control from here whether the datum will be converted to a
* different client encoding, so we'd do more harm than good by including
* it.
@@ -454,7 +454,7 @@ xmlcomment(PG_FUNCTION_ARGS)
/*
* TODO: xmlconcat needs to merge the notations and unparsed entities
- * of the argument values. Not very important in practice, though.
+ * of the argument values. Not very important in practice, though.
*/
xmltype *
xmlconcat(List *args)
@@ -589,7 +589,7 @@ xmlelement(XmlExprState *xmlExpr, ExprContext *econtext)
/*
* We first evaluate all the arguments, then start up libxml and create
- * the result. This avoids issues if one of the arguments involves a call
+ * the result. This avoids issues if one of the arguments involves a call
* to some other function or subsystem that wants to use libxml on its own
* terms.
*/
@@ -926,7 +926,7 @@ pg_xml_init_library(void)
* pg_xml_init --- set up for use of libxml and register an error handler
*
* This should be called by each function that is about to use libxml
- * facilities and requires error handling. It initializes libxml with
+ * facilities and requires error handling. It initializes libxml with
* pg_xml_init_library() and establishes our libxml error handler.
*
* strictness determines which errors are reported and which are ignored.
@@ -972,7 +972,7 @@ pg_xml_init(PgXmlStrictness strictness)
/*
* Verify that xmlSetStructuredErrorFunc set the context variable we
- * expected it to. If not, the error context pointer we just saved is not
+ * expected it to. If not, the error context pointer we just saved is not
* the correct thing to restore, and since that leaves us without a way to
* restore the context in pg_xml_done, we must fail.
*
@@ -1129,7 +1129,7 @@ parse_xml_decl(const xmlChar *str, size_t *lenp,
int utf8len;
/*
- * Only initialize libxml. We don't need error handling here, but we do
+ * Only initialize libxml. We don't need error handling here, but we do
* need to make sure libxml is initialized before calling any of its
* functions. Note that this is safe (and a no-op) if caller has already
* done pg_xml_init().
@@ -1272,7 +1272,7 @@ finished:
/*
* Write an XML declaration. On output, we adjust the XML declaration
- * as follows. (These rules are the moral equivalent of the clause
+ * as follows. (These rules are the moral equivalent of the clause
* "Serialization of an XML value" in the SQL standard.)
*
* We try to avoid generating an XML declaration if possible. This is
@@ -1496,7 +1496,7 @@ xml_pstrdup(const char *string)
/*
* xmlPgEntityLoader --- entity loader callback function
*
- * Silently prevent any external entity URL from being loaded. We don't want
+ * Silently prevent any external entity URL from being loaded. We don't want
* to throw an error, so instead make the entity appear to expand to an empty
* string.
*
@@ -1665,8 +1665,8 @@ xml_errorHandler(void *data, xmlErrorPtr error)
chopStringInfoNewlines(errorBuf);
/*
- * Legacy error handling mode. err_occurred is never set, we just add the
- * message to err_buf. This mode exists because the xml2 contrib module
+ * Legacy error handling mode. err_occurred is never set, we just add the
+ * message to err_buf. This mode exists because the xml2 contrib module
* uses our error-handling infrastructure, but we don't want to change its
* behaviour since it's deprecated anyway. This is also why we don't
* distinguish between notices, warnings and errors here --- the old-style
@@ -1887,7 +1887,7 @@ map_sql_identifier_to_xml_name(char *ident, bool fully_escaped,
static char *
unicode_to_sqlchar(pg_wchar c)
{
- char utf8string[8]; /* need room for trailing zero */
+ char utf8string[8]; /* need room for trailing zero */
char *result;
memset(utf8string, 0, sizeof(utf8string));
@@ -1939,8 +1939,8 @@ map_xml_name_to_sql_identifier(char *name)
*
* When xml_escape_strings is true, then certain characters in string
* values are replaced by entity references (&lt; etc.), as specified
- * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is
- * wanted. The false case is mainly useful when the resulting value
+ * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is
+ * wanted. The false case is mainly useful when the resulting value
* is used with xmlTextWriterWriteAttribute() to write out an
* attribute, because that function does the escaping itself.
*/
@@ -2221,13 +2221,13 @@ _SPI_strdup(const char *s)
*
* There are two kinds of mappings: Mapping SQL data (table contents)
* to XML documents, and mapping SQL structure (the "schema") to XML
- * Schema. And there are functions that do both at the same time.
+ * Schema. And there are functions that do both at the same time.
*
* Then you can map a database, a schema, or a table, each in both
* ways. This breaks down recursively: Mapping a database invokes
* mapping schemas, which invokes mapping tables, which invokes
* mapping rows, which invokes mapping columns, although you can't
- * call the last two from the outside. Because of this, there are a
+ * call the last two from the outside. Because of this, there are a
* number of xyz_internal() functions which are to be called both from
* the function manager wrapper and from some upper layer in a
* recursive call.
@@ -2236,7 +2236,7 @@ _SPI_strdup(const char *s)
* nulls, tableforest, and targetns mean.
*
* Some style guidelines for XML output: Use double quotes for quoting
- * XML attributes. Indent XML elements by two spaces, but remember
+ * XML attributes. Indent XML elements by two spaces, but remember
* that a lot of code is called recursively at different levels, so
* it's better not to indent rather than create output that indents
* and outdents weirdly. Add newlines to make the output look nice.
@@ -2400,12 +2400,12 @@ cursor_to_xml(PG_FUNCTION_ARGS)
* Write the start tag of the root element of a data mapping.
*
* top_level means that this is the very top level of the eventual
- * output. For example, when the user calls table_to_xml, then a call
+ * output. For example, when the user calls table_to_xml, then a call
* with a table name to this function is the top level. When the user
* calls database_to_xml, then a call with a schema name to this
* function is not the top level. If top_level is false, then the XML
* namespace declarations are omitted, because they supposedly already
- * appeared earlier in the output. Repeating them is not wrong, but
+ * appeared earlier in the output. Repeating them is not wrong, but
* it looks ugly.
*/
static void
@@ -2937,7 +2937,7 @@ map_multipart_sql_identifier_to_xml_name(char *a, char *b, char *c, char *d)
if (a)
appendStringInfoString(&result,
- map_sql_identifier_to_xml_name(a, true, true));
+ map_sql_identifier_to_xml_name(a, true, true));
if (b)
appendStringInfo(&result, ".%s",
map_sql_identifier_to_xml_name(b, true, true));
@@ -3348,7 +3348,7 @@ map_sql_typecoll_to_xmlschema_types(List *tupdesc_list)
* SQL/XML:2008 sections 9.5 and 9.6.
*
* (The distinction between 9.5 and 9.6 is basically that 9.6 adds
- * a name attribute, which this function does. The name-less version
+ * a name attribute, which this function does. The name-less version
* 9.5 doesn't appear to be required anywhere.)
*/
static const char *
@@ -3362,11 +3362,11 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
if (typeoid == XMLOID)
{
appendStringInfoString(&result,
- "<xsd:complexType mixed=\"true\">\n"
- " <xsd:sequence>\n"
- " <xsd:any name=\"element\" minOccurs=\"0\" maxOccurs=\"unbounded\" processContents=\"skip\"/>\n"
- " </xsd:sequence>\n"
- "</xsd:complexType>\n");
+ "<xsd:complexType mixed=\"true\">\n"
+ " <xsd:sequence>\n"
+ " <xsd:any name=\"element\" minOccurs=\"0\" maxOccurs=\"unbounded\" processContents=\"skip\"/>\n"
+ " </xsd:sequence>\n"
+ "</xsd:complexType>\n");
}
else
{
@@ -3440,12 +3440,12 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
case FLOAT8OID:
appendStringInfoString(&result,
- " <xsd:restriction base=\"xsd:double\"></xsd:restriction>\n");
+ " <xsd:restriction base=\"xsd:double\"></xsd:restriction>\n");
break;
case BOOLOID:
appendStringInfoString(&result,
- " <xsd:restriction base=\"xsd:boolean\"></xsd:restriction>\n");
+ " <xsd:restriction base=\"xsd:boolean\"></xsd:restriction>\n");
break;
case TIMEOID:
@@ -3496,9 +3496,9 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
case DATEOID:
appendStringInfoString(&result,
- " <xsd:restriction base=\"xsd:date\">\n"
- " <xsd:pattern value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}\"/>\n"
- " </xsd:restriction>\n");
+ " <xsd:restriction base=\"xsd:date\">\n"
+ " <xsd:pattern value=\"\\p{Nd}{4}-\\p{Nd}{2}-\\p{Nd}{2}\"/>\n"
+ " </xsd:restriction>\n");
break;
default:
@@ -3524,7 +3524,7 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
/*
* Map an SQL row to an XML element, taking the row from the active
- * SPI cursor. See also SQL/XML:2008 section 9.10.
+ * SPI cursor. See also SQL/XML:2008 section 9.10.
*/
static void
SPI_sql_row_to_xmlelement(int rownum, StringInfo result, char *tablename,
diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c
index 516f40ba84e..5fcf0dd7c75 100644
--- a/src/backend/utils/cache/attoptcache.c
+++ b/src/backend/utils/cache/attoptcache.c
@@ -46,7 +46,7 @@ typedef struct
* Flush all cache entries when pg_attribute is updated.
*
* When pg_attribute is updated, we must flush the cache entry at least
- * for that attribute. Currently, we just flush them all. Since attribute
+ * for that attribute. Currently, we just flush them all. Since attribute
* options are not currently used in performance-critical paths (such as
* query execution), this seems OK.
*/
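[Editorial sketch] A simplified standalone model of that flush-them-all policy (the real callback walks a dynahash keyed by relid/attnum): any pg_attribute change simply marks every cached entry stale, and later lookups rebuild them.

#include <stdio.h>

#define NCACHED 4

typedef struct { int valid; int options; } AttoptEntry;

static AttoptEntry cache[NCACHED];

/* The invalidation callback: don't hunt for the one affected
 * attribute, just invalidate everything. */
static void
invalidate_attopt_cache(void)
{
    for (int i = 0; i < NCACHED; i++)
        cache[i].valid = 0;
}

int
main(void)
{
    cache[0].valid = 1;
    cache[0].options = 42;
    invalidate_attopt_cache();      /* any pg_attribute update */
    printf("entry 0 valid: %d\n", cache[0].valid);
    return 0;
}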
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index d17b6b0ba58..954b435bffa 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -836,9 +836,10 @@ RehashCatCache(CatCache *cp)
for (i = 0; i < cp->cc_nbuckets; i++)
{
dlist_mutable_iter iter;
+
dlist_foreach_modify(iter, &cp->cc_bucket[i])
{
- CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
+ CatCTup *ct = dlist_container(CatCTup, cache_elem, iter.cur);
int hashIndex = HASH_INDEX(ct->hash_value, newnbuckets);
dlist_delete(iter.cur);
@@ -856,7 +857,7 @@ RehashCatCache(CatCache *cp)
* CatalogCacheInitializeCache
*
* This function does final initialization of a catcache: obtain the tuple
- * descriptor and set up the hash and equality function links. We assume
+ * descriptor and set up the hash and equality function links. We assume
* that the relcache entry can be opened at this point!
*/
#ifdef CACHEDEBUG
@@ -1081,7 +1082,7 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
* if necessary (on the first access to a particular cache).
*
* The result is NULL if not found, or a pointer to a HeapTuple in
- * the cache. The caller must not modify the tuple, and must call
+ * the cache. The caller must not modify the tuple, and must call
* ReleaseCatCache() when done with it.
*
* The search key values should be expressed as Datums of the key columns'
@@ -1214,8 +1215,8 @@ SearchCatCache(CatCache *cache,
* the relation --- for example, due to shared-cache-inval messages being
* processed during heap_open(). This is OK. It's even possible for one
* of those lookups to find and enter the very same tuple we are trying to
- * fetch here. If that happens, we will enter a second copy of the tuple
- * into the cache. The first copy will never be referenced again, and
+ * fetch here. If that happens, we will enter a second copy of the tuple
+ * into the cache. The first copy will never be referenced again, and
* will eventually age out of the cache, so there's no functional problem.
* This case is rare enough that it's not worth expending extra cycles to
* detect.
@@ -1254,7 +1255,7 @@ SearchCatCache(CatCache *cache,
*
* In bootstrap mode, we don't build negative entries, because the cache
* invalidation mechanism isn't alive and can't clear them if the tuple
- * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
+ * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
* cache inval for that.)
*/
if (ct == NULL)
@@ -1584,7 +1585,7 @@ SearchCatCacheList(CatCache *cache,
/*
* We are now past the last thing that could trigger an elog before we
* have finished building the CatCList and remembering it in the
- * resource owner. So it's OK to fall out of the PG_TRY, and indeed
+ * resource owner. So it's OK to fall out of the PG_TRY, and indeed
* we'd better do so before we start marking the members as belonging
* to the list.
*/
@@ -1673,7 +1674,7 @@ ReleaseCatCacheList(CatCList *list)
/*
* CatalogCacheCreateEntry
* Create a new CatCTup entry, copying the given HeapTuple and other
- * supplied data into it. The new entry initially has refcount 0.
+ * supplied data into it. The new entry initially has refcount 0.
*/
static CatCTup *
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
@@ -1724,8 +1725,8 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
CacheHdr->ch_ntup++;
/*
- * If the hash table has become too full, enlarge the buckets array.
- * Quite arbitrarily, we enlarge when fill factor > 2.
+ * If the hash table has become too full, enlarge the buckets array. Quite
+ * arbitrarily, we enlarge when fill factor > 2.
*/
if (cache->cc_ntup > cache->cc_nbuckets * 2)
RehashCatCache(cache);
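[Editorial sketch] The enlargement rule is easy to model standalone (simple chaining here; the backend version moves dlist nodes between bucket lists, as in the RehashCatCache hunk above): insert into the bucket the hash maps to, and double the bucket array once the tuple count exceeds twice the bucket count.

#include <stdio.h>
#include <stdlib.h>

typedef struct Ent { unsigned hash; struct Ent *next; } Ent;

static Ent **buckets;
static int nbuckets = 4;
static int ntup = 0;

static void
rehash(void)
{
    int newn = nbuckets * 2;
    Ent **newb = calloc(newn, sizeof(Ent *));

    /* Move every entry to the bucket its hash maps to under the new size. */
    for (int i = 0; i < nbuckets; i++)
        while (buckets[i])
        {
            Ent *e = buckets[i];

            buckets[i] = e->next;
            e->next = newb[e->hash % newn];
            newb[e->hash % newn] = e;
        }
    free(buckets);
    buckets = newb;
    nbuckets = newn;
}

static void
insert(unsigned hash)
{
    Ent *e = malloc(sizeof(Ent));

    e->hash = hash;
    e->next = buckets[hash % nbuckets];
    buckets[hash % nbuckets] = e;

    /* Quite arbitrarily, enlarge when fill factor exceeds 2. */
    if (++ntup > nbuckets * 2)
        rehash();
}

int
main(void)
{
    buckets = calloc(nbuckets, sizeof(Ent *));
    for (unsigned h = 0; h < 40; h++)
        insert(h * 2654435761u);
    printf("%d tuples in %d buckets\n", ntup, nbuckets);
    return 0;
}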
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 115bcac5d23..59714697c69 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -29,23 +29,23 @@
*
* If we successfully complete the transaction, we have to broadcast all
* these invalidation events to other backends (via the SI message queue)
- * so that they can flush obsolete entries from their caches. Note we have
+ * so that they can flush obsolete entries from their caches. Note we have
* to record the transaction commit before sending SI messages, otherwise
* the other backends won't see our updated tuples as good.
*
* When a subtransaction aborts, we can process and discard any events
- * it has queued. When a subtransaction commits, we just add its events
+ * it has queued. When a subtransaction commits, we just add its events
* to the pending lists of the parent transaction.
*
* In short, we need to remember until xact end every insert or delete
- * of a tuple that might be in the system caches. Updates are treated as
+ * of a tuple that might be in the system caches. Updates are treated as
* two events, delete + insert, for simplicity. (If the update doesn't
* change the tuple hash value, catcache.c optimizes this into one event.)
*
* We do not need to register EVERY tuple operation in this way, just those
- * on tuples in relations that have associated catcaches. We do, however,
+ * on tuples in relations that have associated catcaches. We do, however,
* have to register every operation on every tuple that *could* be in a
- * catcache, whether or not it currently is in our cache. Also, if the
+ * catcache, whether or not it currently is in our cache. Also, if the
* tuple is in a relation that has multiple catcaches, we need to register
* an invalidation message for each such catcache. catcache.c's
* PrepareToInvalidateCacheTuple() routine provides the knowledge of which
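[Editorial sketch] A minimal standalone model of the queue-until-commit behavior this overview describes (the real code also keeps per-subtransaction lists, does local processing at command end, and distinguishes catcache from relcache messages): events are remembered until transaction end, then broadcast on commit or discarded on abort.

#include <stdbool.h>
#include <stdio.h>

#define MAXMSGS 32

static int pending[MAXMSGS];    /* invalidation events queued this xact */
static int npending = 0;

static void
register_inval(int msg)
{
    pending[npending++] = msg;  /* remember until transaction end */
}

static void
at_eoxact_inval(bool commit)
{
    if (commit)
        for (int i = 0; i < npending; i++)
            printf("broadcast SI message %d\n", pending[i]);
    /* on abort the queued events are simply discarded */
    npending = 0;
}

int
main(void)
{
    register_inval(1);          /* e.g. a catalog tuple was updated: */
    register_inval(2);          /* queued as delete + insert events  */
    at_eoxact_inval(true);
    return 0;
}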
@@ -113,7 +113,7 @@
/*
* To minimize palloc traffic, we keep pending requests in successively-
* larger chunks (a slightly more sophisticated version of an expansible
- * array). All request types can be stored as SharedInvalidationMessage
+ * array). All request types can be stored as SharedInvalidationMessage
* records. The ordering of requests within a list is never significant.
*/
typedef struct InvalidationChunk
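[Editorial sketch] The successively-larger-chunks idea can be shown standalone (field names follow the struct above; the starting size and growth factor here are illustrative): append into the head chunk, and when it fills, push a new, larger chunk onto the list.

#include <stdio.h>
#include <stdlib.h>

typedef struct Chunk
{
    int          nitems;        /* items stored so far in this chunk */
    int          maxitems;      /* allocated size of msgs[] */
    struct Chunk *next;
    int          msgs[];        /* flexible array member */
} Chunk;

/* Append one message, starting a new, larger chunk when the head is full. */
static void
add_msg(Chunk **listhead, int msg)
{
    Chunk *head = *listhead;

    if (head == NULL || head->nitems >= head->maxitems)
    {
        int newsize = head ? head->maxitems * 2 : 4;
        Chunk *c = malloc(sizeof(Chunk) + newsize * sizeof(int));

        c->nitems = 0;
        c->maxitems = newsize;
        c->next = head;         /* ordering within the list is insignificant */
        *listhead = head = c;
    }
    head->msgs[head->nitems++] = msg;
}

int
main(void)
{
    Chunk *list = NULL;

    for (int i = 0; i < 10; i++)
        add_msg(&list, i);
    printf("head chunk holds %d of %d slots\n", list->nitems, list->maxitems);
    return 0;
}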
@@ -650,7 +650,7 @@ AcceptInvalidationMessages(void)
*
* If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY. This
* slows things by at least a factor of 10000, so I wouldn't suggest
- * trying to run the entire regression test suite that way. It's useful to try
+ * trying to run the entire regression test suite that way. It's useful to try
* a few simple tests, to make sure that cache reload isn't subject to
* internal cache-flush hazards, but after you've done a few thousand
* recursive reloads it's unlikely you'll learn more.
@@ -863,12 +863,12 @@ ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
* If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
* to the shared invalidation message queue. Note that these will be read
* not only by other backends, but also by our own backend at the next
- * transaction start (via AcceptInvalidationMessages). This means that
+ * transaction start (via AcceptInvalidationMessages). This means that
* we can skip immediate local processing of anything that's still in
* CurrentCmdInvalidMsgs, and just send that list out too.
*
* If not isCommit, we are aborting, and must locally process the messages
- * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
+ * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
* since they'll not have seen our changed tuples anyway. We can forget
* about CurrentCmdInvalidMsgs too, since those changes haven't touched
* the caches yet.
@@ -927,11 +927,11 @@ AtEOXact_Inval(bool isCommit)
* parent's PriorCmdInvalidMsgs list.
*
* If not isCommit, we are aborting, and must locally process the messages
- * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
+ * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
* We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
* touched the caches yet.
*
- * In any case, pop the transaction stack. We need not physically free memory
+ * In any case, pop the transaction stack. We need not physically free memory
* here, since CurTransactionContext is about to be emptied anyway
* (if aborting). Beware of the possibility of aborting the same nesting
* level twice, though.
@@ -987,7 +987,7 @@ AtEOSubXact_Inval(bool isCommit)
* in a transaction.
*
* Here, we send no messages to the shared queue, since we don't know yet if
- * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
+ * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
* list, so as to flush our caches of any entries we have outdated in the
* current command. We then move the current-cmd list over to become part
* of the prior-cmds list.
@@ -1094,7 +1094,7 @@ CacheInvalidateHeapTuple(Relation relation,
* This essentially means that only backends in this same database
* will react to the relcache flush request. This is in fact
* appropriate, since only those backends could see our pg_attribute
- * change anyway. It looks a bit ugly though. (In practice, shared
+ * change anyway. It looks a bit ugly though. (In practice, shared
* relations can't have schema changes after bootstrap, so we should
* never come here for a shared rel anyway.)
*/
@@ -1106,7 +1106,7 @@ CacheInvalidateHeapTuple(Relation relation,
/*
* When a pg_index row is updated, we should send out a relcache inval
- * for the index relation. As above, we don't know the shared status
+ * for the index relation. As above, we don't know the shared status
* of the index, but in practice it doesn't matter since indexes of
* shared catalogs can't have such updates.
*/
@@ -1214,7 +1214,7 @@ CacheInvalidateRelcacheByRelid(Oid relid)
*
* Sending this type of invalidation msg forces other backends to close open
* smgr entries for the rel. This should be done to flush dangling open-file
- * references when the physical rel is being dropped or truncated. Because
+ * references when the physical rel is being dropped or truncated. Because
* these are nontransactional (i.e., not-rollback-able) operations, we just
* send the inval message immediately without any queuing.
*
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index a4ce7163ea6..4b5ef99531b 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -186,13 +186,13 @@ get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype,
* (This indicates that the operator is not a valid ordering operator.)
*
* Note: the operator could be registered in multiple families, for example
- * if someone were to build a "reverse sort" opfamily. This would result in
+ * if someone were to build a "reverse sort" opfamily. This would result in
* uncertainty as to whether "ORDER BY USING op" would default to NULLS FIRST
* or NULLS LAST, as well as inefficient planning due to failure to match up
* pathkeys that should be the same. So we want a determinate result here.
* Because of the way the syscache search works, we'll use the interpretation
* associated with the opfamily with smallest OID, which is probably
- * determinate enough. Since there is no longer any particularly good reason
+ * determinate enough. Since there is no longer any particularly good reason
* to build reverse-sort opfamilies, it doesn't seem worth expending any
* additional effort on ensuring consistency.
*/
@@ -403,7 +403,7 @@ get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type)
*
* The planner currently uses simple equal() tests to compare the lists
* returned by this function, which makes the list order relevant, though
- * strictly speaking it should not be. Because of the way syscache list
+ * strictly speaking it should not be. Because of the way syscache list
* searches are handled, in normal operation the result will be sorted by OID
* so everything works fine. If running with system index usage disabled,
* the result ordering is unspecified and hence the planner might fail to
@@ -1212,7 +1212,7 @@ op_mergejoinable(Oid opno, Oid inputtype)
*
* In some cases (currently only array_eq), hashjoinability depends on the
* specific input data type the operator is invoked for, so that must be
- * passed as well. We currently assume that only one input's type is needed
+ * passed as well. We currently assume that only one input's type is needed
* to check this --- by convention, pass the left input's data type.
*/
bool
@@ -1880,7 +1880,7 @@ get_typbyval(Oid typid)
* A two-fer: given the type OID, return both typlen and typbyval.
*
* Since both pieces of info are needed to know how to copy a Datum,
- * many places need both. Might as well get them with one cache lookup
+ * many places need both. Might as well get them with one cache lookup
* instead of two. Also, this routine raises an error instead of
* returning a bogus value when given a bad type OID.
*/
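[Editorial sketch] Typical usage of the two-fer, as a backend-style fragment: get_typlenbyval() and datumCopy() are the existing routines named in these files; the small wrapper around them is hypothetical.

#include "postgres.h"

#include "utils/datum.h"
#include "utils/lsyscache.h"

/* Hypothetical helper: one cache lookup supplies both pieces of info
 * needed to copy a Datum of the given type. */
static Datum
copy_datum_of_type(Datum value, Oid typid)
{
    int16 typlen;
    bool  typbyval;

    get_typlenbyval(typid, &typlen, &typbyval);
    return datumCopy(value, typbyval, typlen);
}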
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index d492cbb55e7..d03d3b3cdff 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -11,7 +11,7 @@
* The logic for choosing generic or custom plans is in choose_custom_plan,
* which see for comments.
*
- * Cache invalidation is driven off sinval events. Any CachedPlanSource
+ * Cache invalidation is driven off sinval events. Any CachedPlanSource
* that matches the event is marked invalid, as is its generic CachedPlan
* if it has one. When (and if) the next demand for a cached plan occurs,
* parse analysis and rewrite is repeated to build a new valid query tree,
@@ -27,7 +27,7 @@
* caller to notice changes and cope with them.
*
* Currently, we track exactly the dependencies of plans on relations and
- * user-defined functions. On relcache invalidation events or pg_proc
+ * user-defined functions. On relcache invalidation events or pg_proc
* syscache invalidation events, we invalidate just those plans that depend
* on the particular object being modified. (Note: this scheme assumes
* that any table modification that requires replanning will generate a
@@ -123,7 +123,7 @@ InitPlanCache(void)
* CreateCachedPlan: initially create a plan cache entry.
*
* Creation of a cached plan is divided into two steps, CreateCachedPlan and
- * CompleteCachedPlan. CreateCachedPlan should be called after running the
+ * CompleteCachedPlan. CreateCachedPlan should be called after running the
* query through raw_parser, but before doing parse analysis and rewrite;
* CompleteCachedPlan is called after that. The reason for this arrangement
* is that it can save one round of copying of the raw parse tree, since
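[Editorial sketch] The two-step sequence, sketched against the 9.4-era declarations (arguments simplified: no query parameters, default cursor options; the commandTag value is illustrative). Parse analysis and rewrite happen between the two calls, which is exactly why the raw tree need not be copied up front.

#include "postgres.h"

#include "nodes/pg_list.h"
#include "tcop/tcopprot.h"
#include "utils/plancache.h"

static CachedPlanSource *
prepare_query(Node *raw_parse_tree, const char *query_string)
{
    CachedPlanSource *plansource;
    List *querytree_list;

    /* Step 1: after raw_parser, before parse analysis. */
    plansource = CreateCachedPlan(raw_parse_tree, query_string, "SELECT");

    /* Analysis and rewrite run between the two steps. */
    querytree_list = pg_analyze_and_rewrite(raw_parse_tree, query_string,
                                            NULL, 0);

    /* Step 2: hand over the analyzed trees plus parameter info. */
    CompleteCachedPlan(plansource, querytree_list, NULL,
                       NULL, 0,     /* no parameters */
                       NULL, NULL,  /* no parser setup hook */
                       0,           /* default cursor options */
                       true);       /* fixed result tupdesc */

    SaveCachedPlan(plansource);     /* optional: make it long-lived */
    return plansource;
}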
@@ -217,7 +217,7 @@ CreateCachedPlan(Node *raw_parse_tree,
* in that context.
*
* A one-shot plan cannot be saved or copied, since we make no effort to
- * preserve the raw parse tree unmodified. There is also no support for
+ * preserve the raw parse tree unmodified. There is also no support for
* invalidation, so plan use must be completed in the current transaction,
* and DDL that might invalidate the querytree_list must be avoided as well.
*
@@ -274,13 +274,13 @@ CreateOneShotCachedPlan(Node *raw_parse_tree,
* CompleteCachedPlan: second step of creating a plan cache entry.
*
* Pass in the analyzed-and-rewritten form of the query, as well as the
- * required subsidiary data about parameters and such. All passed values will
+ * required subsidiary data about parameters and such. All passed values will
* be copied into the CachedPlanSource's memory, except as specified below.
* After this is called, GetCachedPlan can be called to obtain a plan, and
* optionally the CachedPlanSource can be saved using SaveCachedPlan.
*
* If querytree_context is not NULL, the querytree_list must be stored in that
- * context (but the other parameters need not be). The querytree_list is not
+ * context (but the other parameters need not be). The querytree_list is not
* copied, rather the given context is kept as the initial query_context of
* the CachedPlanSource. (It should have been created as a child of the
* caller's working memory context, but it will now be reparented to belong
@@ -374,7 +374,7 @@ CompleteCachedPlan(CachedPlanSource *plansource,
&plansource->invalItems);
/*
- * Also save the current search_path in the query_context. (This
+ * Also save the current search_path in the query_context. (This
* should not generate much extra cruft either, since almost certainly
* the path is already valid.) Again, we don't really need this for
* one-shot plans; and we *must* skip this for transaction control
@@ -421,7 +421,7 @@ CompleteCachedPlan(CachedPlanSource *plansource,
* This is guaranteed not to throw error, except for the caller-error case
* of trying to save a one-shot plan. Callers typically depend on that
* since this is called just before or just after adding a pointer to the
- * CachedPlanSource to some permanent data structure of their own. Up until
+ * CachedPlanSource to some permanent data structure of their own. Up until
* this is done, a CachedPlanSource is just transient data that will go away
* automatically on transaction abort.
*/
@@ -442,13 +442,13 @@ SaveCachedPlan(CachedPlanSource *plansource)
* plans from the CachedPlanSource. If there is a generic plan, moving it
* into CacheMemoryContext would be pretty risky since it's unclear
* whether the caller has taken suitable care with making references
- * long-lived. Best thing to do seems to be to discard the plan.
+ * long-lived. Best thing to do seems to be to discard the plan.
*/
ReleaseGenericPlan(plansource);
/*
* Reparent the source memory context under CacheMemoryContext so that it
- * will live indefinitely. The query_context follows along since it's
+ * will live indefinitely. The query_context follows along since it's
* already a child of the other one.
*/
MemoryContextSetParent(plansource->context, CacheMemoryContext);
@@ -466,7 +466,7 @@ SaveCachedPlan(CachedPlanSource *plansource)
* DropCachedPlan: destroy a cached plan.
*
* Actually this only destroys the CachedPlanSource: any referenced CachedPlan
- * is released, but not destroyed until its refcount goes to zero. That
+ * is released, but not destroyed until its refcount goes to zero. That
* handles the situation where DropCachedPlan is called while the plan is
* still in use.
*/
@@ -617,7 +617,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
plansource->search_path = NULL;
/*
- * Free the query_context. We don't really expect MemoryContextDelete to
+ * Free the query_context. We don't really expect MemoryContextDelete to
* fail, but just in case, make sure the CachedPlanSource is left in a
* reasonably sane state. (The generic plan won't get unlinked yet, but
* that's acceptable.)
@@ -675,7 +675,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
PopActiveSnapshot();
/*
- * Check or update the result tupdesc. XXX should we use a weaker
+ * Check or update the result tupdesc. XXX should we use a weaker
* condition than equalTupleDescs() here?
*
* We assume the parameter types didn't change from the first time, so no
@@ -726,7 +726,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
&plansource->invalItems);
/*
- * Also save the current search_path in the query_context. (This should
+ * Also save the current search_path in the query_context. (This should
* not generate much extra cruft either, since almost certainly the path
* is already valid.)
*/
@@ -860,7 +860,7 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
* we ought to be holding sufficient locks to prevent any invalidation.
* However, if we're building a custom plan after having built and
* rejected a generic plan, it's possible to reach here with is_valid
- * false due to an invalidation while making the generic plan. In theory
+ * false due to an invalidation while making the generic plan. In theory
* the invalidation must be a false positive, perhaps a consequence of an
* sinval reset event or the CLOBBER_CACHE_ALWAYS debug code. But for
* safety, let's treat it as real and redo the RevalidateCachedQuery call.
@@ -1043,7 +1043,7 @@ cached_plan_cost(CachedPlan *plan, bool include_planner)
* on the number of relations in the finished plan's rangetable.
* Join planning effort actually scales much worse than linearly
* in the number of relations --- but only until the join collapse
- * limits kick in. Also, while inheritance child relations surely
+ * limits kick in. Also, while inheritance child relations surely
* add to planning effort, they don't make the join situation
* worse. So the actual shape of the planning cost curve versus
* number of relations isn't all that obvious. It will take
@@ -1153,7 +1153,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
/*
* If we choose to plan again, we need to re-copy the query_list,
- * since the planner probably scribbled on it. We can force
+ * since the planner probably scribbled on it. We can force
* BuildCachedPlan to do that by passing NIL.
*/
qlist = NIL;
@@ -1203,7 +1203,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
*
* Note: useResOwner = false is used for releasing references that are in
* persistent data structures, such as the parent CachedPlanSource or a
- * Portal. Transient references should be protected by a resource owner.
+ * Portal. Transient references should be protected by a resource owner.
*/
void
ReleaseCachedPlan(CachedPlan *plan, bool useResOwner)
@@ -1267,7 +1267,7 @@ CachedPlanSetParentContext(CachedPlanSource *plansource,
*
* This is a convenience routine that does the equivalent of
* CreateCachedPlan + CompleteCachedPlan, using the data stored in the
- * input CachedPlanSource. The result is therefore "unsaved" (regardless
+ * input CachedPlanSource. The result is therefore "unsaved" (regardless
* of the state of the source), and we don't copy any generic plan either.
* The result will be currently valid, or not, the same as the source.
*/
@@ -1420,7 +1420,7 @@ AcquireExecutorLocks(List *stmt_list, bool acquire)
{
/*
* Ignore utility statements, except those (such as EXPLAIN) that
- * contain a parsed-but-not-planned query. Note: it's okay to use
+ * contain a parsed-but-not-planned query. Note: it's okay to use
* ScanQueryForLocks, even though the query hasn't been through
* rule rewriting, because rewriting doesn't change the query
* representation.
@@ -1616,7 +1616,7 @@ plan_list_is_transient(List *stmt_list)
/*
* PlanCacheComputeResultDesc: given a list of analyzed-and-rewritten Queries,
- * determine the result tupledesc it will produce. Returns NULL if the
+ * determine the result tupledesc it will produce. Returns NULL if the
* execution will not return tuples.
*
* Note: the result is created or copied into current memory context.
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index c947bff4fca..5ff0d9e4fdf 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -124,7 +124,7 @@ bool criticalSharedRelcachesBuilt = false;
/*
* This counter counts relcache inval events received since backend startup
- * (but only for rels that are actually in cache). Presently, we use it only
+ * (but only for rels that are actually in cache). Presently, we use it only
* to detect whether data about to be written by write_relcache_init_file()
* might already be obsolete.
*/
@@ -167,8 +167,8 @@ static bool eoxact_list_overflowed = false;
* we don't need to access individual items except at EOXact.
*/
static TupleDesc *EOXactTupleDescArray;
-static int NextEOXactTupleDescNum = 0;
-static int EOXactTupleDescArrayLen = 0;
+static int NextEOXactTupleDescNum = 0;
+static int EOXactTupleDescArrayLen = 0;
/*
* macros to manipulate the lookup hashtables
@@ -495,7 +495,7 @@ RelationBuildTupleDesc(Relation relation)
Int16GetDatum(0));
/*
- * Open pg_attribute and begin a scan. Force heap scan if we haven't yet
+ * Open pg_attribute and begin a scan. Force heap scan if we haven't yet
* built the critical relcache entries (this includes initdb and startup
* without a pg_internal.init file).
*/
@@ -558,7 +558,7 @@ RelationBuildTupleDesc(Relation relation)
/*
* The attcacheoff values we read from pg_attribute should all be -1
- * ("unknown"). Verify this if assert checking is on. They will be
+ * ("unknown"). Verify this if assert checking is on. They will be
* computed when and if needed during tuple access.
*/
#ifdef USE_ASSERT_CHECKING
@@ -572,7 +572,7 @@ RelationBuildTupleDesc(Relation relation)
/*
* However, we can easily set the attcacheoff value for the first
- * attribute: it must be zero. This eliminates the need for special cases
+ * attribute: it must be zero. This eliminates the need for special cases
* for attnum=1 that used to exist in fastgetattr() and index_getattr().
*/
if (relation->rd_rel->relnatts > 0)
@@ -628,7 +628,7 @@ RelationBuildTupleDesc(Relation relation)
* each relcache entry that has associated rules. The context is used
* just for rule info, not for any other subsidiary data of the relcache
* entry, because that keeps the update logic in RelationClearRelation()
- * manageable. The other subsidiary data structures are simple enough
+ * manageable. The other subsidiary data structures are simple enough
* to be easy to free explicitly, anyway.
*/
static void
@@ -736,9 +736,9 @@ RelationBuildRuleLock(Relation relation)
/*
* We want the rule's table references to be checked as though by the
- * table owner, not the user referencing the rule. Therefore, scan
+ * table owner, not the user referencing the rule. Therefore, scan
* through the rule's actions and set the checkAsUser field on all
- * rtable entries. We have to look at the qual as well, in case it
+ * rtable entries. We have to look at the qual as well, in case it
* contains sublinks.
*
* The reason for doing this when the rule is loaded, rather than when
@@ -1014,25 +1014,24 @@ RelationInitPhysicalAddr(Relation relation)
if (relation->rd_rel->relfilenode)
{
/*
- * Even if we are using a decoding snapshot that doesn't represent
- * the current state of the catalog we need to make sure the
- * filenode points to the current file since the older file will
- * be gone (or truncated). The new file will still contain older
- * rows so lookups in them will work correctly. This wouldn't work
- * correctly if rewrites were allowed to change the schema in a
- * noncompatible way, but those are prevented both on catalog
- * tables and on user tables declared as additional catalog
- * tables.
+ * Even if we are using a decoding snapshot that doesn't represent the
+ * current state of the catalog we need to make sure the filenode
+ * points to the current file since the older file will be gone (or
+ * truncated). The new file will still contain older rows so lookups
+ * in them will work correctly. This wouldn't work correctly if
+ * rewrites were allowed to change the schema in a noncompatible way,
+ * but those are prevented both on catalog tables and on user tables
+ * declared as additional catalog tables.
*/
if (HistoricSnapshotActive()
&& RelationIsAccessibleInLogicalDecoding(relation)
&& IsTransactionState())
{
- HeapTuple phys_tuple;
- Form_pg_class physrel;
+ HeapTuple phys_tuple;
+ Form_pg_class physrel;
phys_tuple = ScanPgRelation(RelationGetRelid(relation),
- RelationGetRelid(relation) != ClassOidIndexId,
+ RelationGetRelid(relation) != ClassOidIndexId,
true);
if (!HeapTupleIsValid(phys_tuple))
elog(ERROR, "could not find pg_class entry for %u",
@@ -1113,7 +1112,7 @@ RelationInitIndexAccessInfo(Relation relation)
amsupport = aform->amsupport;
/*
- * Make the private context to hold index access info. The reason we need
+ * Make the private context to hold index access info. The reason we need
* a context, and not just a couple of pallocs, is so that we won't leak
* any subsidiary info attached to fmgr lookup records.
*
@@ -1161,7 +1160,7 @@ RelationInitIndexAccessInfo(Relation relation)
/*
* indcollation cannot be referenced directly through the C struct,
- * because it comes after the variable-width indkey field. Must extract
+ * because it comes after the variable-width indkey field. Must extract
* the datum the hard way...
*/
indcollDatum = fastgetattr(relation->rd_indextuple,
@@ -1186,7 +1185,7 @@ RelationInitIndexAccessInfo(Relation relation)
/*
* Fill the support procedure OID array, as well as the info about
- * opfamilies and opclass input types. (aminfo and supportinfo are left
+ * opfamilies and opclass input types. (aminfo and supportinfo are left
* as zeroes, and are filled on-the-fly when used)
*/
IndexSupportInitialize(indclass, relation->rd_support,
@@ -1274,7 +1273,7 @@ IndexSupportInitialize(oidvector *indclass,
* Note there is no provision for flushing the cache. This is OK at the
* moment because there is no way to ALTER any interesting properties of an
* existing opclass --- all you can do is drop it, which will result in
- * a useless but harmless dead entry in the cache. To support altering
+ * a useless but harmless dead entry in the cache. To support altering
* opclass membership (not the same as opfamily membership!), we'd need to
* be able to flush this cache as well as the contents of relcache entries
* for indexes.
@@ -1383,7 +1382,7 @@ LookupOpclassInfo(Oid operatorClassOid,
heap_close(rel, AccessShareLock);
/*
- * Scan pg_amproc to obtain support procs for the opclass. We only fetch
+ * Scan pg_amproc to obtain support procs for the opclass. We only fetch
* the default ones (those with lefttype = righttype = opcintype).
*/
if (numSupport > 0)
@@ -1889,11 +1888,11 @@ RelationDestroyRelation(Relation relation, bool remember_tupdesc)
{
/*
* If we rebuilt a relcache entry during a transaction then it's
- * possible we did that because the TupDesc changed as the result
- * of an ALTER TABLE that ran at less than AccessExclusiveLock.
- * It's possible someone copied that TupDesc, in which case the
- * copy would point to free'd memory. So if we rebuild an entry
- * we keep the TupDesc around until end of transaction, to be safe.
+ * possible we did that because the TupDesc changed as the result of
+ * an ALTER TABLE that ran at less than AccessExclusiveLock. It's
+ * possible someone copied that TupDesc, in which case the copy would
+ * point to free'd memory. So if we rebuild an entry we keep the
+ * TupDesc around until end of transaction, to be safe.
*/
if (remember_tupdesc)
RememberToFreeTupleDescAtEOX(relation->rd_att);
@@ -1928,7 +1927,7 @@ RelationDestroyRelation(Relation relation, bool remember_tupdesc)
*
* NB: when rebuilding, we'd better hold some lock on the relation,
* else the catalog data we need to read could be changing under us.
- * Also, a rel to be rebuilt had better have refcnt > 0. This is because
+ * Also, a rel to be rebuilt had better have refcnt > 0. This is because
* an sinval reset could happen while we're accessing the catalogs, and
* the rel would get blown away underneath us by RelationCacheInvalidate
* if it has zero refcnt.
@@ -1951,7 +1950,7 @@ RelationClearRelation(Relation relation, bool rebuild)
/*
* Make sure smgr and lower levels close the relation's files, if they
* weren't closed already. If the relation is not getting deleted, the
- * next smgr access should reopen the files automatically. This ensures
+ * next smgr access should reopen the files automatically. This ensures
* that the low-level file access state is updated after, say, a vacuum
* truncation.
*/
@@ -2047,7 +2046,7 @@ RelationClearRelation(Relation relation, bool rebuild)
* over from the old entry). This is to avoid trouble in case an
* error causes us to lose control partway through. The old entry
* will still be marked !rd_isvalid, so we'll try to rebuild it again
- * on next access. Meanwhile it's not any less valid than it was
+ * on next access. Meanwhile it's not any less valid than it was
* before, so any code that might expect to continue accessing it
* isn't hurt by the rebuild failure. (Consider for example a
* subtransaction that ALTERs a table and then gets canceled partway
@@ -2237,7 +2236,7 @@ RelationCacheInvalidateEntry(Oid relationId)
/*
* RelationCacheInvalidate
* Blow away cached relation descriptors that have zero reference counts,
- * and rebuild those with positive reference counts. Also reset the smgr
+ * and rebuild those with positive reference counts. Also reset the smgr
* relation cache and re-read relation mapping data.
*
* This is currently used only to recover from SI message buffer overflow,
@@ -2250,7 +2249,7 @@ RelationCacheInvalidateEntry(Oid relationId)
* We do this in two phases: the first pass deletes deletable items, and
* the second one rebuilds the rebuildable items. This is essential for
* safety, because hash_seq_search only copes with concurrent deletion of
- * the element it is currently visiting. If a second SI overflow were to
+ * the element it is currently visiting. If a second SI overflow were to
* occur while we are walking the table, resulting in recursive entry to
* this routine, we could crash because the inner invocation blows away
* the entry next to be visited by the outer scan. But this way is OK,
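[Editorial sketch] A standalone model of the two-phase scan (plain arrays instead of a dynahash; in the backend, phase 1 runs under hash_seq_search and phase 2 works off the saved list, for the reentrancy reason given above).

#include <stdio.h>

#define NRELS 4

typedef struct { int oid; int refcount; int valid; } RelEntry;

static RelEntry cache[NRELS] = {
    {1259, 2, 1}, {1249, 0, 1}, {16384, 1, 1}, {16385, 0, 1}
};

static void
invalidate_all(void)
{
    int rebuild[NRELS];
    int nrebuild = 0;

    /* Phase 1: drop zero-refcount entries; only take note of the rest,
     * since rebuilding mid-scan could clobber the scan position. */
    for (int i = 0; i < NRELS; i++)
    {
        if (cache[i].refcount == 0)
            cache[i].valid = 0;     /* "delete" it */
        else
            rebuild[nrebuild++] = i;
    }

    /* Phase 2: rebuild the pinned entries outside the scan. */
    for (int i = 0; i < nrebuild; i++)
        printf("rebuilding entry with oid %d\n", cache[rebuild[i]].oid);
}

int
main(void)
{
    invalidate_all();
    return 0;
}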
@@ -2385,7 +2384,8 @@ RememberToFreeTupleDescAtEOX(TupleDesc td)
{
if (EOXactTupleDescArray == NULL)
{
- MemoryContext oldcxt;
+ MemoryContext oldcxt;
+
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
EOXactTupleDescArray = (TupleDesc *) palloc(16 * sizeof(TupleDesc));
@@ -2395,12 +2395,12 @@ RememberToFreeTupleDescAtEOX(TupleDesc td)
}
else if (NextEOXactTupleDescNum >= EOXactTupleDescArrayLen)
{
- int32 newlen = EOXactTupleDescArrayLen * 2;
+ int32 newlen = EOXactTupleDescArrayLen * 2;
Assert(EOXactTupleDescArrayLen > 0);
EOXactTupleDescArray = (TupleDesc *) repalloc(EOXactTupleDescArray,
- newlen * sizeof(TupleDesc));
+ newlen * sizeof(TupleDesc));
EOXactTupleDescArrayLen = newlen;
}
@@ -2437,7 +2437,7 @@ AtEOXact_RelationCache(bool isCommit)
* For simplicity, eoxact_list[] entries are not deleted till end of
* top-level transaction, even though we could remove them at
* subtransaction end in some cases, or remove relations from the list if
- * they are cleared for other reasons. Therefore we should expect the
+ * they are cleared for other reasons. Therefore we should expect the
* case that list entries are not found in the hashtable; if not, there's
* nothing to do for them.
*/
@@ -2498,7 +2498,7 @@ AtEOXact_cleanup(Relation relation, bool isCommit)
* transaction calls. (That seems bogus, but it's not worth fixing.)
*
* Note: ideally this check would be applied to every relcache entry, not
- * just those that have eoxact work to do. But it's not worth forcing a
+ * just those that have eoxact work to do. But it's not worth forcing a
* scan of the whole relcache just for this. (Moreover, doing so would
* mean that assert-enabled testing never tests the hash_search code path
* above, which seems a bad idea.)
@@ -2809,7 +2809,7 @@ RelationBuildLocalRelation(const char *relname,
/*
* Insert relation physical and logical identifiers (OIDs) into the right
- * places. For a mapped relation, we set relfilenode to zero and rely on
+ * places. For a mapped relation, we set relfilenode to zero and rely on
* RelationInitPhysicalAddr to consult the map.
*/
rel->rd_rel->relisshared = shared_relation;
@@ -3052,7 +3052,7 @@ RelationCacheInitializePhase2(void)
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
/*
- * Try to load the shared relcache cache file. If unsuccessful, bootstrap
+ * Try to load the shared relcache cache file. If unsuccessful, bootstrap
* the cache with pre-made descriptors for the critical shared catalogs.
*/
if (!load_relcache_init_file(true))
@@ -3132,9 +3132,9 @@ RelationCacheInitializePhase3(void)
/*
* If we didn't get the critical system indexes loaded into relcache, do
- * so now. These are critical because the catcache and/or opclass cache
+ * so now. These are critical because the catcache and/or opclass cache
* depend on them for fetches done during relcache load. Thus, we have an
- * infinite-recursion problem. We can break the recursion by doing
+ * infinite-recursion problem. We can break the recursion by doing
* heapscans instead of indexscans at certain key spots. To avoid hobbling
* performance, we only want to do that until we have the critical indexes
* loaded into relcache. Thus, the flag criticalRelcachesBuilt is used to
@@ -3151,7 +3151,7 @@ RelationCacheInitializePhase3(void)
* RewriteRelRulenameIndexId and TriggerRelidNameIndexId are not critical
* in the same way as the others, because the critical catalogs don't
* (currently) have any rules or triggers, and so these indexes can be
- * rebuilt without inducing recursion. However they are used during
+ * rebuilt without inducing recursion. However they are used during
* relcache load when a rel does have rules or triggers, so we choose to
* nail them for performance reasons.
*/
@@ -3182,7 +3182,7 @@ RelationCacheInitializePhase3(void)
*
* DatabaseNameIndexId isn't critical for relcache loading, but rather for
* initial lookup of MyDatabaseId, without which we'll never find any
- * non-shared catalogs at all. Autovacuum calls InitPostgres with a
+ * non-shared catalogs at all. Autovacuum calls InitPostgres with a
* database OID, so it instead depends on DatabaseOidIndexId. We also
* need to nail up some indexes on pg_authid and pg_auth_members for use
* during client authentication.
@@ -3617,7 +3617,7 @@ RelationGetIndexList(Relation relation)
/*
* We build the list we intend to return (in the caller's context) while
- * doing the scan. After successfully completing the scan, we copy that
+ * doing the scan. After successfully completing the scan, we copy that
* list into the relcache entry. This avoids cache-context memory leakage
* if we get some sort of error partway through.
*/
@@ -3655,7 +3655,7 @@ RelationGetIndexList(Relation relation)
/*
* indclass cannot be referenced directly through the C struct,
- * because it comes after the variable-width indkey field. Must
+ * because it comes after the variable-width indkey field. Must
* extract the datum the hard way...
*/
indclassDatum = heap_getattr(htup,
@@ -3970,16 +3970,16 @@ RelationGetIndexPredicate(Relation relation)
Bitmapset *
RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
{
- Bitmapset *indexattrs; /* indexed columns */
- Bitmapset *uindexattrs; /* columns in unique indexes */
- Bitmapset *idindexattrs; /* columns in the replica identity */
+ Bitmapset *indexattrs; /* indexed columns */
+ Bitmapset *uindexattrs; /* columns in unique indexes */
+ Bitmapset *idindexattrs; /* columns in the replica identity */
List *indexoidlist;
ListCell *l;
MemoryContext oldcxt;
/* Quick exit if we already computed the result. */
if (relation->rd_indexattr != NULL)
- switch(attrKind)
+ switch (attrKind)
{
case INDEX_ATTR_BITMAP_IDENTITY_KEY:
return bms_copy(relation->rd_idattr);
@@ -4023,8 +4023,8 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
Relation indexDesc;
IndexInfo *indexInfo;
int i;
- bool isKey; /* candidate key */
- bool isIDKey; /* replica identity index */
+ bool isKey; /* candidate key */
+ bool isIDKey; /* replica identity index */
indexDesc = index_open(indexOid, AccessShareLock);
@@ -4052,7 +4052,7 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
if (isIDKey)
idindexattrs = bms_add_member(idindexattrs,
- attrnum - FirstLowInvalidHeapAttributeNumber);
+ attrnum - FirstLowInvalidHeapAttributeNumber);
if (isKey)
uindexattrs = bms_add_member(uindexattrs,
@@ -4079,7 +4079,7 @@ RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
MemoryContextSwitchTo(oldcxt);
/* We return our original working copy for caller to play with */
- switch(attrKind)
+ switch (attrKind)
{
case INDEX_ATTR_BITMAP_IDENTITY_KEY:
return idindexattrs;
@@ -4268,7 +4268,7 @@ errtablecol(Relation rel, int attnum)
* given directly rather than extracted from the relation's catalog data.
*
* Don't use this directly unless errtablecol() is inconvenient for some
- * reason. This might possibly be needed during intermediate states in ALTER
+ * reason. This might possibly be needed during intermediate states in ALTER
* TABLE, for instance.
*/
int
@@ -4688,7 +4688,7 @@ load_relcache_init_file(bool shared)
return true;
/*
- * init file is broken, so do it the hard way. We don't bother trying to
+ * init file is broken, so do it the hard way. We don't bother trying to
* free the clutter we just allocated; it's not in the relcache so it
* won't hurt.
*/
@@ -4753,7 +4753,7 @@ write_relcache_init_file(bool shared)
}
/*
- * Write a magic number to serve as a file version identifier. We can
+ * Write a magic number to serve as a file version identifier. We can
* change the magic number whenever the relcache layout changes.
*/
magic = RELCACHE_INIT_FILEMAGIC;
@@ -4978,7 +4978,7 @@ RelationCacheInitFilePostInvalidate(void)
*
* We used to keep the init files across restarts, but that is unsafe in PITR
* scenarios, and even in simple crash-recovery cases there are windows for
- * the init files to become out-of-sync with the database. So now we just
+ * the init files to become out-of-sync with the database. So now we just
* remove them during startup and expect the first backend launch to rebuild
* them. Of course, this has to happen in each database of the cluster.
*/
diff --git a/src/backend/utils/cache/relfilenodemap.c b/src/backend/utils/cache/relfilenodemap.c
index 1b009be0fd3..557ff6148d0 100644
--- a/src/backend/utils/cache/relfilenodemap.c
+++ b/src/backend/utils/cache/relfilenodemap.c
@@ -43,7 +43,7 @@ typedef struct
typedef struct
{
- RelfilenodeMapKey key; /* lookup key - must be first */
+ RelfilenodeMapKey key; /* lookup key - must be first */
Oid relid; /* pg_class.oid */
} RelfilenodeMapEntry;
@@ -143,10 +143,10 @@ RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
{
RelfilenodeMapKey key;
RelfilenodeMapEntry *entry;
- bool found;
+ bool found;
SysScanDesc scandesc;
- Relation relation;
- HeapTuple ntp;
+ Relation relation;
+ HeapTuple ntp;
ScanKeyData skey[2];
Oid relid;
@@ -222,8 +222,9 @@ RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
#ifdef USE_ASSERT_CHECKING
if (assert_enabled)
{
- bool isnull;
- Oid check;
+ bool isnull;
+ Oid check;
+
check = fastgetattr(ntp, Anum_pg_class_reltablespace,
RelationGetDescr(relation),
&isnull);
diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c
index ebbe4d3bbe5..95a2689fd42 100644
--- a/src/backend/utils/cache/relmapper.c
+++ b/src/backend/utils/cache/relmapper.c
@@ -23,7 +23,7 @@
* mapped catalogs can only be relocated by operations such as VACUUM FULL
* and CLUSTER, which make no transactionally-significant changes: it must be
* safe for the new file to replace the old, even if the transaction itself
- * aborts. An important factor here is that the indexes and toast table of
+ * aborts. An important factor here is that the indexes and toast table of
* a mapped catalog must also be mapped, so that the rewrites/relocations of
* all these files commit in a single map file update rather than being tied
* to transaction commit.
@@ -57,13 +57,13 @@
/*
* The map file is critical data: we have no automatic method for recovering
* from loss or corruption of it. We use a CRC so that we can detect
- * corruption. To minimize the risk of failed updates, the map file should
+ * corruption. To minimize the risk of failed updates, the map file should
* be kept to no more than one standard-size disk sector (ie 512 bytes),
* and we use overwrite-in-place rather than playing renaming games.
* The struct layout below is designed to occupy exactly 512 bytes, which
* might make filesystem updates a bit more efficient.
*
- * Entries in the mappings[] array are in no particular order. We could
+ * Entries in the mappings[] array are in no particular order. We could
* speed searching by insisting on OID order, but it really shouldn't be
* worth the trouble given the intended size of the mapping sets.
*/
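[Editorial sketch] The 512-byte layout can be checked at compile time. This sketch loosely follows the struct described here: 62 mappings of two OIDs each (496 bytes) plus four int32 header/trailer fields (16 bytes) is what makes the arithmetic come out to exactly one sector.

#include <stdint.h>

#define MAX_MAPPINGS 62             /* 62 * 8 + 4 * 4 = 512 */

typedef struct { uint32_t mapoid; uint32_t mapfilenode; } RelMapping;

typedef struct
{
    int32_t    magic;               /* format version identifier */
    int32_t    num_mappings;        /* entries in use */
    RelMapping mappings[MAX_MAPPINGS];
    int32_t    crc;                 /* CRC of the preceding fields */
    int32_t    pad;                 /* rounds the struct to 512 bytes */
} RelMapFile;

/* C11 check that the layout really occupies one standard disk sector. */
_Static_assert(sizeof(RelMapFile) == 512, "map file must be 512 bytes");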
@@ -90,7 +90,7 @@ typedef struct RelMapFile
/*
* The currently known contents of the shared map file and our database's
- * local map file are stored here. These can be reloaded from disk
+ * local map file are stored here. These can be reloaded from disk
* immediately whenever we receive an update sinval message.
*/
static RelMapFile shared_map;
@@ -346,7 +346,7 @@ merge_map_updates(RelMapFile *map, const RelMapFile *updates, bool add_okay)
* RelationMapRemoveMapping
*
* Remove a relation's entry in the map. This is only allowed for "active"
- * (but not committed) local mappings. We need it so we can back out the
+ * (but not committed) local mappings. We need it so we can back out the
* entry for the transient target file when doing VACUUM FULL/CLUSTER on
* a mapped relation.
*/
@@ -374,7 +374,7 @@ RelationMapRemoveMapping(Oid relationId)
* RelationMapInvalidate
*
* This routine is invoked for SI cache flush messages. We must re-read
- * the indicated map file. However, we might receive a SI message in a
+ * the indicated map file. However, we might receive a SI message in a
* process that hasn't yet, and might never, load the mapping files;
* for example the autovacuum launcher, which *must not* try to read
* a local map since it is attached to no particular database.
@@ -442,7 +442,7 @@ AtCCI_RelationMap(void)
*
* During commit, this must be called as late as possible before the actual
* transaction commit, so as to minimize the window where the transaction
- * could still roll back after committing map changes. Although nothing
+ * could still roll back after committing map changes. Although nothing
* critically bad happens in such a case, we still would prefer that it
* not happen, since we'd possibly be losing useful updates to the relations'
* pg_class row(s).
@@ -509,7 +509,7 @@ AtPrepare_RelationMap(void)
/*
* CheckPointRelationMap
*
- * This is called during a checkpoint. It must ensure that any relation map
+ * This is called during a checkpoint. It must ensure that any relation map
* updates that were WAL-logged before the start of the checkpoint are
* securely flushed to disk and will not need to be replayed later. This
* seems unlikely to be a performance-critical issue, so we use a simple
@@ -700,7 +700,7 @@ load_relmap_file(bool shared)
*
* Because this may be called during WAL replay when MyDatabaseId,
* DatabasePath, etc aren't valid, we require the caller to pass in suitable
- * values. The caller is also responsible for being sure no concurrent
+ * values. The caller is also responsible for being sure no concurrent
* map update could be happening.
*/
static void
@@ -820,7 +820,7 @@ write_relmap_file(bool shared, RelMapFile *newmap,
/*
* Make sure that the files listed in the map are not deleted if the outer
- * transaction aborts. This had better be within the critical section
+ * transaction aborts. This had better be within the critical section
* too: it's not likely to fail, but if it did, we'd arrive at transaction
* abort with the files still vulnerable. PANICing will leave things in a
* good state on-disk.
diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c
index 93b42bbc1a5..24e8679e254 100644
--- a/src/backend/utils/cache/spccache.c
+++ b/src/backend/utils/cache/spccache.c
@@ -4,7 +4,7 @@
* Tablespace cache management.
*
* We cache the parsed version of spcoptions for each tablespace to avoid
- * needing to reparse on every lookup. Right now, there doesn't appear to
+ * needing to reparse on every lookup. Right now, there doesn't appear to
* be a measurable performance gain from doing this, but that might change
* in the future as we add more options.
*
@@ -128,7 +128,7 @@ get_tablespace(Oid spcid)
return spc;
/*
- * Not found in TableSpace cache. Check catcache. If we don't find a
+ * Not found in TableSpace cache. Check catcache. If we don't find a
* valid HeapTuple, it must mean someone has managed to request tablespace
* details for a non-existent tablespace. We'll just treat that case as
* if no options were specified.
@@ -158,7 +158,7 @@ get_tablespace(Oid spcid)
}
/*
- * Now create the cache entry. It's important to do this only after
+ * Now create the cache entry. It's important to do this only after
* reading the pg_tablespace entry, since doing so could cause a cache
* flush.
*/
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index 79df5b6835f..94d951ce056 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -803,16 +803,17 @@ static CatCache *SysCache[
static int SysCacheSize = lengthof(cacheinfo);
static bool CacheInitialized = false;
-static Oid SysCacheRelationOid[lengthof(cacheinfo)];
-static int SysCacheRelationOidSize;
+static Oid SysCacheRelationOid[
+ lengthof(cacheinfo)];
+static int SysCacheRelationOidSize;
-static int oid_compare(const void *a, const void *b);
+static int oid_compare(const void *a, const void *b);
/*
* InitCatalogCache - initialize the caches
*
* Note that no database access is done here; we only allocate memory
- * and initialize the cache structure. Interrogation of the database
+ * and initialize the cache structure. Interrogation of the database
* to complete initialization of a cache happens upon first use
* of that cache.
*/
@@ -1063,7 +1064,7 @@ SearchSysCacheExistsAttName(Oid relid, const char *attname)
* extract a specific attribute.
*
* This is equivalent to using heap_getattr() on a tuple fetched
- * from a non-cached relation. Usually, this is only used for attributes
+ * from a non-cached relation. Usually, this is only used for attributes
* that could be NULL or variable length; the fixed-size attributes in
* a system table are accessed just by mapping the tuple onto the C struct
* declarations from include/catalog/.
@@ -1176,12 +1177,12 @@ RelationInvalidatesSnapshotsOnly(Oid relid)
bool
RelationHasSysCache(Oid relid)
{
- int low = 0,
- high = SysCacheRelationOidSize - 1;
+ int low = 0,
+ high = SysCacheRelationOidSize - 1;
while (low <= high)
{
- int middle = low + (high - low) / 2;
+ int middle = low + (high - low) / 2;
if (SysCacheRelationOid[middle] == relid)
return true;
@@ -1201,8 +1202,8 @@ RelationHasSysCache(Oid relid)
static int
oid_compare(const void *a, const void *b)
{
- Oid oa = *((Oid *) a);
- Oid ob = *((Oid *) b);
+ Oid oa = *((Oid *) a);
+ Oid ob = *((Oid *) b);
if (oa == ob)
return 0;
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index ad370337fe3..8c6c7fcd22a 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -11,7 +11,7 @@
*
* Several seemingly-odd choices have been made to support use of the type
* cache by generic array and record handling routines, such as array_eq(),
- * record_cmp(), and hash_array(). Because those routines are used as index
+ * record_cmp(), and hash_array(). Because those routines are used as index
* support operations, they cannot leak memory. To allow them to execute
* efficiently, all information that they would like to re-use across calls
* is kept in the type cache.
@@ -101,7 +101,7 @@ typedef struct TypeCacheEnumData
*
* Stored record types are remembered in a linear array of TupleDescs,
* which can be indexed quickly with the assigned typmod. There is also
- * a hash table to speed searches for matching TupleDescs. The hash key
+ * a hash table to speed searches for matching TupleDescs. The hash key
* uses just the first N columns' type OIDs, and so we may have multiple
* entries with the same hash key.
*/
@@ -482,7 +482,7 @@ load_typcache_tupdesc(TypeCacheEntry *typentry)
/*
* Link to the tupdesc and increment its refcount (we assert it's a
- * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
+ * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
* because the reference mustn't be entered in the current resource owner;
* it can outlive the current query.
*/
@@ -1074,7 +1074,7 @@ load_enum_cache_data(TypeCacheEntry *tcache)
/*
* Read all the information for members of the enum type. We collect the
* info in working memory in the caller's context, and then transfer it to
- * permanent memory in CacheMemoryContext. This minimizes the risk of
+ * permanent memory in CacheMemoryContext. This minimizes the risk of
* leaking memory from CacheMemoryContext in the event of an error partway
* through.
*/
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 977fc66418c..0d92dcd036c 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -5,7 +5,7 @@
*
* Because of the extremely high rate at which log messages can be generated,
* we need to be mindful of the performance cost of obtaining any information
- * that may be logged. Also, it's important to keep in mind that this code may
+ * that may be logged. Also, it's important to keep in mind that this code may
* get called from within an aborted transaction, in which case operations
* such as syscache lookups are unsafe.
*
@@ -15,23 +15,23 @@
* if we run out of memory, it's important to be able to report that fact.
* There are a number of considerations that go into this.
*
- * First, distinguish between re-entrant use and actual recursion. It
+ * First, distinguish between re-entrant use and actual recursion. It
* is possible for an error or warning message to be emitted while the
- * parameters for an error message are being computed. In this case
+ * parameters for an error message are being computed. In this case
* errstart has been called for the outer message, and some field values
- * may have already been saved, but we are not actually recursing. We handle
- * this by providing a (small) stack of ErrorData records. The inner message
+ * may have already been saved, but we are not actually recursing. We handle
+ * this by providing a (small) stack of ErrorData records. The inner message
* can be computed and sent without disturbing the state of the outer message.
* (If the inner message is actually an error, this isn't very interesting
* because control won't come back to the outer message generator ... but
* if the inner message is only debug or log data, this is critical.)
*
* Second, actual recursion will occur if an error is reported by one of
- * the elog.c routines or something they call. By far the most probable
+ * the elog.c routines or something they call. By far the most probable
* scenario of this sort is "out of memory"; and it's also the nastiest
* to handle because we'd likely also run out of memory while trying to
* report this error! Our escape hatch for this case is to reset the
- * ErrorContext to empty before trying to process the inner error. Since
+ * ErrorContext to empty before trying to process the inner error. Since
* ErrorContext is guaranteed to have at least 8K of space in it (see mcxt.c),
* we should be able to process an "out of memory" message successfully.
* Since we lose the prior error state due to the reset, we won't be able
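[Editorial sketch] A standalone model of the re-entrant (non-recursive) case: a small ErrorData stack lets an inner message be built and finished without disturbing the half-built outer one, while overflow of that stack is treated as the error-recursion case (the real code PANICs rather than calling abort()).

#include <stdio.h>
#include <stdlib.h>

#define ERRORDATA_STACK_SIZE 5

typedef struct { int elevel; } ErrorData;

static ErrorData errordata[ERRORDATA_STACK_SIZE];
static int errordata_stack_depth = -1;

static ErrorData *
model_errstart(int elevel)
{
    if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
    {
        /* stack not big enough: assume infinite error recursion */
        fprintf(stderr, "ERRORDATA_STACK_SIZE exceeded\n");
        abort();
    }
    errordata[errordata_stack_depth].elevel = elevel;
    return &errordata[errordata_stack_depth];
}

static void
model_errfinish(void)
{
    errordata_stack_depth--;    /* pop; outer message state is untouched */
}

int
main(void)
{
    model_errstart(20);         /* outer message being built */
    model_errstart(10);         /* inner LOG emitted while computing it */
    model_errfinish();          /* inner pops cleanly */
    model_errfinish();
    printf("depth back to %d\n", errordata_stack_depth);
    return 0;
}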
@@ -116,7 +116,7 @@ char *Log_destination_string = NULL;
/*
* Max string length to send to syslog(). Note that this doesn't count the
* sequence-number prefix we add, and of course it doesn't count the prefix
- * added by syslog itself. Solaris and sysklogd truncate the final message
+ * added by syslog itself. Solaris and sysklogd truncate the final message
* at 1024 bytes, so this value leaves 124 bytes for those prefixes. (Most
* other syslog implementations seem to have limits of 2KB or so.)
*/
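The arithmetic in this comment pins down the constant that follows it in the source: a 1024-byte truncation point minus 124 bytes reserved for prefixes leaves a limit of 900.

    #define PG_SYSLOG_LIMIT 900     /* 1024 - 900 = 124 bytes for prefixes */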
@@ -244,7 +244,7 @@ errstart(int elevel, const char *filename, int lineno,
{
/*
* If we are inside a critical section, all errors become PANIC
- * errors. See miscadmin.h.
+ * errors. See miscadmin.h.
*/
if (CritSectionCount > 0)
elevel = PANIC;
@@ -257,7 +257,7 @@ errstart(int elevel, const char *filename, int lineno,
*
* 2. ExitOnAnyError mode switch is set (initdb uses this).
*
- * 3. the error occurred after proc_exit has begun to run. (It's
+ * 3. the error occurred after proc_exit has begun to run. (It's
* proc_exit's responsibility to see that this doesn't turn into
* infinite recursion!)
*/
@@ -350,7 +350,7 @@ errstart(int elevel, const char *filename, int lineno,
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery.
*/
@@ -424,8 +424,8 @@ errfinish(int dummy,...)
* may not be re-entrant.
*
* Note: other places that save-and-clear ImmediateInterruptOK also do
- * HOLD_INTERRUPTS(), but that should not be necessary here since we
- * don't call anything that could turn on ImmediateInterruptOK.
+ * HOLD_INTERRUPTS(), but that should not be necessary here since we don't
+ * call anything that could turn on ImmediateInterruptOK.
*/
save_ImmediateInterruptOK = ImmediateInterruptOK;
ImmediateInterruptOK = false;
@@ -458,7 +458,7 @@ errfinish(int dummy,...)
*
* Reset InterruptHoldoffCount in case we ereport'd from inside an
* interrupt holdoff section. (We assume here that no handler will
- * itself be inside a holdoff section. If necessary, such a handler
+ * itself be inside a holdoff section. If necessary, such a handler
* could save and restore InterruptHoldoffCount for itself, but this
* should make life easier for most.)
*
@@ -484,7 +484,7 @@ errfinish(int dummy,...)
* progress, so that we can report the message before dying. (Without
* this, pq_putmessage will refuse to send the message at all, which is
* what we want for NOTICE messages, but not for fatal exits.) This hack
- * is necessary because of poor design of old-style copy protocol. Note
+ * is necessary because of poor design of old-style copy protocol. Note
* we must do this even if client is fool enough to have set
* client_min_messages above FATAL, so don't look at output_to_client.
*/
@@ -606,7 +606,7 @@ errcode(int sqlerrcode)
/*
* errcode_for_file_access --- add SQLSTATE error code to the current error
*
- * The SQLSTATE code is chosen based on the saved errno value. We assume
+ * The SQLSTATE code is chosen based on the saved errno value. We assume
* that the failing operation was some type of disk file access.
*
* NOTE: the primary error message string should generally include %m
@@ -677,7 +677,7 @@ errcode_for_file_access(void)
/*
* errcode_for_socket_access --- add SQLSTATE error code to the current error
*
- * The SQLSTATE code is chosen based on the saved errno value. We assume
+ * The SQLSTATE code is chosen based on the saved errno value. We assume
* that the failing operation was some type of socket access.
*
* NOTE: the primary error message string should generally include %m
@@ -715,7 +715,7 @@ errcode_for_socket_access(void)
* This macro handles expansion of a format string and associated parameters;
* it's common code for errmsg(), errdetail(), etc. Must be called inside
* a routine that is declared like "const char *fmt, ..." and has an edata
- * pointer set up. The message is assigned to edata->targetfield, or
+ * pointer set up. The message is assigned to edata->targetfield, or
* appended to it if appendval is true. The message is subject to translation
* if translateit is true.
*
@@ -1296,7 +1296,7 @@ elog_start(const char *filename, int lineno, const char *funcname)
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery. Note that the message is intentionally not localized,
* else failure to convert it to client encoding could cause further
@@ -1467,7 +1467,7 @@ EmitErrorReport(void)
/*
* CopyErrorData --- obtain a copy of the topmost error stack entry
*
- * This is only for use in error handler code. The data is copied into the
+ * This is only for use in error handler code. The data is copied into the
* current memory context, so callers should always switch away from
* ErrorContext first; otherwise it will be lost when FlushErrorState is done.
*/
@@ -1581,7 +1581,7 @@ FlushErrorState(void)
*
* A handler can do CopyErrorData/FlushErrorState to get out of the error
* subsystem, then do some processing, and finally ReThrowError to re-throw
- * the original error. This is slower than just PG_RE_THROW() but should
+ * the original error. This is slower than just PG_RE_THROW() but should
* be used if the "some processing" is likely to incur another error.
*/
void
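Together with CopyErrorData/FlushErrorState above, this supports the standard error-trapping pattern; a hedged sketch, where risky_operation() and cleanup_after_failure() are hypothetical:

    MemoryContext oldcontext = CurrentMemoryContext;
    ErrorData  *edata;

    PG_TRY();
    {
        risky_operation();
    }
    PG_CATCH();
    {
        /* switch away from ErrorContext before copying, per the comment */
        MemoryContextSwitchTo(oldcontext);
        edata = CopyErrorData();
        FlushErrorState();

        cleanup_after_failure();    /* may itself throw an error */
        ReThrowError(edata);
    }
    PG_END_TRY();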
@@ -1598,7 +1598,7 @@ ReThrowError(ErrorData *edata)
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery.
*/
@@ -1713,8 +1713,8 @@ pg_re_throw(void)
char *
GetErrorContextStack(void)
{
- ErrorData *edata;
- ErrorContextCallback *econtext;
+ ErrorData *edata;
+ ErrorContextCallback *econtext;
/*
* Okay, crank up a stack entry to store the info in.
@@ -1724,7 +1724,7 @@ GetErrorContextStack(void)
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery.
*/
@@ -1749,8 +1749,8 @@ GetErrorContextStack(void)
* into edata->context.
*
* Errors occurring in callback functions should go through the regular
- * error handling code which should handle any recursive errors, though
- * we double-check above, just in case.
+ * error handling code which should handle any recursive errors, though we
+ * double-check above, just in case.
*/
for (econtext = error_context_stack;
econtext != NULL;
@@ -1833,7 +1833,7 @@ set_syslog_parameters(const char *ident, int facility)
{
/*
* guc.c is likely to call us repeatedly with same parameters, so don't
- * thrash the syslog connection unnecessarily. Also, we do not re-open
+ * thrash the syslog connection unnecessarily. Also, we do not re-open
* the connection until needed, since this routine will get called whether
* or not Log_destination actually mentions syslog.
*
@@ -2069,6 +2069,7 @@ write_console(const char *line, int len)
int rc;
#ifdef WIN32
+
/*
* Try to convert the message to UTF16 and write it with WriteConsoleW().
* Fall back on write() if anything fails.
@@ -2186,14 +2187,14 @@ setup_formatted_start_time(void)
static const char *
process_log_prefix_padding(const char *p, int *ppadding)
{
- int paddingsign = 1;
- int padding = 0;
+ int paddingsign = 1;
+ int padding = 0;
if (*p == '-')
{
p++;
- if (*p == '\0') /* Did the buf end in %- ? */
+ if (*p == '\0') /* Did the buf end in %- ? */
return NULL;
paddingsign = -1;
}
@@ -2268,9 +2269,9 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
* process_log_prefix_padding moves p past the padding number if it
* exists.
*
- * Note: Since only '-', '0' to '9' are valid formatting characters
- * we can do a quick check here to pre-check for formatting. If the
- * char is not formatting then we can skip a useless function call.
+ * Note: Since only '-', '0' to '9' are valid formatting characters we
+ * can do a quick check here to pre-check for formatting. If the char
+ * is not formatting then we can skip a useless function call.
*
* Further note: At least on some platforms, passing %*s rather than
* %s to appendStringInfo() is substantially slower, so many of the
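The padding parsed here has the same semantics as printf's "%*s": a positive width right-justifies and a negative width left-justifies, which is why process_log_prefix_padding maps a leading '-' to paddingsign = -1. A standalone illustration:

    #include <stdio.h>

    int
    main(void)
    {
        printf("[%*s]\n", 10, "pid");   /* right-aligned: [       pid] */
        printf("[%*s]\n", -10, "pid");  /* left-aligned:  [pid       ] */
        return 0;
    }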
@@ -2337,9 +2338,10 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
case 'c':
if (padding != 0)
{
- char strfbuf[128];
+ char strfbuf[128];
+
snprintf(strfbuf, sizeof(strfbuf) - 1, "%lx.%x",
- (long) (MyStartTime), MyProcPid);
+ (long) (MyStartTime), MyProcPid);
appendStringInfo(buf, "%*s", padding, strfbuf);
}
else
@@ -2411,14 +2413,15 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
if (MyProcPort->remote_port && MyProcPort->remote_port[0] != '\0')
{
/*
- * This option is slightly special as the port number
- * may be appended onto the end. Here we need to build
- * 1 string which contains the remote_host and optionally
- * the remote_port (if set) so we can properly align the
- * string.
+ * This option is slightly special as the port
+ * number may be appended onto the end. Here we
+ * need to build 1 string which contains the
+ * remote_host and optionally the remote_port (if
+ * set) so we can properly align the string.
*/
- char *hostport;
+ char *hostport;
+
hostport = psprintf("%s(%s)", MyProcPort->remote_host, MyProcPort->remote_port);
appendStringInfo(buf, "%*s", padding, hostport);
pfree(hostport);
@@ -2433,7 +2436,7 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
if (MyProcPort->remote_port &&
MyProcPort->remote_port[0] != '\0')
appendStringInfo(buf, "(%s)",
- MyProcPort->remote_port);
+ MyProcPort->remote_port);
}
}
@@ -2465,9 +2468,10 @@ log_line_prefix(StringInfo buf, ErrorData *edata)
{
if (padding != 0)
{
- char strfbuf[128];
+ char strfbuf[128];
+
snprintf(strfbuf, sizeof(strfbuf) - 1, "%d/%u",
- MyProc->backendId, MyProc->lxid);
+ MyProc->backendId, MyProc->lxid);
appendStringInfo(buf, "%*s", padding, strfbuf);
}
else
@@ -2898,6 +2902,7 @@ send_message_to_server_log(ErrorData *edata)
if (redirection_done && !am_syslogger)
write_pipe_chunks(buf.data, buf.len, LOG_DESTINATION_STDERR);
#ifdef WIN32
+
/*
* In a win32 service environment, there is no usable stderr. Capture
* anything going there and write it to the eventlog instead.
@@ -2951,7 +2956,7 @@ send_message_to_server_log(ErrorData *edata)
*
* Note: when there are multiple backends writing into the syslogger pipe,
* it's critical that each write go into the pipe indivisibly, and not
- * get interleaved with data from other processes. Fortunately, the POSIX
+ * get interleaved with data from other processes. Fortunately, the POSIX
* spec requires that writes to pipes be atomic so long as they are not
* more than PIPE_BUF bytes long. So we divide long messages into chunks
* that are no more than that length, and send one chunk per write() call.
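A minimal sketch of the chunking rule described above, in plain C (the real write_pipe_chunks also prepends a per-chunk protocol header identifying the sending process, omitted here):

    #include <limits.h>
    #include <unistd.h>

    static void
    write_chunked(int fd, const char *data, size_t len)
    {
        while (len > 0)
        {
            /* never exceed PIPE_BUF, so each write() is atomic */
            size_t      n = (len > PIPE_BUF) ? PIPE_BUF : len;

            if (write(fd, data, n) < 0)
                return;             /* real code would retry or report */
            data += n;
            len -= n;
        }
    }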
@@ -3271,7 +3276,7 @@ useful_strerror(int errnum)
str = strerror(errnum);
/*
- * Some strerror()s return an empty string for out-of-range errno. This
+ * Some strerror()s return an empty string for out-of-range errno. This
* is ANSI C spec compliant, but not exactly useful. Also, we may get
* back strings of question marks if libc cannot transcode the message to
* the codeset specified by LC_CTYPE. If we get nothing useful, first try
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index fa01ade544b..042af7877bf 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -131,7 +131,7 @@ load_external_function(char *filename, char *funcname,
/*
* This function loads a shlib file without looking up any particular
- * function in it. If the same shlib has previously been loaded,
+ * function in it. If the same shlib has previously been loaded,
* unload and reload it.
*
* When 'restricted' is true, only libraries in the presumed-secure
@@ -171,7 +171,7 @@ lookup_external_function(void *filehandle, char *funcname)
/*
* Load the specified dynamic-link library file, unless it already is
- * loaded. Return the pg_dl* handle for the file.
+ * loaded. Return the pg_dl* handle for the file.
*
* Note: libname is expected to be an exact name for the library file.
*/
@@ -473,7 +473,7 @@ file_exists(const char *name)
* If name contains a slash, check if the file exists, if so return
* the name. Else (no slash) try to expand using search path (see
* find_in_dynamic_libpath below); if that works, return the fully
- * expanded file name. If the previous failed, append DLSUFFIX and
+ * expanded file name. If the previous failed, append DLSUFFIX and
* try again. If all fails, just return the original name.
*
* The result will always be freshly palloc'd.
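The lookup order reads more easily as code; here is a plain-C sketch under simplifying assumptions (the dynamic_libpath search is elided, file_exists() is a stand-in, and the real code palloc's its results):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define DLSUFFIX ".so"          /* platform-dependent in reality */

    static int
    file_exists(const char *name)
    {
        FILE       *f = fopen(name, "r");

        if (f == NULL)
            return 0;
        fclose(f);
        return 1;
    }

    static char *
    expand_name(const char *name)
    {
        char       *buf;

        if (strchr(name, '/') && file_exists(name))
            return strdup(name);    /* exact name exists: use it */

        buf = malloc(strlen(name) + strlen(DLSUFFIX) + 1);
        sprintf(buf, "%s%s", name, DLSUFFIX);
        if (file_exists(buf))
            return buf;             /* name + DLSUFFIX exists */

        free(buf);
        return strdup(name);        /* fall back to the original name */
    }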
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index 1fe45da50ce..a95be056dae 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -96,7 +96,7 @@ static Datum fmgr_security_definer(PG_FUNCTION_ARGS);
/*
- * Lookup routines for builtin-function table. We can search by either Oid
+ * Lookup routines for builtin-function table. We can search by either Oid
* or name, but search by Oid is much faster.
*/
@@ -578,7 +578,7 @@ clear_external_function_hash(void *filehandle)
* Copy an FmgrInfo struct
*
* This is inherently somewhat bogus since we can't reliably duplicate
- * language-dependent subsidiary info. We cheat by zeroing fn_extra,
+ * language-dependent subsidiary info. We cheat by zeroing fn_extra,
* instead, meaning that subsidiary info will have to be recomputed.
*/
void
@@ -858,7 +858,7 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
/*
- * Support for security-definer and proconfig-using functions. We support
+ * Support for security-definer and proconfig-using functions. We support
* both of these features using the same call handler, because they are
* often used together and it would be inefficient (as well as notationally
* messy) to have two levels of call handler involved.
@@ -878,7 +878,7 @@ struct fmgr_security_definer_cache
* (All this info is cached for the duration of the current query.)
* To execute a call, we temporarily replace the flinfo with the cached
* and looked-up one, while keeping the outer fcinfo (which contains all
- * the actual arguments, etc.) intact. This is not re-entrant, but then
+ * the actual arguments, etc.) intact. This is not re-entrant, but then
* the fcinfo itself can't be used re-entrantly anyway.
*/
static Datum
@@ -958,7 +958,7 @@ fmgr_security_definer(PG_FUNCTION_ARGS)
/*
* We don't need to restore GUC or userid settings on error, because the
- * ensuing xact or subxact abort will do that. The PG_TRY block is only
+ * ensuing xact or subxact abort will do that. The PG_TRY block is only
* needed to clean up the flinfo link.
*/
save_flinfo = fcinfo->flinfo;
@@ -1011,7 +1011,7 @@ fmgr_security_definer(PG_FUNCTION_ARGS)
/*
* These are for invocation of a specifically named function with a
* directly-computed parameter list. Note that neither arguments nor result
- * are allowed to be NULL. Also, the function cannot be one that needs to
+ * are allowed to be NULL. Also, the function cannot be one that needs to
* look at FmgrInfo, since there won't be any.
*/
Datum
@@ -1556,8 +1556,8 @@ FunctionCall9Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
/*
* These are for invocation of a function identified by OID with a
* directly-computed parameter list. Note that neither arguments nor result
- * are allowed to be NULL. These are essentially fmgr_info() followed
- * by FunctionCallN(). If the same function is to be invoked repeatedly,
+ * are allowed to be NULL. These are essentially fmgr_info() followed
+ * by FunctionCallN(). If the same function is to be invoked repeatedly,
* do the fmgr_info() once and then use FunctionCallN().
*/
Datum
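A hedged usage sketch contrasting the two call styles (int4pl is the built-in int4 addition function; int4pl_oid stands in for its pg_proc OID):

    FmgrInfo    flinfo;
    Datum       d;

    /* repeated calls: do the lookup once, then use FunctionCallN */
    fmgr_info(int4pl_oid, &flinfo);
    d = FunctionCall2(&flinfo, Int32GetDatum(2), Int32GetDatum(40));

    /* one-shot form: fmgr_info() plus the call, redone every time */
    d = OidFunctionCall2(int4pl_oid, Int32GetDatum(2), Int32GetDatum(40));
    /* DatumGetInt32(d) == 42 in both cases */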
@@ -1886,7 +1886,7 @@ OidFunctionCall9Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
*
* One important difference from the bare function call is that we will
* push any active SPI context, allowing SPI-using I/O functions to be
- * called from other SPI functions without extra notation. This is a hack,
+ * called from other SPI functions without extra notation. This is a hack,
* but the alternative of expecting all SPI functions to do SPI_push/SPI_pop
* around I/O calls seems worse.
*/
diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c
index 45108306b2b..f2824e0876c 100644
--- a/src/backend/utils/fmgr/funcapi.c
+++ b/src/backend/utils/fmgr/funcapi.c
@@ -136,7 +136,7 @@ per_MultiFuncCall(PG_FUNCTION_ARGS)
* FuncCallContext is pointing to it), but in most usage patterns the
* tuples stored in it will be in the function's per-tuple context. So at
* the beginning of each call, the Slot will hold a dangling pointer to an
- * already-recycled tuple. We clear it out here.
+ * already-recycled tuple. We clear it out here.
*
* Note: use of retval->slot is obsolete as of 8.0, and we expect that it
* will always be NULL. This is just here for backwards compatibility in
@@ -192,13 +192,13 @@ shutdown_MultiFuncCall(Datum arg)
* Given a function's call info record, determine the kind of datatype
* it is supposed to return. If resultTypeId isn't NULL, *resultTypeId
* receives the actual datatype OID (this is mainly useful for scalar
- * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
+ * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
* receives a pointer to a TupleDesc when the result is of a composite
* type, or NULL when it's a scalar result.
*
* One hard case that this handles is resolution of actual rowtypes for
* functions returning RECORD (from either the function's OUT parameter
- * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned
+ * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned
* only when we couldn't resolve the actual rowtype for lack of information.
*
* The other hard case that this handles is resolution of polymorphism.
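A hedged sketch of the usual call pattern inside a C function returning RECORD, matching the resolution rules described above:

    TupleDesc   tupdesc;

    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("function returning record called in context "
                        "that cannot accept type record")));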
@@ -281,7 +281,7 @@ get_func_result_type(Oid functionId,
/*
* internal_get_result_type -- workhorse code implementing all the above
*
- * funcid must always be supplied. call_expr and rsinfo can be NULL if not
+ * funcid must always be supplied. call_expr and rsinfo can be NULL if not
* available. We will return TYPEFUNC_RECORD, and store NULL into
* *resultTupleDesc, if we cannot deduce the complete result rowtype from
* the available information.
@@ -448,7 +448,7 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args,
return true;
/*
- * Otherwise, extract actual datatype(s) from input arguments. (We assume
+ * Otherwise, extract actual datatype(s) from input arguments. (We assume
* the parser already validated consistency of the arguments.)
*/
if (!call_expr)
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 4ae1eb1ca3a..2b99e4b5218 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -5,19 +5,19 @@
*
* dynahash.c supports both local-to-a-backend hash tables and hash tables in
* shared memory. For shared hash tables, it is the caller's responsibility
- * to provide appropriate access interlocking. The simplest convention is
- * that a single LWLock protects the whole hash table. Searches (HASH_FIND or
+ * to provide appropriate access interlocking. The simplest convention is
+ * that a single LWLock protects the whole hash table. Searches (HASH_FIND or
* hash_seq_search) need only shared lock, but any update requires exclusive
* lock. For heavily-used shared tables, the single-lock approach creates a
* concurrency bottleneck, so we also support "partitioned" locking wherein
* there are multiple LWLocks guarding distinct subsets of the table. To use
* a hash table in partitioned mode, the HASH_PARTITION flag must be given
- * to hash_create. This prevents any attempt to split buckets on-the-fly.
+ * to hash_create. This prevents any attempt to split buckets on-the-fly.
* Therefore, each hash bucket chain operates independently, and no fields
* of the hash header change after init except nentries and freeList.
* A partitioned table uses a spinlock to guard changes of those two fields.
* This lets any subset of the hash buckets be treated as a separately
- * lockable partition. We expect callers to use the low-order bits of a
+ * lockable partition. We expect callers to use the low-order bits of a
* lookup key's hash value as a partition number --- this will work because
* of the way calc_bucket() maps hash values to bucket numbers.
*
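The low-order-bits convention mentioned above is simple to state in code; a sketch with a hypothetical partition count, which must be a power of two for the masking to work:

    #define NUM_PARTITIONS  16      /* hypothetical; power of two */

    #define PartitionFor(hashcode)  ((hashcode) & (NUM_PARTITIONS - 1))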
@@ -81,7 +81,7 @@
* Constants
*
* A hash table has a top-level "directory", each of whose entries points
- * to a "segment" of ssize bucket headers. The maximum number of hash
+ * to a "segment" of ssize bucket headers. The maximum number of hash
* buckets is thus dsize * ssize (but dsize may be expansible). Of course,
* the number of records in the table can be larger, but we don't want a
* whole lot of records per bucket or performance goes down.
@@ -89,7 +89,7 @@
* In a hash table allocated in shared memory, the directory cannot be
* expanded because it must stay at a fixed address. The directory size
* should be selected using hash_select_dirsize (and you'd better have
- * a good idea of the maximum number of entries!). For non-shared hash
+ * a good idea of the maximum number of entries!). For non-shared hash
* tables, the initial directory size can be left at the default.
*/
#define DEF_SEGSIZE 256
@@ -341,7 +341,7 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
{
/*
* ctl structure and directory are preallocated for shared memory
- * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
+ * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
* well.
*/
hashp->hctl = info->hctl;
@@ -790,7 +790,7 @@ calc_bucket(HASHHDR *hctl, uint32 hash_val)
* the result is a dangling pointer that shouldn't be dereferenced!)
*
* HASH_ENTER will normally ereport a generic "out of memory" error if
- * it is unable to create a new entry. The HASH_ENTER_NULL operation is
+ * it is unable to create a new entry. The HASH_ENTER_NULL operation is
* the same except it will return NULL if out of memory. Note that
* HASH_ENTER_NULL cannot be used with the default palloc-based allocator,
* since palloc internally ereports on out-of-memory.
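A hedged usage sketch of the HASH_ENTER convention (MyEntry is a hypothetical struct whose first field is the hash key, per dynahash's layout rules):

    bool        found;
    MyEntry    *entry;

    entry = (MyEntry *) hash_search(htab, &key, HASH_ENTER, &found);
    if (!found)
        entry->count = 0;           /* initialize a newly created entry */
    entry->count++;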
@@ -1042,7 +1042,7 @@ hash_update_hash_key(HTAB *hashp,
hashp->tabname);
/*
- * Lookup the existing element using its saved hash value. We need to do
+ * Lookup the existing element using its saved hash value. We need to do
* this to be able to unlink it from its hash chain, but as a side benefit
* we can verify the validity of the passed existingEntry pointer.
*/
@@ -1119,7 +1119,7 @@ hash_update_hash_key(HTAB *hashp,
/*
* If old and new hash values belong to the same bucket, we need not
* change any chain links, and indeed should not since this simplistic
- * update will corrupt the list if currBucket is the last element. (We
+ * update will corrupt the list if currBucket is the last element. (We
* cannot fall out earlier, however, since we need to scan the bucket to
* check for duplicate keys.)
*/
@@ -1405,7 +1405,7 @@ expand_table(HTAB *hashp)
}
/*
- * Relocate records to the new bucket. NOTE: because of the way the hash
+ * Relocate records to the new bucket. NOTE: because of the way the hash
* masking is done in calc_bucket, only one old bucket can need to be
* split at this point. With a different way of reducing the hash value,
* that might not be true!
@@ -1554,7 +1554,7 @@ hash_corrupted(HTAB *hashp)
{
/*
* If the corruption is in a shared hashtable, we'd better force a
- * systemwide restart. Otherwise, just shut down this one backend.
+ * systemwide restart. Otherwise, just shut down this one backend.
*/
if (hashp->isshared)
elog(PANIC, "hash table \"%s\" corrupted", hashp->tabname);
@@ -1599,7 +1599,7 @@ next_pow2_int(long num)
/************************* SEQ SCAN TRACKING ************************/
/*
- * We track active hash_seq_search scans here. The need for this mechanism
+ * We track active hash_seq_search scans here. The need for this mechanism
* comes from the fact that a scan will get confused if a bucket split occurs
* while it's in progress: it might visit entries twice, or even miss some
* entirely (if it's partway through the same bucket that splits). Hence
@@ -1619,7 +1619,7 @@ next_pow2_int(long num)
*
* This arrangement is reasonably robust if a transient hashtable is deleted
* without notifying us. The absolute worst case is we might inhibit splits
- * in another table created later at exactly the same address. We will give
+ * in another table created later at exactly the same address. We will give
* a warning at transaction end for reference leaks, so any bugs leading to
* lack of notification should be easy to catch.
*/
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index 6115ce3f337..a703c67eadd 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -59,7 +59,7 @@ static List *lock_files = NIL;
*
* NOTE: "ignoring system indexes" means we do not use the system indexes
* for lookups (either in hardwired catalog accesses or in planner-generated
- * plans). We do, however, still update the indexes when a catalog
+ * plans). We do, however, still update the indexes when a catalog
* modification is made.
* ----------------------------------------------------------------
*/
@@ -230,7 +230,7 @@ SetSessionUserId(Oid userid, bool is_superuser)
* Currently there are two valid bits in SecurityRestrictionContext:
*
* SECURITY_LOCAL_USERID_CHANGE indicates that we are inside an operation
- * that is temporarily changing CurrentUserId via these functions. This is
+ * that is temporarily changing CurrentUserId via these functions. This is
* needed to indicate that the actual value of CurrentUserId is not in sync
* with guc.c's internal state, so SET ROLE has to be disallowed.
*
@@ -251,7 +251,7 @@ SetSessionUserId(Oid userid, bool is_superuser)
* ever throw any kind of error. This is because they are used by
* StartTransaction and AbortTransaction to save/restore the settings,
* and during the first transaction within a backend, the value to be saved
- * and perhaps restored is indeed invalid. We have to be able to get
+ * and perhaps restored is indeed invalid. We have to be able to get
* through AbortTransaction without asserting in case InitPostgres fails.
*/
void
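A hedged sketch of the save/restore discipline these functions are built for, as used around a security-restricted operation (target_userid is hypothetical):

    Oid         save_userid;
    int         save_sec_context;

    GetUserIdAndSecContext(&save_userid, &save_sec_context);
    SetUserIdAndSecContext(target_userid,
                           save_sec_context | SECURITY_LOCAL_USERID_CHANGE);
    /* ... do the work under the temporary userid ... */
    SetUserIdAndSecContext(save_userid, save_sec_context);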
@@ -291,7 +291,7 @@ InSecurityRestrictedOperation(void)
/*
* These are obsolete versions of Get/SetUserIdAndSecContext that are
* only provided for bug-compatibility with some rather dubious code in
- * pljava. We allow the userid to be set, but only when not inside a
+ * pljava. We allow the userid to be set, but only when not inside a
* security restriction context.
*/
void
@@ -394,7 +394,7 @@ InitializeSessionUserId(const char *rolename)
* Check connection limit for this role.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs. If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
* exactly seems more trouble than it is worth, however; instead we
@@ -493,7 +493,7 @@ GetCurrentRoleId(void)
* Change Role ID while running (SET ROLE)
*
* If roleid is InvalidOid, we are doing SET ROLE NONE: revert to the
- * session user authorization. In this case the is_superuser argument
+ * session user authorization. In this case the is_superuser argument
* is ignored.
*
* When roleid is not InvalidOid, the caller must have checked whether
@@ -561,7 +561,7 @@ GetUserNameFromId(Oid roleid)
* ($DATADIR/postmaster.pid) and Unix-socket-file lockfiles ($SOCKFILE.lock).
* Both kinds of files contain the same info initially, although we can add
* more information to a data-directory lockfile after it's created, using
- * AddToDataDirLockFile(). See miscadmin.h for documentation of the contents
+ * AddToDataDirLockFile(). See miscadmin.h for documentation of the contents
* of these lockfiles.
*
* On successful lockfile creation, a proc_exit callback to remove the
@@ -650,7 +650,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
my_gp_pid = 0;
/*
- * We need a loop here because of race conditions. But don't loop forever
+ * We need a loop here because of race conditions. But don't loop forever
* (for example, a non-writable $PGDATA directory might cause a failure
* that won't go away). 100 tries seems like plenty.
*/
@@ -659,7 +659,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Try to create the lock file --- O_EXCL makes this atomic.
*
- * Think not to make the file protection weaker than 0600. See
+ * Think not to make the file protection weaker than 0600. See
* comments below.
*/
fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
@@ -727,7 +727,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
* implies that the existing process has a different userid than we
* do, which means it cannot be a competing postmaster. A postmaster
* cannot successfully attach to a data directory owned by a userid
- * other than its own. (This is now checked directly in
+ * other than its own. (This is now checked directly in
* checkDataDir(), but has been true for a long time because of the
* restriction that the data directory isn't group- or
* world-accessible.) Also, since we create the lockfiles mode 600,
@@ -765,9 +765,9 @@ CreateLockFile(const char *filename, bool amPostmaster,
}
/*
- * No, the creating process did not exist. However, it could be that
+ * No, the creating process did not exist. However, it could be that
* the postmaster crashed (or more likely was kill -9'd by a clueless
- * admin) but has left orphan backends behind. Check for this by
+ * admin) but has left orphan backends behind. Check for this by
* looking to see if there is an associated shmem segment that is
* still in use.
*
@@ -808,7 +808,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Looks like nobody's home. Unlink the file and try again to create
- * it. Need a loop because of possible race condition against other
+ * it. Need a loop because of possible race condition against other
* would-be creators.
*/
if (unlink(filename) < 0)
@@ -822,8 +822,8 @@ CreateLockFile(const char *filename, bool amPostmaster,
}
/*
- * Successfully created the file, now fill it. See comment in miscadmin.h
- * about the contents. Note that we write the same first five lines into
+ * Successfully created the file, now fill it. See comment in miscadmin.h
+ * about the contents. Note that we write the same first five lines into
* both datadir and socket lockfiles; although more stuff may get added to
* the datadir lockfile later.
*/
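The core of the scheme above is O_EXCL creation plus a bounded retry loop; a plain-C sketch (the PID checks and shared-memory probe that decide whether a stale file may be unlinked are elided):

    #include <fcntl.h>
    #include <unistd.h>

    static int
    create_lock_file_sketch(const char *path)
    {
        int         tries;

        for (tries = 0; tries < 100; tries++)
        {
            int         fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0600);

            if (fd >= 0)
                return fd;          /* we own the lock file */

            /*
             * Real code reads the PID stored in the file, checks whether
             * that process (or its shmem segment) still exists, and only
             * then unlinks and retries.
             */
            if (unlink(path) < 0)
                return -1;
        }
        return -1;
    }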
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 89a7c9e15c2..ed936d7fad8 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -80,7 +80,7 @@ static void process_settings(Oid databaseid, Oid roleid);
* GetDatabaseTuple -- fetch the pg_database row for a database
*
* This is used during backend startup when we don't yet have any access to
- * system catalogs in general. In the worst case, we can seqscan pg_database
+ * system catalogs in general. In the worst case, we can seqscan pg_database
* using nothing but the hard-wired descriptor that relcache.c creates for
* pg_database. In more typical cases, relcache.c was able to load
* descriptors for both pg_database and its indexes from the shared relcache
@@ -104,7 +104,7 @@ GetDatabaseTuple(const char *dbname)
CStringGetDatum(dbname));
/*
- * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
+ * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
* built the critical shared relcache entries (i.e., we're starting up
* without a shared relcache cache file).
*/
@@ -147,7 +147,7 @@ GetDatabaseTupleByOid(Oid dboid)
ObjectIdGetDatum(dboid));
/*
- * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
+ * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
* built the critical shared relcache entries (i.e., we're starting up
* without a shared relcache cache file).
*/
@@ -186,7 +186,7 @@ PerformAuthentication(Port *port)
* In EXEC_BACKEND case, we didn't inherit the contents of pg_hba.conf
* etcetera from the postmaster, and have to load them ourselves.
*
- * FIXME: [fork/exec] Ugh. Is there a way around this overhead?
+ * FIXME: [fork/exec] Ugh. Is there a way around this overhead?
*/
#ifdef EXEC_BACKEND
if (!load_hba())
@@ -310,7 +310,7 @@ CheckMyDatabase(const char *name, bool am_superuser)
name)));
/*
- * Check privilege to connect to the database. (The am_superuser test
+ * Check privilege to connect to the database. (The am_superuser test
* is redundant, but since we have the flag, might as well check it
* and save a few cycles.)
*/
@@ -326,7 +326,7 @@ CheckMyDatabase(const char *name, bool am_superuser)
* Check connection limit for this database.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs. If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
* exactly seems more trouble than it is worth, however; instead we
@@ -454,7 +454,7 @@ InitializeMaxBackends(void)
/* the extra unit accounts for the autovacuum launcher */
MaxBackends = MaxConnections + autovacuum_max_workers + 1 +
- + max_worker_processes;
+ +max_worker_processes;
/* internal error because the values were all checked previously */
if (MaxBackends > MAX_BACKENDS)
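As a worked instance of the formula, under assumed 9.4-era defaults (the numbers are illustrative only): max_connections = 100, autovacuum_max_workers = 3, max_worker_processes = 8, plus the extra unit for the autovacuum launcher, gives MaxBackends = 100 + 3 + 1 + 8 = 112.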
@@ -491,7 +491,7 @@ BaseInit(void)
* Initialize POSTGRES.
*
* The database can be specified by name, using the in_dbname parameter, or by
- * OID, using the dboid parameter. In the latter case, the actual database
+ * OID, using the dboid parameter. In the latter case, the actual database
* name can be returned to the caller in out_dbname. If out_dbname isn't
* NULL, it must point to a buffer of size NAMEDATALEN.
*
@@ -912,7 +912,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
/*
* Now process any command-line switches and any additional GUC variable
- * settings passed in the startup packet. We couldn't do this before
+ * settings passed in the startup packet. We couldn't do this before
* because we didn't know if client is a superuser.
*/
if (MyProcPort != NULL)
diff --git a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
index ca965390557..68615726552 100644
--- a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
+++ b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
@@ -231,7 +231,7 @@ static unsigned short BinarySearchRange
/*
* NOTE: big5 high_byte: 0xa1-0xfe, low_byte: 0x40-0x7e,
* 0xa1-0xfe (radicals: 0x00-0x3e, 0x3f-0x9c) big5 radix is
- * 0x9d. [region_low, region_high] We
+ * 0x9d. [region_low, region_high] We
* should remember big5 has two different regions (above).
* There is a bias for the distance between these regions.
* 0xa1 - 0x7e + bias = 1 (Distance between 0xa1 and 0x7e is
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index ad255ee6932..665ac10f06e 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -55,7 +55,7 @@
/*
* We maintain a simple linked list caching the fmgr lookup info for the
* currently selected conversion functions, as well as any that have been
- * selected previously in the current session. (We remember previous
+ * selected previously in the current session. (We remember previous
* settings because we must be able to restore a previous setting during
* transaction rollback, without doing any fresh catalog accesses.)
*
@@ -102,7 +102,7 @@ static int cliplen(const char *str, int len, int limit);
/*
- * Prepare for a future call to SetClientEncoding. Success should mean
+ * Prepare for a future call to SetClientEncoding. Success should mean
* that SetClientEncoding is guaranteed to succeed for this encoding request.
*
* (But note that success before backend_startup_complete does not guarantee
@@ -174,7 +174,7 @@ PrepareClientEncoding(int encoding)
/*
* We cannot yet remove any older entry for the same encoding pair,
- * since it could still be in use. SetClientEncoding will clean up.
+ * since it could still be in use. SetClientEncoding will clean up.
*/
return 0; /* success */
@@ -183,8 +183,8 @@ PrepareClientEncoding(int encoding)
{
/*
* If we're not in a live transaction, the only thing we can do is
- * restore a previous setting using the cache. This covers all
- * transaction-rollback cases. The only case it might not work for is
+ * restore a previous setting using the cache. This covers all
+ * transaction-rollback cases. The only case it might not work for is
* trying to change client_encoding on the fly by editing
* postgresql.conf and SIGHUP'ing. Which would probably be a stupid
* thing to do anyway.
@@ -564,7 +564,7 @@ pg_client_to_server(const char *s, int len)
* See the notes about string conversion functions at the top of this file.
*
* Unlike the other string conversion functions, this will apply validation
- * even if encoding == DatabaseEncoding->encoding. This is because this is
+ * even if encoding == DatabaseEncoding->encoding. This is because this is
* used to process data coming in from outside the database, and we never
* want to just assume validity.
*/
@@ -592,7 +592,7 @@ pg_any_to_server(const char *s, int len, int encoding)
* the selected client_encoding. If the client encoding is ASCII-safe
* then we just do a straight validation under that encoding. For an
* ASCII-unsafe encoding we have a problem: we dare not pass such data
- * to the parser but we have no way to convert it. We compromise by
+ * to the parser but we have no way to convert it. We compromise by
* rejecting the data if it contains any non-ASCII characters.
*/
if (PG_VALID_BE_ENCODING(encoding))
@@ -965,11 +965,11 @@ raw_pg_bind_textdomain_codeset(const char *domainname, int encoding)
* On most platforms, gettext defaults to the codeset implied by LC_CTYPE.
* When that matches the database encoding, we don't need to do anything. In
* CREATE DATABASE, we enforce or trust that the locale's codeset matches the
- * database encoding, except for the C locale. (On Windows, we also permit a
+ * database encoding, except for the C locale. (On Windows, we also permit a
* discrepancy under the UTF8 encoding.) For the C locale, explicitly bind
* gettext to the right codeset.
*
- * On Windows, gettext defaults to the Windows ANSI code page. This is a
+ * On Windows, gettext defaults to the Windows ANSI code page. This is a
* convenient departure for software that passes the strings to Windows ANSI
* APIs, but we don't do that. Compel gettext to use database encoding or,
* failing that, the LC_CTYPE encoding as it would on other platforms.
diff --git a/src/backend/utils/mb/wchar.c b/src/backend/utils/mb/wchar.c
index 5f65f1bc899..0cc753e6684 100644
--- a/src/backend/utils/mb/wchar.c
+++ b/src/backend/utils/mb/wchar.c
@@ -1512,7 +1512,7 @@ pg_utf8_islegal(const unsigned char *source, int length)
*
* Not knowing anything about the properties of the encoding in use, we just
* keep incrementing the last byte until we get a validly-encoded result,
- * or we run out of values to try. We don't bother to try incrementing
+ * or we run out of values to try. We don't bother to try incrementing
* higher-order bytes, so there's no growth in runtime for wider characters.
* (If we did try to do that, we'd need to consider the likelihood that 255
* is not a valid final byte in the encoding.)
@@ -1542,7 +1542,7 @@ pg_generic_charinc(unsigned char *charptr, int len)
* For a one-byte character less than 0x7F, we just increment the byte.
*
* For a multibyte character, every byte but the first must fall between 0x80
- * and 0xBF; and the first byte must be between 0xC0 and 0xF4. We increment
+ * and 0xBF; and the first byte must be between 0xC0 and 0xF4. We increment
* the last byte that's not already at its maximum value. If we can't find a
* byte that's less than the maximum allowable value, we simply fail. We also
* need some special-case logic to skip regions used for surrogate pair
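A simplified sketch of the byte-limit rule spelled out above (the real function also resets trailing bytes when it carries into an earlier byte, skips the surrogate region, and revalidates the result; all omitted here):

    static int
    utf8_charinc_sketch(unsigned char *charptr, int len)
    {
        int         i;

        for (i = len - 1; i >= 0; i--)
        {
            unsigned char maxval;

            if (len == 1)
                maxval = 0x7F;      /* single-byte (ASCII) character */
            else if (i == 0)
                maxval = 0xF4;      /* first byte of a multibyte char */
            else
                maxval = 0xBF;      /* continuation bytes run 0x80-0xBF */

            if (charptr[i] < maxval)
            {
                charptr[i]++;
                return 1;           /* found a byte to increment */
            }
        }
        return 0;                   /* every byte already at its maximum */
    }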
@@ -1803,8 +1803,8 @@ int
pg_encoding_verifymb(int encoding, const char *mbstr, int len)
{
return (PG_VALID_ENCODING(encoding) ?
- ((*pg_wchar_table[encoding].mbverify) ((const unsigned char *) mbstr, len)) :
- ((*pg_wchar_table[PG_SQL_ASCII].mbverify) ((const unsigned char *) mbstr, len)));
+ ((*pg_wchar_table[encoding].mbverify) ((const unsigned char *) mbstr, len)) :
+ ((*pg_wchar_table[PG_SQL_ASCII].mbverify) ((const unsigned char *) mbstr, len)));
}
/*
diff --git a/src/backend/utils/mb/wstrcmp.c b/src/backend/utils/mb/wstrcmp.c
index 64a9cf848e2..dad3ae023a3 100644
--- a/src/backend/utils/mb/wstrcmp.c
+++ b/src/backend/utils/mb/wstrcmp.c
@@ -23,7 +23,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/utils/mb/wstrncmp.c b/src/backend/utils/mb/wstrncmp.c
index 87c1f5afdaa..ea4823fc6f8 100644
--- a/src/backend/utils/mb/wstrncmp.c
+++ b/src/backend/utils/mb/wstrncmp.c
@@ -22,7 +22,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 15020c4508c..0401cd4d2a6 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -496,7 +496,7 @@ static int max_identifier_length;
static int block_size;
static int segment_size;
static int wal_block_size;
-static bool data_checksums;
+static bool data_checksums;
static int wal_segment_size;
static bool integer_datetimes;
static int effective_io_concurrency;
@@ -2148,7 +2148,7 @@ static struct config_int ConfigureNamesInt[] =
NULL
},
&max_replication_slots,
- 0, 0, MAX_BACKENDS /* XXX?*/,
+ 0, 0, MAX_BACKENDS /* XXX? */ ,
NULL, NULL, NULL
},
@@ -2543,7 +2543,11 @@ static struct config_int ConfigureNamesInt[] =
{"track_activity_query_size", PGC_POSTMASTER, RESOURCES_MEM,
gettext_noop("Sets the size reserved for pg_stat_activity.query, in bytes."),
NULL,
- /* There is no _bytes_ unit, so the user can't supply units for this. */
+
+ /*
+ * There is no _bytes_ unit, so the user can't supply units for
+ * this.
+ */
},
&pgstat_track_activity_query_size,
1024, 100, 102400,
@@ -3778,7 +3782,7 @@ get_guc_variables(void)
/*
- * Build the sorted array. This is split out so that it could be
+ * Build the sorted array. This is split out so that it could be
* re-executed after startup (eg, we could allow loadable modules to
* add vars, and then we'd need to re-sort).
*/
@@ -3935,7 +3939,7 @@ add_placeholder_variable(const char *name, int elevel)
/*
* The char* is allocated at the end of the struct since we have no
- * 'static' place to point to. Note that the current value, as well as
+ * 'static' place to point to. Note that the current value, as well as
* the boot and reset values, start out NULL.
*/
var->variable = (char **) (var + 1);
@@ -3977,7 +3981,7 @@ find_option(const char *name, bool create_placeholders, int elevel)
return *res;
/*
- * See if the name is an obsolete name for a variable. We assume that the
+ * See if the name is an obsolete name for a variable. We assume that the
* set of supported old names is short enough that a brute-force search is
* the best way.
*/
@@ -4636,7 +4640,7 @@ NewGUCNestLevel(void)
/*
* Do GUC processing at transaction or subtransaction commit or abort, or
* when exiting a function that has proconfig settings, or when undoing a
- * transient assignment to some GUC variables. (The name is thus a bit of
+ * transient assignment to some GUC variables. (The name is thus a bit of
* a misnomer; perhaps it should be ExitGUCNestLevel or some such.)
* During abort, we discard all GUC settings that were applied at nesting
* levels >= nestLevel. nestLevel == 1 corresponds to the main transaction.
@@ -5357,7 +5361,7 @@ validate_conf_option(struct config_generic * record, const char *name,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"%s\" requires a Boolean value",
+ errmsg("parameter \"%s\" requires a Boolean value",
name)));
return 0;
}
@@ -5387,8 +5391,8 @@ validate_conf_option(struct config_generic * record, const char *name,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": \"%s\"",
- name, value),
+ errmsg("invalid value for parameter \"%s\": \"%s\"",
+ name, value),
hintmsg ? errhint("%s", _(hintmsg)) : 0));
return 0;
}
@@ -5398,7 +5402,7 @@ validate_conf_option(struct config_generic * record, const char *name,
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("%d is outside the valid range for parameter \"%s\" (%d .. %d)",
- *((int *) newval), name, conf->min, conf->max)));
+ *((int *) newval), name, conf->min, conf->max)));
return 0;
}
@@ -5425,7 +5429,7 @@ validate_conf_option(struct config_generic * record, const char *name,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"%s\" requires a numeric value",
+ errmsg("parameter \"%s\" requires a numeric value",
name)));
return 0;
}
@@ -5435,7 +5439,7 @@ validate_conf_option(struct config_generic * record, const char *name,
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("%g is outside the valid range for parameter \"%s\" (%g .. %g)",
- *((double *) newval), name, conf->min, conf->max)));
+ *((double *) newval), name, conf->min, conf->max)));
return 0;
}
@@ -5512,9 +5516,9 @@ validate_conf_option(struct config_generic * record, const char *name,
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": \"%s\"",
- name, value),
- hintmsg ? errhint("%s", _(hintmsg)) : 0));
+ errmsg("invalid value for parameter \"%s\": \"%s\"",
+ name, value),
+ hintmsg ? errhint("%s", _(hintmsg)) : 0));
if (hintmsg != NULL)
pfree(hintmsg);
@@ -5667,7 +5671,7 @@ set_config_option(const char *name, const char *value,
* If a PGC_BACKEND parameter is changed in the config file,
* we want to accept the new value in the postmaster (whence
* it will propagate to subsequently-started backends), but
- * ignore it in existing backends. This is a tad klugy, but
+ * ignore it in existing backends. This is a tad klugy, but
* necessary because we don't re-read the config file during
* backend start.
*
@@ -5724,7 +5728,7 @@ set_config_option(const char *name, const char *value,
* An exception might be made if the reset value is assumed to be "safe".
*
* Note: this flag is currently used for "session_authorization" and
- * "role". We need to prohibit changing these inside a local userid
+ * "role". We need to prohibit changing these inside a local userid
* context because when we exit it, GUC won't be notified, leaving things
* out of sync. (This could be fixed by forcing a new GUC nesting level,
* but that would change behavior in possibly-undesirable ways.) Also, we
@@ -6515,7 +6519,7 @@ flatten_set_variable_args(const char *name, List *args)
else
{
/*
- * Plain string literal or identifier. For quote mode,
+ * Plain string literal or identifier. For quote mode,
* quote it if it's not a vanilla identifier.
*/
if (flags & GUC_LIST_QUOTE)
@@ -6681,7 +6685,7 @@ replace_auto_config_value(ConfigVariable **head_p, ConfigVariable **tail_p,
* configuration file (PG_AUTOCONF_FILENAME) intact.
*/
void
-AlterSystemSetConfigFile(AlterSystemStmt * altersysstmt)
+AlterSystemSetConfigFile(AlterSystemStmt *altersysstmt)
{
char *name;
char *value;
@@ -6698,7 +6702,7 @@ AlterSystemSetConfigFile(AlterSystemStmt * altersysstmt)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be superuser to execute ALTER SYSTEM command"))));
+ (errmsg("must be superuser to execute ALTER SYSTEM command"))));
/*
* Validate the name and arguments [value1, value2 ... ].
@@ -6724,7 +6728,7 @@ AlterSystemSetConfigFile(AlterSystemStmt * altersysstmt)
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
if ((record->context == PGC_INTERNAL) ||
(record->flags & GUC_DISALLOW_IN_FILE))
@@ -6737,7 +6741,7 @@ AlterSystemSetConfigFile(AlterSystemStmt * altersysstmt)
ERROR, true, NULL,
&newextra))
ereport(ERROR,
- (errmsg("invalid value for parameter \"%s\": \"%s\"", name, value)));
+ (errmsg("invalid value for parameter \"%s\": \"%s\"", name, value)));
/*
@@ -6754,8 +6758,8 @@ AlterSystemSetConfigFile(AlterSystemStmt * altersysstmt)
 * Only one backend at a time is allowed to operate on PG_AUTOCONF_FILENAME;
 * we ensure that by updating the contents of the file while holding
 * AutoFileLock. To ensure crash safety, first the contents are written to
- * a temporary file which is then renameed to PG_AUTOCONF_FILENAME. In case
- * there exists a temp file from previous crash, that can be reused.
+ * a temporary file which is then renamed to PG_AUTOCONF_FILENAME. In
+ * case a temp file exists from a previous crash, it can be reused.
*/
LWLockAcquire(AutoFileLock, LW_EXCLUSIVE);
@@ -6846,6 +6850,7 @@ ExecSetVariableStmt(VariableSetStmt *stmt, bool isTopLevel)
0);
break;
case VAR_SET_MULTI:
+
/*
* Special-case SQL syntaxes. The TRANSACTION and SESSION
* CHARACTERISTICS cases effectively set more than one variable
@@ -7131,7 +7136,7 @@ define_custom_variable(struct config_generic * variable)
* variable. Essentially, we need to duplicate all the active and stacked
* values, but with appropriate validation and datatype adjustment.
*
- * If an assignment fails, we report a WARNING and keep going. We don't
+ * If an assignment fails, we report a WARNING and keep going. We don't
* want to throw ERROR for bad values, because it'd bollix the add-on
* module that's presumably halfway through getting loaded. In such cases
* the default or previous state will become active instead.
@@ -7159,7 +7164,7 @@ define_custom_variable(struct config_generic * variable)
/*
* Free up as much as we conveniently can of the placeholder structure.
* (This neglects any stack items, so it's possible for some memory to be
- * leaked. Since this can only happen once per session per variable, it
+ * leaked. Since this can only happen once per session per variable, it
* doesn't seem worth spending much code on.)
*/
set_string_field(pHolder, pHolder->variable, NULL);
@@ -7232,7 +7237,7 @@ reapply_stacked_values(struct config_generic * variable,
else
{
/*
- * We are at the end of the stack. If the active/previous value is
+ * We are at the end of the stack. If the active/previous value is
* different from the reset value, it must represent a previously
* committed session value. Apply it, and then drop the stack entry
* that set_config_option will have created under the impression that
@@ -8424,7 +8429,7 @@ ParseLongOption(const char *string, char **name, char **value)
/*
* Handle options fetched from pg_db_role_setting.setconfig,
- * pg_proc.proconfig, etc. Caller must specify proper context/source/action.
+ * pg_proc.proconfig, etc. Caller must specify proper context/source/action.
*
* The array parameter must be an array of TEXT (it must not be NULL).
*/
@@ -8705,7 +8710,7 @@ GUCArrayReset(ArrayType *array)
* Validate a proposed option setting for GUCArrayAdd/Delete/Reset.
*
* name is the option name. value is the proposed value for the Add case,
- * or NULL for the Delete/Reset cases. If skipIfNoPermissions is true, it's
+ * or NULL for the Delete/Reset cases. If skipIfNoPermissions is true, it's
* not an error to have no permissions to set the option.
*
* Returns TRUE if OK, FALSE if skipIfNoPermissions is true and user does not
@@ -8786,7 +8791,7 @@ validate_option_array_item(const char *name, const char *value,
* ERRCODE_INVALID_PARAMETER_VALUE SQLSTATE for check hook failures.
*
* Note that GUC_check_errmsg() etc are just macros that result in a direct
- * assignment to the associated variables. That is ugly, but forced by the
+ * assignment to the associated variables. That is ugly, but forced by the
* limitations of C's macro mechanisms.
*/
void
diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c
index 6294ca30ed7..3aeceae67c3 100644
--- a/src/backend/utils/misc/ps_status.c
+++ b/src/backend/utils/misc/ps_status.c
@@ -109,7 +109,7 @@ static char **save_argv;
* from being clobbered by subsequent ps_display actions.
*
* (The original argv[] will not be overwritten by this routine, but may be
- * overwritten during init_ps_display. Also, the physical location of the
+ * overwritten during init_ps_display. Also, the physical location of the
* environment strings may be moved, so this should be called before any code
* that might try to hang onto a getenv() result.)
*/
@@ -210,7 +210,7 @@ save_ps_display_args(int argc, char **argv)
/*
* Call this once during subprocess startup to set the identification
- * values. At this point, the original argv[] array may be overwritten.
+ * values. At this point, the original argv[] array may be overwritten.
*/
void
init_ps_display(const char *username, const char *dbname,
@@ -360,7 +360,7 @@ set_ps_display(const char *activity, bool force)
/*
* Returns what's currently in the ps display, in case someone needs
- * it. Note that only the activity part is returned. On some platforms
+ * it. Note that only the activity part is returned. On some platforms
* the string will not be null-terminated, so return the effective
* length into *displen.
*/
diff --git a/src/backend/utils/misc/rbtree.c b/src/backend/utils/misc/rbtree.c
index 556045dd4b6..e3efd4c08bd 100644
--- a/src/backend/utils/misc/rbtree.c
+++ b/src/backend/utils/misc/rbtree.c
@@ -13,7 +13,7 @@
*
* Red-black trees are a type of balanced binary tree wherein (1) any child of
* a red node is always black, and (2) every path from root to leaf traverses
- * an equal number of black nodes. From these properties, it follows that the
+ * an equal number of black nodes. From these properties, it follows that the
* longest path from root to leaf is only about twice as long as the shortest,
* so lookups are guaranteed to run in O(lg n) time.
*
@@ -102,7 +102,7 @@ static RBNode sentinel = {InitialState, RBBLACK, RBNIL, RBNIL, NULL};
* valid data! freefunc can be NULL if caller doesn't require retail
* space reclamation.
*
- * The RBTree node is palloc'd in the caller's memory context. Note that
+ * The RBTree node is palloc'd in the caller's memory context. Note that
* all contents of the tree are actually allocated by the caller, not here.
*
* Since tree contents are managed by the caller, there is currently not
@@ -282,10 +282,10 @@ rb_rotate_right(RBTree *rb, RBNode *x)
/*
* Maintain Red-Black tree balance after inserting node x.
*
- * The newly inserted node is always initially marked red. That may lead to
+ * The newly inserted node is always initially marked red. That may lead to
* a situation where a red node has a red child, which is prohibited. We can
* always fix the problem by a series of color changes and/or "rotations",
- * which move the problem progressively higher up in the tree. If one of the
+ * which move the problem progressively higher up in the tree. If one of the
* two red nodes is the root, we can always fix the problem by changing the
* root from red to black.
*
@@ -296,7 +296,7 @@ static void
rb_insert_fixup(RBTree *rb, RBNode *x)
{
/*
- * x is always a red node. Initially, it is the newly inserted node. Each
+ * x is always a red node. Initially, it is the newly inserted node. Each
* iteration of this loop moves it higher up in the tree.
*/
while (x != rb->root && x->parent->color == RBRED)
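A standalone sketch of the classic fixup loop this comment describes, with our own types; the two symmetric cases are folded together via a child-index array, whereas rbtree.c spells them out and uses a sentinel node instead of NULL:

#include <stddef.h>

typedef enum { SK_BLACK, SK_RED } SkColor;

typedef struct SkNode
{
    SkColor     color;
    struct SkNode *parent;
    struct SkNode *child[2];    /* child[0] = left, child[1] = right */
} SkNode;

/* Rotate x downward so it becomes the child on side `dir` of its
 * former child on the opposite side. */
static void
rotate(SkNode **root, SkNode *x, int dir)
{
    SkNode *y = x->child[!dir];

    x->child[!dir] = y->child[dir];
    if (y->child[dir])
        y->child[dir]->parent = x;
    y->parent = x->parent;
    if (x->parent == NULL)
        *root = y;
    else
        x->parent->child[x == x->parent->child[1]] = y;
    y->child[dir] = x;
    x->parent = y;
}

static void
insert_fixup(SkNode **root, SkNode *x)
{
    /* x is always red; each iteration moves the red-red problem up */
    while (x != *root && x->parent->color == SK_RED)
    {
        SkNode *grand = x->parent->parent;
        int     pside = (x->parent == grand->child[1]);
        SkNode *uncle = grand->child[!pside];

        if (uncle && uncle->color == SK_RED)
        {
            /* recolor; any remaining violation is two levels higher */
            x->parent->color = SK_BLACK;
            uncle->color = SK_BLACK;
            grand->color = SK_RED;
            x = grand;
        }
        else
        {
            /* straighten an "inner" node first, then one rotation at
             * the grandparent fixes the violation for good */
            if (x == x->parent->child[!pside])
            {
                x = x->parent;
                rotate(root, x, pside);
            }
            x->parent->color = SK_BLACK;
            grand->color = SK_RED;
            rotate(root, grand, !pside);
        }
    }
    (*root)->color = SK_BLACK;  /* blackening the root is always safe */
}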
@@ -481,7 +481,7 @@ rb_delete_fixup(RBTree *rb, RBNode *x)
while (x != rb->root && x->color == RBBLACK)
{
/*
- * Left and right cases are symmetric. Any nodes that are children of
+ * Left and right cases are symmetric. Any nodes that are children of
* x have a black-height one less than the remainder of the nodes in
* the tree. We rotate and recolor nodes to move the problem up the
* tree: at some stage we'll either fix the problem, or reach the root
diff --git a/src/backend/utils/misc/timeout.c b/src/backend/utils/misc/timeout.c
index f56a63b7047..ec259a6f8e2 100644
--- a/src/backend/utils/misc/timeout.c
+++ b/src/backend/utils/misc/timeout.c
@@ -57,7 +57,7 @@ static timeout_params *volatile active_timeouts[MAX_TIMEOUTS];
* Note that we don't bother to reset any pending timer interrupt when we
* disable the signal handler; it's not really worth the cycles to do so,
* since the probability of the interrupt actually occurring while we have
- * it disabled is low. See comments in schedule_alarm() about that.
+ * it disabled is low. See comments in schedule_alarm() about that.
*/
static volatile sig_atomic_t alarm_enabled = false;
@@ -69,7 +69,7 @@ static volatile sig_atomic_t alarm_enabled = false;
* Internal helper functions
*
* For all of these, it is caller's responsibility to protect them from
- * interruption by the signal handler. Generally, call disable_alarm()
+ * interruption by the signal handler. Generally, call disable_alarm()
* first to prevent interruption, then update state, and last call
* schedule_alarm(), which will re-enable the signal handler if needed.
*****************************************************************************/
@@ -144,7 +144,7 @@ enable_timeout(TimeoutId id, TimestampTz now, TimestampTz fin_time)
Assert(all_timeouts[id].timeout_handler != NULL);
/*
- * If this timeout was already active, momentarily disable it. We
+ * If this timeout was already active, momentarily disable it. We
* interpret the call as a directive to reschedule the timeout.
*/
i = find_active_timeout(id);
@@ -152,7 +152,7 @@ enable_timeout(TimeoutId id, TimestampTz now, TimestampTz fin_time)
remove_timeout_index(i);
/*
- * Find out the index where to insert the new timeout. We sort by
+ * Find out the index where to insert the new timeout. We sort by
* fin_time, and for equal fin_time by priority.
*/
for (i = 0; i < num_active_timeouts; i++)
@@ -214,18 +214,18 @@ schedule_alarm(TimestampTz now)
*
* Because we didn't bother to reset the timer in disable_alarm(),
* it's possible that a previously-set interrupt will fire between
- * enable_alarm() and setitimer(). This is safe, however. There are
+ * enable_alarm() and setitimer(). This is safe, however. There are
* two possible outcomes:
*
* 1. The signal handler finds nothing to do (because the nearest
* timeout event is still in the future). It will re-set the timer
- * and return. Then we'll overwrite the timer value with a new one.
+ * and return. Then we'll overwrite the timer value with a new one.
* This will mean that the timer fires a little later than we
* intended, but only by the amount of time it takes for the signal
* handler to do nothing useful, which shouldn't be much.
*
* 2. The signal handler executes and removes one or more timeout
- * events. When it returns, either the queue is now empty or the
+ * events. When it returns, either the queue is now empty or the
* frontmost event is later than the one we looked at above. So we'll
* overwrite the timer value with one that is too soon (plus or minus
* the signal handler's execution time), causing a useless interrupt
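As a minimal sketch of that discipline (standalone POSIX C, not the timeout.c code): disabling merely clears a flag, and re-arming re-enables the flag before calling setitimer(), so an interrupt landing in the window is handled rather than lost:

#include <signal.h>
#include <string.h>
#include <sys/time.h>

static volatile sig_atomic_t alarm_enabled = 0;

static void
disable_alarm(void)
{
    /* don't reset any pending timer; the handler checks this flag and
     * returns without touching shared state when it is off */
    alarm_enabled = 0;
}

static void
schedule_alarm(long secs, long usecs)
{
    struct itimerval timeval;

    memset(&timeval, 0, sizeof(timeval));
    timeval.it_value.tv_sec = secs;
    timeval.it_value.tv_usec = usecs;

    /* enable first, then arm: per the analysis above, a stray early
     * interrupt is harmless in either possible ordering */
    alarm_enabled = 1;
    setitimer(ITIMER_REAL, &timeval, NULL);
}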
@@ -266,14 +266,14 @@ handle_sig_alarm(SIGNAL_ARGS)
* mainline is waiting for a lock). If SIGINT or similar arrives while
* this code is running, we'd lose control and perhaps leave our data
* structures in an inconsistent state. Disable immediate interrupts, and
- * just to be real sure, bump the holdoff counter as well. (The reason
+ * just to be real sure, bump the holdoff counter as well. (The reason
* for this belt-and-suspenders-too approach is to make sure that nothing
* bad happens if a timeout handler calls code that manipulates
* ImmediateInterruptOK.)
*
* Note: it's possible for a SIGINT to interrupt handle_sig_alarm before
* we manage to do this; the net effect would be as if the SIGALRM event
- * had been silently lost. Therefore error recovery must include some
+ * had been silently lost. Therefore error recovery must include some
* action that will allow any lost interrupt to be rescheduled. Disabling
* some or all timeouts is sufficient, or if that's not appropriate,
* reschedule_timeouts() can be called. Also, the signal blocking hazard
@@ -434,7 +434,7 @@ RegisterTimeout(TimeoutId id, timeout_handler_proc handler)
*
* This can be used during error recovery in case query cancel resulted in loss
* of a SIGALRM event (due to longjmp'ing out of handle_sig_alarm before it
- * could do anything). But note it's not necessary if any of the public
+ * could do anything). But note it's not necessary if any of the public
* enable_ or disable_timeout functions are called in the same area, since
* those all do schedule_alarm() internally if needed.
*/
@@ -503,7 +503,7 @@ enable_timeout_at(TimeoutId id, TimestampTz fin_time)
* Enable multiple timeouts at once.
*
* This works like calling enable_timeout_after() and/or enable_timeout_at()
- * multiple times. Use this to reduce the number of GetCurrentTimestamp()
+ * multiple times. Use this to reduce the number of GetCurrentTimestamp()
* and setitimer() calls needed to establish multiple timeouts.
*/
void
diff --git a/src/backend/utils/misc/tzparser.c b/src/backend/utils/misc/tzparser.c
index 24ccbe56856..6a5a7b39abf 100644
--- a/src/backend/utils/misc/tzparser.c
+++ b/src/backend/utils/misc/tzparser.c
@@ -4,7 +4,7 @@
* Functions for parsing timezone offset files
*
* Note: this code is invoked from the check_hook for the GUC variable
- * timezone_abbreviations. Therefore, it should report problems using
+ * timezone_abbreviations. Therefore, it should report problems using
* GUC_check_errmsg() and related functions, and try to avoid throwing
* elog(ERROR). This is not completely bulletproof at present --- in
* particular out-of-memory will throw an error. Could probably fix with
@@ -179,7 +179,7 @@ addToArray(tzEntry **base, int *arraysize, int n,
/*
* Search the array for a duplicate; as a useful side effect, the array is
- * maintained in sorted order. We use strcmp() to ensure we match the
+ * maintained in sorted order. We use strcmp() to ensure we match the
* sort order datetime.c expects.
*/
arrayptr = *base;
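A hedged sketch of that insert-with-duplicate-check on a plain string array (the real code works on tzEntry structs and grows the array as needed):

#include <stdbool.h>
#include <string.h>

/* Insert item into arr (kept sorted by strcmp); returns the new count.
 * Sets *isdup instead of inserting when the item is already present. */
static int
add_sorted(char **arr, int n, char *item, bool *isdup)
{
    int     i;

    *isdup = false;
    for (i = 0; i < n; i++)
    {
        int     cmp = strcmp(item, arr[i]);

        if (cmp == 0)
        {
            *isdup = true;      /* caller decides: merge or error */
            return n;
        }
        if (cmp < 0)
            break;              /* found the insertion point */
    }
    /* shift the tail up one slot; caller guarantees capacity */
    memmove(&arr[i + 1], &arr[i], (n - i) * sizeof(char *));
    arr[i] = item;
    return n + 1;
}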
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index 099200cb66a..743455e4bcc 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -38,7 +38,7 @@
* request, even if it was much larger than necessary. This led to more
* and more wasted space in allocated chunks over time. To fix, get rid
* of the midrange behavior: we now handle only "small" power-of-2-size
- * chunks as chunks. Anything "large" is passed off to malloc(). Change
+ * chunks as chunks. Anything "large" is passed off to malloc(). Change
* the number of freelists to change the small/large boundary.
*
*
@@ -54,7 +54,7 @@
* Thus, if someone makes the common error of writing past what they've
* requested, the problem is likely to go unnoticed ... until the day when
* there *isn't* any wasted space, perhaps because of different memory
- * alignment on a new platform, or some other effect. To catch this sort
+ * alignment on a new platform, or some other effect. To catch this sort
* of problem, the MEMORY_CONTEXT_CHECKING option stores 0x7E just beyond
* the requested space whenever the request is less than the actual chunk
* size, and verifies that the byte is undamaged when the chunk is freed.
@@ -189,7 +189,7 @@ typedef AllocSetContext *AllocSet;
/*
* AllocBlock
* An AllocBlock is the unit of memory that is obtained by aset.c
- * from malloc(). It contains one or more AllocChunks, which are
+ * from malloc(). It contains one or more AllocChunks, which are
* the units requested by palloc() and freed by pfree(). AllocChunks
* cannot be returned to malloc() individually, instead they are put
* on freelists by pfree() and re-used by the next palloc() that has
@@ -326,7 +326,7 @@ AllocSetFreeIndex(Size size)
/*
* At this point we need to obtain log2(tsize)+1, ie, the number of
- * not-all-zero bits at the right. We used to do this with a
+ * not-all-zero bits at the right. We used to do this with a
* shift-and-count loop, but this function is enough of a hotspot to
* justify micro-optimization effort. The best approach seems to be
* to use a lookup table. Note that this code assumes that
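A standalone sketch of the lookup-table idea (built at run time here for brevity; the table in aset.c is a static initializer):

#include <stddef.h>

static unsigned char LogTable256[256];

static void
init_log_table(void)
{
    int     i;

    LogTable256[0] = 0;
    for (i = 1; i < 256; i++)
        LogTable256[i] = LogTable256[i / 2] + 1;    /* floor(log2(i)) + 1 */
}

/* Map a request size to a power-of-2 freelist index; minbits is log2
 * of the smallest chunk size. */
static int
free_index(size_t size, int minbits)
{
    int     idx = 0;

    if (size > ((size_t) 1 << minbits))
    {
        unsigned int t = (unsigned int) ((size - 1) >> minbits);

        /* peel off whole bytes, then finish with one table lookup */
        while (t > 0xFF)
        {
            t >>= 8;
            idx += 8;
        }
        idx += LogTable256[t];
    }
    return idx;
}

With minbits = 3, a 9-byte request lands on freelist 1 (16-byte chunks) and a 17-byte request on freelist 2 (32-byte chunks).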
@@ -544,7 +544,7 @@ AllocSetInit(MemoryContext context)
* Actually, this routine has some discretion about what to do.
* It should mark all allocated chunks freed, but it need not necessarily
* give back all the resources the set owns. Our actual implementation is
- * that we hang onto any "keeper" block specified for the set. In this way,
+ * that we hang onto any "keeper" block specified for the set. In this way,
* we don't thrash malloc() when a context is repeatedly reset after small
* allocations, which is typical behavior for per-tuple contexts.
*/
@@ -794,7 +794,7 @@ AllocSetAlloc(MemoryContext context, Size size)
/*
* In most cases, we'll get back the index of the next larger
- * freelist than the one we need to put this chunk on. The
+ * freelist than the one we need to put this chunk on. The
* exception is when availchunk is exactly a power of 2.
*/
if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
@@ -953,7 +953,7 @@ AllocSetFree(MemoryContext context, void *pointer)
{
/*
* Big chunks are certain to have been allocated as single-chunk
- * blocks. Find the containing block and return it to malloc().
+ * blocks. Find the containing block and return it to malloc().
*/
AllocBlock block = set->blocks;
AllocBlock prevblock = NULL;
@@ -1080,7 +1080,7 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
if (oldsize > set->allocChunkLimit)
{
/*
- * The chunk must have been allocated as a single-chunk block. Find
+ * The chunk must have been allocated as a single-chunk block. Find
* the containing block and use realloc() to make it bigger with
* minimum space wastage.
*/
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index 132b04d7c91..e83e76dc0f3 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -183,7 +183,7 @@ MemoryContextResetChildren(MemoryContext context)
*
* The type-specific delete routine removes all subsidiary storage
* for the context, but we have to delete the context node itself,
- * as well as recurse to get the children. We must also delink the
+ * as well as recurse to get the children. We must also delink the
* node from its parent, if it has one.
*/
void
@@ -487,22 +487,22 @@ MemoryContextContains(MemoryContext context, void *pointer)
* we want to be sure that we don't leave the context tree invalid
* in case of failure (such as insufficient memory to allocate the
* context node itself). The procedure goes like this:
- * 1. Context-type-specific routine first calls MemoryContextCreate(),
+ * 1. Context-type-specific routine first calls MemoryContextCreate(),
* passing the appropriate tag/size/methods values (the methods
* pointer will ordinarily point to statically allocated data).
* The parent and name parameters usually come from the caller.
- * 2. MemoryContextCreate() attempts to allocate the context node,
+ * 2. MemoryContextCreate() attempts to allocate the context node,
* plus space for the name. If this fails we can ereport() with no
* damage done.
- * 3. We fill in all of the type-independent MemoryContext fields.
- * 4. We call the type-specific init routine (using the methods pointer).
+ * 3. We fill in all of the type-independent MemoryContext fields.
+ * 4. We call the type-specific init routine (using the methods pointer).
* The init routine is required to make the node minimally valid
* with zero chance of failure --- it can't allocate more memory,
* for example.
- * 5. Now we have a minimally valid node that can behave correctly
+ * 5. Now we have a minimally valid node that can behave correctly
* when told to reset or delete itself. We link the node to its
* parent (if any), making the node part of the context tree.
- * 6. We return to the context-type-specific routine, which finishes
+ * 6. We return to the context-type-specific routine, which finishes
* up type-specific initialization. This routine can now do things
* that might fail (like allocate more memory), so long as it's
* sure the node is left in a state that delete will handle.
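A sketch of how a context type plugs into that protocol. HypotheticalContextData, hypothetical_methods, and hypothetical_extra_setup are stand-ins of ours; the MemoryContextCreate signature is the real one of this era:

typedef struct HypotheticalContextData
{
    MemoryContextData header;   /* type-independent fields, must be first */
    /* ... type-specific fields, filled in at step 6 ... */
} HypotheticalContextData;

MemoryContext
HypotheticalContextCreate(MemoryContext parent, const char *name)
{
    MemoryContext node;

    /* steps 1-5: allocate the node and name, fill the common fields,
     * run the (cannot-fail) init method, link into the context tree */
    node = MemoryContextCreate(T_AllocSetContext,   /* tag borrowed here */
                               sizeof(HypotheticalContextData),
                               &hypothetical_methods,       /* assumed */
                               parent,
                               name);

    /* step 6: only now do work that may fail; an error here leaves a
     * node that MemoryContextDelete can still clean up */
    hypothetical_extra_setup(node);                 /* assumed */

    return node;
}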
@@ -514,7 +514,7 @@ MemoryContextContains(MemoryContext context, void *pointer)
*
* Normally, the context node and the name are allocated from
* TopMemoryContext (NOT from the parent context, since the node must
- * survive resets of its parent context!). However, this routine is itself
+ * survive resets of its parent context!). However, this routine is itself
* used to create TopMemoryContext! If we see that TopMemoryContext is NULL,
* we assume we are creating TopMemoryContext and use malloc() to allocate
* the node.
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index f5e5ad4fe9e..c1b13c360f3 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -144,14 +144,14 @@ GetPortalByName(const char *name)
* Get the "primary" stmt within a portal, ie, the one marked canSetTag.
*
* Returns NULL if no such stmt. If multiple PlannedStmt structs within the
- * portal are marked canSetTag, returns the first one. Neither of these
+ * portal are marked canSetTag, returns the first one. Neither of these
* cases should occur in present usages of this function.
*
* Copes if given a list of Querys --- can't happen in a portal, but this
* code also supports plancache.c, which needs both cases.
*
* Note: the reason this is just handed a List is so that plancache.c
- * can share the code. For use with a portal, use PortalGetPrimaryStmt
+ * can share the code. For use with a portal, use PortalGetPrimaryStmt
* rather than calling this directly.
*/
Node *
@@ -277,7 +277,7 @@ CreateNewPortal(void)
* you can pass a constant string, perhaps "(query not available)".)
*
* commandTag shall be NULL if and only if the original query string
- * (before rewriting) was an empty string. Also, the passed commandTag must
+ * (before rewriting) was an empty string. Also, the passed commandTag must
* be a pointer to a constant string, since it is not copied.
*
* If cplan is provided, then it is a cached plan containing the stmts, and
@@ -480,14 +480,14 @@ PortalDrop(Portal portal, bool isTopCommit)
/*
* Allow portalcmds.c to clean up the state it knows about, in particular
- * shutting down the executor if still active. This step potentially runs
+ * shutting down the executor if still active. This step potentially runs
* user-defined code so failure has to be expected. It's the cleanup
* hook's responsibility to not try to do that more than once, in the case
* that failure occurs and then we come back to drop the portal again
* during transaction abort.
*
* Note: in most paths of control, this will have been done already in
- * MarkPortalDone or MarkPortalFailed. We're just making sure.
+ * MarkPortalDone or MarkPortalFailed. We're just making sure.
*/
if (PointerIsValid(portal->cleanup))
{
@@ -507,12 +507,12 @@ PortalDrop(Portal portal, bool isTopCommit)
PortalReleaseCachedPlan(portal);
/*
- * Release any resources still attached to the portal. There are several
+ * Release any resources still attached to the portal. There are several
* cases being covered here:
*
* Top transaction commit (indicated by isTopCommit): normally we should
* do nothing here and let the regular end-of-transaction resource
- * releasing mechanism handle these resources too. However, if we have a
+ * releasing mechanism handle these resources too. However, if we have a
* FAILED portal (eg, a cursor that got an error), we'd better clean up
* its resources to avoid resource-leakage warning messages.
*
@@ -524,7 +524,7 @@ PortalDrop(Portal portal, bool isTopCommit)
* cleaned up in transaction abort.
*
* Ordinary portal drop: must release resources. However, if the portal
- * is not FAILED then we do not release its locks. The locks become the
+ * is not FAILED then we do not release its locks. The locks become the
* responsibility of the transaction's ResourceOwner (since it is the
* parent of the portal's owner) and will be released when the transaction
* eventually ends.
@@ -611,7 +611,7 @@ PortalHashTableDeleteAll(void)
* Holdable cursors created in this transaction need to be converted to
* materialized form, since we are going to close down the executor and
* release locks. Non-holdable portals created in this transaction are
- * simply removed. Portals remaining from prior transactions should be
+ * simply removed. Portals remaining from prior transactions should be
* left untouched.
*
* Returns TRUE if any portals changed state (possibly causing user-defined
diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c
index 0da061a4dcb..0955bccb3ee 100644
--- a/src/backend/utils/resowner/resowner.c
+++ b/src/backend/utils/resowner/resowner.c
@@ -177,7 +177,7 @@ ResourceOwnerCreate(ResourceOwner parent, const char *name)
* but don't delete the owner objects themselves.
*
* Note that this executes just one phase of release, and so typically
- * must be called three times. We do it this way because (a) we want to
+ * must be called three times. We do it this way because (a) we want to
* do all the recursion separately for each phase, thereby preserving
* the needed order of operations; and (b) xact.c may have other operations
* to do between the phases.
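Concretely, the caller-side sequence is a sketch like this (the phase names and ResourceOwnerRelease signature are real; the last argument is isTopLevel, and the comment placement is illustrative):

ResourceOwnerRelease(owner, RESOURCE_RELEASE_BEFORE_LOCKS, isCommit, true);
/* ... xact.c releases the transaction's locks between these calls ... */
ResourceOwnerRelease(owner, RESOURCE_RELEASE_LOCKS, isCommit, true);
ResourceOwnerRelease(owner, RESOURCE_RELEASE_AFTER_LOCKS, isCommit, true);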
@@ -251,7 +251,7 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
*
* During a commit, there shouldn't be any remaining pins --- that
* would indicate failure to clean up the executor correctly --- so
- * issue warnings. In the abort case, just clean up quietly.
+ * issue warnings. In the abort case, just clean up quietly.
*
* We are careful to do the releasing back-to-front, so as to avoid
* O(N^2) behavior in ResourceOwnerForgetBuffer().
@@ -439,7 +439,7 @@ ResourceOwnerDelete(ResourceOwner owner)
/*
* We delink the owner from its parent before deleting it, so that if
* there's an error we won't have deleted/busted owners still attached to
- * the owner tree. Better a leak than a crash.
+ * the owner tree. Better a leak than a crash.
*/
ResourceOwnerNewParent(owner, NULL);
@@ -633,7 +633,7 @@ ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
/*
* Scan back-to-front because it's more likely we are releasing a
- * recently pinned buffer. This isn't always the case of course, but
+ * recently pinned buffer. This isn't always the case of course, but
* it's the way to bet.
*/
for (i = nb1; i >= 0; i--)
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index d27e0bf1d80..106b917f72f 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -7,14 +7,14 @@
* tuplesort.c). Merging is an ideal algorithm for tape devices, but if
* we implement it on disk by creating a separate file for each "tape",
* there is an annoying problem: the peak space usage is at least twice
- * the volume of actual data to be sorted. (This must be so because each
+ * the volume of actual data to be sorted. (This must be so because each
* datum will appear in both the input and output tapes of the final
- * merge pass. For seven-tape polyphase merge, which is otherwise a
+ * merge pass. For seven-tape polyphase merge, which is otherwise a
* pretty good algorithm, peak usage is more like 4x actual data volume.)
*
* We can work around this problem by recognizing that any one tape
* dataset (with the possible exception of the final output) is written
- * and read exactly once in a perfectly sequential manner. Therefore,
+ * and read exactly once in a perfectly sequential manner. Therefore,
* a datum once read will not be required again, and we can recycle its
* space for use by the new tape dataset(s) being generated. In this way,
* the total space usage is essentially just the actual data volume, plus
@@ -55,7 +55,7 @@
* To support the above policy of writing to the lowest free block,
* ltsGetFreeBlock sorts the list of free block numbers into decreasing
* order each time it is asked for a block and the list isn't currently
- * sorted. This is an efficient way to handle it because we expect cycles
+ * sorted. This is an efficient way to handle it because we expect cycles
* of releasing many blocks followed by re-using many blocks, due to
* tuplesort.c's "preread" behavior.
*
@@ -117,7 +117,7 @@ typedef struct LogicalTape
/*
* The total data volume in the logical tape is numFullBlocks * BLCKSZ +
- * lastBlockBytes. BUT: we do not update lastBlockBytes during writing,
+ * lastBlockBytes. BUT: we do not update lastBlockBytes during writing,
* only at completion of a write phase.
*/
long numFullBlocks; /* number of complete blocks in log tape */
@@ -157,7 +157,7 @@ struct LogicalTapeSet
*
* If blocksSorted is true then the block numbers in freeBlocks are in
* *decreasing* order, so that removing the last entry gives us the lowest
- * free block. We re-sort the blocks whenever a block is demanded; this
+ * free block. We re-sort the blocks whenever a block is demanded; this
* should be reasonably efficient given the expected usage pattern.
*/
bool forgetFreeSpace; /* are we remembering free blocks? */
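A standalone sketch of that free-list discipline (fixed-size array for brevity; the real code grows the array and tracks more state):

#include <stdbool.h>
#include <stdlib.h>

static long freeBlocks[1024];   /* capacity handling omitted */
static int  nFreeBlocks = 0;
static bool blocksSorted = true;
static long nextBlockNumber = 0;

/* qsort comparator: *decreasing* block number */
static int
freeblock_cmp(const void *a, const void *b)
{
    long    av = *(const long *) a;
    long    bv = *(const long *) b;

    return (av < bv) - (av > bv);
}

static void
release_block(long blocknum)
{
    freeBlocks[nFreeBlocks++] = blocknum;
    blocksSorted = false;       /* appending unsorts the array */
}

static long
get_free_block(void)
{
    if (nFreeBlocks > 0)
    {
        if (!blocksSorted)
        {
            qsort(freeBlocks, nFreeBlocks, sizeof(long), freeblock_cmp);
            blocksSorted = true;
        }
        return freeBlocks[--nFreeBlocks];   /* last entry = lowest block */
    }
    return nextBlockNumber++;   /* no free block: extend the file */
}

Sorting only on demand makes the release-many-then-reuse-many cycle cheap: each burst of releases costs one sort, not one per block.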
@@ -218,7 +218,7 @@ ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
/*
* Read a block-sized buffer from the specified block of the underlying file.
*
- * No need for an error return convention; we ereport() on any error. This
+ * No need for an error return convention; we ereport() on any error. This
* module should never attempt to read a block it doesn't know is there.
*/
static void
@@ -353,7 +353,7 @@ ltsRecordBlockNum(LogicalTapeSet *lts, IndirectBlock *indirect,
/*
* Reset a logical tape's indirect-block hierarchy after a write pass
- * to prepare for reading. We dump out partly-filled blocks except
+ * to prepare for reading. We dump out partly-filled blocks except
* at the top of the hierarchy, and we rewind each level to the start.
* This call returns the first data block number, or -1L if the tape
* is empty.
@@ -540,7 +540,7 @@ LogicalTapeSetCreate(int ntapes)
/*
* Initialize per-tape structs. Note we allocate the I/O buffer and
* first-level indirect block for a tape only when it is first actually
- * written to. This avoids wasting memory space when tuplesort.c
+ * written to. This avoids wasting memory space when tuplesort.c
* overestimates the number of tapes needed.
*/
for (i = 0; i < ntapes; i++)
@@ -591,7 +591,7 @@ LogicalTapeSetClose(LogicalTapeSet *lts)
* Mark a logical tape set as not needing management of free space anymore.
*
* This should be called if the caller does not intend to write any more data
- * into the tape set, but is reading from un-frozen tapes. Since no more
+ * into the tape set, but is reading from un-frozen tapes. Since no more
* writes are planned, remembering free blocks is no longer useful. Setting
* this flag lets us avoid wasting time and space in ltsReleaseBlock(), which
* is not designed to handle large numbers of free blocks.
@@ -732,7 +732,7 @@ LogicalTapeRewind(LogicalTapeSet *lts, int tapenum, bool forWrite)
else
{
/*
- * Completion of a read phase. Rewind and prepare for write.
+ * Completion of a read phase. Rewind and prepare for write.
*
* NOTE: we assume the caller has read the tape to the end; otherwise
* untouched data and indirect blocks will not have been freed. We
@@ -826,7 +826,7 @@ LogicalTapeRead(LogicalTapeSet *lts, int tapenum,
*
* This *must* be called just at the end of a write pass, before the
* tape is rewound (after rewind is too late!). It performs a rewind
- * and switch to read mode "for free". An immediately following rewind-
+ * and switch to read mode "for free". An immediately following rewind-
* for-read call is OK but not necessary.
*/
void
@@ -862,7 +862,7 @@ LogicalTapeFreeze(LogicalTapeSet *lts, int tapenum)
}
/*
- * Backspace the tape a given number of bytes. (We also support a more
+ * Backspace the tape a given number of bytes. (We also support a more
* general seek interface, see below.)
*
* *Only* a frozen-for-read tape can be backed up; we don't support
@@ -966,7 +966,7 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
return false;
/*
- * OK, advance or back up to the target block. This implementation would
+ * OK, advance or back up to the target block. This implementation would
* be pretty inefficient for long seeks, but we really aren't expecting
* that (a seek over one tuple is typical).
*/
@@ -999,7 +999,7 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
* Obtain current position in a form suitable for a later LogicalTapeSeek.
*
* NOTE: it'd be OK to do this during write phase with intention of using
- * the position for a seek after freezing. Not clear if anyone needs that.
+ * the position for a seek after freezing. Not clear if anyone needs that.
*/
void
LogicalTapeTell(LogicalTapeSet *lts, int tapenum,
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 8b520c160ce..aa0f6d8e047 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -6,7 +6,7 @@
* This module handles sorting of heap tuples, index tuples, or single
* Datums (and could easily support other kinds of sortable objects,
* if necessary). It works efficiently for both small and large amounts
- * of data. Small amounts are sorted in-memory using qsort(). Large
+ * of data. Small amounts are sorted in-memory using qsort(). Large
* amounts are sorted using temporary files and a standard external sort
* algorithm.
*
@@ -40,7 +40,7 @@
* into sorted runs in temporary tapes, emitting just enough tuples at each
* step to get back within the workMem limit. Whenever the run number at
* the top of the heap changes, we begin a new run with a new output tape
- * (selected per Algorithm D). After the end of the input is reached,
+ * (selected per Algorithm D). After the end of the input is reached,
* we dump out remaining tuples in memory into a final run (or two),
* then merge the runs using Algorithm D.
*
@@ -57,17 +57,17 @@
* access at all, defeating the read-ahead methods used by most Unix kernels.
* Worse, the output tape gets written into a very random sequence of blocks
* of the temp file, ensuring that things will be even worse when it comes
- * time to read that tape. A straightforward merge pass thus ends up doing a
+ * time to read that tape. A straightforward merge pass thus ends up doing a
* lot of waiting for disk seeks. We can improve matters by prereading from
* each source tape sequentially, loading about workMem/M bytes from each tape
* in turn. Then we run the merge algorithm, writing but not reading until
- * one of the preloaded tuple series runs out. Then we switch back to preread
+ * one of the preloaded tuple series runs out. Then we switch back to preread
* mode, fill memory again, and repeat. This approach helps to localize both
* read and write accesses.
*
* When the caller requests random access to the sort result, we form
* the final sorted run on a logical tape which is then "frozen", so
- * that we can access it randomly. When the caller does not need random
+ * that we can access it randomly. When the caller does not need random
* access, we return from tuplesort_performsort() as soon as we are down
* to one run per logical tape. The final merge is then performed
* on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
@@ -77,7 +77,7 @@
* grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
* to Knuth's figure 70 (section 5.4.2). However, Knuth is assuming that
* tape drives are expensive beasts, and in particular that there will always
- * be many more runs than tape drives. In our implementation a "tape drive"
+ * be many more runs than tape drives. In our implementation a "tape drive"
* doesn't cost much more than a few Kb of memory buffers, so we can afford
* to have lots of them. In particular, if we can have as many tape drives
* as sorted runs, we can eliminate any repeated I/O at all. In the current
@@ -134,28 +134,28 @@ bool optimize_bounded_sort = true;
/*
- * The objects we actually sort are SortTuple structs. These contain
+ * The objects we actually sort are SortTuple structs. These contain
* a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
* which is a separate palloc chunk --- we assume it is just one chunk and
* can be freed by a simple pfree(). SortTuples also contain the tuple's
* first key column in Datum/nullflag format, and an index integer.
*
* Storing the first key column lets us save heap_getattr or index_getattr
- * calls during tuple comparisons. We could extract and save all the key
+ * calls during tuple comparisons. We could extract and save all the key
* columns not just the first, but this would increase code complexity and
* overhead, and wouldn't actually save any comparison cycles in the common
* case where the first key determines the comparison result. Note that
* for a pass-by-reference datatype, datum1 points into the "tuple" storage.
*
* When sorting single Datums, the data value is represented directly by
- * datum1/isnull1. If the datatype is pass-by-reference and isnull1 is false,
+ * datum1/isnull1. If the datatype is pass-by-reference and isnull1 is false,
* then datum1 points to a separately palloc'd data value that is also pointed
* to by the "tuple" pointer; otherwise "tuple" is NULL.
*
* While building initial runs, tupindex holds the tuple's run number. During
* merge passes, we re-use it to hold the input tape number that each tuple in
* the heap was read from, or to hold the index of the next tuple pre-read
- * from the same tape in the case of pre-read entries. tupindex goes unused
+ * from the same tape in the case of pre-read entries. tupindex goes unused
* if the sort occurs entirely in memory.
*/
typedef struct
@@ -238,7 +238,7 @@ struct Tuplesortstate
void (*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of the
+ * Function to write a stored tuple onto tape. The representation of the
* tuple on tape need not be the same as it is in memory; requirements on
* the tape representation are given below. After writing the tuple,
* pfree() the out-of-line data (not the SortTuple struct!), and increase
@@ -264,7 +264,7 @@ struct Tuplesortstate
void (*reversedirection) (Tuplesortstate *state);
/*
- * This array holds the tuples now in sort memory. If we are in state
+ * This array holds the tuples now in sort memory. If we are in state
* INITIAL, the tuples are in no particular order; if we are in state
* SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
* and FINALMERGE, the tuples are organized in "heap" order per Algorithm
@@ -412,7 +412,7 @@ struct Tuplesortstate
* If state->randomAccess is true, then the stored representation of the
* tuple must be followed by another "unsigned int" that is a copy of the
* length --- so the total tape space used is actually sizeof(unsigned int)
- * more than the stored length value. This allows read-backwards. When
+ * more than the stored length value. This allows read-backwards. When
* randomAccess is not true, the write/read routines may omit the extra
* length word.
*
@@ -422,7 +422,7 @@ struct Tuplesortstate
* the back length word (if present).
*
* The write/read routines can make use of the tuple description data
- * stored in the Tuplesortstate record, if needed. They are also expected
+ * stored in the Tuplesortstate record, if needed. They are also expected
* to adjust state->availMem by the amount of memory space (not tape space!)
* released or consumed. There is no error return from either writetup
* or readtup; they should ereport() on failure.
@@ -519,7 +519,7 @@ static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);
*
* After calling tuplesort_begin, the caller should call tuplesort_putXXX
* zero or more times, then call tuplesort_performsort when all the tuples
- * have been supplied. After performsort, retrieve the tuples in sorted
+ * have been supplied. After performsort, retrieve the tuples in sorted
* order by calling tuplesort_getXXX until it returns false/NULL. (If random
* access was requested, rescan, markpos, and restorepos can also be called.)
* Call tuplesort_end to terminate the operation and release memory/disk space.
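In code form, the expected call sequence is roughly this sketch (the tuplesort_* names and signatures are real for this era; fetch_next_slot and process_slot are hypothetical stand-ins for the caller's input and output loops, and the setup of tupDesc, slot, and the sort-key arrays is elided):

Tuplesortstate *state;

state = tuplesort_begin_heap(tupDesc, nkeys, attNums, sortOperators,
                             collations, nullsFirstFlags,
                             work_mem, false);      /* no random access */

while (fetch_next_slot(slot))           /* hypothetical input loop */
    tuplesort_puttupleslot(state, slot);

tuplesort_performsort(state);

while (tuplesort_gettupleslot(state, true, slot))
    process_slot(slot);                 /* hypothetical consumer */

tuplesort_end(state);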
@@ -859,7 +859,7 @@ tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
*
* Advise tuplesort that at most the first N result tuples are required.
*
- * Must be called before inserting any tuples. (Actually, we could allow it
+ * Must be called before inserting any tuples. (Actually, we could allow it
* as long as the sort hasn't spilled to disk, but there seems no need for
* delayed calls at the moment.)
*
@@ -1008,7 +1008,7 @@ grow_memtuples(Tuplesortstate *state)
* strategy and instead increase as much as we safely can.
*
* To stay within allowedMem, we can't increase memtupsize by more
- * than availMem / sizeof(SortTuple) elements. In practice, we want
+ * than availMem / sizeof(SortTuple) elements. In practice, we want
* to increase it by considerably less, because we need to leave some
* space for the tuples to which the new array slots will refer. We
* assume the new tuples will be about the same size as the tuples
@@ -1062,9 +1062,9 @@ grow_memtuples(Tuplesortstate *state)
* We need to be sure that we do not cause LACKMEM to become true, else
* the space management algorithm will go nuts. The code above should
* never generate a dangerous request, but to be safe, check explicitly
- * that the array growth fits within availMem. (We could still cause
+ * that the array growth fits within availMem. (We could still cause
* LACKMEM if the memory chunk overhead associated with the memtuples
- * array were to increase. That shouldn't happen with any sane value of
+ * array were to increase. That shouldn't happen with any sane value of
* allowedMem, because at any array size large enough to risk LACKMEM,
* palloc would be treating both old and new arrays as separate chunks.
* But we'll check LACKMEM explicitly below just in case.)
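The clamp itself is one line of arithmetic; a hedged sketch with simplified types:

#include <stddef.h>

/* Return a grown array size that availMem can still pay for. */
static int
clamped_growth(int memtupsize, long availMem, size_t slot_size)
{
    long    newsize = (long) memtupsize * 2;    /* preferred: double */
    long    maxgrow = availMem / (long) slot_size;

    /* never add more slots than availMem covers, else LACKMEM could
     * become true and upset the space accounting */
    if (newsize - memtupsize > maxgrow)
        newsize = memtupsize + maxgrow;
    return (int) newsize;
}

The real function also reserves headroom for the tuples the new slots will point at, and re-checks LACKMEM after the repalloc.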
@@ -1200,7 +1200,7 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple)
case TSS_INITIAL:
/*
- * Save the tuple into the unsorted array. First, grow the array
+ * Save the tuple into the unsorted array. First, grow the array
* as needed. Note that we try to grow the array when there is
* still one free slot remaining --- if we fail, there'll still be
* room to store the incoming tuple, and then we'll switch to
@@ -1221,7 +1221,7 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple)
* enough tuples to meet the bound.
*
* Note that once we enter TSS_BOUNDED state we will always try to
- * complete the sort that way. In the worst case, if later input
+ * complete the sort that way. In the worst case, if later input
* tuples are larger than earlier ones, this might cause us to
* exceed workMem significantly.
*/
@@ -1359,7 +1359,7 @@ tuplesort_performsort(Tuplesortstate *state)
/*
* We were able to accumulate all the tuples required for output
- * in memory, using a heap to eliminate excess tuples. Now we
+ * in memory, using a heap to eliminate excess tuples. Now we
* have to transform the heap to a properly-sorted array.
*/
sort_bounded_heap(state);
@@ -1373,7 +1373,7 @@ tuplesort_performsort(Tuplesortstate *state)
case TSS_BUILDRUNS:
/*
- * Finish tape-based sort. First, flush all tuples remaining in
+ * Finish tape-based sort. First, flush all tuples remaining in
* memory out to tape; then merge until we have a single remaining
* run (or, if !randomAccess, one run per tape). Note that
* mergeruns sets the correct state->status.
@@ -1434,7 +1434,7 @@ tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
/*
* Complain if caller tries to retrieve more tuples than
- * originally asked for in a bounded sort. This is because
+ * originally asked for in a bounded sort. This is because
* returning EOF here might be the wrong thing.
*/
if (state->bounded && state->current >= state->bound)
@@ -1640,7 +1640,7 @@ tuplesort_gettupleslot(Tuplesortstate *state, bool forward,
/*
* Fetch the next tuple in either forward or back direction.
- * Returns NULL if no more tuples. If *should_free is set, the
+ * Returns NULL if no more tuples. If *should_free is set, the
* caller must pfree the returned tuple when done with it.
*/
HeapTuple
@@ -1659,7 +1659,7 @@ tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free)
/*
* Fetch the next index tuple in either forward or back direction.
- * Returns NULL if no more tuples. If *should_free is set, the
+ * Returns NULL if no more tuples. If *should_free is set, the
* caller must pfree the returned tuple when done with it.
*/
IndexTuple
@@ -1729,7 +1729,7 @@ tuplesort_skiptuples(Tuplesortstate *state, int64 ntuples, bool forward)
/*
* We don't actually support backwards skip yet, because no callers need
- * it. The API is designed to allow for that later, though.
+ * it. The API is designed to allow for that later, though.
*/
Assert(forward);
Assert(ntuples >= 0);
@@ -1747,7 +1747,7 @@ tuplesort_skiptuples(Tuplesortstate *state, int64 ntuples, bool forward)
/*
* Complain if caller tries to retrieve more tuples than
- * originally asked for in a bounded sort. This is because
+ * originally asked for in a bounded sort. This is because
* returning EOF here might be the wrong thing.
*/
if (state->bounded && state->current >= state->bound)
@@ -1800,7 +1800,7 @@ tuplesort_merge_order(int64 allowedMem)
/*
* We need one tape for each merge input, plus another one for the output,
- * and each of these tapes needs buffer space. In addition we want
+ * and each of these tapes needs buffer space. In addition we want
* MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
* count).
*
@@ -1854,11 +1854,12 @@ inittapes(Tuplesortstate *state)
* don't decrease it to the point that we have no room for tuples. (That
* case is only likely to occur if sorting pass-by-value Datums; in all
* other scenarios the memtuples[] array is unlikely to occupy more than
- * half of allowedMem. In the pass-by-value case it's not important to
+ * half of allowedMem. In the pass-by-value case it's not important to
* account for tuple space, so we don't care if LACKMEM becomes
* inaccurate.)
*/
- tapeSpace = (int64) maxTapes * TAPE_BUFFER_OVERHEAD;
+ tapeSpace = (int64) maxTapes *TAPE_BUFFER_OVERHEAD;
+
if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem)
USEMEM(state, tapeSpace);
@@ -1978,7 +1979,7 @@ mergeruns(Tuplesortstate *state)
/*
* If we produced only one initial run (quite likely if the total data
* volume is between 1X and 2X workMem), we can just use that tape as the
- * finished output, rather than doing a useless merge. (This obvious
+ * finished output, rather than doing a useless merge. (This obvious
* optimization is not in Knuth's algorithm.)
*/
if (state->currentRun == 1)
@@ -2084,7 +2085,7 @@ mergeruns(Tuplesortstate *state)
* the loop without performing the last iteration of step D6, we have not
* rearranged the tape unit assignment, and therefore the result is on
* TAPE[T]. We need to do it this way so that we can freeze the final
- * output tape while rewinding it. The last iteration of step D6 would be
+ * output tape while rewinding it. The last iteration of step D6 would be
* a waste of cycles anyway...
*/
state->result_tape = state->tp_tapenum[state->tapeRange];
@@ -2168,7 +2169,7 @@ mergeonerun(Tuplesortstate *state)
* beginmerge - initialize for a merge pass
*
* We decrease the counts of real and dummy runs for each tape, and mark
- * which tapes contain active input runs in mergeactive[]. Then, load
+ * which tapes contain active input runs in mergeactive[]. Then, load
* as many tuples as we can from each active input tape, and finally
* fill the merge heap with the first tuple from each active tape.
*/
@@ -2261,7 +2262,7 @@ beginmerge(Tuplesortstate *state)
* This routine exists to improve sequentiality of reads during a merge pass,
* as explained in the header comments of this file. Load tuples from each
* active source tape until the tape's run is exhausted or it has used up
- * its fair share of available memory. In any case, we guarantee that there
+ * its fair share of available memory. In any case, we guarantee that there
* is at least one preread tuple available from each unexhausted input tape.
*
* We invoke this routine at the start of a merge pass for initial load,
@@ -2524,7 +2525,7 @@ tuplesort_get_stats(Tuplesortstate *state,
* accurately once we have begun to return tuples to the caller (since we
* don't account for pfree's the caller is expected to do), so we cannot
* rely on availMem in a disk sort. This does not seem worth the overhead
- * to fix. Is it worth creating an API for the memory context code to
+ * to fix. Is it worth creating an API for the memory context code to
* tell us how much is actually used in sortcontext?
*/
if (state->tapeset)
@@ -2562,7 +2563,7 @@ tuplesort_get_stats(Tuplesortstate *state,
/*
* Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
*
- * Compare two SortTuples. If checkIndex is true, use the tuple index
+ * Compare two SortTuples. If checkIndex is true, use the tuple index
* as the front of the sort key; otherwise, no.
*/
@@ -2667,7 +2668,7 @@ sort_bounded_heap(Tuplesortstate *state)
/*
* Insert a new tuple into an empty or existing heap, maintaining the
- * heap invariant. Caller is responsible for ensuring there's room.
+ * heap invariant. Caller is responsible for ensuring there's room.
*
* Note: we assume *tuple is a temporary variable that can be scribbled on.
* For some callers, tuple actually points to a memtuples[] entry above the
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index a5a56be91b2..8b968a8b62f 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -8,7 +8,7 @@
* a dumbed-down version of tuplesort.c; it does no sorting of tuples
* but can only store and regurgitate a sequence of tuples. However,
* because no sort is required, it is allowed to start reading the sequence
- * before it has all been written. This is particularly useful for cursors,
+ * before it has all been written. This is particularly useful for cursors,
* because it allows random access within the already-scanned portion of
* a query without having to process the underlying scan to completion.
* Also, it is possible to support multiple independent read pointers.
@@ -17,7 +17,7 @@
* space limit specified by the caller.
*
* The (approximate) amount of memory allowed to the tuplestore is specified
- * in kilobytes by the caller. We absorb tuples and simply store them in an
+ * in kilobytes by the caller. We absorb tuples and simply store them in an
* in-memory array as long as we haven't exceeded maxKBytes. If we do exceed
* maxKBytes, we dump all the tuples into a temp file and then read from that
* when needed.
@@ -29,7 +29,7 @@
* When the caller requests backward-scan capability, we write the temp file
* in a format that allows either forward or backward scan. Otherwise, only
* forward scan is allowed. A request for backward scan must be made before
- * putting any tuples into the tuplestore. Rewind is normally allowed but
+ * putting any tuples into the tuplestore. Rewind is normally allowed but
* can be turned off via tuplestore_set_eflags; turning off rewind for all
* read pointers enables truncation of the tuplestore at the oldest read point
* for minimal memory usage. (The caller must explicitly call tuplestore_trim
@@ -64,7 +64,7 @@
/*
- * Possible states of a Tuplestore object. These denote the states that
+ * Possible states of a Tuplestore object. These denote the states that
* persist between calls of Tuplestore routines.
*/
typedef enum
@@ -83,7 +83,7 @@ typedef enum
*
* Special case: if eof_reached is true, then the pointer's read position is
* implicitly equal to the write position, and current/file/offset aren't
- * maintained. This way we need not update all the read pointers each time
+ * maintained. This way we need not update all the read pointers each time
* we write.
*/
typedef struct
@@ -128,7 +128,7 @@ struct Tuplestorestate
void *(*copytup) (Tuplestorestate *state, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of the
+ * Function to write a stored tuple onto tape. The representation of the
* tuple on tape need not be the same as it is in memory; requirements on
* the tape representation are given below. After writing the tuple,
* pfree() it, and increase state->availMem by the amount of memory space
@@ -197,7 +197,7 @@ struct Tuplestorestate
* If state->backward is true, then the stored representation of
* the tuple must be followed by another "unsigned int" that is a copy of the
* length --- so the total tape space used is actually sizeof(unsigned int)
- * more than the stored length value. This allows read-backwards. When
+ * more than the stored length value. This allows read-backwards. When
* state->backward is not set, the write/read routines may omit the extra
* length word.
*
@@ -295,7 +295,7 @@ tuplestore_begin_common(int eflags, bool interXact, int maxKBytes)
* tuple store are allowed.
*
* interXact: if true, the files used for on-disk storage persist beyond the
- * end of the current transaction. NOTE: It's the caller's responsibility to
+ * end of the current transaction. NOTE: It's the caller's responsibility to
* create such a tuplestore in a memory context and resource owner that will
* also survive transaction boundaries, and to ensure the tuplestore is closed
* when it's no longer wanted.
@@ -334,7 +334,7 @@ tuplestore_begin_heap(bool randomAccess, bool interXact, int maxKBytes)
* any data into the tuplestore.
*
* eflags is a bitmask following the meanings used for executor node
- * startup flags (see executor.h). tuplestore pays attention to these bits:
+ * startup flags (see executor.h). tuplestore pays attention to these bits:
* EXEC_FLAG_REWIND need rewind to start
* EXEC_FLAG_BACKWARD need backward fetch
* If tuplestore_set_eflags is not called, REWIND is allowed, and BACKWARD
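Usage is a sketch like this (the API names and flag bits are real; the sequence is illustrative). The call must precede the first tuple stored:

Tuplestorestate *ts;

ts = tuplestore_begin_heap(false, false, work_mem);

/* turn off rewind on all read pointers so the store can be trimmed
 * at the oldest read point as it is consumed; passing
 * EXEC_FLAG_REWIND instead would keep full-rewind capability */
tuplestore_set_eflags(ts, 0);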
@@ -633,9 +633,9 @@ grow_memtuples(Tuplestorestate *state)
* We need to be sure that we do not cause LACKMEM to become true, else
* the space management algorithm will go nuts. The code above should
* never generate a dangerous request, but to be safe, check explicitly
- * that the array growth fits within availMem. (We could still cause
+ * that the array growth fits within availMem. (We could still cause
* LACKMEM if the memory chunk overhead associated with the memtuples
- * array were to increase. That shouldn't happen with any sane value of
+ * array were to increase. That shouldn't happen with any sane value of
* allowedMem, because at any array size large enough to risk LACKMEM,
* palloc would be treating both old and new arrays as separate chunks.
* But we'll check LACKMEM explicitly below just in case.)
@@ -704,7 +704,7 @@ tuplestore_puttuple(Tuplestorestate *state, HeapTuple tuple)
MemoryContext oldcxt = MemoryContextSwitchTo(state->context);
/*
- * Copy the tuple. (Must do this even in WRITEFILE case. Note that
+ * Copy the tuple. (Must do this even in WRITEFILE case. Note that
* COPYTUP includes USEMEM, so we needn't do that here.)
*/
tuple = COPYTUP(state, tuple);
@@ -861,7 +861,7 @@ tuplestore_puttuple_common(Tuplestorestate *state, void *tuple)
/*
* Fetch the next tuple in either forward or back direction.
- * Returns NULL if no more tuples. If should_free is set, the
+ * Returns NULL if no more tuples. If should_free is set, the
* caller must pfree the returned tuple when done with it.
*
* Backward scan is only allowed if randomAccess was set true or
@@ -1090,7 +1090,7 @@ tuplestore_advance(Tuplestorestate *state, bool forward)
/*
* Advance over N tuples in either forward or back direction,
- * without returning any data. N<=0 is a no-op.
+ * without returning any data. N<=0 is a no-op.
* Returns TRUE if successful, FALSE if ran out of tuples.
*/
bool
diff --git a/src/backend/utils/time/combocid.c b/src/backend/utils/time/combocid.c
index a0678069894..52b612b15a1 100644
--- a/src/backend/utils/time/combocid.c
+++ b/src/backend/utils/time/combocid.c
@@ -15,7 +15,7 @@
* this module.
*
* To allow reusing existing combo cids, we also keep a hash table that
- * maps cmin,cmax pairs to combo cids. This keeps the data structure size
+ * maps cmin,cmax pairs to combo cids. This keeps the data structure size
* reasonable in most cases, since the number of unique pairs used by any
* one transaction is likely to be small.
*
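A standalone sketch of the mapping (linear scan instead of the real dynahash table, fixed capacity, our own typedefs):

typedef unsigned int CommandId;

typedef struct
{
    CommandId   cmin;
    CommandId   cmax;
} ComboCidPair;

static ComboCidPair comboCids[1024];    /* growth handling omitted */
static int  usedComboCids = 0;

/* Map a (cmin, cmax) pair to a combo cid, reusing existing entries. */
static CommandId
get_combo_cid(CommandId cmin, CommandId cmax)
{
    int     i;

    for (i = 0; i < usedComboCids; i++)
    {
        if (comboCids[i].cmin == cmin && comboCids[i].cmax == cmax)
            return (CommandId) i;       /* reuse keeps the array small */
    }

    comboCids[usedComboCids].cmin = cmin;
    comboCids[usedComboCids].cmax = cmax;
    return (CommandId) usedComboCids++;
}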
@@ -148,8 +148,8 @@ HeapTupleHeaderAdjustCmax(HeapTupleHeader tup,
/*
* If we're marking a tuple deleted that was inserted by (any
* subtransaction of) our transaction, we need to use a combo command id.
- * Test for HeapTupleHeaderXminCommitted() first, because it's cheaper than a
- * TransactionIdIsCurrentTransactionId call.
+ * Test for HeapTupleHeaderXminCommitted() first, because it's cheaper
+ * than a TransactionIdIsCurrentTransactionId call.
*/
if (!HeapTupleHeaderXminCommitted(tup) &&
TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetRawXmin(tup)))
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index 9802fa7ded7..2834753d734 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -11,7 +11,7 @@
* regd_count and count it in RegisteredSnapshots, but this reference is not
* tracked by a resource owner. We used to use the TopTransactionResourceOwner
* to track this snapshot reference, but that introduces logical circularity
- * and thus makes it impossible to clean up in a sane fashion. It's better to
+ * and thus makes it impossible to clean up in a sane fashion. It's better to
* handle this reference as an internally-tracked registration, so that this
* module is entirely lower-level than ResourceOwners.
*
@@ -24,9 +24,9 @@
* transaction).
*
* These arrangements let us reset MyPgXact->xmin when there are no snapshots
- * referenced by this transaction. (One possible improvement would be to be
+ * referenced by this transaction. (One possible improvement would be to be
* able to advance Xmin when the snapshot with the earliest Xmin is no longer
- * referenced. That's a bit harder though, it requires more locking, and
+ * referenced. That's a bit harder though, it requires more locking, and
* anyway it should be rather uncommon to keep temporary snapshots referenced
* for too long.)
*
@@ -63,7 +63,7 @@
* CurrentSnapshot points to the only snapshot taken in transaction-snapshot
* mode, and to the latest one taken in a read-committed transaction.
* SecondarySnapshot is a snapshot that's always up-to-date as of the current
- * instant, even in transaction-snapshot mode. It should only be used for
+ * instant, even in transaction-snapshot mode. It should only be used for
* special-purpose code (say, RI checking.) CatalogSnapshot points to an
* MVCC snapshot intended to be used for catalog scans; we must refresh it
* whenever a system catalog change occurs.
@@ -135,7 +135,7 @@ static int RegisteredSnapshots = 0;
bool FirstSnapshotSet = false;
/*
- * Remember the serializable transaction snapshot, if any. We cannot trust
+ * Remember the serializable transaction snapshot, if any. We cannot trust
* FirstSnapshotSet in combination with IsolationUsesXactSnapshot(), because
* GUC may be reset before us, changing the value of IsolationUsesXactSnapshot.
*/
@@ -169,9 +169,9 @@ Snapshot
GetTransactionSnapshot(void)
{
/*
- * Return historic snapshot if doing logical decoding. We'll never
- * need a non-historic transaction snapshot in this (sub-)transaction, so
- * there's no need to be careful to set one up for later calls to
+ * Return historic snapshot if doing logical decoding. We'll never need a
+ * non-historic transaction snapshot in this (sub-)transaction, so there's
+ * no need to be careful to set one up for later calls to
* GetTransactionSnapshot().
*/
if (HistoricSnapshotActive())
@@ -238,8 +238,7 @@ GetLatestSnapshot(void)
{
/*
* So far there are no cases requiring support for GetLatestSnapshot()
- * during logical decoding, but it wouldn't be hard to add if
- * required.
+ * during logical decoding, but it wouldn't be hard to add if required.
*/
Assert(!HistoricSnapshotActive());
@@ -283,12 +282,11 @@ Snapshot
GetNonHistoricCatalogSnapshot(Oid relid)
{
/*
- * If the caller is trying to scan a relation that has no syscache,
- * no catcache invalidations will be sent when it is updated. For a
- * a few key relations, snapshot invalidations are sent instead. If
- * we're trying to scan a relation for which neither catcache nor
- * snapshot invalidations are sent, we must refresh the snapshot every
- * time.
+ * If the caller is trying to scan a relation that has no syscache, no
+ * catcache invalidations will be sent when it is updated. For a few
+ * key relations, snapshot invalidations are sent instead. If we're
+ * trying to scan a relation for which neither catcache nor snapshot
+ * invalidations are sent, we must refresh the snapshot every time.
*/
if (!CatalogSnapshotStale && !RelationInvalidatesSnapshotsOnly(relid) &&
!RelationHasSysCache(relid))
@@ -403,7 +401,7 @@ SetTransactionSnapshot(Snapshot sourcesnap, TransactionId sourcexid)
/*
* In transaction-snapshot mode, the first snapshot must live until end of
- * xact, so we must make a copy of it. Furthermore, if we're running in
+ * xact, so we must make a copy of it. Furthermore, if we're running in
* serializable mode, predicate.c needs to do its own processing.
*/
if (IsolationUsesXactSnapshot())
@@ -499,7 +497,7 @@ FreeSnapshot(Snapshot snapshot)
*
* If the passed snapshot is a statically-allocated one, or it is possibly
* subject to a future command counter update, create a new long-lived copy
- * with active refcount=1. Otherwise, only increment the refcount.
+ * with active refcount=1. Otherwise, only increment the refcount.
*/
void
PushActiveSnapshot(Snapshot snap)
@@ -868,7 +866,7 @@ ExportSnapshot(Snapshot snapshot)
* However, we haven't got enough information to do that, since we don't
* know if we're at top level or not. For example, we could be inside a
* plpgsql function that is going to fire off other transactions via
- * dblink. Rather than disallow perfectly legitimate usages, don't make a
+ * dblink. Rather than disallow perfectly legitimate usages, don't make a
* check.
*
* Also note that we don't make any restriction on the transaction's
@@ -1081,7 +1079,7 @@ parseXidFromText(const char *prefix, char **s, const char *filename)
/*
* ImportSnapshot
- * Import a previously exported snapshot. The argument should be a
+ * Import a previously exported snapshot. The argument should be a
* filename in SNAPSHOT_EXPORT_DIR. Load the snapshot from that file.
* This is called by "SET TRANSACTION SNAPSHOT 'foo'".
*/
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index 89f5ff85e9d..75cd53e1fd0 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -20,7 +20,7 @@
* TransactionIdDidCommit will both return true. If we check only
* TransactionIdDidCommit, we could consider a tuple committed when a
* later GetSnapshotData call will still think the originating transaction
- * is in progress, which leads to application-level inconsistency. The
+ * is in progress, which leads to application-level inconsistency. The
* upshot is that we gotta check TransactionIdIsInProgress first in all
* code paths, except for a few cases where we are looking at
* subtransactions of our own main transaction and so there can't be any
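In code, the mandated ordering looks like this fragment (the function names are the real ones; xid stands for the tuple's inserting transaction ID, and the branch bodies are elided):

if (TransactionIdIsCurrentTransactionId(xid))
{
    /* our own transaction: no race; visibility depends on command id */
}
else if (TransactionIdIsInProgress(xid))
{
    /* must be asked FIRST: some snapshot may still see xid as running */
}
else if (TransactionIdDidCommit(xid))
{
    /* only now is "committed" consistent with every snapshot */
}
else
{
    /* neither running nor committed: it aborted or crashed */
}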
@@ -89,13 +89,13 @@ static bool XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot);
* just refrain from setting the hint bit until some future re-examination
* of the tuple.
*
- * We can always set hint bits when marking a transaction aborted. (Some
+ * We can always set hint bits when marking a transaction aborted. (Some
* code in heapam.c relies on that!)
*
* Also, if we are cleaning up HEAP_MOVED_IN or HEAP_MOVED_OFF entries, then
* we can always set the hint bits, since pre-9.0 VACUUM FULL always used
* synchronous commits and didn't move tuples that weren't previously
- * hinted. (This is not known by this subroutine, but is applied by its
+ * hinted. (This is not known by this subroutine, but is applied by its
* callers.) Note: old-style VACUUM FULL is gone, but we have to keep this
* module's support for MOVED_OFF/MOVED_IN flag bits for as long as we
* support in-place update from pre-9.0 databases.
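
A sketch of the asymmetry just described, assuming an async-commit-style rule
that a committed hint may only be set once the commit record has reached disk
(wal_flushed_past and the flag values are inventions for the sketch, not the
real WAL API or infomask bits):

    #include <stdbool.h>

    typedef unsigned long long XLogRecPtr;

    #define HINT_COMMITTED 0x0100
    #define HINT_ABORTED   0x0200

    /* Stub: would ask the WAL layer whether this LSN has been flushed. */
    static bool wal_flushed_past(XLogRecPtr lsn) { (void) lsn; return false; }

    static void
    set_hint_bit(unsigned int *infomask, unsigned int bit, XLogRecPtr commit_lsn)
    {
        /*
         * An aborted hint is always safe.  A committed hint must wait for the
         * commit record to be flushed; otherwise a crash could leave a hint
         * claiming a commit that never made it to disk.  If we can't set it
         * now, we simply don't -- some later examination of the tuple will.
         */
        if (bit == HINT_COMMITTED && !wal_flushed_past(commit_lsn))
            return;
        *infomask |= bit;
    }
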
@@ -165,6 +165,7 @@ bool
HeapTupleSatisfiesSelf(HeapTuple htup, Snapshot snapshot, Buffer buffer)
{
HeapTupleHeader tuple = htup->t_data;
+
Assert(ItemPointerIsValid(&htup->t_self));
Assert(htup->t_tableOid != InvalidOid);
@@ -343,7 +344,7 @@ HeapTupleSatisfiesAny(HeapTuple htup, Snapshot snapshot, Buffer buffer)
* This is a simplified version that only checks for VACUUM moving conditions.
* It's appropriate for TOAST usage because TOAST really doesn't want to do
* its own time qual checks; if you can see the main table row that contains
- * a TOAST reference, you should be able to see the TOASTed value. However,
+ * a TOAST reference, you should be able to see the TOASTed value. However,
* vacuuming a TOAST table is independent of the main table, and in case such
* a vacuum fails partway through, we'd better do this much checking.
*
@@ -355,6 +356,7 @@ HeapTupleSatisfiesToast(HeapTuple htup, Snapshot snapshot,
Buffer buffer)
{
HeapTupleHeader tuple = htup->t_data;
+
Assert(ItemPointerIsValid(&htup->t_self));
Assert(htup->t_tableOid != InvalidOid);
@@ -440,6 +442,7 @@ HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid,
Buffer buffer)
{
HeapTupleHeader tuple = htup->t_data;
+
Assert(ItemPointerIsValid(&htup->t_self));
Assert(htup->t_tableOid != InvalidOid);
@@ -497,7 +500,7 @@ HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid,
if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
{
- TransactionId xmax;
+ TransactionId xmax;
xmax = HeapTupleHeaderGetRawXmax(tuple);
@@ -600,7 +603,7 @@ HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid,
{
/*
* If it's only locked but neither EXCL_LOCK nor KEYSHR_LOCK is
- * set, it cannot possibly be running. Otherwise need to check.
+ * set, it cannot possibly be running. Otherwise need to check.
*/
if ((tuple->t_infomask & (HEAP_XMAX_EXCL_LOCK |
HEAP_XMAX_KEYSHR_LOCK)) &&
@@ -712,6 +715,7 @@ HeapTupleSatisfiesDirty(HeapTuple htup, Snapshot snapshot,
Buffer buffer)
{
HeapTupleHeader tuple = htup->t_data;
+
Assert(ItemPointerIsValid(&htup->t_self));
Assert(htup->t_tableOid != InvalidOid);
@@ -908,6 +912,7 @@ HeapTupleSatisfiesMVCC(HeapTuple htup, Snapshot snapshot,
Buffer buffer)
{
HeapTupleHeader tuple = htup->t_data;
+
Assert(ItemPointerIsValid(&htup->t_self));
Assert(htup->t_tableOid != InvalidOid);
@@ -1100,7 +1105,7 @@ HeapTupleSatisfiesMVCC(HeapTuple htup, Snapshot snapshot,
* we mainly want to know is if a tuple is potentially visible to *any*
* running transaction. If so, it can't be removed yet by VACUUM.
*
- * OldestXmin is a cutoff XID (obtained from GetOldestXmin()). Tuples
+ * OldestXmin is a cutoff XID (obtained from GetOldestXmin()). Tuples
* deleted by XIDs >= OldestXmin are deemed "recently dead"; they might
* still be visible to some open transaction, so we can't remove them,
* even if we see that the deleting transaction has committed.
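
The cutoff rule reads naturally as a classification; a minimal sketch, where
the wraparound-aware comparison mirrors TransactionIdPrecedes and the enum
names are invented:

    #include <stdbool.h>

    typedef unsigned int TransactionId;

    /* Wraparound-aware "a < b", in the style of TransactionIdPrecedes. */
    static bool
    xid_precedes(TransactionId a, TransactionId b)
    {
        return (int) (a - b) < 0;
    }

    typedef enum { TUPLE_LIVE, TUPLE_RECENTLY_DEAD, TUPLE_DEAD } TupleFate;

    /*
     * Given that the deleting transaction committed: if it did so with an
     * XID >= OldestXmin, some open transaction might still see the tuple,
     * so it is only "recently dead" and VACUUM must keep it for now.
     */
    static TupleFate
    classify_deleted(TransactionId xmax, TransactionId oldest_xmin)
    {
        if (xid_precedes(xmax, oldest_xmin))
            return TUPLE_DEAD;
        return TUPLE_RECENTLY_DEAD;
    }
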
@@ -1110,6 +1115,7 @@ HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin,
Buffer buffer)
{
HeapTupleHeader tuple = htup->t_data;
+
Assert(ItemPointerIsValid(&htup->t_self));
Assert(htup->t_tableOid != InvalidOid);
@@ -1192,7 +1198,7 @@ HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin,
}
/*
- * Okay, the inserter committed, so it was good at some point. Now what
+ * Okay, the inserter committed, so it was good at some point. Now what
* about the deleting transaction?
*/
if (tuple->t_infomask & HEAP_XMAX_INVALID)
@@ -1332,7 +1338,7 @@ HeapTupleSatisfiesVacuum(HeapTuple htup, TransactionId OldestXmin,
 * in lieu of HeapTupleSatisfiesVacuum when the tuple has just been
* tested by HeapTupleSatisfiesMVCC and, therefore, any hint bits that
 * can be set should already be set. We assume that if no hint bits are set
- * either for xmin or xmax, the transaction is still running. This is
+ * either for xmin or xmax, the transaction is still running. This is
* therefore faster than HeapTupleSatisfiesVacuum, because we don't
* consult CLOG (and also because we don't need to give an exact answer,
* just whether or not the tuple is surely dead).
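
A hint-bits-only test like the one described might look as below; the flag
values are invented for the sketch, and the locked-only and multixact cases
the real function handles are left out:

    #include <stdbool.h>

    typedef unsigned int TransactionId;

    #define XMIN_COMMITTED 0x0001
    #define XMAX_INVALID   0x0002
    #define XMAX_COMMITTED 0x0004

    static bool
    xid_precedes(TransactionId a, TransactionId b)
    {
        return (int) (a - b) < 0;   /* wraparound-aware, as in TransactionIdPrecedes */
    }

    /*
     * Answer "surely dead" only when the hint bits already prove it; on any
     * missing bit, err toward "not dead" rather than consult CLOG.  That is
     * the speed-for-precision trade the comment above describes.
     */
    static bool
    surely_dead(unsigned int infomask, TransactionId xmax, TransactionId oldest_xmin)
    {
        if (!(infomask & XMIN_COMMITTED))
            return false;           /* inserter not known committed */
        if (infomask & XMAX_INVALID)
            return false;           /* never deleted */
        if (!(infomask & XMAX_COMMITTED))
            return false;           /* deleter not known committed */
        return xid_precedes(xmax, oldest_xmin);
    }
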
@@ -1341,6 +1347,7 @@ bool
HeapTupleIsSurelyDead(HeapTuple htup, TransactionId OldestXmin)
{
HeapTupleHeader tuple = htup->t_data;
+
Assert(ItemPointerIsValid(&htup->t_self));
Assert(htup->t_tableOid != InvalidOid);
@@ -1397,7 +1404,7 @@ XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot)
/*
* Make a quick range check to eliminate most XIDs without looking at the
- * xip arrays. Note that this is OK even if we convert a subxact XID to
+ * xip arrays. Note that this is OK even if we convert a subxact XID to
* its parent below, because a subxact with XID < xmin has surely also got
* a parent with XID < xmin, while one with XID >= xmax must belong to a
* parent that was not yet committed at the time of this snapshot.
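
The quick range check amounts to the following; the xip search is shown as a
linear scan, and subtransaction handling and the subxip array are left out:

    #include <stdbool.h>
    #include <stddef.h>

    typedef unsigned int TransactionId;

    static bool
    xid_precedes(TransactionId a, TransactionId b)
    {
        return (int) (a - b) < 0;
    }

    /*
     * XIDs below xmin finished before the snapshot; XIDs at or above xmax
     * started after it.  Only the window in between requires searching the
     * in-progress array.
     */
    static bool
    xid_in_snapshot(TransactionId xid, TransactionId xmin, TransactionId xmax,
                    const TransactionId *xip, size_t xcnt)
    {
        size_t  i;

        if (xid_precedes(xid, xmin))
            return false;           /* surely finished: not "in" the snapshot */
        if (!xid_precedes(xid, xmax))
            return true;            /* started after snapshot: treated as running */
        for (i = 0; i < xcnt; i++)
            if (xip[i] == xid)
                return true;
        return false;
    }
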
@@ -1574,7 +1581,7 @@ TransactionIdInArray(TransactionId xid, TransactionId *xip, Size num)
*/
bool
HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
- Buffer buffer)
+ Buffer buffer)
{
HeapTupleHeader tuple = htup->t_data;
TransactionId xmin = HeapTupleHeaderGetXmin(tuple);
@@ -1598,8 +1605,8 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
/*
* another transaction might have (tried to) delete this tuple or
- * cmin/cmax was stored in a combocid. So we need to lookup the
- * actual values externally.
+ * cmin/cmax was stored in a combocid. So we need to look up the actual
+ * values externally.
*/
resolved = ResolveCminCmaxDuringDecoding(HistoricSnapshotGetTupleCids(), snapshot,
htup, buffer,
@@ -1611,7 +1618,7 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
Assert(cmin != InvalidCommandId);
if (cmin >= snapshot->curcid)
- return false; /* inserted after scan started */
+ return false; /* inserted after scan started */
/* fall through */
}
/* committed before our xmin horizon. Do a normal visibility check. */
@@ -1632,13 +1639,14 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
return false;
}
/* check if it's a committed transaction in [xmin, xmax) */
- else if(TransactionIdInArray(xmin, snapshot->xip, snapshot->xcnt))
+ else if (TransactionIdInArray(xmin, snapshot->xip, snapshot->xcnt))
{
/* fall through */
}
+
/*
- * none of the above, i.e. between [xmin, xmax) but hasn't
- * committed. I.e. invisible.
+ * none of the above, i.e. between [xmin, xmax) but hasn't committed. I.e.
+ * invisible.
*/
else
{
@@ -1653,9 +1661,10 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
/* locked tuples are always visible */
else if (HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_infomask))
return true;
+
/*
- * We can see multis here if we're looking at user tables or if
- * somebody SELECT ... FOR SHARE/UPDATE a system table.
+ * We can see multis here if we're looking at user tables or if somebody
+ * ran SELECT ... FOR SHARE/UPDATE on a system table.
*/
else if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
{
@@ -1665,9 +1674,9 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
/* check if it's one of our txids, toplevel is also in there */
if (TransactionIdInArray(xmax, snapshot->subxip, snapshot->subxcnt))
{
- bool resolved;
- CommandId cmin;
- CommandId cmax = HeapTupleHeaderGetRawCommandId(tuple);
+ bool resolved;
+ CommandId cmin;
+ CommandId cmax = HeapTupleHeaderGetRawCommandId(tuple);
/* Lookup actual cmin/cmax values */
resolved = ResolveCminCmaxDuringDecoding(HistoricSnapshotGetTupleCids(), snapshot,
@@ -1680,9 +1689,9 @@ HeapTupleSatisfiesHistoricMVCC(HeapTuple htup, Snapshot snapshot,
Assert(cmax != InvalidCommandId);
if (cmax >= snapshot->curcid)
- return true; /* deleted after scan started */
+ return true; /* deleted after scan started */
else
- return false; /* deleted before scan started */
+ return false; /* deleted before scan started */
}
/* below xmin horizon, normal transaction state is valid */
else if (TransactionIdPrecedes(xmax, snapshot->xmin))