Diffstat (limited to 'src/backend/utils')
-rw-r--r--  src/backend/utils/adt/acl.c | 18
-rw-r--r--  src/backend/utils/adt/array_selfuncs.c | 20
-rw-r--r--  src/backend/utils/adt/array_typanalyze.c | 14
-rw-r--r--  src/backend/utils/adt/array_userfuncs.c | 4
-rw-r--r--  src/backend/utils/adt/arrayfuncs.c | 26
-rw-r--r--  src/backend/utils/adt/arrayutils.c | 2
-rw-r--r--  src/backend/utils/adt/char.c | 2
-rw-r--r--  src/backend/utils/adt/date.c | 6
-rw-r--r--  src/backend/utils/adt/datetime.c | 14
-rw-r--r--  src/backend/utils/adt/datum.c | 2
-rw-r--r--  src/backend/utils/adt/dbsize.c | 2
-rw-r--r--  src/backend/utils/adt/domains.c | 8
-rw-r--r--  src/backend/utils/adt/float.c | 10
-rw-r--r--  src/backend/utils/adt/format_type.c | 6
-rw-r--r--  src/backend/utils/adt/formatting.c | 4
-rw-r--r--  src/backend/utils/adt/geo_selfuncs.c | 4
-rw-r--r--  src/backend/utils/adt/inet_cidr_ntop.c | 2
-rw-r--r--  src/backend/utils/adt/int.c | 30
-rw-r--r--  src/backend/utils/adt/int8.c | 44
-rw-r--r--  src/backend/utils/adt/json.c | 12
-rw-r--r--  src/backend/utils/adt/like.c | 4
-rw-r--r--  src/backend/utils/adt/misc.c | 12
-rw-r--r--  src/backend/utils/adt/nabstime.c | 4
-rw-r--r--  src/backend/utils/adt/network.c | 14
-rw-r--r--  src/backend/utils/adt/numeric.c | 50
-rw-r--r--  src/backend/utils/adt/oid.c | 2
-rw-r--r--  src/backend/utils/adt/pg_locale.c | 26
-rw-r--r--  src/backend/utils/adt/pg_lzcompress.c | 6
-rw-r--r--  src/backend/utils/adt/pseudotypes.c | 6
-rw-r--r--  src/backend/utils/adt/rangetypes.c | 8
-rw-r--r--  src/backend/utils/adt/rangetypes_gist.c | 8
-rw-r--r--  src/backend/utils/adt/regexp.c | 6
-rw-r--r--  src/backend/utils/adt/regproc.c | 14
-rw-r--r--  src/backend/utils/adt/ri_triggers.c | 14
-rw-r--r--  src/backend/utils/adt/rowtypes.c | 8
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 72
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 128
-rw-r--r--  src/backend/utils/adt/timestamp.c | 22
-rw-r--r--  src/backend/utils/adt/tsginidx.c | 2
-rw-r--r--  src/backend/utils/adt/varchar.c | 4
-rw-r--r--  src/backend/utils/adt/varlena.c | 40
-rw-r--r--  src/backend/utils/adt/xml.c | 40
-rw-r--r--  src/backend/utils/cache/attoptcache.c | 2
-rw-r--r--  src/backend/utils/cache/catcache.c | 14
-rw-r--r--  src/backend/utils/cache/evtcache.c | 2
-rw-r--r--  src/backend/utils/cache/inval.c | 30
-rw-r--r--  src/backend/utils/cache/lsyscache.c | 10
-rw-r--r--  src/backend/utils/cache/plancache.c | 42
-rw-r--r--  src/backend/utils/cache/relcache.c | 64
-rw-r--r--  src/backend/utils/cache/relmapper.c | 20
-rw-r--r--  src/backend/utils/cache/spccache.c | 6
-rw-r--r--  src/backend/utils/cache/syscache.c | 4
-rw-r--r--  src/backend/utils/cache/ts_cache.c | 2
-rw-r--r--  src/backend/utils/cache/typcache.c | 8
-rw-r--r--  src/backend/utils/error/elog.c | 46
-rw-r--r--  src/backend/utils/fmgr/dfmgr.c | 6
-rw-r--r--  src/backend/utils/fmgr/fmgr.c | 18
-rw-r--r--  src/backend/utils/fmgr/funcapi.c | 10
-rw-r--r--  src/backend/utils/hash/dynahash.c | 28
-rw-r--r--  src/backend/utils/init/miscinit.c | 32
-rw-r--r--  src/backend/utils/init/postinit.c | 16
-rw-r--r--  src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c | 2
-rw-r--r--  src/backend/utils/mb/mbutils.c | 14
-rw-r--r--  src/backend/utils/mb/wchar.c | 4
-rw-r--r--  src/backend/utils/mb/wstrcmp.c | 2
-rw-r--r--  src/backend/utils/mb/wstrncmp.c | 2
-rw-r--r--  src/backend/utils/misc/guc.c | 26
-rw-r--r--  src/backend/utils/misc/ps_status.c | 6
-rw-r--r--  src/backend/utils/misc/rbtree.c | 12
-rw-r--r--  src/backend/utils/misc/timeout.c | 22
-rw-r--r--  src/backend/utils/misc/tzparser.c | 4
-rw-r--r--  src/backend/utils/mmgr/aset.c | 16
-rw-r--r--  src/backend/utils/mmgr/mcxt.c | 16
-rw-r--r--  src/backend/utils/mmgr/portalmem.c | 18
-rw-r--r--  src/backend/utils/resowner/resowner.c | 8
-rw-r--r--  src/backend/utils/sort/logtape.c | 30
-rw-r--r--  src/backend/utils/sort/tuplesort.c | 70
-rw-r--r--  src/backend/utils/sort/tuplestore.c | 26
-rw-r--r--  src/backend/utils/time/combocid.c | 2
-rw-r--r--  src/backend/utils/time/snapmgr.c | 20
-rw-r--r--  src/backend/utils/time/tqual.c | 18
81 files changed, 699 insertions, 699 deletions
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index 5869ec2e589..23b66b3e721 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -123,7 +123,7 @@ static Oid get_role_oid_or_public(const char *rolname);
/*
* getid
* Consumes the first alphanumeric string (identifier) found in string
- * 's', ignoring any leading white space. If it finds a double quote
+ * 's', ignoring any leading white space. If it finds a double quote
* it returns the word inside the quotes.
*
* RETURNS:
@@ -229,7 +229,7 @@ putid(char *p, const char *s)
*
* RETURNS:
* the string position in 's' immediately following the ACL
- * specification. Also:
+ * specification. Also:
* - loads the structure pointed to by 'aip' with the appropriate
* UID/GID, id type identifier and mode type values.
*/
@@ -837,7 +837,7 @@ acldefault(GrantObjectType objtype, Oid ownerId)
/*
- * SQL-accessible version of acldefault(). Hackish mapping from "char" type to
+ * SQL-accessible version of acldefault(). Hackish mapping from "char" type to
* ACL_OBJECT_* values, but it's only used in the information schema, not
* documented for general use.
*/
@@ -1006,7 +1006,7 @@ aclupdate(const Acl *old_acl, const AclItem *mod_aip,
}
/*
- * Remove abandoned privileges (cascading revoke). Currently we can only
+ * Remove abandoned privileges (cascading revoke). Currently we can only
* handle this when the grantee is not PUBLIC.
*/
if ((old_goptions & ~new_goptions) != 0)
@@ -1072,7 +1072,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
/*
* If the old ACL contained any references to the new owner, then we may
- * now have generated an ACL containing duplicate entries. Find them and
+ * now have generated an ACL containing duplicate entries. Find them and
* merge them so that there are not duplicates. (This is relatively
* expensive since we use a stupid O(N^2) algorithm, but it's unlikely to
* be the normal case.)
@@ -1083,7 +1083,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
* remove privilege-free entries, should there be any in the input.) dst
* is the next output slot, targ is the currently considered input slot
* (always >= dst), and src scans entries to the right of targ looking for
- * duplicates. Once an entry has been emitted to dst it is known
+ * duplicates. Once an entry has been emitted to dst it is known
* duplicate-free and need not be considered anymore.
*/
if (newpresent)
@@ -2468,7 +2468,7 @@ column_privilege_check(Oid tableoid, AttrNumber attnum,
* existence of the pg_class row before risking calling pg_class_aclcheck.
* Note: it might seem there's a race condition against concurrent DROP,
* but really it's safe because there will be no syscache flush between
- * here and there. So if we see the row in the syscache, so will
+ * here and there. So if we see the row in the syscache, so will
* pg_class_aclcheck.
*/
if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(tableoid)))
@@ -5015,14 +5015,14 @@ count_one_bits(AclMode mask)
* The grantor must always be either the object owner or some role that has
* been explicitly granted grant options. This ensures that all granted
* privileges appear to flow from the object owner, and there are never
- * multiple "original sources" of a privilege. Therefore, if the would-be
+ * multiple "original sources" of a privilege. Therefore, if the would-be
* grantor is a member of a role that has the needed grant options, we have
* to do the grant as that role instead.
*
* It is possible that the would-be grantor is a member of several roles
* that have different subsets of the desired grant options, but no one
* role has 'em all. In this case we pick a role with the largest number
- * of desired options. Ties are broken in favor of closer ancestors.
+ * of desired options. Ties are broken in favor of closer ancestors.
*
* roleId: the role attempting to do the GRANT/REVOKE
* privileges: the privileges to be granted/revoked
diff --git a/src/backend/utils/adt/array_selfuncs.c b/src/backend/utils/adt/array_selfuncs.c
index a66bbe32802..4cb5f419321 100644
--- a/src/backend/utils/adt/array_selfuncs.c
+++ b/src/backend/utils/adt/array_selfuncs.c
@@ -524,7 +524,7 @@ mcelem_array_selec(ArrayType *array, TypeCacheEntry *typentry,
/*
* Estimate selectivity of "column @> const" and "column && const" based on
- * most common element statistics. This estimation assumes element
+ * most common element statistics. This estimation assumes element
* occurrences are independent.
*
* mcelem (of length nmcelem) and numbers (of length nnumbers) are from
@@ -689,7 +689,7 @@ mcelem_array_contain_overlap_selec(Datum *mcelem, int nmcelem,
* In the "column @> const" and "column && const" cases, we usually have a
* "const" with low number of elements (otherwise we have selectivity close
* to 0 or 1 respectively). That's why the effect of dependence related
- * to distinct element count distribution is negligible there. In the
+ * to distinct element count distribution is negligible there. In the
* "column <@ const" case, number of elements is usually high (otherwise we
* have selectivity close to 0). That's why we should do a correction with
* the array distinct element count distribution here.
@@ -848,7 +848,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
/*
* The presence of many distinct rare elements materially decreases
* selectivity. Use the Poisson distribution to estimate the probability
- * of a column value having zero occurrences of such elements. See above
+ * of a column value having zero occurrences of such elements. See above
* for the definition of "rest".
*/
mult *= exp(-rest);
@@ -856,7 +856,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
/*----------
* Using the distinct element count histogram requires
* O(unique_nitems * (nmcelem + unique_nitems))
- * operations. Beyond a certain computational cost threshold, it's
+ * operations. Beyond a certain computational cost threshold, it's
* reasonable to sacrifice accuracy for decreased planning time. We limit
* the number of operations to EFFORT * nmcelem; since nmcelem is limited
* by the column's statistics target, the work done is user-controllable.
@@ -868,7 +868,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
* elements to start with, we'd have to remove any discarded elements'
* frequencies from "mult", but since this is only an approximation
* anyway, we don't bother with that. Therefore it's sufficient to qsort
- * elem_selec[] and take the largest elements. (They will no longer match
+ * elem_selec[] and take the largest elements. (They will no longer match
* up with the elements of array_data[], but we don't care.)
*----------
*/
@@ -878,7 +878,7 @@ mcelem_array_contained_selec(Datum *mcelem, int nmcelem,
unique_nitems > EFFORT * nmcelem / (nmcelem + unique_nitems))
{
/*
- * Use the quadratic formula to solve for largest allowable N. We
+ * Use the quadratic formula to solve for largest allowable N. We
* have A = 1, B = nmcelem, C = - EFFORT * nmcelem.
*/
double b = (double) nmcelem;
@@ -953,7 +953,7 @@ calc_hist(const float4 *hist, int nhist, int n)
/*
* frac is a probability contribution for each interval between histogram
- * values. We have nhist - 1 intervals, so contribution of each one will
+ * values. We have nhist - 1 intervals, so contribution of each one will
* be 1 / (nhist - 1).
*/
frac = 1.0f / ((float) (nhist - 1));
@@ -1020,8 +1020,8 @@ calc_hist(const float4 *hist, int nhist, int n)
* "rest" is the sum of the probabilities of all low-probability events not
* included in p.
*
- * Imagine matrix M of size (n + 1) x (m + 1). Element M[i,j] denotes the
- * probability that exactly j of first i events occur. Obviously M[0,0] = 1.
+ * Imagine matrix M of size (n + 1) x (m + 1). Element M[i,j] denotes the
+ * probability that exactly j of first i events occur. Obviously M[0,0] = 1.
* For any constant j, each increment of i increases the probability iff the
* event occurs. So, by the law of total probability:
* M[i,j] = M[i - 1, j] * (1 - p[i]) + M[i - 1, j - 1] * p[i]
@@ -1143,7 +1143,7 @@ floor_log2(uint32 n)
/*
* find_next_mcelem binary-searches a most common elements array, starting
- * from *index, for the first member >= value. It saves the position of the
+ * from *index, for the first member >= value. It saves the position of the
* match into *index and returns true if it's an exact match. (Note: we
* assume the mcelem elements are distinct so there can't be more than one
* exact match.)
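For reference, the "quadratic formula" comment touched above solves for the largest distinct-element count N that keeps the work bounded by EFFORT * nmcelem: with A = 1, B = nmcelem and C = -EFFORT * nmcelem, the positive root is N = (sqrt(B^2 + 4*EFFORT*B) - B) / 2. A minimal sketch of that computation (variable names taken from the comment; the function's actual code may differ):

    double  b = (double) nmcelem;
    /* positive root of N^2 + B*N - EFFORT*B = 0 */
    int     n = (int) ((sqrt(b * b + 4.0 * EFFORT * b) - b) / 2.0);
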
diff --git a/src/backend/utils/adt/array_typanalyze.c b/src/backend/utils/adt/array_typanalyze.c
index ae7bb8a8b81..6b7cd9791da 100644
--- a/src/backend/utils/adt/array_typanalyze.c
+++ b/src/backend/utils/adt/array_typanalyze.c
@@ -160,13 +160,13 @@ array_typanalyze(PG_FUNCTION_ARGS)
* compute_array_stats() -- compute statistics for a array column
*
* This function computes statistics useful for determining selectivity of
- * the array operators <@, &&, and @>. It is invoked by ANALYZE via the
+ * the array operators <@, &&, and @>. It is invoked by ANALYZE via the
* compute_stats hook after sample rows have been collected.
*
* We also invoke the standard compute_stats function, which will compute
* "scalar" statistics relevant to the btree-style array comparison operators.
* However, exact duplicates of an entire array may be rare despite many
- * arrays sharing individual elements. This especially afflicts long arrays,
+ * arrays sharing individual elements. This especially afflicts long arrays,
* which are also liable to lack all scalar statistics due to the low
* WIDTH_THRESHOLD used in analyze.c. So, in addition to the standard stats,
* we find the most common array elements and compute a histogram of distinct
@@ -201,7 +201,7 @@ array_typanalyze(PG_FUNCTION_ARGS)
* In the absence of a principled basis for other particular values, we
* follow ts_typanalyze() and use parameters s = 0.07/K, epsilon = s/10.
* But we leave out the correction for stopwords, which do not apply to
- * arrays. These parameters give bucket width w = K/0.007 and maximum
+ * arrays. These parameters give bucket width w = K/0.007 and maximum
* expected hashtable size of about 1000 * K.
*
* Elements may repeat within an array. Since duplicates do not change the
@@ -463,7 +463,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* Construct an array of the interesting hashtable items, that is,
- * those meeting the cutoff frequency (s - epsilon)*N. Also identify
+ * those meeting the cutoff frequency (s - epsilon)*N. Also identify
* the minimum and maximum frequencies among these items.
*
* Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
@@ -498,7 +498,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* If we obtained more elements than we really want, get rid of those
- * with least frequencies. The easiest way is to qsort the array into
+ * with least frequencies. The easiest way is to qsort the array into
* descending frequency order and truncate the array.
*/
if (num_mcelem < track_len)
@@ -532,7 +532,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
/*
* We sorted statistics on the element value, but we want to be
* able to find the minimal and maximal frequencies without going
- * through all the values. We also want the frequency of null
+ * through all the values. We also want the frequency of null
* elements. Store these three values at the end of mcelem_freqs.
*/
mcelem_values = (Datum *) palloc(num_mcelem * sizeof(Datum));
@@ -623,7 +623,7 @@ compute_array_stats(VacAttrStats *stats, AnalyzeAttrFetchFunc fetchfunc,
* (compare the histogram-making loop in compute_scalar_stats()).
* But instead of that we have the sorted_count_items[] array,
* which holds unique DEC values with their frequencies (that is,
- * a run-length-compressed version of the full array). So we
+ * a run-length-compressed version of the full array). So we
* control advancing through sorted_count_items[] with the
* variable "frac", which is defined as (x - y) * (num_hist - 1),
* where x is the index in the notional DECs array corresponding
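Spelling out the parameter arithmetic the Lossy Counting hunks above rely on (K is the target number of most-common elements, N the total number of element occurrences seen); this is just the algebra implied by the comments, not text from the source:

    s       = 0.07 / K
    epsilon = s / 10            = 0.007 / K
    w       = 1 / epsilon       = K / 0.007          (bucket width, roughly 143 * K)
    cutoff  = (s - epsilon) * N = 9 * epsilon * N    = 9 * N / w
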
diff --git a/src/backend/utils/adt/array_userfuncs.c b/src/backend/utils/adt/array_userfuncs.c
index 7ce1cce9876..10d5de9d60d 100644
--- a/src/backend/utils/adt/array_userfuncs.c
+++ b/src/backend/utils/adt/array_userfuncs.c
@@ -502,7 +502,7 @@ array_agg_transfn(PG_FUNCTION_ARGS)
/*
* The transition type for array_agg() is declared to be "internal", which
- * is a pass-by-value type the same size as a pointer. So we can safely
+ * is a pass-by-value type the same size as a pointer. So we can safely
* pass the ArrayBuildState pointer through nodeAgg.c's machinations.
*/
PG_RETURN_POINTER(state);
@@ -517,7 +517,7 @@ array_agg_finalfn(PG_FUNCTION_ARGS)
int lbs[1];
/*
- * Test for null before Asserting we are in right context. This is to
+ * Test for null before Asserting we are in right context. This is to
* avoid possible Assert failure in 8.4beta installations, where it is
* possible for users to create NULL constants of type internal.
*/
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index 438c3d0e9e6..1cb29ed4416 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -694,7 +694,7 @@ ReadArrayStr(char *arrayStr,
/*
* We have to remove " and \ characters to create a clean item value to
- * pass to the datatype input routine. We overwrite each item value
+ * pass to the datatype input routine. We overwrite each item value
* in-place within arrayStr to do this. srcptr is the current scan point,
* and dstptr is where we are copying to.
*
@@ -894,7 +894,7 @@ ReadArrayStr(char *arrayStr,
* referenced by Datums after copying them.
*
* If the input data is of varlena type, the caller must have ensured that
- * the values are not toasted. (Doing it here doesn't work since the
+ * the values are not toasted. (Doing it here doesn't work since the
* caller has already allocated space for the array...)
*/
static void
@@ -1990,7 +1990,7 @@ array_get_slice(ArrayType *array,
memcpy(ARR_DIMS(newarray), span, ndim * sizeof(int));
/*
- * Lower bounds of the new array are set to 1. Formerly (before 7.3) we
+ * Lower bounds of the new array are set to 1. Formerly (before 7.3) we
* copied the given lowerIndx values ... but that seems confusing.
*/
newlb = ARR_LBOUND(newarray);
@@ -2622,7 +2622,7 @@ array_set_slice(ArrayType *array,
/*
* array_map()
*
- * Map an array through an arbitrary function. Return a new array with
+ * Map an array through an arbitrary function. Return a new array with
* same dimensions and each source element transformed by fn(). Each
* source element is passed as the first argument to fn(); additional
* arguments to be passed to fn() can be specified by the caller.
@@ -2637,9 +2637,9 @@ array_set_slice(ArrayType *array,
* first argument position initially holds the input array value.
* * inpType: OID of element type of input array. This must be the same as,
* or binary-compatible with, the first argument type of fn().
- * * retType: OID of element type of output array. This must be the same as,
+ * * retType: OID of element type of output array. This must be the same as,
* or binary-compatible with, the result type of fn().
- * * amstate: workspace for array_map. Must be zeroed by caller before
+ * * amstate: workspace for array_map. Must be zeroed by caller before
* first call, and not touched after that.
*
* It is legitimate to pass a freshly-zeroed ArrayMapState on each call,
@@ -3493,7 +3493,7 @@ array_cmp(FunctionCallInfo fcinfo)
/*
* If arrays contain same data (up to end of shorter one), apply
- * additional rules to sort by dimensionality. The relative significance
+ * additional rules to sort by dimensionality. The relative significance
* of the different bits of information is historical; mainly we just care
* that we don't say "equal" for arrays of different dimensionality.
*/
@@ -3755,7 +3755,7 @@ array_contain_compare(ArrayType *array1, ArrayType *array2, Oid collation,
/*
* We assume that the comparison operator is strict, so a NULL can't
- * match anything. XXX this diverges from the "NULL=NULL" behavior of
+ * match anything. XXX this diverges from the "NULL=NULL" behavior of
* array_eq, should we act like that?
*/
if (isnull1)
@@ -4246,7 +4246,7 @@ array_copy(char *destptr, int nitems,
*
* Note: this could certainly be optimized using standard bitblt methods.
* However, it's not clear that the typical Postgres array has enough elements
- * to make it worth worrying too much. For the moment, KISS.
+ * to make it worth worrying too much. For the moment, KISS.
*/
void
array_bitmap_copy(bits8 *destbitmap, int destoffset,
@@ -4443,7 +4443,7 @@ array_extract_slice(ArrayType *newarray,
* Insert a slice into an array.
*
* ndim/dim[]/lb[] are dimensions of the original array. A new array with
- * those same dimensions is to be constructed. destArray must already
+ * those same dimensions is to be constructed. destArray must already
* have been allocated and its header initialized.
*
* st[]/endp[] identify the slice to be replaced. Elements within the slice
@@ -5111,7 +5111,7 @@ array_unnest(PG_FUNCTION_ARGS)
* Get the array value and detoast if needed. We can't do this
* earlier because if we have to detoast, we want the detoasted copy
* to be in multi_call_memory_ctx, so it will go away when we're done
- * and not before. (If no detoast happens, we assume the originally
+ * and not before. (If no detoast happens, we assume the originally
* passed array will stick around till then.)
*/
arr = PG_GETARG_ARRAYTYPE_P(0);
@@ -5187,7 +5187,7 @@ array_unnest(PG_FUNCTION_ARGS)
*
* Find all array entries matching (not distinct from) search/search_isnull,
* and delete them if remove is true, else replace them with
- * replace/replace_isnull. Comparisons are done using the specified
+ * replace/replace_isnull. Comparisons are done using the specified
* collation. fcinfo is passed only for caching purposes.
*/
static ArrayType *
@@ -5259,7 +5259,7 @@ array_replace_internal(ArrayType *array,
typalign = typentry->typalign;
/*
- * Detoast values if they are toasted. The replacement value must be
+ * Detoast values if they are toasted. The replacement value must be
* detoasted for insertion into the result array, while detoasting the
* search value only once saves cycles.
*/
diff --git a/src/backend/utils/adt/arrayutils.c b/src/backend/utils/adt/arrayutils.c
index 73e8d55be41..06e09b6d1f6 100644
--- a/src/backend/utils/adt/arrayutils.c
+++ b/src/backend/utils/adt/arrayutils.c
@@ -193,7 +193,7 @@ mda_next_tuple(int n, int *curr, const int *span)
/*
* ArrayGetIntegerTypmods: verify that argument is a 1-D cstring array,
- * and get the contents converted to integers. Returns a palloc'd array
+ * and get the contents converted to integers. Returns a palloc'd array
* and places the length at *n.
*/
int32 *
diff --git a/src/backend/utils/adt/char.c b/src/backend/utils/adt/char.c
index 4fe61c7aafe..7fedaf617c7 100644
--- a/src/backend/utils/adt/char.c
+++ b/src/backend/utils/adt/char.c
@@ -59,7 +59,7 @@ charout(PG_FUNCTION_ARGS)
* charrecv - converts external binary format to char
*
* The external representation is one byte, with no character set
- * conversion. This is somewhat dubious, perhaps, but in many
+ * conversion. This is somewhat dubious, perhaps, but in many
* cases people use char for a 1-byte binary type.
*/
Datum
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index 8677520cb6f..c92cc8d47fa 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -1290,7 +1290,7 @@ AdjustTimeForTypmod(TimeADT *time, int32 typmod)
* Note: this round-to-nearest code is not completely consistent about
* rounding values that are exactly halfway between integral values.
* On most platforms, rint() will implement round-to-nearest-even, but
- * the integer code always rounds up (away from zero). Is it worth
+ * the integer code always rounds up (away from zero). Is it worth
* trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
@@ -1638,7 +1638,7 @@ time_interval(PG_FUNCTION_ARGS)
* Convert interval to time data type.
*
* This is defined as producing the fractional-day portion of the interval.
- * Therefore, we can just ignore the months field. It is not real clear
+ * Therefore, we can just ignore the months field. It is not real clear
* what to do with negative intervals, but we choose to subtract the floor,
* so that, say, '-2 hours' becomes '22:00:00'.
*/
@@ -2627,7 +2627,7 @@ timetz_zone(PG_FUNCTION_ARGS)
pg_tz *tzp;
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index 6979e921324..6df3823a08a 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -354,7 +354,7 @@ j2date(int jd, int *year, int *month, int *day)
* j2day - convert Julian date to day-of-week (0..6 == Sun..Sat)
*
* Note: various places use the locution j2day(date - 1) to produce a
- * result according to the convention 0..6 = Mon..Sun. This is a bit of
+ * result according to the convention 0..6 = Mon..Sun. This is a bit of
* a crock, but will work as long as the computation here is just a modulo.
*/
int
@@ -2480,7 +2480,7 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
/*
* Nothing so far; make a decision about what we think the input
- * is. There used to be lots of heuristics here, but the
+ * is. There used to be lots of heuristics here, but the
* consensus now is to be paranoid. It *must* be either
* YYYY-MM-DD (with a more-than-two-digit year field), or the
* field order defined by DateOrder.
@@ -2513,9 +2513,9 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
{
/*
* We are at the first numeric field of a date that included a
- * textual month name. We want to support the variants
+ * textual month name. We want to support the variants
* MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as unambiguous
- * inputs. We will also accept MON-DD-YY or DD-MON-YY in
+ * inputs. We will also accept MON-DD-YY or DD-MON-YY in
* either DMY or MDY modes, as well as YY-MON-DD in YMD mode.
*/
if (flen >= 3 || DateOrder == DATEORDER_YMD)
@@ -2889,7 +2889,7 @@ DecodeInterval(char **field, int *ftype, int nf, int range,
Assert(*field[i] == '-' || *field[i] == '+');
/*
- * Check for signed hh:mm or hh:mm:ss. If so, process exactly
+ * Check for signed hh:mm or hh:mm:ss. If so, process exactly
* like DTK_TIME case above, plus handling the sign.
*/
if (strchr(field[i] + 1, ':') != NULL &&
@@ -3323,7 +3323,7 @@ DecodeISO8601Interval(char *str,
return dterr;
/*
- * Note: we could step off the end of the string here. Code below
+ * Note: we could step off the end of the string here. Code below
* *must* exit the loop if unit == '\0'.
*/
unit = *str++;
@@ -4126,7 +4126,7 @@ EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str)
/*
* We've been burnt by stupid errors in the ordering of the datetkn tables
- * once too often. Arrange to check them during postmaster start.
+ * once too often. Arrange to check them during postmaster start.
*/
static bool
CheckDateTokenTable(const char *tablename, const datetkn *base, int nel)
diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c
index 612b7ef7e5a..3a0dad29d88 100644
--- a/src/backend/utils/adt/datum.c
+++ b/src/backend/utils/adt/datum.c
@@ -181,7 +181,7 @@ datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen)
/*
* just compare the two datums. NOTE: just comparing "len" bytes will
* not do the work, because we do not know how these bytes are aligned
- * inside the "Datum". We assume instead that any given datatype is
+ * inside the "Datum". We assume instead that any given datatype is
* consistent about how it fills extraneous bits in the Datum.
*/
res = (value1 == value2);
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index 4c4e1ed8202..3d32dcad8d1 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -695,7 +695,7 @@ pg_size_pretty_numeric(PG_FUNCTION_ARGS)
* This is expected to be used in queries like
* SELECT pg_relation_filenode(oid) FROM pg_class;
* That leads to a couple of choices. We work from the pg_class row alone
- * rather than actually opening each relation, for efficiency. We don't
+ * rather than actually opening each relation, for efficiency. We don't
* fail if we can't find the relation --- some rows might be visible in
* the query's MVCC snapshot but already dead according to SnapshotNow.
* (Note: we could avoid using the catcache, but there's little point
diff --git a/src/backend/utils/adt/domains.c b/src/backend/utils/adt/domains.c
index 0a26222c39b..c7852185c54 100644
--- a/src/backend/utils/adt/domains.c
+++ b/src/backend/utils/adt/domains.c
@@ -12,11 +12,11 @@
* The overhead required for constraint checking can be high, since examining
* the catalogs to discover the constraints for a given domain is not cheap.
* We have three mechanisms for minimizing this cost:
- * 1. In a nest of domains, we flatten the checking of all the levels
+ * 1. In a nest of domains, we flatten the checking of all the levels
* into just one operation.
- * 2. We cache the list of constraint items in the FmgrInfo struct
+ * 2. We cache the list of constraint items in the FmgrInfo struct
* passed by the caller.
- * 3. If there are CHECK constraints, we cache a standalone ExprContext
+ * 3. If there are CHECK constraints, we cache a standalone ExprContext
* to evaluate them in.
*
*
@@ -311,7 +311,7 @@ domain_recv(PG_FUNCTION_ARGS)
/*
* domain_check - check that a datum satisfies the constraints of a
- * domain. extra and mcxt can be passed if they are available from,
+ * domain. extra and mcxt can be passed if they are available from,
* say, a FmgrInfo structure, or they can be NULL, in which case the
* setup is repeated for each call.
*/
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index b035e231d1e..6d1c92df9b6 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -276,7 +276,7 @@ float4in(PG_FUNCTION_ARGS)
/*
* Some platforms return ERANGE for denormalized numbers (those
* that are not zero, but are too close to zero to have full
- * precision). We'd prefer not to throw error for that, so try to
+ * precision). We'd prefer not to throw error for that, so try to
* detect whether it's a "real" out-of-range condition by checking
* to see if the result is zero or huge.
*/
@@ -309,7 +309,7 @@ float4in(PG_FUNCTION_ARGS)
/*
* In some IRIX versions, strtod() recognizes only "inf", so if the input
- * is "infinity" we have to skip over "inity". Also, it may return
+ * is "infinity" we have to skip over "inity". Also, it may return
* positive infinity for "-inf".
*/
if (isinf(val))
@@ -507,7 +507,7 @@ float8in(PG_FUNCTION_ARGS)
/*
* Some platforms return ERANGE for denormalized numbers (those
* that are not zero, but are too close to zero to have full
- * precision). We'd prefer not to throw error for that, so try to
+ * precision). We'd prefer not to throw error for that, so try to
* detect whether it's a "real" out-of-range condition by checking
* to see if the result is zero or huge.
*/
@@ -540,7 +540,7 @@ float8in(PG_FUNCTION_ARGS)
/*
* In some IRIX versions, strtod() recognizes only "inf", so if the input
- * is "infinity" we have to skip over "inity". Also, it may return
+ * is "infinity" we have to skip over "inity". Also, it may return
* positive infinity for "-inf".
*/
if (isinf(val))
@@ -2118,7 +2118,7 @@ float8_stddev_samp(PG_FUNCTION_ARGS)
* in that order. Note that Y is the first argument to the aggregates!
*
* It might seem attractive to optimize this by having multiple accumulator
- * functions that only calculate the sums actually needed. But on most
+ * functions that only calculate the sums actually needed. But on most
* modern machines, a couple of extra floating-point multiplies will be
* insignificant compared to the other per-tuple overhead, so I've chosen
* to minimize code space instead.
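The ERANGE comments repeated in float4in and float8in above describe a portability dance around strtod(): some C libraries set ERANGE for denormalized (subnormal) results that are perfectly usable. A self-contained sketch of the idea, not the backend's actual code:

    #include <errno.h>
    #include <math.h>
    #include <stdlib.h>

    /*
     * Parse a double, treating ERANGE as an error only when the result is
     * zero or +/-HUGE_VAL, i.e. a "real" overflow/underflow.  A small
     * nonzero denormalized result is accepted quietly.
     */
    static int
    parse_double_lenient(const char *num, double *out)
    {
        errno = 0;
        *out = strtod(num, NULL);
        if (errno == ERANGE &&
            (*out == 0.0 || *out >= HUGE_VAL || *out <= -HUGE_VAL))
            return -1;          /* genuinely out of range */
        return 0;
    }
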
diff --git a/src/backend/utils/adt/format_type.c b/src/backend/utils/adt/format_type.c
index cd164c7e7ea..c69ba3c1439 100644
--- a/src/backend/utils/adt/format_type.c
+++ b/src/backend/utils/adt/format_type.c
@@ -48,14 +48,14 @@ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
* double quoted if it contains funny characters or matches a keyword.
*
* If typemod is NULL then we are formatting a type name in a context where
- * no typemod is available, eg a function argument or result type. This
+ * no typemod is available, eg a function argument or result type. This
* yields a slightly different result from specifying typemod = -1 in some
* cases. Given typemod = -1 we feel compelled to produce an output that
* the parser will interpret as having typemod -1, so that pg_dump will
- * produce CREATE TABLE commands that recreate the original state. But
+ * produce CREATE TABLE commands that recreate the original state. But
* given NULL typemod, we assume that the parser's interpretation of
* typemod doesn't matter, and so we are willing to output a slightly
- * "prettier" representation of the same type. For example, type = bpchar
+ * "prettier" representation of the same type. For example, type = bpchar
* and typemod = NULL gets you "character", whereas typemod = -1 gets you
* "bpchar" --- the former will be interpreted as character(1) by the
* parser, which does not yield typemod -1.
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 7b854062f0d..a46db73e0cd 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1828,7 +1828,7 @@ str_initcap(const char *buff, size_t nbytes, Oid collid)
/*
* Note: we assume that toupper_l()/tolower_l() will not be so broken
- * as to need guard tests. When using the default collation, we apply
+ * as to need guard tests. When using the default collation, we apply
* the traditional Postgres behavior that forces ASCII-style treatment
* of I/i, but in non-default collations you get exactly what the
* collation says.
@@ -3617,7 +3617,7 @@ do_to_timestamp(text *date_txt, text *fmt,
{
/*
* The month and day field have not been set, so we use the
- * day-of-year field to populate them. Depending on the date mode,
+ * day-of-year field to populate them. Depending on the date mode,
* this field may be interpreted as a Gregorian day-of-year, or an ISO
* week date day-of-year.
*/
diff --git a/src/backend/utils/adt/geo_selfuncs.c b/src/backend/utils/adt/geo_selfuncs.c
index bd30773c11e..7b30e3d951f 100644
--- a/src/backend/utils/adt/geo_selfuncs.c
+++ b/src/backend/utils/adt/geo_selfuncs.c
@@ -22,7 +22,7 @@
/*
- * Selectivity functions for geometric operators. These are bogus -- unless
+ * Selectivity functions for geometric operators. These are bogus -- unless
* we know the actual key distribution in the index, we can't make a good
* prediction of the selectivity of these operators.
*
@@ -34,7 +34,7 @@
* In general, GiST needs to search multiple subtrees in order to guarantee
* that all occurrences of the same key have been found. Because of this,
* the estimated cost for scanning the index ought to be higher than the
- * output selectivity would indicate. gistcostestimate(), over in selfuncs.c,
+ * output selectivity would indicate. gistcostestimate(), over in selfuncs.c,
* ought to be adjusted accordingly --- but until we can generate somewhat
* realistic numbers here, it hardly matters...
*/
diff --git a/src/backend/utils/adt/inet_cidr_ntop.c b/src/backend/utils/adt/inet_cidr_ntop.c
index 5f2a3d361d9..d33534ec173 100644
--- a/src/backend/utils/adt/inet_cidr_ntop.c
+++ b/src/backend/utils/adt/inet_cidr_ntop.c
@@ -196,7 +196,7 @@ inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
}
else
{
- /* Copy src to private buffer. Zero host part. */
+ /* Copy src to private buffer. Zero host part. */
p = (bits + 7) / 8;
memcpy(inbuf, src, p);
memset(inbuf + p, 0, 16 - p);
diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c
index 4a9bc0a969f..7beee67fe80 100644
--- a/src/backend/utils/adt/int.c
+++ b/src/backend/utils/adt/int.c
@@ -642,7 +642,7 @@ int4pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -663,8 +663,8 @@ int4mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -684,7 +684,7 @@ int4mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There are two cases where this fails: arg2 = 0 (which cannot
* overflow) and arg1 = INT_MIN, arg2 = -1 (where the division itself will
* overflow and thus incorrectly match).
@@ -794,7 +794,7 @@ int2pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -815,8 +815,8 @@ int2mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -897,7 +897,7 @@ int24pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -918,8 +918,8 @@ int24mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -939,7 +939,7 @@ int24mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
@@ -985,7 +985,7 @@ int42pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -1006,8 +1006,8 @@ int42mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1027,7 +1027,7 @@ int42mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
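All of the overflow-check comments touched in int.c describe the same pre-C11 idiom: perform the operation, then validate the sign of the result. A standalone sketch of the addition case (SAMESIGN is the macro int.c itself uses; the check relies on wraparound on overflow, which plain C leaves undefined and which the backend build makes well defined with -fwrapv):

    #include <stdint.h>

    #define SAMESIGN(a,b)   (((a) < 0) == ((b) < 0))

    /*
     * If the inputs have different signs their sum cannot overflow; if
     * they have the same sign, the sum must carry that sign too.
     * Returns 0 on success, -1 if the addition overflowed.
     */
    static int
    add_int32_checked(int32_t arg1, int32_t arg2, int32_t *result)
    {
        *result = arg1 + arg2;
        if (SAMESIGN(arg1, arg2) && !SAMESIGN(*result, arg1))
            return -1;
        return 0;
    }
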
diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c
index 4b20616416c..0586d69bb54 100644
--- a/src/backend/utils/adt/int8.c
+++ b/src/backend/utils/adt/int8.c
@@ -73,7 +73,7 @@ scanint8(const char *str, bool errorOK, int64 *result)
ptr++;
/*
- * Do an explicit check for INT64_MIN. Ugly though this is, it's
+ * Do an explicit check for INT64_MIN. Ugly though this is, it's
* cleaner than trying to get the loop below to handle it portably.
*/
if (strncmp(ptr, "9223372036854775808", 19) == 0)
@@ -519,7 +519,7 @@ int8pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -540,8 +540,8 @@ int8mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -561,7 +561,7 @@ int8mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There are two cases where this fails: arg2 = 0 (which cannot
* overflow) and arg1 = INT64_MIN, arg2 = -1 (where the division itself
* will overflow and thus incorrectly match).
@@ -719,7 +719,7 @@ int8inc(PG_FUNCTION_ARGS)
/*
* These functions are exactly like int8inc but are used for aggregates that
- * count only non-null values. Since the functions are declared strict,
+ * count only non-null values. Since the functions are declared strict,
* the null checks happen before we ever get here, and all we need do is
* increment the state value. We could actually make these pg_proc entries
* point right at int8inc, but then the opr_sanity regression test would
@@ -773,7 +773,7 @@ int84pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -794,8 +794,8 @@ int84mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -815,7 +815,7 @@ int84mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
@@ -882,7 +882,7 @@ int48pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -903,8 +903,8 @@ int48mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -924,7 +924,7 @@ int48mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
@@ -970,7 +970,7 @@ int82pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -991,8 +991,8 @@ int82mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1012,7 +1012,7 @@ int82mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg1 gives arg2
+ * Overflow check. We basically check to see if result / arg1 gives arg2
* again. There is one case where this fails: arg1 = 0 (which cannot
* overflow).
*
@@ -1079,7 +1079,7 @@ int28pl(PG_FUNCTION_ARGS)
result = arg1 + arg2;
/*
- * Overflow check. If the inputs are of different signs then their sum
+ * Overflow check. If the inputs are of different signs then their sum
* cannot overflow. If the inputs are of the same sign, their sum had
* better be that sign too.
*/
@@ -1100,8 +1100,8 @@ int28mi(PG_FUNCTION_ARGS)
result = arg1 - arg2;
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then the
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
* result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
@@ -1121,7 +1121,7 @@ int28mul(PG_FUNCTION_ARGS)
result = arg1 * arg2;
/*
- * Overflow check. We basically check to see if result / arg2 gives arg1
+ * Overflow check. We basically check to see if result / arg2 gives arg1
* again. There is one case where this fails: arg2 = 0 (which cannot
* overflow).
*
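The scanint8 hunk near the top of this file shows why INT64_MIN needs an explicit check: its magnitude, 9223372036854775808, is one more than INT64_MAX, so a parser that accumulates the absolute value and negates at the end overflows on exactly that input. One portable alternative, shown only for illustration (it is not what scanint8 does), is to accumulate the value as a negative number, since the negative range is the wider one:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustration only: accumulate negatively so INT64_MIN parses cleanly. */
    static int64_t
    parse_int64_negacc(const char *ptr)
    {
        bool    neg = (*ptr == '-');
        int64_t val = 0;

        if (neg || *ptr == '+')
            ptr++;
        while (*ptr >= '0' && *ptr <= '9')
            val = val * 10 - (*ptr++ - '0');    /* per-digit range checks omitted */
        return neg ? val : -val;                /* negation overflows only past INT64_MAX */
    }
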
diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c
index 7cdf13653e2..7ccbad49750 100644
--- a/src/backend/utils/adt/json.c
+++ b/src/backend/utils/adt/json.c
@@ -588,10 +588,10 @@ json_lex(JsonLexContext *lex)
/*
* We're not dealing with a string, number, legal
- * punctuation mark, or end of string. The only legal
+ * punctuation mark, or end of string. The only legal
* tokens we might find here are true, false, and null,
* but for error reporting purposes we scan until we see a
- * non-alphanumeric character. That way, we can report
+ * non-alphanumeric character. That way, we can report
* the whole word as an unexpected token, rather than just
* some unintuitive prefix thereof.
*/
@@ -885,12 +885,12 @@ json_lex_string(JsonLexContext *lex)
* begin with a '0'.
*
* (3) An optional decimal part, consisting of a period ('.') followed by
- * one or more digits. (Note: While this part can be omitted
+ * one or more digits. (Note: While this part can be omitted
* completely, it's not OK to have only the decimal point without
* any digits afterwards.)
*
* (4) An optional exponent part, consisting of 'e' or 'E', optionally
- * followed by '+' or '-', followed by one or more digits. (Note:
+ * followed by '+' or '-', followed by one or more digits. (Note:
* As with the decimal part, if 'e' or 'E' is present, it must be
* followed by at least one digit.)
*
@@ -968,7 +968,7 @@ json_lex_number(JsonLexContext *lex, char *s, bool *num_err)
}
/*
- * Check for trailing garbage. As in json_lex(), any alphanumeric stuff
+ * Check for trailing garbage. As in json_lex(), any alphanumeric stuff
* here should be considered part of the token for error-reporting
* purposes.
*/
@@ -1763,7 +1763,7 @@ json_agg_transfn(PG_FUNCTION_ARGS)
/*
* The transition type for array_agg() is declared to be "internal", which
- * is a pass-by-value type the same size as a pointer. So we can safely
+ * is a pass-by-value type the same size as a pointer. So we can safely
* pass the ArrayBuildState pointer through nodeAgg.c's machinations.
*/
PG_RETURN_POINTER(state);
diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c
index 4f6c5b39a46..605204528e4 100644
--- a/src/backend/utils/adt/like.c
+++ b/src/backend/utils/adt/like.c
@@ -76,12 +76,12 @@ wchareq(char *p1, char *p2)
/*
* Formerly we had a routine iwchareq() here that tried to do case-insensitive
- * comparison of multibyte characters. It did not work at all, however,
+ * comparison of multibyte characters. It did not work at all, however,
* because it relied on tolower() which has a single-byte API ... and
* towlower() wouldn't be much better since we have no suitably cheap way
* of getting a single character transformed to the system's wchar_t format.
* So now, we just downcase the strings using lower() and apply regular LIKE
- * comparison. This should be revisited when we install better locale support.
+ * comparison. This should be revisited when we install better locale support.
*/
/*
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index aecdcd056cd..b47ddb6045f 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -96,7 +96,7 @@ pg_signal_backend(int pid, int sig)
/*
* BackendPidGetProc returns NULL if the pid isn't valid; but by the time
* we reach kill(), a process for which we get a valid proc here might
- * have terminated on its own. There's no way to acquire a lock on an
+ * have terminated on its own. There's no way to acquire a lock on an
* arbitrary process to prevent that. But since so far all the callers of
* this mechanism involve some request for ending the process anyway, that
* it might end on its own first is not a problem.
@@ -120,7 +120,7 @@ pg_signal_backend(int pid, int sig)
* recycled for a new process, before reaching here? Then we'd be trying
* to kill the wrong thing. Seems near impossible when sequential pid
* assignment and wraparound is used. Perhaps it could happen on a system
- * where pid re-use is randomized. That race condition possibility seems
+ * where pid re-use is randomized. That race condition possibility seems
* too unlikely to worry about.
*/
@@ -140,7 +140,7 @@ pg_signal_backend(int pid, int sig)
}
/*
- * Signal to cancel a backend process. This is allowed if you are superuser or
+ * Signal to cancel a backend process. This is allowed if you are superuser or
* have the same role as the process being canceled.
*/
Datum
@@ -333,7 +333,7 @@ pg_tablespace_location(PG_FUNCTION_ARGS)
/*
* It's useful to apply this function to pg_class.reltablespace, wherein
- * zero means "the database's default tablespace". So, rather than
+ * zero means "the database's default tablespace". So, rather than
* throwing an error for zero, we choose to assume that's what is meant.
*/
if (tablespaceOid == InvalidOid)
@@ -391,7 +391,7 @@ pg_sleep(PG_FUNCTION_ARGS)
* loop.
*
* By computing the intended stop time initially, we avoid accumulation of
- * extra delay across multiple sleeps. This also ensures we won't delay
+ * extra delay across multiple sleeps. This also ensures we won't delay
* less than the specified time when WaitLatch is terminated early by a
* non-query-cancelling signal such as SIGHUP.
*/
@@ -558,7 +558,7 @@ pg_relation_is_updatable(PG_FUNCTION_ARGS)
* non-updatable columns.
*
* Also, this function encapsulates the decision about just what
- * information_schema.columns.is_updatable actually means. It's not clear
+ * information_schema.columns.is_updatable actually means. It's not clear
* whether deletability of the column's relation should be required, so
* we want that decision in C code where we could change it without initdb.
*/
diff --git a/src/backend/utils/adt/nabstime.c b/src/backend/utils/adt/nabstime.c
index 32f1726402e..6545d31fc9c 100644
--- a/src/backend/utils/adt/nabstime.c
+++ b/src/backend/utils/adt/nabstime.c
@@ -199,7 +199,7 @@ tm2abstime(struct pg_tm * tm, int tz)
sec = tm->tm_sec + tz + (tm->tm_min + (day * HOURS_PER_DAY + tm->tm_hour) * MINS_PER_HOUR) * SECS_PER_MINUTE;
/*
- * check for overflow. We need a little slop here because the H/M/S plus
+ * check for overflow. We need a little slop here because the H/M/S plus
* TZ offset could add up to more than 1 day.
*/
if ((day >= MAX_DAYNUM - 10 && sec < 0) ||
@@ -1164,7 +1164,7 @@ tintervalsame(PG_FUNCTION_ARGS)
* 1. The interval length computations overflow at 2^31 seconds, causing
* intervals longer than that to sort oddly compared to those shorter.
* 2. infinity and minus infinity (NOEND_ABSTIME and NOSTART_ABSTIME) are
- * just ordinary integers. Since this code doesn't handle them specially,
+ * just ordinary integers. Since this code doesn't handle them specially,
* it's possible for [a b] to be considered longer than [c infinity] for
* finite abstimes a, b, c. In combination with the previous point, the
* interval [-infinity infinity] is treated as being shorter than many finite
diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c
index f2c337cb8d1..737eeff86da 100644
--- a/src/backend/utils/adt/network.c
+++ b/src/backend/utils/adt/network.c
@@ -29,7 +29,7 @@ static int ip_addrsize(inet *inetptr);
static inet *internal_inetpl(inet *ip, int64 addend);
/*
- * Access macros. We use VARDATA_ANY so that we can process short-header
+ * Access macros. We use VARDATA_ANY so that we can process short-header
* varlena values without detoasting them. This requires a trick:
* VARDATA_ANY assumes the varlena header is already filled in, which is
* not the case when constructing a new value (until SET_INET_VARSIZE is
@@ -88,7 +88,7 @@ network_in(char *src, bool is_cidr)
dst = (inet *) palloc0(sizeof(inet));
/*
- * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses
+ * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses
* will have a : somewhere in them (several, in fact) so if there is one
* present, assume it's V6, otherwise assume it's V4.
*/
@@ -193,7 +193,7 @@ cidr_out(PG_FUNCTION_ARGS)
* family, bits, is_cidr, address length, address in network byte order.
*
* Presence of is_cidr is largely for historical reasons, though it might
- * allow some code-sharing on the client side. We send it correctly on
+ * allow some code-sharing on the client side. We send it correctly on
* output, but ignore the value on input.
*/
static inet *
@@ -1392,7 +1392,7 @@ inetmi(PG_FUNCTION_ARGS)
/*
* We form the difference using the traditional complement, increment,
* and add rule, with the increment part being handled by starting the
- * carry off at 1. If you don't think integer arithmetic is done in
+ * carry off at 1. If you don't think integer arithmetic is done in
* two's complement, too bad.
*/
int nb = ip_addrsize(ip);
@@ -1414,7 +1414,7 @@ inetmi(PG_FUNCTION_ARGS)
else
{
/*
- * Input wider than int64: check for overflow. All bytes to
+ * Input wider than int64: check for overflow. All bytes to
* the left of what will fit should be 0 or 0xFF, depending on
* sign of the now-complete result.
*/
@@ -1445,9 +1445,9 @@ inetmi(PG_FUNCTION_ARGS)
* XXX This should go away someday!
*
* This is a kluge needed because we don't yet support zones in stored inet
- * values. Since the result of getnameinfo() might include a zone spec,
+ * values. Since the result of getnameinfo() might include a zone spec,
* call this to remove it anywhere we want to feed getnameinfo's output to
- * network_in. Beats failing entirely.
+ * network_in. Beats failing entirely.
*
* An alternative approach would be to let network_in ignore %-parts for
* itself, but that would mean we'd silently drop zone specs in user input,
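The inetmi hunks above describe the subtraction as "complement, increment, and add", with the increment supplied by priming the carry to 1. A minimal byte-wise sketch of that rule over big-endian address bytes (the function and variable names here are made up; the backend additionally checks that the result fits in an int64):

    #include <stdint.h>

    static void
    byte_subtract(const uint8_t *a, const uint8_t *b, uint8_t *res, int nb)
    {
        int carry = 1;                      /* supplies the "+ 1" of two's complement */

        for (int i = nb - 1; i >= 0; i--)
        {
            int sum = a[i] + (uint8_t) ~b[i] + carry;

            res[i] = (uint8_t) sum;
            carry = sum >> 8;
        }
    }
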
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index b4d639428ac..eddf7f0385e 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -50,7 +50,7 @@
* Numeric values are represented in a base-NBASE floating point format.
* Each "digit" ranges from 0 to NBASE-1. The type NumericDigit is signed
* and wide enough to store a digit. We assume that NBASE*NBASE can fit in
- * an int. Although the purely calculational routines could handle any even
+ * an int. Although the purely calculational routines could handle any even
* NBASE that's less than sqrt(INT_MAX), in practice we are only interested
* in NBASE a power of ten, so that I/O conversions and decimal rounding
* are easy. Also, it's actually more efficient if NBASE is rather less than
@@ -95,11 +95,11 @@ typedef int16 NumericDigit;
* If the high bits of the first word of a NumericChoice (n_header, or
* n_short.n_header, or n_long.n_sign_dscale) are NUMERIC_SHORT, then the
* numeric follows the NumericShort format; if they are NUMERIC_POS or
- * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN,
+ * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN,
* it is a NaN. We currently always store a NaN using just two bytes (i.e.
* only n_header), but previous releases used only the NumericLong format,
* so we might find 4-byte NaNs on disk if a database has been migrated using
- * pg_upgrade. In either case, when the high bits indicate a NaN, the
+ * pg_upgrade. In either case, when the high bits indicate a NaN, the
* remaining bits are never examined. Currently, we always initialize these
* to zero, but it might be possible to use them for some other purpose in
* the future.
@@ -207,19 +207,19 @@ struct NumericData
: ((n)->choice.n_long.n_weight))
/* ----------
- * NumericVar is the format we use for arithmetic. The digit-array part
+ * NumericVar is the format we use for arithmetic. The digit-array part
* is the same as the NumericData storage format, but the header is more
* complex.
*
* The value represented by a NumericVar is determined by the sign, weight,
* ndigits, and digits[] array.
* Note: the first digit of a NumericVar's value is assumed to be multiplied
- * by NBASE ** weight. Another way to say it is that there are weight+1
+ * by NBASE ** weight. Another way to say it is that there are weight+1
* digits before the decimal point. It is possible to have weight < 0.
*
* buf points at the physical start of the palloc'd digit buffer for the
- * NumericVar. digits points at the first digit in actual use (the one
- * with the specified weight). We normally leave an unused digit or two
+ * NumericVar. digits points at the first digit in actual use (the one
+ * with the specified weight). We normally leave an unused digit or two
* (preset to zeroes) between buf and digits, so that there is room to store
* a carry out of the top digit without reallocating space. We just need to
* decrement digits (and increment weight) to make room for the carry digit.
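A toy evaluation of the sign/weight/digits representation described above, assuming NBASE = 10000. var_value is a hypothetical helper that only spells out the positional arithmetic in double precision; it is not how the real code evaluates values.

#include <math.h>
#include <stdio.h>

#define NBASE 10000

/* Digit i is scaled by NBASE^(weight - i), so weight+1 digits sit before
 * the decimal point. */
static double
var_value(const int *digits, int ndigits, int weight, int negative)
{
    double val = 0.0;

    for (int i = 0; i < ndigits; i++)
        val += digits[i] * pow((double) NBASE, (double) (weight - i));
    return negative ? -val : val;
}

int
main(void)
{
    /* digits 1234 5678 . 0000 0001 with weight 1  ==  12345678.0001 */
    int digits[] = {1234, 5678, 0, 1};

    printf("%.4f\n", var_value(digits, 4, 1, 0));   /* 12345678.0001 */
    return 0;
}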
@@ -596,7 +596,7 @@ numeric_maximum_size(int32 typmod)
* In most cases, the size of a numeric will be smaller than the value
* computed below, because the varlena header will typically get toasted
* down to a single byte before being stored on disk, and it may also be
- * possible to use a short numeric header. But our job here is to compute
+ * possible to use a short numeric header. But our job here is to compute
* the worst case.
*/
return NUMERIC_HDRSZ + (numeric_digits * sizeof(NumericDigit));
@@ -716,7 +716,7 @@ numeric_send(PG_FUNCTION_ARGS)
*
* Flatten calls to numeric's length coercion function that solely represent
* increases in allowable precision. Scale changes mutate every datum, so
- * they are unoptimizable. Some values, e.g. 1E-1001, can only fit into an
+ * they are unoptimizable. Some values, e.g. 1E-1001, can only fit into an
* unconstrained numeric, so a change from an unconstrained numeric to any
* constrained numeric is also unoptimizable.
*/
@@ -746,7 +746,7 @@ numeric_transform(PG_FUNCTION_ARGS)
* If new_typmod < VARHDRSZ, the destination is unconstrained; that's
* always OK. If old_typmod >= VARHDRSZ, the source is constrained,
* and we're OK if the scale is unchanged and the precision is not
- * decreasing. See further notes in function header comment.
+ * decreasing. See further notes in function header comment.
*/
if (new_typmod < (int32) VARHDRSZ ||
(old_typmod >= (int32) VARHDRSZ &&
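That precision/scale test can be sketched as a standalone predicate, assuming numeric's typmod packs ((precision << 16) | scale) + VARHDRSZ; length_coercion_is_noop is a hypothetical name, not the function being patched.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VARHDRSZ 4

static bool
length_coercion_is_noop(int32_t old_typmod, int32_t new_typmod)
{
    int32_t old_scale = (old_typmod - VARHDRSZ) & 0xffff;
    int32_t new_scale = (new_typmod - VARHDRSZ) & 0xffff;
    int32_t old_precision = ((old_typmod - VARHDRSZ) >> 16) & 0xffff;
    int32_t new_precision = ((new_typmod - VARHDRSZ) >> 16) & 0xffff;

    /* unconstrained destination always works; otherwise the source must be
     * constrained, with the same scale and no loss of precision */
    return new_typmod < VARHDRSZ ||
           (old_typmod >= VARHDRSZ &&
            new_scale == old_scale &&
            new_precision >= old_precision);
}

int
main(void)
{
    int32_t numeric_10_2 = ((10 << 16) | 2) + VARHDRSZ;
    int32_t numeric_12_2 = ((12 << 16) | 2) + VARHDRSZ;

    printf("%d\n", length_coercion_is_noop(numeric_10_2, numeric_12_2));    /* 1 */
    printf("%d\n", length_coercion_is_noop(numeric_12_2, numeric_10_2));    /* 0 */
    return 0;
}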
@@ -958,7 +958,7 @@ numeric_uminus(PG_FUNCTION_ARGS)
/*
* The packed format is known to be totally zero digit trimmed always. So
- * we can identify a ZERO by the fact that there are no digits at all. Do
+ * we can identify a ZERO by the fact that there are no digits at all. Do
* nothing to a zero.
*/
if (NUMERIC_NDIGITS(num) != 0)
@@ -1934,7 +1934,7 @@ numeric_sqrt(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a scale
+ * Unpack the argument and determine the result scale. We choose a scale
* to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
* case not less than the input's dscale.
*/
@@ -1985,7 +1985,7 @@ numeric_exp(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a scale
+ * Unpack the argument and determine the result scale. We choose a scale
* to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
* case not less than the input's dscale.
*/
@@ -2568,9 +2568,9 @@ numeric_avg_accum(PG_FUNCTION_ARGS)
/*
* Integer data types all use Numeric accumulators to share code and
- * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation
+ * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation
* is overkill for the N and sum(X) values, but definitely not overkill
- * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only
+ * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only
* for stddev/variance --- there are faster special-purpose accumulator
* routines for SUM and AVG of these datatypes.
*/
@@ -2828,7 +2828,7 @@ numeric_stddev_pop(PG_FUNCTION_ARGS)
* the initial condition of the transition data value needs to be NULL. This
* means we can't rely on ExecAgg to automatically insert the first non-null
* data value into the transition data: it doesn't know how to do the type
- * conversion. The upshot is that these routines have to be marked non-strict
+ * conversion. The upshot is that these routines have to be marked non-strict
* and handle substitution of the first non-null input themselves.
*/
@@ -3226,7 +3226,7 @@ set_var_from_str(const char *str, const char *cp, NumericVar *dest)
/*
* We first parse the string to extract decimal digits and determine the
- * correct decimal weight. Then convert to NBASE representation.
+ * correct decimal weight. Then convert to NBASE representation.
*/
switch (*cp)
{
@@ -3834,7 +3834,7 @@ apply_typmod(NumericVar *var, int32 typmod)
/*
* Convert numeric to int8, rounding if needed.
*
- * If overflow, return FALSE (no error is raised). Return TRUE if okay.
+ * If overflow, return FALSE (no error is raised). Return TRUE if okay.
*/
static bool
numericvar_to_int8(NumericVar *var, int64 *result)
@@ -4305,7 +4305,7 @@ sub_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
* mul_var() -
*
* Multiplication on variable level. Product of var1 * var2 is stored
- * in result. Result is rounded to no more than rscale fractional digits.
+ * in result. Result is rounded to no more than rscale fractional digits.
*/
static void
mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
@@ -4349,7 +4349,7 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* Determine number of result digits to compute. If the exact result
* would have more than rscale fractional digits, truncate the computation
- * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one
+ * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one
* or both inputs have fewer digits than they really do.
*/
res_ndigits = var1ndigits + var2ndigits + 1;
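The core of the routine is schoolbook long multiplication on base-NBASE digit arrays. A self-contained sketch of just that part (most-significant digit first, with no weight/rscale handling and no guard-digit truncation), illustrative rather than the actual mul_var:

#include <stdio.h>

#define NBASE 10000

static void
mul_digits(const int *d1, int n1, const int *d2, int n2, int *res /* n1+n2 */)
{
    for (int i = 0; i < n1 + n2; i++)
        res[i] = 0;

    for (int i = n1 - 1; i >= 0; i--)
    {
        int carry = 0;

        for (int j = n2 - 1; j >= 0; j--)
        {
            /* NBASE*NBASE fits in an int, as the header comment assumes */
            int prod = d1[i] * d2[j] + res[i + j + 1] + carry;

            res[i + j + 1] = prod % NBASE;
            carry = prod / NBASE;
        }
        res[i] += carry;
    }
}

int
main(void)
{
    int a[] = {1234, 5678};     /* 12345678 */
    int b[] = {9, 8765};        /* 98765 */
    int r[4];

    mul_digits(a, 2, b, 2, r);
    printf("%d %04d %04d %04d\n", r[0], r[1], r[2], r[3]);
    /* 1 2193 2088 7670, i.e. 1219320887670 */
    return 0;
}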
@@ -4592,7 +4592,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
*
* We need the first divisor digit to be >= NBASE/2. If it isn't,
* make it so by scaling up both the divisor and dividend by the
- * factor "d". (The reason for allocating dividend[0] above is to
+ * factor "d". (The reason for allocating dividend[0] above is to
* leave room for possible carry here.)
*/
if (divisor[1] < HALF_NBASE)
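The scaling factor is the classic normalization step of Knuth's Algorithm D: pick d so the leading divisor digit reaches at least half the base, which keeps the trial quotient digits nearly exact. A base-10 illustration of the idea, not the div_var code:

#include <stdio.h>

int
main(void)
{
    int base = 10;
    int dividend = 4537, divisor = 13;      /* leading divisor digit 1 < base/2 */
    int d = base / (1 + 1);                 /* = 5 */

    /* scaling both operands leaves the quotient unchanged (349 either way) */
    printf("%d / %d  ->  %d / %d (leading divisor digit now %d)\n",
           dividend, divisor, dividend * d, divisor * d, (divisor * d) / 10);
    /* 4537 / 13  ->  22685 / 65 (leading divisor digit now 6) */
    return 0;
}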
@@ -4636,7 +4636,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* If next2digits are 0, then quotient digit must be 0 and there's
- * no need to adjust the working dividend. It's worth testing
+ * no need to adjust the working dividend. It's worth testing
* here to fall out ASAP when processing trailing zeroes in a
* dividend.
*/
@@ -4654,7 +4654,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* Adjust quotient digit if it's too large. Knuth proves that
* after this step, the quotient digit will be either correct or
- * just one too large. (Note: it's OK to use dividend[j+2] here
+ * just one too large. (Note: it's OK to use dividend[j+2] here
* because we know the divisor length is at least 2.)
*/
while (divisor2 * qhat >
@@ -4829,7 +4829,7 @@ div_var_fast(NumericVar *var1, NumericVar *var2, NumericVar *result,
* dividend's digits (plus appended zeroes to reach the desired precision
* including guard digits). Each step of the main loop computes an
* (approximate) quotient digit and stores it into div[], removing one
- * position of dividend space. A final pass of carry propagation takes
+ * position of dividend space. A final pass of carry propagation takes
* care of any mistaken quotient digits.
*/
div = (int *) palloc0((div_ndigits + 1) * sizeof(int));
@@ -5679,7 +5679,7 @@ power_var_int(NumericVar *base, int exp, NumericVar *result, int rscale)
/*
* The general case repeatedly multiplies base according to the bit
- * pattern of exp. We do the multiplications with some extra precision.
+ * pattern of exp. We do the multiplications with some extra precision.
*/
neg = (exp < 0);
exp = Abs(exp);
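The bit-pattern loop is ordinary exponentiation by squaring. A double-precision sketch of the same control flow; the numeric version does this with NumericVars and extra working precision, which this toy ignores.

#include <stdio.h>

static double
ipow(double base, unsigned int exp)
{
    double result = 1.0;

    while (exp > 0)
    {
        if (exp & 1)            /* this bit of exp contributes a factor */
            result *= base;
        base *= base;           /* square the base for the next bit */
        exp >>= 1;
    }
    return result;
}

int
main(void)
{
    printf("%g\n", ipow(3.0, 13));      /* 1594323 */
    return 0;
}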
diff --git a/src/backend/utils/adt/oid.c b/src/backend/utils/adt/oid.c
index 399973b74b9..0373ea52ee5 100644
--- a/src/backend/utils/adt/oid.c
+++ b/src/backend/utils/adt/oid.c
@@ -318,7 +318,7 @@ oidparse(Node *node)
/*
* Values too large for int4 will be represented as Float
- * constants by the lexer. Accept these if they are valid OID
+ * constants by the lexer. Accept these if they are valid OID
* strings.
*/
return oidin_subr(strVal(node), NULL);
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 7081b00500b..9f0327bd1b1 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -20,12 +20,12 @@
*
* The other categories, LC_MONETARY, LC_NUMERIC, and LC_TIME are also
* settable at run-time. However, we don't actually set those locale
- * categories permanently. This would have bizarre effects like no
+ * categories permanently. This would have bizarre effects like no
* longer accepting standard floating-point literals in some locales.
* Instead, we only set the locales briefly when needed, cache the
* required information obtained from localeconv(), and set them back.
* The cached information is only used by the formatting functions
- * (to_char, etc.) and the money type. For the user, this should all be
+ * (to_char, etc.) and the money type. For the user, this should all be
* transparent.
*
* !!! NOW HEAR THIS !!!
@@ -39,7 +39,7 @@
* fail = true;
* setlocale(category, save);
* DOES NOT WORK RELIABLY: on some platforms the second setlocale() call
- * will change the memory save is pointing at. To do this sort of thing
+ * will change the memory save is pointing at. To do this sort of thing
* safely, you *must* pstrdup what setlocale returns the first time.
*
* FYI, The Open Group locale standard is defined here:
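The safe form of the pattern the comment warns about looks roughly like this in plain C, using strdup/free in place of pstrdup; try_locale_temporarily is an illustrative name only.

#include <locale.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static bool
try_locale_temporarily(int category, const char *locale)
{
    char *save = setlocale(category, NULL);

    if (save == NULL)
        return false;
    save = strdup(save);            /* the next setlocale() may clobber it */
    if (save == NULL)
        return false;

    bool ok = (setlocale(category, locale) != NULL);

    setlocale(category, save);      /* restore the original setting */
    free(save);
    return ok;
}

int
main(void)
{
    return try_locale_temporarily(LC_NUMERIC, "C") ? 0 : 1;
}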
@@ -225,7 +225,7 @@ pg_perm_setlocale(int category, const char *locale)
* Is the locale name valid for the locale category?
*
* If successful, and canonname isn't NULL, a palloc'd copy of the locale's
- * canonical name is stored there. This is especially useful for figuring out
+ * canonical name is stored there. This is especially useful for figuring out
* what locale name "" means (ie, the server environment value). (Actually,
* it seems that on most implementations that's the only thing it's good for;
* we could wish that setlocale gave back a canonically spelled version of
@@ -268,7 +268,7 @@ check_locale(int category, const char *locale, char **canonname)
*
* For most locale categories, the assign hook doesn't actually set the locale
* permanently, just reset flags so that the next use will cache the
- * appropriate values. (See explanation at the top of this file.)
+ * appropriate values. (See explanation at the top of this file.)
*
* Note: we accept value = "" as selecting the postmaster's environment
* value, whatever it was (so long as the environment setting is legal).
@@ -718,13 +718,13 @@ cache_locale_time(void)
* Convert a Windows setlocale() argument to a Unix-style one.
*
* Regardless of platform, we install message catalogs under a Unix-style
- * LL[_CC][.ENCODING][@VARIANT] naming convention. Only LC_MESSAGES settings
+ * LL[_CC][.ENCODING][@VARIANT] naming convention. Only LC_MESSAGES settings
* following that style will elicit localized interface strings.
*
* Before Visual Studio 2012 (msvcr110.dll), Windows setlocale() accepted "C"
* (but not "c") and strings of the form <Language>[_<Country>][.<CodePage>],
* case-insensitive. setlocale() returns the fully-qualified form; for
- * example, setlocale("thaI") returns "Thai_Thailand.874". Internally,
+ * example, setlocale("thaI") returns "Thai_Thailand.874". Internally,
* setlocale() and _create_locale() select a "locale identifier"[1] and store
* it in an undocumented _locale_t field. From that LCID, we can retrieve the
* ISO 639 language and the ISO 3166 country. Character encoding does not
@@ -735,12 +735,12 @@ cache_locale_time(void)
* Studio 2012, setlocale() accepts locale names in addition to the strings it
* accepted historically. It does not standardize them; setlocale("Th-tH")
* returns "Th-tH". setlocale(category, "") still returns a traditional
- * string. Furthermore, msvcr110.dll changed the undocumented _locale_t
+ * string. Furthermore, msvcr110.dll changed the undocumented _locale_t
* content to carry locale names instead of locale identifiers.
*
* MinGW headers declare _create_locale(), but msvcrt.dll lacks that symbol.
* IsoLocaleName() always fails in a MinGW-built postgres.exe, so only
- * Unix-style values of the lc_messages GUC can elicit localized messages. In
+ * Unix-style values of the lc_messages GUC can elicit localized messages. In
* particular, every lc_messages setting that initdb can select automatically
* will yield only C-locale messages. XXX This could be fixed by running the
* fully-qualified locale name through a lookup table.
@@ -784,7 +784,7 @@ IsoLocaleName(const char *winlocname)
* need not standardize letter case here. So long as we do not ship
* message catalogs for which it would matter, we also need not
* translate the script/variant portion, e.g. uz-Cyrl-UZ to
- * uz_UZ@cyrillic. Simply replace the hyphen with an underscore.
+ * uz_UZ@cyrillic. Simply replace the hyphen with an underscore.
*
* Note that the locale name can be less-specific than the value we
* would derive under earlier Visual Studio releases. For example,
@@ -839,7 +839,7 @@ IsoLocaleName(const char *winlocname)
* could fail if the locale is C, so str_tolower() shouldn't call it
* in that case.
*
- * Note that we currently lack any way to flush the cache. Since we don't
+ * Note that we currently lack any way to flush the cache. Since we don't
* support ALTER COLLATION, this is OK. The worst case is that someone
* drops a collation, and a useless cache entry hangs around in existing
* backends.
@@ -1033,7 +1033,7 @@ report_newlocale_failure(const char *localename)
/*
- * Create a locale_t from a collation OID. Results are cached for the
+ * Create a locale_t from a collation OID. Results are cached for the
* lifetime of the backend. Thus, do not free the result with freelocale().
*
* As a special optimization, the default/database collation returns 0.
@@ -1216,7 +1216,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale)
* This has almost the API of mbstowcs_l(), except that *from need not be
* null-terminated; instead, the number of input bytes is specified as
* fromlen. Also, we ereport() rather than returning -1 for invalid
- * input encoding. tolen is the maximum number of wchar_t's to store at *to.
+ * input encoding. tolen is the maximum number of wchar_t's to store at *to.
* The output will be zero-terminated iff there is room.
*/
size_t
diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c
index 66c64c198f0..70af1aa32c2 100644
--- a/src/backend/utils/adt/pg_lzcompress.c
+++ b/src/backend/utils/adt/pg_lzcompress.c
@@ -578,7 +578,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
/*
* If we've emitted more than first_success_by bytes without finding
- * anything compressible at all, fail. This lets us fall out
+ * anything compressible at all, fail. This lets us fall out
* reasonably quickly when looking at incompressible input (such as
* pre-compressed data).
*/
@@ -602,7 +602,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
hist_next, hist_recycle,
dp, dend);
dp++; /* Do not do this ++ in the line above! */
- /* The macro would do it four times - Jan. */
+ /* The macro would do it four times - Jan. */
}
found_match = true;
}
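The reason the increment must stay on its own line is the usual multiple-evaluation hazard: a function-like macro that expands its argument several times applies any side effect several times. A contrived example (EMIT_FOUR is not the pglz macro):

#include <stdio.h>

#define EMIT_FOUR(p)  (out[0] = *(p), out[1] = *(p), out[2] = *(p), out[3] = *(p))

int
main(void)
{
    char  src[] = "abcd";
    char  out[4];
    char *dp = src;

    EMIT_FOUR(dp);      /* reads *dp four times; dp itself is untouched */
    dp++;               /* advance exactly once, afterwards */
    printf("%.4s next=%c\n", out, *dp);     /* aaaa next=b */

    /* EMIT_FOUR(dp++) would have advanced dp four times instead */
    return 0;
}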
@@ -616,7 +616,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest,
hist_next, hist_recycle,
dp, dend);
dp++; /* Do not do this ++ in the line above! */
- /* The macro would do it four times - Jan. */
+ /* The macro would do it four times - Jan. */
}
}
diff --git a/src/backend/utils/adt/pseudotypes.c b/src/backend/utils/adt/pseudotypes.c
index 04650d8ba4a..5494d70c923 100644
--- a/src/backend/utils/adt/pseudotypes.c
+++ b/src/backend/utils/adt/pseudotypes.c
@@ -6,7 +6,7 @@
* A pseudo-type isn't really a type and never has any operations, but
* we do need to supply input and output functions to satisfy the links
* in the pseudo-type's entry in pg_type. In most cases the functions
- * just throw an error if invoked. (XXX the error messages here cover
+ * just throw an error if invoked. (XXX the error messages here cover
* the most common case, but might be confusing in some contexts. Can
* we do better?)
*
@@ -139,7 +139,7 @@ anyarray_out(PG_FUNCTION_ARGS)
* anyarray_recv - binary input routine for pseudo-type ANYARRAY.
*
* XXX this could actually be made to work, since the incoming array
- * data will contain the element type OID. Need to think through
+ * data will contain the element type OID. Need to think through
* type-safety issues before allowing it, however.
*/
Datum
@@ -216,7 +216,7 @@ anyrange_out(PG_FUNCTION_ARGS)
* void_in - input routine for pseudo-type VOID.
*
* We allow this so that PL functions can return VOID without any special
- * hack in the PL handler. Whatever value the PL thinks it's returning
+ * hack in the PL handler. Whatever value the PL thinks it's returning
* will just be ignored.
*/
Datum
diff --git a/src/backend/utils/adt/rangetypes.c b/src/backend/utils/adt/rangetypes.c
index 023df8c3e9e..3da08bb8370 100644
--- a/src/backend/utils/adt/rangetypes.c
+++ b/src/backend/utils/adt/rangetypes.c
@@ -1441,7 +1441,7 @@ tstzrange_subdiff(PG_FUNCTION_ARGS)
*
* This is for use by range-related functions that follow the convention
* of using the fn_extra field as a pointer to the type cache entry for
- * the range type. Functions that need to cache more information than
+ * the range type. Functions that need to cache more information than
* that must fend for themselves.
*/
TypeCacheEntry *
@@ -1465,7 +1465,7 @@ range_get_typcache(FunctionCallInfo fcinfo, Oid rngtypid)
* range_serialize: construct a range value from bounds and empty-flag
*
* This does not force canonicalization of the range value. In most cases,
- * external callers should only be canonicalization functions. Note that
+ * external callers should only be canonicalization functions. Note that
* we perform some datatype-independent canonicalization checks anyway.
*/
RangeType *
@@ -1802,7 +1802,7 @@ range_cmp_bounds(TypeCacheEntry *typcache, RangeBound *b1, RangeBound *b2)
* Compare two range boundary point values, returning <0, 0, or >0 according
* to whether b1 is less than, equal to, or greater than b2.
*
- * This is similar to but simpler than range_cmp_bounds(). We just compare
+ * This is similar to but simpler than range_cmp_bounds(). We just compare
* the values held in b1 and b2, ignoring inclusive/exclusive flags. The
* lower/upper flags only matter for infinities, where they tell us if the
* infinity is plus or minus.
@@ -2283,7 +2283,7 @@ range_contains_elem_internal(TypeCacheEntry *typcache, RangeType *r, Datum val)
/*
* datum_compute_size() and datum_write() are used to insert the bound
- * values into a range object. They are modeled after heaptuple.c's
+ * values into a range object. They are modeled after heaptuple.c's
* heap_compute_data_size() and heap_fill_tuple(), but we need not handle
* null values here. TYPE_IS_PACKABLE must test the same conditions as
* heaptuple.c's ATT_IS_PACKABLE macro.
diff --git a/src/backend/utils/adt/rangetypes_gist.c b/src/backend/utils/adt/rangetypes_gist.c
index 464b37fe1fd..ad659a1a89b 100644
--- a/src/backend/utils/adt/rangetypes_gist.c
+++ b/src/backend/utils/adt/rangetypes_gist.c
@@ -300,7 +300,7 @@ range_gist_penalty(PG_FUNCTION_ARGS)
else if (orig_lower.infinite && orig_upper.infinite)
{
/*
- * Original range requires broadening. (-inf; +inf) is most far
+ * Original range requires broadening. (-inf; +inf) is most far
* from normal range in this case.
*/
*penalty = 2 * CONTAIN_EMPTY_PENALTY;
@@ -497,7 +497,7 @@ range_gist_penalty(PG_FUNCTION_ARGS)
/*
* The GiST PickSplit method for ranges
*
- * Primarily, we try to segregate ranges of different classes. If splitting
+ * Primarily, we try to segregate ranges of different classes. If splitting
* ranges of the same class, use the appropriate split method for that class.
*/
Datum
@@ -668,7 +668,7 @@ range_gist_same(PG_FUNCTION_ARGS)
/*
* range_eq will ignore the RANGE_CONTAIN_EMPTY flag, so we have to check
- * that for ourselves. More generally, if the entries have been properly
+ * that for ourselves. More generally, if the entries have been properly
* normalized, then unequal flags bytes must mean unequal ranges ... so
* let's just test all the flag bits at once.
*/
@@ -816,7 +816,7 @@ range_gist_consistent_int(TypeCacheEntry *typcache, StrategyNumber strategy,
/*
* Empty ranges are contained by anything, so if key is or
- * contains any empty ranges, we must descend into it. Otherwise,
+ * contains any empty ranges, we must descend into it. Otherwise,
* descend only if key overlaps the query.
*/
if (RangeIsOrContainsEmpty(key))
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index 4dbb1163bd3..30a05e6d584 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -142,7 +142,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation)
char errMsg[100];
/*
- * Look for a match among previously compiled REs. Since the data
+ * Look for a match among previously compiled REs. Since the data
* structure is self-organizing with most-used entries at the front, our
* search strategy can just be to scan from the front.
*/
@@ -192,7 +192,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation)
/*
* Here and in other places in this file, do CHECK_FOR_INTERRUPTS
- * before reporting a regex error. This is so that if the regex
+ * before reporting a regex error. This is so that if the regex
* library aborts and returns REG_CANCEL, we don't print an error
* message that implies the regex was invalid.
*/
@@ -298,7 +298,7 @@ RE_wchar_execute(regex_t *re, pg_wchar *data, int data_len,
* dat_len --- the length of the data string
* nmatch, pmatch --- optional return area for match details
*
- * Data is given in the database encoding. We internally
+ * Data is given in the database encoding. We internally
* convert to array of pg_wchar which is what Spencer's regex package wants.
*/
static bool
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index 0d1ff61bf9f..88b1c2b5648 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -85,7 +85,7 @@ regprocin(PG_FUNCTION_ARGS)
/*
* In bootstrap mode we assume the given name is not schema-qualified, and
- * just search pg_proc for a unique match. This is needed for
+ * just search pg_proc for a unique match. This is needed for
* initializing other system catalogs (pg_namespace may not exist yet, and
* certainly there are no schemas other than pg_catalog).
*/
@@ -270,7 +270,7 @@ regprocedurein(PG_FUNCTION_ARGS)
/*
* Else it's a name and arguments. Parse the name and arguments, look up
* potential matches in the current namespace search list, and scan to see
- * which one exactly matches the given argument types. (There will not be
+ * which one exactly matches the given argument types. (There will not be
* more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
@@ -319,7 +319,7 @@ format_procedure_qualified(Oid procedure_oid)
* Routine to produce regprocedure names; see format_procedure above.
*
* force_qualify says whether to schema-qualify; if true, the name is always
- * qualified regardless of search_path visibility. Otherwise the name is only
+ * qualified regardless of search_path visibility. Otherwise the name is only
* qualified if the function is not in path.
*/
static char *
@@ -453,7 +453,7 @@ regoperin(PG_FUNCTION_ARGS)
/*
* In bootstrap mode we assume the given name is not schema-qualified, and
- * just search pg_operator for a unique match. This is needed for
+ * just search pg_operator for a unique match. This is needed for
* initializing other system catalogs (pg_namespace may not exist yet, and
* certainly there are no schemas other than pg_catalog).
*/
@@ -642,7 +642,7 @@ regoperatorin(PG_FUNCTION_ARGS)
/*
* Else it's a name and arguments. Parse the name and arguments, look up
* potential matches in the current namespace search list, and scan to see
- * which one exactly matches the given argument types. (There will not be
+ * which one exactly matches the given argument types. (There will not be
* more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
@@ -897,7 +897,7 @@ regclassout(PG_FUNCTION_ARGS)
/*
* In bootstrap mode, skip the fancy namespace stuff and just return
- * the class name. (This path is only needed for debugging output
+ * the class name. (This path is only needed for debugging output
* anyway.)
*/
if (IsBootstrapProcessingMode())
@@ -1389,7 +1389,7 @@ stringToQualifiedNameList(const char *string)
/*
* Given a C string, parse it into a qualified function or operator name
- * followed by a parenthesized list of type names. Reduce the
+ * followed by a parenthesized list of type names. Reduce the
* type names to an array of OIDs (returned into *nargs and *argtypes;
* the argtypes array should be of size FUNC_MAX_ARGS). The function or
* operator name is returned to *names as a List of Strings.
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 65edc1fb04e..100e770bc7e 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -698,7 +698,7 @@ ri_restrict_del(TriggerData *trigdata, bool is_no_action)
/*
* If another PK row now exists providing the old key values, we
- * should not do anything. However, this check should only be
+ * should not do anything. However, this check should only be
* made in the NO ACTION case; in RESTRICT cases we don't wish to
* allow another row to be substituted.
*/
@@ -922,7 +922,7 @@ ri_restrict_upd(TriggerData *trigdata, bool is_no_action)
/*
* If another PK row now exists providing the old key values, we
- * should not do anything. However, this check should only be
+ * should not do anything. However, this check should only be
* made in the NO ACTION case; in RESTRICT cases we don't wish to
* allow another row to be substituted.
*/
@@ -1850,7 +1850,7 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
* believe no check is necessary. So we need to do another lookup
* now and in case a reference still exists, abort the operation.
* That is already implemented in the NO ACTION trigger, so just
- * run it. (This recheck is only needed in the SET DEFAULT case,
+ * run it. (This recheck is only needed in the SET DEFAULT case,
* since CASCADE would remove such rows, while SET NULL is certain
* to result in rows that satisfy the FK constraint.)
*/
@@ -2041,7 +2041,7 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
* believe no check is necessary. So we need to do another lookup
* now and in case a reference still exists, abort the operation.
* That is already implemented in the NO ACTION trigger, so just
- * run it. (This recheck is only needed in the SET DEFAULT case,
+ * run it. (This recheck is only needed in the SET DEFAULT case,
* since CASCADE must change the FK key values, while SET NULL is
* certain to result in rows that satisfy the FK constraint.)
*/
@@ -2397,7 +2397,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
* Temporarily increase work_mem so that the check query can be executed
* more efficiently. It seems okay to do this because the query is simple
* enough to not use a multiple of work_mem, and one typically would not
- * have many large foreign-key validations happening concurrently. So
+ * have many large foreign-key validations happening concurrently. So
* this seems to meet the criteria for being considered a "maintenance"
* operation, and accordingly we use maintenance_work_mem.
*
@@ -2451,7 +2451,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel)
/*
* The columns to look at in the result tuple are 1..N, not whatever
- * they are in the fk_rel. Hack up riinfo so that the subroutines
+ * they are in the fk_rel. Hack up riinfo so that the subroutines
* called here will behave properly.
*
* In addition to this, we have to pass the correct tupdesc to
@@ -3180,7 +3180,7 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo,
errhint("This is most likely due to a rule having rewritten the query.")));
/*
- * Determine which relation to complain about. If tupdesc wasn't passed
+ * Determine which relation to complain about. If tupdesc wasn't passed
* by caller, assume the violator tuple came from there.
*/
onfk = (queryno == RI_PLAN_CHECK_LOOKUPPK);
diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c
index 1ed0c5b205a..36ef4eda85e 100644
--- a/src/backend/utils/adt/rowtypes.c
+++ b/src/backend/utils/adt/rowtypes.c
@@ -278,7 +278,7 @@ record_in(PG_FUNCTION_ARGS)
/*
* We cannot return tuple->t_data because heap_form_tuple allocates it as
* part of a larger chunk, and our caller may expect to be able to pfree
- * our result. So must copy the info into a new palloc chunk.
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
@@ -622,7 +622,7 @@ record_recv(PG_FUNCTION_ARGS)
/*
* We cannot return tuple->t_data because heap_form_tuple allocates it as
* part of a larger chunk, and our caller may expect to be able to pfree
- * our result. So must copy the info into a new palloc chunk.
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
@@ -860,7 +860,7 @@ record_cmp(FunctionCallInfo fcinfo)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
@@ -1095,7 +1095,7 @@ record_eq(PG_FUNCTION_ARGS)
/*
* Scan corresponding columns, allowing for dropped columns in different
- * places in the two rows. i1 and i2 are physical column indexes, j is
+ * places in the two rows. i1 and i2 are physical column indexes, j is
* the logical column index.
*/
i1 = i2 = j = 0;
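The i1/i2/j bookkeeping is a two-pointer walk that skips dropped attributes independently on each side while counting logical columns once. A toy version with boolean dropped flags standing in for the tuple descriptors:

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
    bool dropped1[] = {false, true,  false, false};     /* row 1 layout */
    bool dropped2[] = {true,  false, false, false};     /* row 2 layout */
    int  n1 = 4, n2 = 4;
    int  i1 = 0, i2 = 0, j = 0;

    while (i1 < n1 && i2 < n2)
    {
        if (dropped1[i1]) { i1++; continue; }   /* skip dropped columns */
        if (dropped2[i2]) { i2++; continue; }

        printf("logical column %d: physical %d vs %d\n", j, i1, i2);
        i1++;
        i2++;
        j++;
    }
    /* pairs compared: (0,1), (2,2), (3,3) */
    return 0;
}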
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 9e451349489..bbd575077d9 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -153,11 +153,11 @@ typedef struct
*
* Selecting aliases is unreasonably complicated because of the need to dump
* rules/views whose underlying tables may have had columns added, deleted, or
- * renamed since the query was parsed. We must nonetheless print the rule/view
+ * renamed since the query was parsed. We must nonetheless print the rule/view
* in a form that can be reloaded and will produce the same results as before.
*
* For each RTE used in the query, we must assign column aliases that are
- * unique within that RTE. SQL does not require this of the original query,
+ * unique within that RTE. SQL does not require this of the original query,
* but due to factors such as *-expansion we need to be able to uniquely
* reference every column in a decompiled query. As long as we qualify all
* column references, per-RTE uniqueness is sufficient for that.
@@ -212,8 +212,8 @@ typedef struct
/*
* new_colnames is an array containing column aliases to use for columns
* that would exist if the query was re-parsed against the current
- * definitions of its base tables. This is what to print as the column
- * alias list for the RTE. This array does not include dropped columns,
+ * definitions of its base tables. This is what to print as the column
+ * alias list for the RTE. This array does not include dropped columns,
* but it will include columns added since original parsing. Indexes in
* it therefore have little to do with current varattno values. As above,
* entries are unique unless this is for an unnamed JOIN RTE. (In such an
@@ -1075,7 +1075,7 @@ pg_get_indexdef_worker(Oid indexrelid, int colno,
context = deparse_context_for(get_relation_name(indrelid), indrelid);
/*
- * Start the index definition. Note that the index's name should never be
+ * Start the index definition. Note that the index's name should never be
* schema-qualified, but the indexed rel's name may be.
*/
initStringInfo(&buf);
@@ -1776,7 +1776,7 @@ pg_get_serial_sequence(PG_FUNCTION_ARGS)
SysScanDesc scan;
HeapTuple tup;
- /* Look up table name. Can't lock it - we might not have privileges. */
+ /* Look up table name. Can't lock it - we might not have privileges. */
tablerv = makeRangeVarFromNameList(textToQualifiedNameList(tablename));
tableOid = RangeVarGetRelid(tablerv, NoLock, false);
@@ -2297,7 +2297,7 @@ deparse_expression(Node *expr, List *dpcontext,
* tree (ie, not the raw output of gram.y).
*
* dpcontext is a list of deparse_namespace nodes representing the context
- * for interpreting Vars in the node tree. It can be NIL if no Vars are
+ * for interpreting Vars in the node tree. It can be NIL if no Vars are
* expected.
*
* forceprefix is TRUE to force all Vars to be prefixed with their table names.
@@ -2337,7 +2337,7 @@ deparse_expression_pretty(Node *expr, List *dpcontext,
*
* Given the reference name (alias) and OID of a relation, build deparsing
* context for an expression referencing only that relation (as varno 1,
- * varlevelsup 0). This is sufficient for many uses of deparse_expression.
+ * varlevelsup 0). This is sufficient for many uses of deparse_expression.
* ----------
*/
List *
@@ -2408,7 +2408,7 @@ deparse_context_for_planstate(Node *planstate, List *ancestors,
dpns->ctes = NIL;
/*
- * Set up column name aliases. We will get rather bogus results for join
+ * Set up column name aliases. We will get rather bogus results for join
* RTEs, but that doesn't matter because plan trees don't contain any join
* alias Vars.
*/
@@ -2966,7 +2966,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
/*
* Scan the columns, select a unique alias for each one, and store it in
* colinfo->colnames and colinfo->new_colnames. The former array has NULL
- * entries for dropped columns, the latter omits them. Also mark
+ * entries for dropped columns, the latter omits them. Also mark
* new_colnames entries as to whether they are new since parse time; this
* is the case for entries beyond the length of rte->eref->colnames.
*/
@@ -3021,7 +3021,7 @@ set_relation_column_names(deparse_namespace *dpns, RangeTblEntry *rte,
/*
* For a relation RTE, we need only print the alias column names if any
- * are different from the underlying "real" names. For a function RTE,
+ * are different from the underlying "real" names. For a function RTE,
* always emit a complete column alias list; this is to protect against
* possible instability of the default column names (eg, from altering
* parameter names). For other RTE types, print if we changed anything OR
@@ -3484,7 +3484,7 @@ identify_join_columns(JoinExpr *j, RangeTblEntry *jrte,
/*
* If there's a USING clause, deconstruct the join quals to identify the
- * merged columns. This is a tad painful but if we cannot rely on the
+ * merged columns. This is a tad painful but if we cannot rely on the
* column names, there is no other representation of which columns were
* joined by USING. (Unless the join type is FULL, we can't tell from the
* joinaliasvars list which columns are merged.) Note: we assume that the
@@ -3618,7 +3618,7 @@ set_deparse_planstate(deparse_namespace *dpns, PlanState *ps)
* We special-case Append and MergeAppend to pretend that the first child
* plan is the OUTER referent; we have to interpret OUTER Vars in their
* tlists according to one of the children, and the first one is the most
- * natural choice. Likewise special-case ModifyTable to pretend that the
+ * natural choice. Likewise special-case ModifyTable to pretend that the
* first child plan is the OUTER referent; this is to support RETURNING
* lists containing references to non-target relations.
*/
@@ -4034,8 +4034,8 @@ get_query_def(Query *query, StringInfo buf, List *parentnamespace,
/*
* Before we begin to examine the query, acquire locks on referenced
- * relations, and fix up deleted columns in JOIN RTEs. This ensures
- * consistent results. Note we assume it's OK to scribble on the passed
+ * relations, and fix up deleted columns in JOIN RTEs. This ensures
+ * consistent results. Note we assume it's OK to scribble on the passed
* querytree!
*
* We are only deparsing the query (we are not about to execute it), so we
@@ -4508,7 +4508,7 @@ get_target_list(List *targetList, deparse_context *context,
}
/*
- * Figure out what the result column should be called. In the context
+ * Figure out what the result column should be called. In the context
* of a view, use the view's tuple descriptor (so as to pick up the
* effects of any column RENAME that's been done on the view).
* Otherwise, just use what we can find in the TLE.
@@ -4730,7 +4730,7 @@ get_rule_sortgroupclause(SortGroupClause *srt, List *tlist, bool force_colno,
* expression is a constant, force it to be dumped with an explicit cast
* as decoration --- this is because a simple integer constant is
* ambiguous (and will be misinterpreted by findTargetlistEntry()) if we
- * dump it without any decoration. Otherwise, just dump the expression
+ * dump it without any decoration. Otherwise, just dump the expression
* normally.
*/
if (force_colno)
@@ -5425,8 +5425,8 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
* If it's an unnamed join, look at the expansion of the alias variable.
* If it's a simple reference to one of the input vars, then recursively
- * print the name of that var instead. When it's not a simple reference,
- * we have to just print the unqualified join column name. (This can only
+ * print the name of that var instead. When it's not a simple reference,
+ * we have to just print the unqualified join column name. (This can only
* happen with "dangerous" merged columns in a JOIN USING; we took pains
* previously to make the unqualified column name unique in such cases.)
*
@@ -5454,7 +5454,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
* Unnamed join has no refname. (Note: since it's unnamed, there is
* no way the user could have referenced it to create a whole-row Var
- * for it. So we don't have to cover that case below.)
+ * for it. So we don't have to cover that case below.)
*/
Assert(refname == NULL);
}
@@ -5495,7 +5495,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
/*
- * Get the name of a field of an expression of composite type. The
+ * Get the name of a field of an expression of composite type. The
* expression is usually a Var, but we handle other cases too.
*
* levelsup is an extra offset to interpret the Var's varlevelsup correctly.
@@ -5505,7 +5505,7 @@ get_variable(Var *var, int levelsup, bool istoplevel, deparse_context *context)
* could also be RECORD. Since no actual table or view column is allowed to
* have type RECORD, a Var of type RECORD must refer to a JOIN or FUNCTION RTE
* or to a subquery output. We drill down to find the ultimate defining
- * expression and attempt to infer the field name from it. We ereport if we
+ * expression and attempt to infer the field name from it. We ereport if we
* can't determine the name.
*
* Similarly, a PARAM of type RECORD has to refer to some expression of
@@ -5870,7 +5870,7 @@ get_name_for_var_field(Var *var, int fieldno,
/*
* We now have an expression we can't expand any more, so see if
- * get_expr_result_type() can do anything with it. If not, pass to
+ * get_expr_result_type() can do anything with it. If not, pass to
* lookup_rowtype_tupdesc() which will probably fail, but will give an
* appropriate error message while failing.
*/
@@ -5888,7 +5888,7 @@ get_name_for_var_field(Var *var, int fieldno,
* reference a parameter supplied by an upper NestLoop or SubPlan plan node.
*
* If successful, return the expression and set *dpns_p and *ancestor_cell_p
- * appropriately for calling push_ancestor_plan(). If no referent can be
+ * appropriately for calling push_ancestor_plan(). If no referent can be
* found, return NULL.
*/
static Node *
@@ -6020,7 +6020,7 @@ get_parameter(Param *param, deparse_context *context)
/*
* If it's a PARAM_EXEC parameter, try to locate the expression from which
- * the parameter was computed. Note that failing to find a referent isn't
+ * the parameter was computed. Note that failing to find a referent isn't
* an error, since the Param might well be a subplan output rather than an
* input.
*/
@@ -6498,10 +6498,10 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* If there's a refassgnexpr, we want to print the node in the
- * format "array[subscripts] := refassgnexpr". This is not
+ * format "array[subscripts] := refassgnexpr". This is not
* legal SQL, so decompilation of INSERT or UPDATE statements
* should always use processIndirection as part of the
- * statement-level syntax. We should only see this when
+ * statement-level syntax. We should only see this when
* EXPLAIN tries to print the targetlist of a plan resulting
* from such a statement.
*/
@@ -6660,7 +6660,7 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* We cannot see an already-planned subplan in rule deparsing,
- * only while EXPLAINing a query plan. We don't try to
+ * only while EXPLAINing a query plan. We don't try to
* reconstruct the original SQL, just reference the subplan
* that appears elsewhere in EXPLAIN's result.
*/
@@ -6733,14 +6733,14 @@ get_rule_expr(Node *node, deparse_context *context,
* There is no good way to represent a FieldStore as real SQL,
* so decompilation of INSERT or UPDATE statements should
* always use processIndirection as part of the
- * statement-level syntax. We should only get here when
+ * statement-level syntax. We should only get here when
* EXPLAIN tries to print the targetlist of a plan resulting
* from such a statement. The plan case is even harder than
* ordinary rules would be, because the planner tries to
* collapse multiple assignments to the same field or subfield
* into one FieldStore; so we can see a list of target fields
* not just one, and the arguments could be FieldStores
- * themselves. We don't bother to try to print the target
+ * themselves. We don't bother to try to print the target
* field names; we just print the source arguments, with a
* ROW() around them if there's more than one. This isn't
* terribly complete, but it's probably good enough for
@@ -7642,7 +7642,7 @@ get_coercion_expr(Node *arg, deparse_context *context,
* Since parse_coerce.c doesn't immediately collapse application of
* length-coercion functions to constants, what we'll typically see in
* such cases is a Const with typmod -1 and a length-coercion function
- * right above it. Avoid generating redundant output. However, beware of
+ * right above it. Avoid generating redundant output. However, beware of
* suppressing casts when the user actually wrote something like
* 'foo'::text::char(3).
*/
@@ -7724,7 +7724,7 @@ get_const_expr(Const *constval, deparse_context *context, int showtype)
/*
* These types are printed without quotes unless they contain
* values that aren't accepted by the scanner unquoted (e.g.,
- * 'NaN'). Note that strtod() and friends might accept NaN,
+ * 'NaN'). Note that strtod() and friends might accept NaN,
* so we can't use that to test.
*
* In reality we only need to defend against infinity and NaN,
@@ -8156,7 +8156,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
else if (rte->rtekind == RTE_FUNCTION)
{
/*
- * For a function RTE, always print alias. This covers possible
+ * For a function RTE, always print alias. This covers possible
* renaming of the function and/or instability of the
* FigureColname rules for things that aren't simple functions.
* Also note we'd need to force it anyway for the RECORD case.
@@ -8398,7 +8398,7 @@ get_opclass_name(Oid opclass, Oid actual_datatype,
if (!OidIsValid(actual_datatype) ||
GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass)
{
- /* Okay, we need the opclass name. Do we need to qualify it? */
+ /* Okay, we need the opclass name. Do we need to qualify it? */
opcname = NameStr(opcrec->opcname);
if (OpclassIsVisible(opclass))
appendStringInfo(buf, " %s", quote_identifier(opcname));
@@ -8693,13 +8693,13 @@ generate_relation_name(Oid relid, List *namespaces)
* generate_function_name
* Compute the name to display for a function specified by OID,
* given that it is being called with the specified actual arg names and
- * types. (Those matter because of ambiguous-function resolution rules.)
+ * types. (Those matter because of ambiguous-function resolution rules.)
*
* If we're dealing with a potentially variadic function (in practice, this
* means a FuncExpr and not some other way of calling the function), then
* was_variadic should specify whether variadic arguments have been merged,
* and *use_variadic_p will be set to indicate whether to print VARIADIC in
- * the output. For non-FuncExpr cases, was_variadic should be FALSE and
+ * the output. For non-FuncExpr cases, was_variadic should be FALSE and
* use_variadic_p can be NULL.
*
* The result includes all necessary quoting and schema-prefixing.
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 9982bf54d4b..cc93fd5a44e 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -72,7 +72,7 @@
* float8 oprjoin (internal, oid, internal, int2, internal);
*
* (Before Postgres 8.4, join estimators had only the first four of these
- * parameters. That signature is still allowed, but deprecated.) The
+ * parameters. That signature is still allowed, but deprecated.) The
* relationship between jointype and sjinfo is explained in the comments for
* clause_selectivity() --- the short version is that jointype is usually
* best ignored in favor of examining sjinfo.
@@ -209,7 +209,7 @@ static List *add_predicate_to_quals(IndexOptInfo *index, List *indexQuals);
*
* Note: this routine is also used to estimate selectivity for some
* operators that are not "=" but have comparable selectivity behavior,
- * such as "~=" (geometric approximate-match). Even for "=", we must
+ * such as "~=" (geometric approximate-match). Even for "=", we must
* keep in mind that the left and right datatypes may differ.
*/
Datum
@@ -273,7 +273,7 @@ var_eq_const(VariableStatData *vardata, Oid operator,
/*
* If we matched the var to a unique index or DISTINCT clause, assume
- * there is exactly one match regardless of anything else. (This is
+ * there is exactly one match regardless of anything else. (This is
* slightly bogus, since the index or clause's equality operator might be
* different from ours, but it's much more likely to be right than
* ignoring the information.)
@@ -296,7 +296,7 @@ var_eq_const(VariableStatData *vardata, Oid operator,
/*
* Is the constant "=" to any of the column's most common values?
* (Although the given operator may not really be "=", we will assume
- * that seeing whether it returns TRUE is an appropriate test. If you
+ * that seeing whether it returns TRUE is an appropriate test. If you
* don't like this, maybe you shouldn't be using eqsel for your
* operator...)
*/
@@ -408,7 +408,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
/*
* If we matched the var to a unique index or DISTINCT clause, assume
- * there is exactly one match regardless of anything else. (This is
+ * there is exactly one match regardless of anything else. (This is
* slightly bogus, since the index or clause's equality operator might be
* different from ours, but it's much more likely to be right than
* ignoring the information.)
@@ -432,7 +432,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator,
* result averaged over all possible values whether common or
* uncommon. (Essentially, we are assuming that the not-yet-known
* comparison value is equally likely to be any of the possible
- * values, regardless of their frequency in the table. Is that a good
+ * values, regardless of their frequency in the table. Is that a good
* idea?)
*/
selec = 1.0 - stats->stanullfrac;
@@ -655,7 +655,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
* essentially using the histogram just as a representative sample. However,
* small histograms are unlikely to be all that representative, so the caller
* should be prepared to fall back on some other estimation approach when the
- * histogram is missing or very small. It may also be prudent to combine this
+ * histogram is missing or very small. It may also be prudent to combine this
* approach with another one when the histogram is small.
*
* If the actual histogram size is not at least min_hist_size, we won't bother
@@ -673,7 +673,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
*
* Note that the result disregards both the most-common-values (if any) and
* null entries. The caller is expected to combine this result with
- * statistics for those portions of the column population. It may also be
+ * statistics for those portions of the column population. It may also be
* prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
*/
double
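The histogram-as-sample idea reduces to: find the bucket containing the comparison value, interpolate within it, and divide by the number of equal-population buckets. A numeric-only sketch with a hypothetical hist_fraction_below helper; the real code works through the datatype's comparison operator and convert_to_scalar.

#include <stdio.h>

static double
hist_fraction_below(const double *bounds, int nbounds, double value)
{
    if (value <= bounds[0])
        return 0.0;
    if (value >= bounds[nbounds - 1])
        return 1.0;

    for (int i = 1; i < nbounds; i++)
    {
        if (value < bounds[i])
        {
            double within = (value - bounds[i - 1]) / (bounds[i] - bounds[i - 1]);

            /* each of the nbounds-1 buckets holds an equal share of rows */
            return ((i - 1) + within) / (nbounds - 1);
        }
    }
    return 1.0;         /* not reached */
}

int
main(void)
{
    double bounds[] = {0, 10, 20, 50, 100};     /* 4 equal-population buckets */

    printf("%.3f\n", hist_fraction_below(bounds, 5, 35.0));     /* 0.625 */
    return 0;
}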
@@ -786,7 +786,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
*
* If the binary search accesses the first or last histogram
* entry, we try to replace that endpoint with the true column min
- * or max as found by get_actual_variable_range(). This
+ * or max as found by get_actual_variable_range(). This
* ameliorates misestimates when the min or max is moving as a
* result of changes since the last ANALYZE. Note that this could
* result in effectively including MCVs into the histogram that
@@ -890,7 +890,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
/*
* Watch out for the possibility that we got a NaN or
- * Infinity from the division. This can happen
+ * Infinity from the division. This can happen
* despite the previous checks, if for example "low"
* is -Infinity.
*/
@@ -905,7 +905,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
* Ideally we'd produce an error here, on the grounds that
* the given operator shouldn't have scalarXXsel
* registered as its selectivity func unless we can deal
- * with its operand types. But currently, all manner of
+ * with its operand types. But currently, all manner of
* stuff is invoking scalarXXsel, so give a default
* estimate until that can be fixed.
*/
@@ -931,7 +931,7 @@ ineq_histogram_selectivity(PlannerInfo *root,
/*
* The histogram boundaries are only approximate to begin with,
- * and may well be out of date anyway. Therefore, don't believe
+ * and may well be out of date anyway. Therefore, don't believe
* extremely small or large selectivity estimates --- unless we
* got actual current endpoint values from the table.
*/
@@ -1128,7 +1128,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* If this is for a NOT LIKE or similar operator, get the corresponding
- * positive-match operator and work with that. Set result to the correct
+ * positive-match operator and work with that. Set result to the correct
* default estimate, too.
*/
if (negate)
@@ -1214,7 +1214,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* Pull out any fixed prefix implied by the pattern, and estimate the
- * fractional selectivity of the remainder of the pattern. Unlike many of
+ * fractional selectivity of the remainder of the pattern. Unlike many of
* the other functions in this file, we use the pattern operator's actual
* collation for this step. This is not because we expect the collation
* to make a big difference in the selectivity estimate (it seldom would),
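Pulling out the fixed prefix amounts to copying pattern characters up to the first wildcard; the prefix can then be treated as a range restriction and the remainder estimated separately. A simplified sketch that ignores backslash escapes; like_fixed_prefix is an illustrative name.

#include <stdio.h>
#include <string.h>

static size_t
like_fixed_prefix(const char *pattern, char *prefix, size_t prefixsz)
{
    size_t n = 0;

    while (pattern[n] && pattern[n] != '%' && pattern[n] != '_' &&
           n + 1 < prefixsz)
    {
        prefix[n] = pattern[n];
        n++;
    }
    prefix[n] = '\0';
    return n;
}

int
main(void)
{
    char prefix[32];

    like_fixed_prefix("abc%def_", prefix, sizeof(prefix));
    printf("%s\n", prefix);     /* abc */
    return 0;
}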
@@ -1332,7 +1332,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate)
/*
* If we have most-common-values info, add up the fractions of the MCV
* entries that satisfy MCV OP PATTERN. These fractions contribute
- * directly to the result selectivity. Also add up the total fraction
+ * directly to the result selectivity. Also add up the total fraction
* represented by MCV entries.
*/
mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true,
@@ -1838,7 +1838,7 @@ scalararraysel(PlannerInfo *root,
/*
* For generic operators, we assume the probability of success is
- * independent for each array element. But for "= ANY" or "<> ALL",
+ * independent for each array element. But for "= ANY" or "<> ALL",
* if the array elements are distinct (which'd typically be the case)
* then the probabilities are disjoint, and we should just sum them.
*
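The difference between the two combination rules is small at low selectivities but real. A quick numeric comparison of "independent per element" (1 - (1 - s)^n) against "disjoint, just sum" (n * s), with an assumed per-element selectivity:

#include <stdio.h>

int
main(void)
{
    double s = 0.01;        /* assumed selectivity of one element comparison */
    int    n = 5;           /* number of array elements */
    double indep = 1.0;
    double disjoint = 0.0;

    for (int i = 0; i < n; i++)
    {
        indep *= (1.0 - s);     /* chance that no element has matched so far */
        disjoint += s;          /* distinct elements: just sum the chances */
    }
    indep = 1.0 - indep;

    printf("independent: %.6f  disjoint: %.6f\n", indep, disjoint);
    /* independent: 0.049010  disjoint: 0.050000 */
    return 0;
}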
@@ -2253,9 +2253,9 @@ eqjoinsel_inner(Oid operator,
if (have_mcvs1 && have_mcvs2)
{
/*
- * We have most-common-value lists for both relations. Run through
+ * We have most-common-value lists for both relations. Run through
* the lists to see which MCVs actually join to each other with the
- * given operator. This allows us to determine the exact join
+ * given operator. This allows us to determine the exact join
* selectivity for the portion of the relations represented by the MCV
* lists. We still have to estimate for the remaining population, but
* in a skewed distribution this gives us a big leg up in accuracy.
@@ -2287,7 +2287,7 @@ eqjoinsel_inner(Oid operator,
/*
* Note we assume that each MCV will match at most one member of the
- * other MCV list. If the operator isn't really equality, there could
+ * other MCV list. If the operator isn't really equality, there could
* be multiple matches --- but we don't look for them, both for speed
* and because the math wouldn't add up...
*/
@@ -2452,7 +2452,7 @@ eqjoinsel_semi(Oid operator,
/*
* We clamp nd2 to be not more than what we estimate the inner relation's
- * size to be. This is intuitively somewhat reasonable since obviously
+ * size to be. This is intuitively somewhat reasonable since obviously
* there can't be more than that many distinct values coming from the
* inner rel. The reason for the asymmetry (ie, that we don't clamp nd1
* likewise) is that this is the only pathway by which restriction clauses
@@ -2497,9 +2497,9 @@ eqjoinsel_semi(Oid operator,
if (have_mcvs1 && have_mcvs2 && OidIsValid(operator))
{
/*
- * We have most-common-value lists for both relations. Run through
+ * We have most-common-value lists for both relations. Run through
* the lists to see which MCVs actually join to each other with the
- * given operator. This allows us to determine the exact join
+ * given operator. This allows us to determine the exact join
* selectivity for the portion of the relations represented by the MCV
* lists. We still have to estimate for the remaining population, but
* in a skewed distribution this gives us a big leg up in accuracy.
@@ -2530,7 +2530,7 @@ eqjoinsel_semi(Oid operator,
/*
* Note we assume that each MCV will match at most one member of the
- * other MCV list. If the operator isn't really equality, there could
+ * other MCV list. If the operator isn't really equality, there could
* be multiple matches --- but we don't look for them, both for speed
* and because the math wouldn't add up...
*/
@@ -2567,7 +2567,7 @@ eqjoinsel_semi(Oid operator,
/*
* Now we need to estimate the fraction of relation 1 that has at
- * least one join partner. We know for certain that the matched MCVs
+ * least one join partner. We know for certain that the matched MCVs
* do, so that gives us a lower bound, but we're really in the dark
* about everything else. Our crude approach is: if nd1 <= nd2 then
* assume all non-null rel1 rows have join partners, else assume for
@@ -3165,11 +3165,11 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* case (all possible cross-product terms actually appear as groups) since
* very often the grouped-by Vars are highly correlated. Our current approach
* is as follows:
- * 1. Expressions yielding boolean are assumed to contribute two groups,
+ * 1. Expressions yielding boolean are assumed to contribute two groups,
* independently of their content, and are ignored in the subsequent
- * steps. This is mainly because tests like "col IS NULL" break the
+ * steps. This is mainly because tests like "col IS NULL" break the
* heuristic used in step 2 especially badly.
- * 2. Reduce the given expressions to a list of unique Vars used. For
+ * 2. Reduce the given expressions to a list of unique Vars used. For
* example, GROUP BY a, a + b is treated the same as GROUP BY a, b.
* It is clearly correct not to count the same Var more than once.
* It is also reasonable to treat f(x) the same as x: f() cannot
@@ -3179,14 +3179,14 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* As a special case, if a GROUP BY expression can be matched to an
* expressional index for which we have statistics, then we treat the
* whole expression as though it were just a Var.
- * 3. If the list contains Vars of different relations that are known equal
+ * 3. If the list contains Vars of different relations that are known equal
* due to equivalence classes, then drop all but one of the Vars from each
* known-equal set, keeping the one with smallest estimated # of values
* (since the extra values of the others can't appear in joined rows).
* Note the reason we only consider Vars of different relations is that
* if we considered ones of the same rel, we'd be double-counting the
* restriction selectivity of the equality in the next step.
- * 4. For Vars within a single source rel, we multiply together the numbers
+ * 4. For Vars within a single source rel, we multiply together the numbers
* of values, clamp to the number of rows in the rel (divided by 10 if
* more than one Var), and then multiply by the selectivity of the
* restriction clauses for that rel. When there's more than one Var,
@@ -3197,7 +3197,7 @@ add_unique_group_var(PlannerInfo *root, List *varinfos,
* by the restriction selectivity is effectively assuming that the
* restriction clauses are independent of the grouping, which is a crummy
* assumption, but it's hard to do better.
- * 5. If there are Vars from multiple rels, we repeat step 4 for each such
+ * 5. If there are Vars from multiple rels, we repeat step 4 for each such
* rel, and multiply the results together.
* Note that rels not containing grouped Vars are ignored completely, as are
* join clauses. Such rels cannot increase the number of groups, and we
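[Editor's note: step 4 of the recipe above is the arithmetic-heavy part; here is a minimal sketch of it for a single source relation. Parameter names are invented and the clamping follows the description in the comment; this is not the actual estimate_num_groups() code.]

/* Multiply per-Var ndistinct estimates, clamp to the relation size (divided
 * by 10 when more than one Var is involved), then scale by the fraction of
 * rows that survive the relation's restriction clauses. */
static double
groups_for_one_rel(const double *ndistinct, int nvars,
                   double rel_tuples, double rel_rows_after_restriction)
{
    double      reldistinct = 1.0;
    double      clamp = rel_tuples;

    for (int i = 0; i < nvars; i++)
        reldistinct *= ndistinct[i];

    if (nvars > 1)
        clamp /= 10.0;
    if (reldistinct > clamp)
        reldistinct = clamp;

    if (rel_tuples > 0.0)
        reldistinct *= rel_rows_after_restriction / rel_tuples;

    return reldistinct;
}

Per step 5, the per-relation results are then multiplied together to give the overall group-count estimate.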
@@ -3228,7 +3228,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
return 1.0;
/*
- * Count groups derived from boolean grouping expressions. For other
+ * Count groups derived from boolean grouping expressions. For other
* expressions, find the unique Vars used, treating an expression as a Var
* if we can find stats for it. For each one, record the statistical
* estimate of number of distinct values (total in its table, without
@@ -3317,7 +3317,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
* Group Vars by relation and estimate total numdistinct.
*
* For each iteration of the outer loop, we process the frontmost Var in
- * varinfos, plus all other Vars in the same relation. We remove these
+ * varinfos, plus all other Vars in the same relation. We remove these
* Vars from the newvarinfos list for the next iteration. This is the
* easiest way to group Vars of same rel together.
*/
@@ -3418,11 +3418,11 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
* distribution, so this will have to do for now.
*
* We are passed the number of buckets the executor will use for the given
- * input relation. If the data were perfectly distributed, with the same
+ * input relation. If the data were perfectly distributed, with the same
* number of tuples going into each available bucket, then the bucketsize
* fraction would be 1/nbuckets. But this happy state of affairs will occur
* only if (a) there are at least nbuckets distinct data values, and (b)
- * we have a not-too-skewed data distribution. Otherwise the buckets will
+ * we have a not-too-skewed data distribution. Otherwise the buckets will
* be nonuniformly occupied. If the other relation in the join has a key
* distribution similar to this one's, then the most-loaded buckets are
* exactly those that will be probed most often. Therefore, the "average"
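[Editor's note: a back-of-envelope version of the baseline estimate sketched above, ignoring the skew correction; names are invented and this is an illustration only.]

/* The average probed bucket cannot hold less than 1/ndistinct of the inner
 * rows (each value lands in exactly one bucket), nor less than 1/nbuckets. */
static double
rough_bucketsize_fraction(double ndistinct, double nbuckets)
{
    if (ndistinct < 1.0)
        ndistinct = 1.0;            /* defensive floor for the toy version */
    if (ndistinct > nbuckets)
        return 1.0 / nbuckets;      /* enough distinct values for every bucket */
    return 1.0 / ndistinct;         /* each value monopolizes a bucket */
}

The real estimator additionally inflates this fraction when the distribution is skewed, roughly in proportion to how much the most common value's frequency exceeds the average frequency.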
@@ -3595,7 +3595,7 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
* operators to estimate selectivity for the other's. This is outright
* wrong in some cases --- in particular signed versus unsigned
* interpretation could trip us up. But it's useful enough in the
- * majority of cases that we do it anyway. Should think about more
+ * majority of cases that we do it anyway. Should think about more
* rigorous ways to do it.
*/
switch (valuetypid)
@@ -4179,7 +4179,7 @@ get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
right = (Node *) lsecond(args);
/*
- * Examine both sides. Note that when varRelid is nonzero, Vars of other
+ * Examine both sides. Note that when varRelid is nonzero, Vars of other
* relations will be treated as pseudoconstants.
*/
examine_variable(root, left, varRelid, vardata);
@@ -4324,7 +4324,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
/*
* Okay, it's a more complicated expression. Determine variable
- * membership. Note that when varRelid isn't zero, only vars of that
+ * membership. Note that when varRelid isn't zero, only vars of that
* relation are considered "real" vars.
*/
varnos = pull_varnos(basenode);
@@ -4373,13 +4373,13 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
if (onerel)
{
/*
- * We have an expression in vars of a single relation. Try to match
+ * We have an expression in vars of a single relation. Try to match
* it to expressional index columns, in hopes of finding some
* statistics.
*
* XXX it's conceivable that there are multiple matches with different
* index opfamilies; if so, we need to pick one that matches the
- * operator we are estimating for. FIXME later.
+ * operator we are estimating for. FIXME later.
*/
ListCell *ilist;
@@ -4581,7 +4581,7 @@ examine_simple_variable(PlannerInfo *root, Var *var,
*
* This is probably a harsher restriction than necessary; it's
* certainly OK for the selectivity estimator (which is a C function,
- * and therefore omnipotent anyway) to look at the statistics. But
+ * and therefore omnipotent anyway) to look at the statistics. But
* many selectivity estimators will happily *invoke the operator
* function* to try to work out a good estimate - and that's not OK.
* So for now, don't dig down for stats.
@@ -4634,7 +4634,7 @@ get_variable_numdistinct(VariableStatData *vardata, bool *isdefault)
*isdefault = false;
/*
- * Determine the stadistinct value to use. There are cases where we can
+ * Determine the stadistinct value to use. There are cases where we can
* get an estimate even without a pg_statistic entry, or can get a better
* value than is in pg_statistic.
*/
@@ -4758,7 +4758,7 @@ get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop,
/*
* XXX It's very tempting to try to use the actual column min and max, if
- * we can get them relatively-cheaply with an index probe. However, since
+ * we can get them relatively-cheaply with an index probe. However, since
* this function is called many times during join planning, that could
* have unpleasant effects on planning speed. Need more investigation
* before enabling this.
@@ -5009,7 +5009,7 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata,
* and it can be very expensive if a lot of uncommitted rows
* exist at the end of the index (because we'll laboriously
* fetch each one and reject it). What seems like a good
- * compromise is to use SnapshotDirty. That will accept
+ * compromise is to use SnapshotDirty. That will accept
* uncommitted rows, and thus avoid fetching multiple heap
* tuples in this scenario. On the other hand, it will reject
* known-dead rows, and thus not give a bogus answer when the
@@ -5148,7 +5148,7 @@ find_join_input_rel(PlannerInfo *root, Relids relids)
* Check whether char is a letter (and, hence, subject to case-folding)
*
* In multibyte character sets, we can't use isalpha, and it does not seem
- * worth trying to convert to wchar_t to use iswalpha. Instead, just assume
+ * worth trying to convert to wchar_t to use iswalpha. Instead, just assume
* any multibyte char is potentially case-varying.
*/
static int
@@ -5400,7 +5400,7 @@ pattern_fixed_prefix(Const *patt, Pattern_Type ptype, Oid collation,
* together with info about MCVs and NULLs.
*
* We use the >= and < operators from the specified btree opfamily to do the
- * estimation. The given variable and Const must be of the associated
+ * estimation. The given variable and Const must be of the associated
* datatype.
*
* XXX Note: we make use of the upper bound to estimate operator selectivity
@@ -5459,7 +5459,7 @@ prefix_selectivity(PlannerInfo *root, VariableStatData *vardata,
/*
* Merge the two selectivities in the same way as for a range query
- * (see clauselist_selectivity()). Note that we don't need to worry
+ * (see clauselist_selectivity()). Note that we don't need to worry
* about double-exclusion of nulls, since ineq_histogram_selectivity
* doesn't count those anyway.
*/
@@ -5696,7 +5696,7 @@ byte_increment(unsigned char *ptr, int len)
* that is not a bulletproof guarantee that an extension of the string might
* not sort after it; an example is that "foo " is less than "foo!", but it
* is not clear that a "dictionary" sort ordering will consider "foo!" less
- * than "foo bar". CAUTION: Therefore, this function should be used only for
+ * than "foo bar". CAUTION: Therefore, this function should be used only for
* estimation purposes when working in a non-C collation.
*
* To try to catch most cases where an extended string might otherwise sort
@@ -5953,7 +5953,7 @@ string_to_bytea_const(const char *str, size_t str_len)
* genericcostestimate is a general-purpose estimator that can be used for
* most index types. In some cases we use genericcostestimate as the base
* code and then incorporate additional index-type-specific knowledge in
- * the type-specific calling function. To avoid code duplication, we make
+ * the type-specific calling function. To avoid code duplication, we make
* genericcostestimate return a number of intermediate values as well as
* its preliminary estimates of the output cost values. The GenericCosts
* struct includes all these values.
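[Editor's note: for orientation, an abridged, from-memory sketch of the struct this comment refers to; the authoritative definition lives in the selfuncs header. Cost and Selectivity are the backend's double typedefs, repeated here only so the snippet stands alone.]

typedef double Cost;
typedef double Selectivity;

typedef struct GenericCosts
{
    /* preliminary estimates of the output cost values */
    Cost        indexStartupCost;
    Cost        indexTotalCost;
    Selectivity indexSelectivity;
    double      indexCorrelation;

    /* intermediate values that type-specific estimators may adjust */
    double      numIndexPages;      /* index pages fetched per scan */
    double      numIndexTuples;     /* index tuples visited per scan */
    double      num_sa_scans;       /* scans implied by ScalarArrayOpExpr quals */
} GenericCosts;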
@@ -6073,7 +6073,7 @@ genericcostestimate(PlannerInfo *root,
*
* In practice access to upper index levels is often nearly free because
* those tend to stay in cache under load; moreover, the cost involved is
- * highly dependent on index type. We therefore ignore such costs here
+ * highly dependent on index type. We therefore ignore such costs here
* and leave it to the caller to add a suitable charge if needed.
*/
if (index->pages > 1 && index->tuples > 1)
@@ -6092,9 +6092,9 @@ genericcostestimate(PlannerInfo *root,
* The above calculations are all per-index-scan. However, if we are in a
* nestloop inner scan, we can expect the scan to be repeated (with
* different search keys) for each row of the outer relation. Likewise,
- * ScalarArrayOpExpr quals result in multiple index scans. This creates
+ * ScalarArrayOpExpr quals result in multiple index scans. This creates
* the potential for cache effects to reduce the number of disk page
- * fetches needed. We want to estimate the average per-scan I/O cost in
+ * fetches needed. We want to estimate the average per-scan I/O cost in
* the presence of caching.
*
* We use the Mackert-Lohman formula (see costsize.c for details) to
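[Editor's note: the Mackert-Lohman estimate referenced here can be written down compactly. The sketch below follows the formula as documented in costsize.c, with T = table pages, N = table tuples, s = selectivity and b = effective cache size in pages; it is an illustration, not the actual index_pages_fetched() code.]

/* Expected number of distinct page fetches when retrieving N*s tuples at
 * random from a T-page table through a b-page buffer cache. */
static double
ml_pages_fetched(double T, double N, double s, double b)
{
    double      Ns = N * s;         /* tuples fetched */
    double      pages;

    if (T <= b)
    {
        pages = (2.0 * T * Ns) / (2.0 * T + Ns);
        if (pages > T)
            pages = T;              /* can never fetch more pages than exist */
    }
    else
    {
        double      lim = (2.0 * T * b) / (2.0 * T - b);

        if (Ns <= lim)
            pages = (2.0 * T * Ns) / (2.0 * T + Ns);
        else
            pages = b + (Ns - lim) * (T - b) / T;
    }
    return pages;
}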
@@ -6141,7 +6141,7 @@ genericcostestimate(PlannerInfo *root,
* evaluated once at the start of the scan to reduce them to runtime keys
* to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
* CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
- * indexqual operator. Because we have numIndexTuples as a per-scan
+ * indexqual operator. Because we have numIndexTuples as a per-scan
* number, we have to multiply by num_sa_scans to get the correct result
* for ScalarArrayOpExpr cases. Similarly add in costs for any index
* ORDER BY expressions.
@@ -6188,16 +6188,16 @@ genericcostestimate(PlannerInfo *root,
* ANDing the index predicate with the explicitly given indexquals produces
* a more accurate idea of the index's selectivity. However, we need to be
* careful not to insert redundant clauses, because clauselist_selectivity()
- * is easily fooled into computing a too-low selectivity estimate. Our
+ * is easily fooled into computing a too-low selectivity estimate. Our
* approach is to add only the predicate clause(s) that cannot be proven to
- * be implied by the given indexquals. This successfully handles cases such
+ * be implied by the given indexquals. This successfully handles cases such
* as a qual "x = 42" used with a partial index "WHERE x >= 40 AND x < 50".
* There are many other cases where we won't detect redundancy, leading to a
* too-low selectivity estimate, which will bias the system in favor of using
- * partial indexes where possible. That is not necessarily bad though.
+ * partial indexes where possible. That is not necessarily bad though.
*
* Note that indexQuals contains RestrictInfo nodes while the indpred
- * does not, so the output list will be mixed. This is OK for both
+ * does not, so the output list will be mixed. This is OK for both
* predicate_implied_by() and clauselist_selectivity(), but might be
* problematic if the result were passed to other things.
*/
@@ -6256,7 +6256,7 @@ btcostestimate(PG_FUNCTION_ARGS)
* the index scan). Additional quals can suppress visits to the heap, so
* it's OK to count them in indexSelectivity, but they should not count
* for estimating numIndexTuples. So we must examine the given indexquals
- * to find out which ones count as boundary quals. We rely on the
+ * to find out which ones count as boundary quals. We rely on the
* knowledge that they are given in index column order.
*
* For a RowCompareExpr, we consider only the first column, just as
@@ -6595,7 +6595,7 @@ hashcostestimate(PG_FUNCTION_ARGS)
* because the hash AM makes sure that's always one page.
*
* Likewise, we could consider charging some CPU for each index tuple in
- * the bucket, if we knew how many there were. But the per-tuple cost is
+ * the bucket, if we knew how many there were. But the per-tuple cost is
* just a hash value comparison, not a general datatype-dependent
* comparison, so any such charge ought to be quite a bit less than
* cpu_operator_cost; which makes it probably not worth worrying about.
@@ -6653,7 +6653,7 @@ gistcostestimate(PG_FUNCTION_ARGS)
/*
* Add a CPU-cost component to represent the costs of initial descent. We
* just use log(N) here not log2(N) since the branching factor isn't
- * necessarily two anyway. As for btree, charge once per SA scan.
+ * necessarily two anyway. As for btree, charge once per SA scan.
*/
if (index->tuples > 1) /* avoid computing log(0) */
{
@@ -6715,7 +6715,7 @@ spgcostestimate(PG_FUNCTION_ARGS)
/*
* Add a CPU-cost component to represent the costs of initial descent. We
* just use log(N) here not log2(N) since the branching factor isn't
- * necessarily two anyway. As for btree, charge once per SA scan.
+ * necessarily two anyway. As for btree, charge once per SA scan.
*/
if (index->tuples > 1) /* avoid computing log(0) */
{
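[Editor's note: the gist and spgist hunks above describe the same descent-cost charge. A hedged sketch with invented parameter names follows; the real estimators fold the per-scan charge into the startup and total costs separately.]

#include <math.h>

/* One cpu_operator_cost per descent level, log(N) levels, charged once per
 * ScalarArrayOpExpr-driven scan. */
static double
descent_cpu_cost(double index_tuples, double cpu_operator_cost,
                 double num_sa_scans)
{
    double      per_scan;

    if (index_tuples <= 1.0)
        return 0.0;                 /* avoid log(0), as the real code does */
    per_scan = ceil(log(index_tuples)) * cpu_operator_cost;
    return per_scan * num_sa_scans;
}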
@@ -6792,7 +6792,7 @@ gincost_pattern(IndexOptInfo *index, int indexcol,
/*
* Get the operator's strategy number and declared input data types within
- * the index opfamily. (We don't need the latter, but we use
+ * the index opfamily. (We don't need the latter, but we use
* get_op_opfamily_properties because it will throw error if it fails to
* find a matching pg_amop entry.)
*/
@@ -6938,7 +6938,7 @@ gincost_opexpr(PlannerInfo *root, IndexOptInfo *index, OpExpr *clause,
* each of which involves one value from the RHS array, plus all the
* non-array quals (if any). To model this, we average the counts across
* the RHS elements, and add the averages to the counts in *counts (which
- * correspond to per-indexscan costs). We also multiply counts->arrayScans
+ * correspond to per-indexscan costs). We also multiply counts->arrayScans
* by N, causing gincostestimate to scale up its estimates accordingly.
*/
static bool
@@ -7108,7 +7108,7 @@ gincostestimate(PG_FUNCTION_ARGS)
/*
* nPendingPages can be trusted, but the other fields are as of the last
- * VACUUM. Scale them by the ratio numPages / nTotalPages to account for
+ * VACUUM. Scale them by the ratio numPages / nTotalPages to account for
* growth since then. If the fields are zero (implying no VACUUM at all,
* and an index created pre-9.1), assume all pages are entry pages.
*/
@@ -7253,7 +7253,7 @@ gincostestimate(PG_FUNCTION_ARGS)
/*
* Add an estimate of entry pages read by partial match algorithm. It's a
- * scan over leaf pages in entry tree. We haven't any useful stats here,
+ * scan over leaf pages in entry tree. We haven't any useful stats here,
* so estimate it as proportion.
*/
entryPagesFetched += ceil(numEntryPages * counts.partialEntries / numEntries);
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 94b2a3608a6..1dc4e4d7f46 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -385,7 +385,7 @@ AdjustTimestampForTypmod(Timestamp *time, int32 typmod)
* Note: this round-to-nearest code is not completely consistent about
* rounding values that are exactly halfway between integral values.
* On most platforms, rint() will implement round-to-nearest-even, but
- * the integer code always rounds up (away from zero). Is it worth
+ * the integer code always rounds up (away from zero). Is it worth
* trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
@@ -757,7 +757,7 @@ interval_send(PG_FUNCTION_ARGS)
/*
* The interval typmod stores a "range" in its high 16 bits and a "precision"
- * in its low 16 bits. Both contribute to defining the resolution of the
+ * in its low 16 bits. Both contribute to defining the resolution of the
* type. Range addresses resolution granules larger than one second, and
* precision specifies resolution below one second. This representation can
* express all SQL standard resolutions, but we implement them all in terms of
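[Editor's note: a toy illustration of the packing described in this hunk. Function names are invented; the backend's real macros for composing and decomposing interval typmods live in the timestamp headers.]

#include <stdint.h>

/* Range flags occupy the high 16 bits of the typmod, sub-second precision
 * the low 16 bits. */
static int32_t
pack_interval_typmod(uint16_t range_bits, uint16_t precision)
{
    return (int32_t) (((uint32_t) range_bits << 16) | precision);
}

static uint16_t
typmod_range(int32_t typmod)
{
    return (uint16_t) ((uint32_t) typmod >> 16);
}

static uint16_t
typmod_precision(int32_t typmod)
{
    return (uint16_t) (typmod & 0xFFFF);
}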
@@ -965,7 +965,7 @@ interval_transform(PG_FUNCTION_ARGS)
/*
* Temporally-smaller fields occupy higher positions in the range
- * bitmap. Since only the temporally-smallest bit matters for length
+ * bitmap. Since only the temporally-smallest bit matters for length
* coercion purposes, we compare the last-set bits in the ranges.
* Precision, which is to say, sub-second precision, only affects
* ranges that include SECOND.
@@ -1054,7 +1054,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
* that fields to the right of the last one specified are zeroed out,
* but those to the left of it remain valid. Thus for example there
* is no operational difference between INTERVAL YEAR TO MONTH and
- * INTERVAL MONTH. In some cases we could meaningfully enforce that
+ * INTERVAL MONTH. In some cases we could meaningfully enforce that
* higher-order fields are zero; for example INTERVAL DAY could reject
* nonzero "month" field. However that seems a bit pointless when we
* can't do it consistently. (We cannot enforce a range limit on the
@@ -1064,9 +1064,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
*
* Note: before PG 8.4 we interpreted a limited set of fields as
* actually causing a "modulo" operation on a given value, potentially
- * losing high-order as well as low-order information. But there is
+ * losing high-order as well as low-order information. But there is
* no support for such behavior in the standard, and it seems fairly
- * undesirable on data consistency grounds anyway. Now we only
+ * undesirable on data consistency grounds anyway. Now we only
* perform truncation or rounding of low-order fields.
*/
if (range == INTERVAL_FULL_RANGE)
@@ -1186,7 +1186,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
/*
* Note: this round-to-nearest code is not completely consistent
* about rounding values that are exactly halfway between integral
- * values. On most platforms, rint() will implement
+ * values. On most platforms, rint() will implement
* round-to-nearest-even, but the integer code always rounds up
* (away from zero). Is it worth trying to be consistent?
*/
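[Editor's note: the inconsistency this note worries about is easy to demonstrate on a platform whose rint() rounds halfway cases to even, which is the common default. The program below is a standalone illustration, not backend code; compile with -lm.]

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double      v = 2.5;

    /* rint() under round-to-nearest-even: 2 */
    printf("rint(%.1f) = %.0f\n", v, rint(v));
    /* the "add a half and truncate" integer idiom: 3 (away from zero) */
    printf("trunc(%.1f + 0.5) = %lld\n", v, (long long) (v + 0.5));
    return 0;
}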
@@ -1440,7 +1440,7 @@ timestamptz_to_time_t(TimestampTz t)
* Produce a C-string representation of a TimestampTz.
*
* This is mostly for use in emitting messages. The primary difference
- * from timestamptz_out is that we force the output format to ISO. Note
+ * from timestamptz_out is that we force the output format to ISO. Note
* also that the result is in a static buffer, not pstrdup'd.
*/
const char *
@@ -1610,7 +1610,7 @@ recalc_t:
*
* First, convert to an integral timestamp, avoiding possibly
* platform-specific roundoff-in-wrong-direction errors, and adjust to
- * Unix epoch. Then see if we can convert to pg_time_t without loss. This
+ * Unix epoch. Then see if we can convert to pg_time_t without loss. This
* coding avoids hardwiring any assumptions about the width of pg_time_t,
* so it should behave sanely on machines without int64.
*/
@@ -4540,7 +4540,7 @@ timestamp_zone(PG_FUNCTION_ARGS)
PG_RETURN_TIMESTAMPTZ(timestamp);
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
@@ -4713,7 +4713,7 @@ timestamptz_zone(PG_FUNCTION_ARGS)
PG_RETURN_TIMESTAMP(timestamp);
/*
- * Look up the requested timezone. First we look in the date token table
+ * Look up the requested timezone. First we look in the date token table
* (to handle cases like "EST"), and if that fails, we look in the
* timezone database (to handle cases like "America/New_York"). (This
* matches the order in which timestamp input checks the cases; it's
diff --git a/src/backend/utils/adt/tsginidx.c b/src/backend/utils/adt/tsginidx.c
index cf118334d76..7d3845a313b 100644
--- a/src/backend/utils/adt/tsginidx.c
+++ b/src/backend/utils/adt/tsginidx.c
@@ -237,7 +237,7 @@ gin_tsquery_consistent(PG_FUNCTION_ARGS)
* Formerly, gin_extract_tsvector had only two arguments. Now it has three,
* but we still need a pg_proc entry with two args to support reloading
* pre-9.1 contrib/tsearch2 opclass declarations. This compatibility
- * function should go away eventually. (Note: you might say "hey, but the
+ * function should go away eventually. (Note: you might say "hey, but the
* code above is only *using* two args, so let's just declare it that way".
* If you try that you'll find the opr_sanity regression test complains.)
*/
diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c
index 1a615934cd7..531fbbb7aa1 100644
--- a/src/backend/utils/adt/varchar.c
+++ b/src/backend/utils/adt/varchar.c
@@ -257,7 +257,7 @@ bpcharsend(PG_FUNCTION_ARGS)
*
* Truncation rules: for an explicit cast, silently truncate to the given
* length; for an implicit cast, raise error unless extra characters are
- * all spaces. (This is sort-of per SQL: the spec would actually have us
+ * all spaces. (This is sort-of per SQL: the spec would actually have us
* raise a "completion condition" for the explicit cast case, but Postgres
* hasn't got such a concept.)
*/
@@ -584,7 +584,7 @@ varchar_transform(PG_FUNCTION_ARGS)
*
* Truncation rules: for an explicit cast, silently truncate to the given
* length; for an implicit cast, raise error unless extra characters are
- * all spaces. (This is sort-of per SQL: the spec would actually have us
+ * all spaces. (This is sort-of per SQL: the spec would actually have us
* raise a "completion condition" for the explicit cast case, but Postgres
* hasn't got such a concept.)
*/
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 6fc26cee6ed..f990877ffb7 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -591,7 +591,7 @@ textlen(PG_FUNCTION_ARGS)
* Does the real work for textlen()
*
* This is broken out so it can be called directly by other string processing
- * functions. Note that the argument is passed as a Datum, to indicate that
+ * functions. Note that the argument is passed as a Datum, to indicate that
* it may still be in compressed form. We can avoid decompressing it at all
* in some cases.
*/
@@ -763,7 +763,7 @@ text_substr_no_len(PG_FUNCTION_ARGS)
* Does the real work for text_substr() and text_substr_no_len()
*
* This is broken out so it can be called directly by other string processing
- * functions. Note that the argument is passed as a Datum, to indicate that
+ * functions. Note that the argument is passed as a Datum, to indicate that
* it may still be in compressed/toasted form. We can avoid detoasting all
* of it in some cases.
*
@@ -1113,7 +1113,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
* searched (t1) and the "needle" is the pattern being sought (t2).
*
* If the needle is empty or bigger than the haystack then there is no
- * point in wasting cycles initializing the table. We also choose not to
+ * point in wasting cycles initializing the table. We also choose not to
* use B-M-H for needles of length 1, since the skip table can't possibly
* save anything in that case.
*/
@@ -1129,7 +1129,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
* declaration of TextPositionState allows up to 256 elements, but for
* short search problems we don't really want to have to initialize so
* many elements --- it would take too long in comparison to the
- * actual search time. So we choose a useful skip table size based on
+ * actual search time. So we choose a useful skip table size based on
* the haystack length minus the needle length. The closer the needle
* length is to the haystack length the less useful skipping becomes.
*
@@ -1161,7 +1161,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state)
state->skiptable[i] = len2;
/*
- * Now examine the needle. For each character except the last one,
+ * Now examine the needle. For each character except the last one,
* set the corresponding table element to the appropriate skip
* distance. Note that when two characters share the same skip table
* entry, the one later in the needle must determine the skip
@@ -1249,11 +1249,11 @@ text_position_next(int start_pos, TextPositionState *state)
/*
* No match, so use the haystack char at hptr to decide how
- * far to advance. If the needle had any occurrence of that
+ * far to advance. If the needle had any occurrence of that
* character (or more precisely, one sharing the same
* skiptable entry) before its last character, then we advance
* far enough to align the last such needle character with
- * that haystack position. Otherwise we can advance by the
+ * that haystack position. Otherwise we can advance by the
* whole needle length.
*/
hptr += state->skiptable[(unsigned char) *hptr & skiptablemask];
@@ -1305,11 +1305,11 @@ text_position_next(int start_pos, TextPositionState *state)
/*
* No match, so use the haystack char at hptr to decide how
- * far to advance. If the needle had any occurrence of that
+ * far to advance. If the needle had any occurrence of that
* character (or more precisely, one sharing the same
* skiptable entry) before its last character, then we advance
* far enough to align the last such needle character with
- * that haystack position. Otherwise we can advance by the
+ * that haystack position. Otherwise we can advance by the
* whole needle length.
*/
hptr += state->skiptable[*hptr & skiptablemask];
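[Editor's note: both occurrences of this advance rule belong to a Boyer-Moore-Horspool search. Here is a compact, self-contained sketch of the same idea over plain bytes; unlike the real text_position machinery it does no skip-table downsizing and no multibyte handling.]

#include <stddef.h>
#include <string.h>

/* Return a pointer to the first occurrence of needle in hay, or NULL. */
static const char *
bmh_search(const char *hay, size_t haylen, const char *needle, size_t nlen)
{
    size_t      skip[256];
    size_t      i;

    if (nlen == 0 || nlen > haylen)
        return NULL;

    /* by default, a mismatching last character lets us skip a whole needle */
    for (i = 0; i < 256; i++)
        skip[i] = nlen;
    /* characters occurring before the last needle position allow a shorter
     * skip: just enough to line them up with that haystack position */
    for (i = 0; i + 1 < nlen; i++)
        skip[(unsigned char) needle[i]] = nlen - 1 - i;

    for (i = 0; i + nlen <= haylen; i += skip[(unsigned char) hay[i + nlen - 1]])
    {
        if (memcmp(hay + i, needle, nlen) == 0)
            return hay + i;
    }
    return NULL;
}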
@@ -1344,7 +1344,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid)
/*
* Unfortunately, there is no strncoll(), so in the non-C locale case we
- * have to do some memory copying. This turns out to be significantly
+ * have to do some memory copying. This turns out to be significantly
* slower, so we optimize the case where LC_COLLATE is C. We also try to
* optimize relatively-short strings by avoiding palloc/pfree overhead.
*/
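[Editor's note: a toy rendition of the trade-off this hunk describes, using a plain bool fast-path flag instead of a collation lookup, malloc instead of palloc, no stack-buffer optimization and no error handling. It is not the real varstr_cmp.]

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static int
toy_varstr_cmp(const char *s1, int len1, const char *s2, int len2,
               bool c_collation)
{
    int         result;

    if (c_collation)
    {
        /* C locale: raw byte comparison, no copying needed */
        result = memcmp(s1, s2, (size_t) (len1 < len2 ? len1 : len2));
        if (result == 0)
            result = len1 - len2;
    }
    else
    {
        /* strcoll() wants NUL-terminated strings, hence the copies */
        char       *a = malloc((size_t) len1 + 1);
        char       *b = malloc((size_t) len2 + 1);

        memcpy(a, s1, (size_t) len1);
        a[len1] = '\0';
        memcpy(b, s2, (size_t) len2);
        b[len2] = '\0';
        result = strcoll(a, b);
        free(a);
        free(b);
    }
    return result;
}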
@@ -2334,7 +2334,7 @@ textToQualifiedNameList(text *textval)
* SplitIdentifierString --- parse a string containing identifiers
*
* This is the guts of textToQualifiedNameList, and is exported for use in
- * other situations such as parsing GUC variables. In the GUC case, it's
+ * other situations such as parsing GUC variables. In the GUC case, it's
* important to avoid memory leaks, so the API is designed to minimize the
* amount of stuff that needs to be allocated and freed.
*
@@ -2342,7 +2342,7 @@ textToQualifiedNameList(text *textval)
* rawstring: the input string; must be overwritable! On return, it's
* been modified to contain the separated identifiers.
* separator: the separator punctuation expected between identifiers
- * (typically '.' or ','). Whitespace may also appear around
+ * (typically '.' or ','). Whitespace may also appear around
* identifiers.
* Outputs:
* namelist: filled with a palloc'd list of pointers to identifiers within
@@ -2411,7 +2411,7 @@ SplitIdentifierString(char *rawstring, char separator,
*
* XXX because we want to overwrite the input in-place, we cannot
* support a downcasing transformation that increases the string
- * length. This is not a problem given the current implementation
+ * length. This is not a problem given the current implementation
* of downcase_truncate_identifier, but we'll probably have to do
* something about this someday.
*/
@@ -2468,7 +2468,7 @@ SplitIdentifierString(char *rawstring, char separator,
* Inputs:
* rawstring: the input string; must be modifiable!
* separator: the separator punctuation expected between directories
- * (typically ',' or ';'). Whitespace may also appear around
+ * (typically ',' or ';'). Whitespace may also appear around
* directories.
* Outputs:
* namelist: filled with a palloc'd list of directory names.
@@ -2875,7 +2875,7 @@ check_replace_text_has_escape_char(const text *replace_text)
* appendStringInfoRegexpSubstr
*
* Append replace_text to str, substituting regexp back references for
- * \n escapes. start_ptr is the start of the match in the source string,
+ * \n escapes. start_ptr is the start of the match in the source string,
* at logical character position data_pos.
*/
static void
@@ -2958,7 +2958,7 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
if (so != -1 && eo != -1)
{
/*
- * Copy the text that is back reference of regexp. Note so and eo
+ * Copy the text that is back reference of regexp. Note so and eo
* are counted in characters not bytes.
*/
char *chunk_start;
@@ -4249,7 +4249,7 @@ text_format(PG_FUNCTION_ARGS)
/*
* Get the appropriate typOutput function, reusing previous one if
- * same type as previous argument. That's particularly useful in the
+ * same type as previous argument. That's particularly useful in the
* variadic-array case, but often saves work even for ordinary calls.
*/
if (typid != prev_type)
@@ -4341,12 +4341,12 @@ text_format_parse_digits(const char **ptr, const char *end_ptr, int *value)
*
* Inputs are start_ptr (the position after '%') and end_ptr (string end + 1).
* Output parameters:
- * argpos: argument position for value to be printed. -1 means unspecified.
- * widthpos: argument position for width. Zero means the argument position
+ * argpos: argument position for value to be printed. -1 means unspecified.
+ * widthpos: argument position for width. Zero means the argument position
* was unspecified (ie, take the next arg) and -1 means no width
* argument (width was omitted or specified as a constant).
* flags: bitmask of flags.
- * width: directly-specified width value. Zero means the width was omitted
+ * width: directly-specified width value. Zero means the width was omitted
* (note it's not necessary to distinguish this case from an explicit
* zero width value).
*
diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c
index 25ab79b1979..335e4d38ca3 100644
--- a/src/backend/utils/adt/xml.c
+++ b/src/backend/utils/adt/xml.c
@@ -19,7 +19,7 @@
* fail. For one thing, this avoids having to manage variant catalog
* installations. But it also has nice effects such as that you can
* dump a database containing XML type data even if the server is not
- * linked with libxml. Thus, make sure xml_out() works even if nothing
+ * linked with libxml. Thus, make sure xml_out() works even if nothing
* else does.
*/
@@ -286,7 +286,7 @@ xml_out(PG_FUNCTION_ARGS)
xmltype *x = PG_GETARG_XML_P(0);
/*
- * xml_out removes the encoding property in all cases. This is because we
+ * xml_out removes the encoding property in all cases. This is because we
* cannot control from here whether the datum will be converted to a
* different client encoding, so we'd do more harm than good by including
* it.
@@ -457,7 +457,7 @@ xmlcomment(PG_FUNCTION_ARGS)
/*
* TODO: xmlconcat needs to merge the notations and unparsed entities
- * of the argument values. Not very important in practice, though.
+ * of the argument values. Not very important in practice, though.
*/
xmltype *
xmlconcat(List *args)
@@ -592,7 +592,7 @@ xmlelement(XmlExprState *xmlExpr, ExprContext *econtext)
/*
* We first evaluate all the arguments, then start up libxml and create
- * the result. This avoids issues if one of the arguments involves a call
+ * the result. This avoids issues if one of the arguments involves a call
* to some other function or subsystem that wants to use libxml on its own
* terms.
*/
@@ -929,7 +929,7 @@ pg_xml_init_library(void)
* pg_xml_init --- set up for use of libxml and register an error handler
*
* This should be called by each function that is about to use libxml
- * facilities and requires error handling. It initializes libxml with
+ * facilities and requires error handling. It initializes libxml with
* pg_xml_init_library() and establishes our libxml error handler.
*
* strictness determines which errors are reported and which are ignored.
@@ -975,7 +975,7 @@ pg_xml_init(PgXmlStrictness strictness)
/*
* Verify that xmlSetStructuredErrorFunc set the context variable we
- * expected it to. If not, the error context pointer we just saved is not
+ * expected it to. If not, the error context pointer we just saved is not
* the correct thing to restore, and since that leaves us without a way to
* restore the context in pg_xml_done, we must fail.
*
@@ -1132,7 +1132,7 @@ parse_xml_decl(const xmlChar *str, size_t *lenp,
int utf8len;
/*
- * Only initialize libxml. We don't need error handling here, but we do
+ * Only initialize libxml. We don't need error handling here, but we do
* need to make sure libxml is initialized before calling any of its
* functions. Note that this is safe (and a no-op) if caller has already
* done pg_xml_init().
@@ -1275,7 +1275,7 @@ finished:
/*
* Write an XML declaration. On output, we adjust the XML declaration
- * as follows. (These rules are the moral equivalent of the clause
+ * as follows. (These rules are the moral equivalent of the clause
* "Serialization of an XML value" in the SQL standard.)
*
* We try to avoid generating an XML declaration if possible. This is
@@ -1499,7 +1499,7 @@ xml_pstrdup(const char *string)
/*
* xmlPgEntityLoader --- entity loader callback function
*
- * Silently prevent any external entity URL from being loaded. We don't want
+ * Silently prevent any external entity URL from being loaded. We don't want
* to throw an error, so instead make the entity appear to expand to an empty
* string.
*
@@ -1668,8 +1668,8 @@ xml_errorHandler(void *data, xmlErrorPtr error)
chopStringInfoNewlines(errorBuf);
/*
- * Legacy error handling mode. err_occurred is never set, we just add the
- * message to err_buf. This mode exists because the xml2 contrib module
+ * Legacy error handling mode. err_occurred is never set, we just add the
+ * message to err_buf. This mode exists because the xml2 contrib module
* uses our error-handling infrastructure, but we don't want to change its
* behaviour since it's deprecated anyway. This is also why we don't
* distinguish between notices, warnings and errors here --- the old-style
@@ -1948,8 +1948,8 @@ map_xml_name_to_sql_identifier(char *name)
*
* When xml_escape_strings is true, then certain characters in string
* values are replaced by entity references (&lt; etc.), as specified
- * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is
- * wanted. The false case is mainly useful when the resulting value
+ * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is
+ * wanted. The false case is mainly useful when the resulting value
* is used with xmlTextWriterWriteAttribute() to write out an
* attribute, because that function does the escaping itself.
*/
@@ -2230,13 +2230,13 @@ _SPI_strdup(const char *s)
*
* There are two kinds of mappings: Mapping SQL data (table contents)
* to XML documents, and mapping SQL structure (the "schema") to XML
- * Schema. And there are functions that do both at the same time.
+ * Schema. And there are functions that do both at the same time.
*
* Then you can map a database, a schema, or a table, each in both
* ways. This breaks down recursively: Mapping a database invokes
* mapping schemas, which invokes mapping tables, which invokes
* mapping rows, which invokes mapping columns, although you can't
- * call the last two from the outside. Because of this, there are a
+ * call the last two from the outside. Because of this, there are a
* number of xyz_internal() functions which are to be called both from
* the function manager wrapper and from some upper layer in a
* recursive call.
@@ -2245,7 +2245,7 @@ _SPI_strdup(const char *s)
* nulls, tableforest, and targetns mean.
*
* Some style guidelines for XML output: Use double quotes for quoting
- * XML attributes. Indent XML elements by two spaces, but remember
+ * XML attributes. Indent XML elements by two spaces, but remember
* that a lot of code is called recursively at different levels, so
* it's better not to indent rather than create output that indents
* and outdents weirdly. Add newlines to make the output look nice.
@@ -2409,12 +2409,12 @@ cursor_to_xml(PG_FUNCTION_ARGS)
* Write the start tag of the root element of a data mapping.
*
* top_level means that this is the very top level of the eventual
- * output. For example, when the user calls table_to_xml, then a call
+ * output. For example, when the user calls table_to_xml, then a call
* with a table name to this function is the top level. When the user
* calls database_to_xml, then a call with a schema name to this
* function is not the top level. If top_level is false, then the XML
* namespace declarations are omitted, because they supposedly already
- * appeared earlier in the output. Repeating them is not wrong, but
+ * appeared earlier in the output. Repeating them is not wrong, but
* it looks ugly.
*/
static void
@@ -3357,7 +3357,7 @@ map_sql_typecoll_to_xmlschema_types(List *tupdesc_list)
* SQL/XML:2008 sections 9.5 and 9.6.
*
* (The distinction between 9.5 and 9.6 is basically that 9.6 adds
- * a name attribute, which this function does. The name-less version
+ * a name attribute, which this function does. The name-less version
* 9.5 doesn't appear to be required anywhere.)
*/
static const char *
@@ -3535,7 +3535,7 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod)
/*
* Map an SQL row to an XML element, taking the row from the active
- * SPI cursor. See also SQL/XML:2008 section 9.10.
+ * SPI cursor. See also SQL/XML:2008 section 9.10.
*/
static void
SPI_sql_row_to_xmlelement(int rownum, StringInfo result, char *tablename,
diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c
index 134db1a1bb2..2ef3d08664f 100644
--- a/src/backend/utils/cache/attoptcache.c
+++ b/src/backend/utils/cache/attoptcache.c
@@ -46,7 +46,7 @@ typedef struct
* Flush all cache entries when pg_attribute is updated.
*
* When pg_attribute is updated, we must flush the cache entry at least
- * for that attribute. Currently, we just flush them all. Since attribute
+ * for that attribute. Currently, we just flush them all. Since attribute
* options are not currently used in performance-critical paths (such as
* query execution), this seems OK.
*/
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index cc91406582b..05b3b752579 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -816,7 +816,7 @@ InitCatCache(int id,
* CatalogCacheInitializeCache
*
* This function does final initialization of a catcache: obtain the tuple
- * descriptor and set up the hash and equality function links. We assume
+ * descriptor and set up the hash and equality function links. We assume
* that the relcache entry can be opened at this point!
*/
#ifdef CACHEDEBUG
@@ -1041,7 +1041,7 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
* if necessary (on the first access to a particular cache).
*
* The result is NULL if not found, or a pointer to a HeapTuple in
- * the cache. The caller must not modify the tuple, and must call
+ * the cache. The caller must not modify the tuple, and must call
* ReleaseCatCache() when done with it.
*
* The search key values should be expressed as Datums of the key columns'
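[Editor's note: a hedged usage sketch of the contract stated here, written against the higher-level syscache wrappers that backend code normally uses. It assumes the usual postgres.h, utils/syscache.h and catalog/pg_proc.h includes; funcid is a caller-supplied Oid.]

/* Look up a function's argument count via the syscache. */
static int
toy_function_arg_count(Oid funcid)
{
    HeapTuple   tup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcid));
    int         nargs = -1;

    if (HeapTupleIsValid(tup))
    {
        Form_pg_proc procform = (Form_pg_proc) GETSTRUCT(tup);

        nargs = procform->pronargs;     /* read only; never modify the tuple */
        ReleaseSysCache(tup);           /* release once we are done with it */
    }
    return nargs;
}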
@@ -1171,8 +1171,8 @@ SearchCatCache(CatCache *cache,
* the relation --- for example, due to shared-cache-inval messages being
* processed during heap_open(). This is OK. It's even possible for one
* of those lookups to find and enter the very same tuple we are trying to
- * fetch here. If that happens, we will enter a second copy of the tuple
- * into the cache. The first copy will never be referenced again, and
+ * fetch here. If that happens, we will enter a second copy of the tuple
+ * into the cache. The first copy will never be referenced again, and
* will eventually age out of the cache, so there's no functional problem.
* This case is rare enough that it's not worth expending extra cycles to
* detect.
@@ -1211,7 +1211,7 @@ SearchCatCache(CatCache *cache,
*
* In bootstrap mode, we don't build negative entries, because the cache
* invalidation mechanism isn't alive and can't clear them if the tuple
- * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
+ * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
* cache inval for that.)
*/
if (ct == NULL)
@@ -1541,7 +1541,7 @@ SearchCatCacheList(CatCache *cache,
/*
* We are now past the last thing that could trigger an elog before we
* have finished building the CatCList and remembering it in the
- * resource owner. So it's OK to fall out of the PG_TRY, and indeed
+ * resource owner. So it's OK to fall out of the PG_TRY, and indeed
* we'd better do so before we start marking the members as belonging
* to the list.
*/
@@ -1630,7 +1630,7 @@ ReleaseCatCacheList(CatCList *list)
/*
* CatalogCacheCreateEntry
* Create a new CatCTup entry, copying the given HeapTuple and other
- * supplied data into it. The new entry initially has refcount 0.
+ * supplied data into it. The new entry initially has refcount 0.
*/
static CatCTup *
CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
diff --git a/src/backend/utils/cache/evtcache.c b/src/backend/utils/cache/evtcache.c
index 2180f2abcc1..628e1bd1542 100644
--- a/src/backend/utils/cache/evtcache.c
+++ b/src/backend/utils/cache/evtcache.c
@@ -129,7 +129,7 @@ BuildEventTriggerCache(void)
HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
/*
- * Prepare to scan pg_event_trigger in name order. We use an MVCC
+ * Prepare to scan pg_event_trigger in name order. We use an MVCC
* snapshot to avoid getting inconsistent results if the table is being
* concurrently updated.
*/
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index e0dc1267076..62c4369b171 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -29,23 +29,23 @@
*
* If we successfully complete the transaction, we have to broadcast all
* these invalidation events to other backends (via the SI message queue)
- * so that they can flush obsolete entries from their caches. Note we have
+ * so that they can flush obsolete entries from their caches. Note we have
* to record the transaction commit before sending SI messages, otherwise
* the other backends won't see our updated tuples as good.
*
* When a subtransaction aborts, we can process and discard any events
- * it has queued. When a subtransaction commits, we just add its events
+ * it has queued. When a subtransaction commits, we just add its events
* to the pending lists of the parent transaction.
*
* In short, we need to remember until xact end every insert or delete
- * of a tuple that might be in the system caches. Updates are treated as
+ * of a tuple that might be in the system caches. Updates are treated as
* two events, delete + insert, for simplicity. (If the update doesn't
* change the tuple hash value, catcache.c optimizes this into one event.)
*
* We do not need to register EVERY tuple operation in this way, just those
- * on tuples in relations that have associated catcaches. We do, however,
+ * on tuples in relations that have associated catcaches. We do, however,
* have to register every operation on every tuple that *could* be in a
- * catcache, whether or not it currently is in our cache. Also, if the
+ * catcache, whether or not it currently is in our cache. Also, if the
* tuple is in a relation that has multiple catcaches, we need to register
* an invalidation message for each such catcache. catcache.c's
* PrepareToInvalidateCacheTuple() routine provides the knowledge of which
@@ -112,7 +112,7 @@
/*
* To minimize palloc traffic, we keep pending requests in successively-
* larger chunks (a slightly more sophisticated version of an expansible
- * array). All request types can be stored as SharedInvalidationMessage
+ * array). All request types can be stored as SharedInvalidationMessage
* records. The ordering of requests within a list is never significant.
*/
typedef struct InvalidationChunk
@@ -600,7 +600,7 @@ AcceptInvalidationMessages(void)
*
* If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY. This
* slows things by at least a factor of 10000, so I wouldn't suggest
- * trying to run the entire regression tests that way. It's useful to try
+ * trying to run the entire regression tests that way. It's useful to try
* a few simple tests, to make sure that cache reload isn't subject to
* internal cache-flush hazards, but after you've done a few thousand
* recursive reloads it's unlikely you'll learn more.
@@ -813,12 +813,12 @@ ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs,
* If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list
* to the shared invalidation message queue. Note that these will be read
* not only by other backends, but also by our own backend at the next
- * transaction start (via AcceptInvalidationMessages). This means that
+ * transaction start (via AcceptInvalidationMessages). This means that
* we can skip immediate local processing of anything that's still in
* CurrentCmdInvalidMsgs, and just send that list out too.
*
* If not isCommit, we are aborting, and must locally process the messages
- * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
+ * in PriorCmdInvalidMsgs. No messages need be sent to other backends,
* since they'll not have seen our changed tuples anyway. We can forget
* about CurrentCmdInvalidMsgs too, since those changes haven't touched
* the caches yet.
@@ -877,11 +877,11 @@ AtEOXact_Inval(bool isCommit)
* parent's PriorCmdInvalidMsgs list.
*
* If not isCommit, we are aborting, and must locally process the messages
- * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
+ * in PriorCmdInvalidMsgs. No messages need be sent to other backends.
* We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
* touched the caches yet.
*
- * In any case, pop the transaction stack. We need not physically free memory
+ * In any case, pop the transaction stack. We need not physically free memory
* here, since CurTransactionContext is about to be emptied anyway
* (if aborting). Beware of the possibility of aborting the same nesting
* level twice, though.
@@ -937,7 +937,7 @@ AtEOSubXact_Inval(bool isCommit)
* in a transaction.
*
* Here, we send no messages to the shared queue, since we don't know yet if
- * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
+ * we will commit. We do need to locally process the CurrentCmdInvalidMsgs
* list, so as to flush our caches of any entries we have outdated in the
* current command. We then move the current-cmd list over to become part
* of the prior-cmds list.
@@ -1039,7 +1039,7 @@ CacheInvalidateHeapTuple(Relation relation,
* This essentially means that only backends in this same database
* will react to the relcache flush request. This is in fact
* appropriate, since only those backends could see our pg_attribute
- * change anyway. It looks a bit ugly though. (In practice, shared
+ * change anyway. It looks a bit ugly though. (In practice, shared
* relations can't have schema changes after bootstrap, so we should
* never come here for a shared rel anyway.)
*/
@@ -1051,7 +1051,7 @@ CacheInvalidateHeapTuple(Relation relation,
/*
* When a pg_index row is updated, we should send out a relcache inval
- * for the index relation. As above, we don't know the shared status
+ * for the index relation. As above, we don't know the shared status
* of the index, but in practice it doesn't matter since indexes of
* shared catalogs can't have such updates.
*/
@@ -1159,7 +1159,7 @@ CacheInvalidateRelcacheByRelid(Oid relid)
*
* Sending this type of invalidation msg forces other backends to close open
* smgr entries for the rel. This should be done to flush dangling open-file
- * references when the physical rel is being dropped or truncated. Because
+ * references when the physical rel is being dropped or truncated. Because
* these are nontransactional (i.e., not-rollback-able) operations, we just
* send the inval message immediately without any queuing.
*
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index 586596258d3..0b45fcb4943 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -186,13 +186,13 @@ get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype,
* (This indicates that the operator is not a valid ordering operator.)
*
* Note: the operator could be registered in multiple families, for example
- * if someone were to build a "reverse sort" opfamily. This would result in
+ * if someone were to build a "reverse sort" opfamily. This would result in
* uncertainty as to whether "ORDER BY USING op" would default to NULLS FIRST
* or NULLS LAST, as well as inefficient planning due to failure to match up
* pathkeys that should be the same. So we want a determinate result here.
* Because of the way the syscache search works, we'll use the interpretation
* associated with the opfamily with smallest OID, which is probably
- * determinate enough. Since there is no longer any particularly good reason
+ * determinate enough. Since there is no longer any particularly good reason
* to build reverse-sort opfamilies, it doesn't seem worth expending any
* additional effort on ensuring consistency.
*/
@@ -403,7 +403,7 @@ get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type)
*
* The planner currently uses simple equal() tests to compare the lists
* returned by this function, which makes the list order relevant, though
- * strictly speaking it should not be. Because of the way syscache list
+ * strictly speaking it should not be. Because of the way syscache list
* searches are handled, in normal operation the result will be sorted by OID
* so everything works fine. If running with system index usage disabled,
* the result ordering is unspecified and hence the planner might fail to
@@ -1212,7 +1212,7 @@ op_mergejoinable(Oid opno, Oid inputtype)
*
* In some cases (currently only array_eq), hashjoinability depends on the
* specific input data type the operator is invoked for, so that must be
- * passed as well. We currently assume that only one input's type is needed
+ * passed as well. We currently assume that only one input's type is needed
* to check this --- by convention, pass the left input's data type.
*/
bool
@@ -1861,7 +1861,7 @@ get_typbyval(Oid typid)
* A two-fer: given the type OID, return both typlen and typbyval.
*
* Since both pieces of info are needed to know how to copy a Datum,
- * many places need both. Might as well get them with one cache lookup
+ * many places need both. Might as well get them with one cache lookup
* instead of two. Also, this routine raises an error instead of
* returning a bogus value when given a bad type OID.
*/
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index cf740a94cae..0e210a0d1c5 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -11,7 +11,7 @@
* The logic for choosing generic or custom plans is in choose_custom_plan,
* which see for comments.
*
- * Cache invalidation is driven off sinval events. Any CachedPlanSource
+ * Cache invalidation is driven off sinval events. Any CachedPlanSource
* that matches the event is marked invalid, as is its generic CachedPlan
* if it has one. When (and if) the next demand for a cached plan occurs,
* parse analysis and rewrite is repeated to build a new valid query tree,
@@ -27,7 +27,7 @@
* caller to notice changes and cope with them.
*
* Currently, we track exactly the dependencies of plans on relations and
- * user-defined functions. On relcache invalidation events or pg_proc
+ * user-defined functions. On relcache invalidation events or pg_proc
* syscache invalidation events, we invalidate just those plans that depend
* on the particular object being modified. (Note: this scheme assumes
* that any table modification that requires replanning will generate a
@@ -123,7 +123,7 @@ InitPlanCache(void)
* CreateCachedPlan: initially create a plan cache entry.
*
* Creation of a cached plan is divided into two steps, CreateCachedPlan and
- * CompleteCachedPlan. CreateCachedPlan should be called after running the
+ * CompleteCachedPlan. CreateCachedPlan should be called after running the
* query through raw_parser, but before doing parse analysis and rewrite;
* CompleteCachedPlan is called after that. The reason for this arrangement
* is that it can save one round of copying of the raw parse tree, since
@@ -217,7 +217,7 @@ CreateCachedPlan(Node *raw_parse_tree,
* in that context.
*
* A one-shot plan cannot be saved or copied, since we make no effort to
- * preserve the raw parse tree unmodified. There is also no support for
+ * preserve the raw parse tree unmodified. There is also no support for
* invalidation, so plan use must be completed in the current transaction,
* and DDL that might invalidate the querytree_list must be avoided as well.
*
@@ -274,13 +274,13 @@ CreateOneShotCachedPlan(Node *raw_parse_tree,
* CompleteCachedPlan: second step of creating a plan cache entry.
*
* Pass in the analyzed-and-rewritten form of the query, as well as the
- * required subsidiary data about parameters and such. All passed values will
+ * required subsidiary data about parameters and such. All passed values will
* be copied into the CachedPlanSource's memory, except as specified below.
* After this is called, GetCachedPlan can be called to obtain a plan, and
* optionally the CachedPlanSource can be saved using SaveCachedPlan.
*
* If querytree_context is not NULL, the querytree_list must be stored in that
- * context (but the other parameters need not be). The querytree_list is not
+ * context (but the other parameters need not be). The querytree_list is not
* copied, rather the given context is kept as the initial query_context of
* the CachedPlanSource. (It should have been created as a child of the
* caller's working memory context, but it will now be reparented to belong
@@ -374,7 +374,7 @@ CompleteCachedPlan(CachedPlanSource *plansource,
&plansource->invalItems);
/*
- * Also save the current search_path in the query_context. (This
+ * Also save the current search_path in the query_context. (This
* should not generate much extra cruft either, since almost certainly
* the path is already valid.) Again, we don't really need this for
* one-shot plans; and we *must* skip this for transaction control
@@ -421,7 +421,7 @@ CompleteCachedPlan(CachedPlanSource *plansource,
* This is guaranteed not to throw error, except for the caller-error case
* of trying to save a one-shot plan. Callers typically depend on that
* since this is called just before or just after adding a pointer to the
- * CachedPlanSource to some permanent data structure of their own. Up until
+ * CachedPlanSource to some permanent data structure of their own. Up until
* this is done, a CachedPlanSource is just transient data that will go away
* automatically on transaction abort.
*/
@@ -442,13 +442,13 @@ SaveCachedPlan(CachedPlanSource *plansource)
* plans from the CachedPlanSource. If there is a generic plan, moving it
* into CacheMemoryContext would be pretty risky since it's unclear
* whether the caller has taken suitable care with making references
- * long-lived. Best thing to do seems to be to discard the plan.
+ * long-lived. Best thing to do seems to be to discard the plan.
*/
ReleaseGenericPlan(plansource);
/*
* Reparent the source memory context under CacheMemoryContext so that it
- * will live indefinitely. The query_context follows along since it's
+ * will live indefinitely. The query_context follows along since it's
* already a child of the other one.
*/
MemoryContextSetParent(plansource->context, CacheMemoryContext);
@@ -466,7 +466,7 @@ SaveCachedPlan(CachedPlanSource *plansource)
* DropCachedPlan: destroy a cached plan.
*
* Actually this only destroys the CachedPlanSource: any referenced CachedPlan
- * is released, but not destroyed until its refcount goes to zero. That
+ * is released, but not destroyed until its refcount goes to zero. That
* handles the situation where DropCachedPlan is called while the plan is
* still in use.
*/
@@ -617,7 +617,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
plansource->search_path = NULL;
/*
- * Free the query_context. We don't really expect MemoryContextDelete to
+ * Free the query_context. We don't really expect MemoryContextDelete to
* fail, but just in case, make sure the CachedPlanSource is left in a
* reasonably sane state. (The generic plan won't get unlinked yet, but
* that's acceptable.)
@@ -675,7 +675,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
PopActiveSnapshot();
/*
- * Check or update the result tupdesc. XXX should we use a weaker
+ * Check or update the result tupdesc. XXX should we use a weaker
* condition than equalTupleDescs() here?
*
* We assume the parameter types didn't change from the first time, so no
@@ -726,7 +726,7 @@ RevalidateCachedQuery(CachedPlanSource *plansource)
&plansource->invalItems);
/*
- * Also save the current search_path in the query_context. (This should
+ * Also save the current search_path in the query_context. (This should
* not generate much extra cruft either, since almost certainly the path
* is already valid.)
*/
@@ -860,7 +860,7 @@ BuildCachedPlan(CachedPlanSource *plansource, List *qlist,
* we ought to be holding sufficient locks to prevent any invalidation.
* However, if we're building a custom plan after having built and
* rejected a generic plan, it's possible to reach here with is_valid
- * false due to an invalidation while making the generic plan. In theory
+ * false due to an invalidation while making the generic plan. In theory
* the invalidation must be a false positive, perhaps a consequence of an
* sinval reset event or the CLOBBER_CACHE_ALWAYS debug code. But for
* safety, let's treat it as real and redo the RevalidateCachedQuery call.
@@ -1043,7 +1043,7 @@ cached_plan_cost(CachedPlan *plan, bool include_planner)
* on the number of relations in the finished plan's rangetable.
* Join planning effort actually scales much worse than linearly
* in the number of relations --- but only until the join collapse
- * limits kick in. Also, while inheritance child relations surely
+ * limits kick in. Also, while inheritance child relations surely
* add to planning effort, they don't make the join situation
* worse. So the actual shape of the planning cost curve versus
* number of relations isn't all that obvious. It will take
@@ -1153,7 +1153,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
/*
* If we choose to plan again, we need to re-copy the query_list,
- * since the planner probably scribbled on it. We can force
+ * since the planner probably scribbled on it. We can force
* BuildCachedPlan to do that by passing NIL.
*/
qlist = NIL;
@@ -1203,7 +1203,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
*
* Note: useResOwner = false is used for releasing references that are in
* persistent data structures, such as the parent CachedPlanSource or a
- * Portal. Transient references should be protected by a resource owner.
+ * Portal. Transient references should be protected by a resource owner.
*/
void
ReleaseCachedPlan(CachedPlan *plan, bool useResOwner)
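A hedged caller-side sketch of the pairing described above, assuming the three-argument GetCachedPlan of this vintage; plansource and boundParams stand in for whatever the caller already holds. A transient reference is taken and released under the current resource owner, while a reference stored in a persistent structure would pass false on release:

	CachedPlan *cplan;

	/* transient use: protect the reference with the current resource owner */
	cplan = GetCachedPlan(plansource, boundParams, true);

	/* ... execute the statements in cplan->stmt_list ... */

	ReleaseCachedPlan(cplan, true);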
@@ -1267,7 +1267,7 @@ CachedPlanSetParentContext(CachedPlanSource *plansource,
*
* This is a convenience routine that does the equivalent of
* CreateCachedPlan + CompleteCachedPlan, using the data stored in the
- * input CachedPlanSource. The result is therefore "unsaved" (regardless
+ * input CachedPlanSource. The result is therefore "unsaved" (regardless
* of the state of the source), and we don't copy any generic plan either.
* The result will be currently valid, or not, the same as the source.
*/
@@ -1420,7 +1420,7 @@ AcquireExecutorLocks(List *stmt_list, bool acquire)
{
/*
* Ignore utility statements, except those (such as EXPLAIN) that
- * contain a parsed-but-not-planned query. Note: it's okay to use
+ * contain a parsed-but-not-planned query. Note: it's okay to use
* ScanQueryForLocks, even though the query hasn't been through
* rule rewriting, because rewriting doesn't change the query
* representation.
@@ -1616,7 +1616,7 @@ plan_list_is_transient(List *stmt_list)
/*
* PlanCacheComputeResultDesc: given a list of analyzed-and-rewritten Queries,
- * determine the result tupledesc it will produce. Returns NULL if the
+ * determine the result tupledesc it will produce. Returns NULL if the
* execution will not return tuples.
*
* Note: the result is created or copied into current memory context.
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index f1140385883..8058169d067 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -124,7 +124,7 @@ bool criticalSharedRelcachesBuilt = false;
/*
* This counter counts relcache inval events received since backend startup
- * (but only for rels that are actually in cache). Presently, we use it only
+ * (but only for rels that are actually in cache). Presently, we use it only
* to detect whether data about to be written by write_relcache_init_file()
* might already be obsolete.
*/
@@ -472,7 +472,7 @@ RelationBuildTupleDesc(Relation relation)
Int16GetDatum(0));
/*
- * Open pg_attribute and begin a scan. Force heap scan if we haven't yet
+ * Open pg_attribute and begin a scan. Force heap scan if we haven't yet
* built the critical relcache entries (this includes initdb and startup
* without a pg_internal.init file).
*/
@@ -535,7 +535,7 @@ RelationBuildTupleDesc(Relation relation)
/*
* The attcacheoff values we read from pg_attribute should all be -1
- * ("unknown"). Verify this if assert checking is on. They will be
+ * ("unknown"). Verify this if assert checking is on. They will be
* computed when and if needed during tuple access.
*/
#ifdef USE_ASSERT_CHECKING
@@ -549,7 +549,7 @@ RelationBuildTupleDesc(Relation relation)
/*
* However, we can easily set the attcacheoff value for the first
- * attribute: it must be zero. This eliminates the need for special cases
+ * attribute: it must be zero. This eliminates the need for special cases
* for attnum=1 that used to exist in fastgetattr() and index_getattr().
*/
if (relation->rd_rel->relnatts > 0)
@@ -605,7 +605,7 @@ RelationBuildTupleDesc(Relation relation)
* each relcache entry that has associated rules. The context is used
* just for rule info, not for any other subsidiary data of the relcache
* entry, because that keeps the update logic in RelationClearRelation()
- * manageable. The other subsidiary data structures are simple enough
+ * manageable. The other subsidiary data structures are simple enough
* to be easy to free explicitly, anyway.
*/
static void
@@ -714,9 +714,9 @@ RelationBuildRuleLock(Relation relation)
/*
* We want the rule's table references to be checked as though by the
- * table owner, not the user referencing the rule. Therefore, scan
+ * table owner, not the user referencing the rule. Therefore, scan
* through the rule's actions and set the checkAsUser field on all
- * rtable entries. We have to look at the qual as well, in case it
+ * rtable entries. We have to look at the qual as well, in case it
* contains sublinks.
*
* The reason for doing this when the rule is loaded, rather than when
@@ -1059,7 +1059,7 @@ RelationInitIndexAccessInfo(Relation relation)
amsupport = aform->amsupport;
/*
- * Make the private context to hold index access info. The reason we need
+ * Make the private context to hold index access info. The reason we need
* a context, and not just a couple of pallocs, is so that we won't leak
* any subsidiary info attached to fmgr lookup records.
*
@@ -1107,7 +1107,7 @@ RelationInitIndexAccessInfo(Relation relation)
/*
* indcollation cannot be referenced directly through the C struct,
- * because it comes after the variable-width indkey field. Must extract
+ * because it comes after the variable-width indkey field. Must extract
* the datum the hard way...
*/
indcollDatum = fastgetattr(relation->rd_indextuple,
@@ -1132,7 +1132,7 @@ RelationInitIndexAccessInfo(Relation relation)
/*
* Fill the support procedure OID array, as well as the info about
- * opfamilies and opclass input types. (aminfo and supportinfo are left
+ * opfamilies and opclass input types. (aminfo and supportinfo are left
* as zeroes, and are filled on-the-fly when used)
*/
IndexSupportInitialize(indclass, relation->rd_support,
@@ -1220,7 +1220,7 @@ IndexSupportInitialize(oidvector *indclass,
* Note there is no provision for flushing the cache. This is OK at the
* moment because there is no way to ALTER any interesting properties of an
* existing opclass --- all you can do is drop it, which will result in
- * a useless but harmless dead entry in the cache. To support altering
+ * a useless but harmless dead entry in the cache. To support altering
* opclass membership (not the same as opfamily membership!), we'd need to
* be able to flush this cache as well as the contents of relcache entries
* for indexes.
@@ -1329,7 +1329,7 @@ LookupOpclassInfo(Oid operatorClassOid,
heap_close(rel, AccessShareLock);
/*
- * Scan pg_amproc to obtain support procs for the opclass. We only fetch
+ * Scan pg_amproc to obtain support procs for the opclass. We only fetch
* the default ones (those with lefttype = righttype = opcintype).
*/
if (numSupport > 0)
@@ -1855,7 +1855,7 @@ RelationDestroyRelation(Relation relation)
*
* NB: when rebuilding, we'd better hold some lock on the relation,
* else the catalog data we need to read could be changing under us.
- * Also, a rel to be rebuilt had better have refcnt > 0. This is because
+ * Also, a rel to be rebuilt had better have refcnt > 0. This is because
* an sinval reset could happen while we're accessing the catalogs, and
* the rel would get blown away underneath us by RelationCacheInvalidate
* if it has zero refcnt.
@@ -1878,7 +1878,7 @@ RelationClearRelation(Relation relation, bool rebuild)
/*
* Make sure smgr and lower levels close the relation's files, if they
* weren't closed already. If the relation is not getting deleted, the
- * next smgr access should reopen the files automatically. This ensures
+ * next smgr access should reopen the files automatically. This ensures
* that the low-level file access state is updated after, say, a vacuum
* truncation.
*/
@@ -1890,7 +1890,7 @@ RelationClearRelation(Relation relation, bool rebuild)
* in case it is a mapped relation whose mapping changed.
*
* If it's a nailed index, then we need to re-read the pg_class row to see
- * if its relfilenode changed. We can't necessarily do that here, because
+ * if its relfilenode changed. We can't necessarily do that here, because
* we might be in a failed transaction. We assume it's okay to do it if
* there are open references to the relcache entry (cf notes for
* AtEOXact_RelationCache). Otherwise just mark the entry as possibly
@@ -1951,7 +1951,7 @@ RelationClearRelation(Relation relation, bool rebuild)
* over from the old entry). This is to avoid trouble in case an
* error causes us to lose control partway through. The old entry
* will still be marked !rd_isvalid, so we'll try to rebuild it again
- * on next access. Meanwhile it's not any less valid than it was
+ * on next access. Meanwhile it's not any less valid than it was
* before, so any code that might expect to continue accessing it
* isn't hurt by the rebuild failure. (Consider for example a
* subtransaction that ALTERs a table and then gets canceled partway
@@ -2140,7 +2140,7 @@ RelationCacheInvalidateEntry(Oid relationId)
/*
* RelationCacheInvalidate
* Blow away cached relation descriptors that have zero reference counts,
- * and rebuild those with positive reference counts. Also reset the smgr
+ * and rebuild those with positive reference counts. Also reset the smgr
* relation cache and re-read relation mapping data.
*
* This is currently used only to recover from SI message buffer overflow,
@@ -2153,7 +2153,7 @@ RelationCacheInvalidateEntry(Oid relationId)
* We do this in two phases: the first pass deletes deletable items, and
* the second one rebuilds the rebuildable items. This is essential for
* safety, because hash_seq_search only copes with concurrent deletion of
- * the element it is currently visiting. If a second SI overflow were to
+ * the element it is currently visiting. If a second SI overflow were to
* occur while we are walking the table, resulting in recursive entry to
* this routine, we could crash because the inner invocation blows away
* the entry next to be visited by the outer scan. But this way is OK,
@@ -2313,7 +2313,7 @@ AtEOXact_RelationCache(bool isCommit)
* For simplicity, eoxact_list[] entries are not deleted till end of
* top-level transaction, even though we could remove them at
* subtransaction end in some cases, or remove relations from the list if
- * they are cleared for other reasons. Therefore we should expect the
+ * they are cleared for other reasons. Therefore we should expect the
* case that list entries are not found in the hashtable; if not, there's
* nothing to do for them.
*/
@@ -2363,7 +2363,7 @@ AtEOXact_cleanup(Relation relation, bool isCommit)
* transaction calls. (That seems bogus, but it's not worth fixing.)
*
* Note: ideally this check would be applied to every relcache entry, not
- * just those that have eoxact work to do. But it's not worth forcing a
+ * just those that have eoxact work to do. But it's not worth forcing a
* scan of the whole relcache just for this. (Moreover, doing so would
* mean that assert-enabled testing never tests the hash_search code path
* above, which seems a bad idea.)
@@ -2667,7 +2667,7 @@ RelationBuildLocalRelation(const char *relname,
/*
* Insert relation physical and logical identifiers (OIDs) into the right
- * places. For a mapped relation, we set relfilenode to zero and rely on
+ * places. For a mapped relation, we set relfilenode to zero and rely on
* RelationInitPhysicalAddr to consult the map.
*/
rel->rd_rel->relisshared = shared_relation;
@@ -2910,7 +2910,7 @@ RelationCacheInitializePhase2(void)
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
/*
- * Try to load the shared relcache cache file. If unsuccessful, bootstrap
+ * Try to load the shared relcache cache file. If unsuccessful, bootstrap
* the cache with pre-made descriptors for the critical shared catalogs.
*/
if (!load_relcache_init_file(true))
@@ -2990,9 +2990,9 @@ RelationCacheInitializePhase3(void)
/*
* If we didn't get the critical system indexes loaded into relcache, do
- * so now. These are critical because the catcache and/or opclass cache
+ * so now. These are critical because the catcache and/or opclass cache
* depend on them for fetches done during relcache load. Thus, we have an
- * infinite-recursion problem. We can break the recursion by doing
+ * infinite-recursion problem. We can break the recursion by doing
* heapscans instead of indexscans at certain key spots. To avoid hobbling
* performance, we only want to do that until we have the critical indexes
* loaded into relcache. Thus, the flag criticalRelcachesBuilt is used to
@@ -3009,7 +3009,7 @@ RelationCacheInitializePhase3(void)
* RewriteRelRulenameIndexId and TriggerRelidNameIndexId are not critical
* in the same way as the others, because the critical catalogs don't
* (currently) have any rules or triggers, and so these indexes can be
- * rebuilt without inducing recursion. However they are used during
+ * rebuilt without inducing recursion. However they are used during
* relcache load when a rel does have rules or triggers, so we choose to
* nail them for performance reasons.
*/
@@ -3040,7 +3040,7 @@ RelationCacheInitializePhase3(void)
*
* DatabaseNameIndexId isn't critical for relcache loading, but rather for
* initial lookup of MyDatabaseId, without which we'll never find any
- * non-shared catalogs at all. Autovacuum calls InitPostgres with a
+ * non-shared catalogs at all. Autovacuum calls InitPostgres with a
* database OID, so it instead depends on DatabaseOidIndexId. We also
* need to nail up some indexes on pg_authid and pg_auth_members for use
* during client authentication.
@@ -3472,7 +3472,7 @@ RelationGetIndexList(Relation relation)
/*
* We build the list we intend to return (in the caller's context) while
- * doing the scan. After successfully completing the scan, we copy that
+ * doing the scan. After successfully completing the scan, we copy that
* list into the relcache entry. This avoids cache-context memory leakage
* if we get some sort of error partway through.
*/
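A small sketch of the caller-side consequence of the comment above: the returned list lives in the caller's context and is the caller's to free, independently of the copy kept in the relcache entry (relation is assumed to be an already-open Relation):

	List	   *indexoidlist;
	ListCell   *lc;

	indexoidlist = RelationGetIndexList(relation);
	foreach(lc, indexoidlist)
	{
		Oid			indexoid = lfirst_oid(lc);

		elog(DEBUG2, "relation has index %u", indexoid);
	}
	list_free(indexoidlist);	/* the relcache's own copy is unaffected */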
@@ -3510,7 +3510,7 @@ RelationGetIndexList(Relation relation)
/*
* indclass cannot be referenced directly through the C struct,
- * because it comes after the variable-width indkey field. Must
+ * because it comes after the variable-width indkey field. Must
* extract the datum the hard way...
*/
indclassDatum = heap_getattr(htup,
@@ -4060,7 +4060,7 @@ errtablecol(Relation rel, int attnum)
* given directly rather than extracted from the relation's catalog data.
*
* Don't use this directly unless errtablecol() is inconvenient for some
- * reason. This might possibly be needed during intermediate states in ALTER
+ * reason. This might possibly be needed during intermediate states in ALTER
* TABLE, for instance.
*/
int
@@ -4480,7 +4480,7 @@ load_relcache_init_file(bool shared)
return true;
/*
- * init file is broken, so do it the hard way. We don't bother trying to
+ * init file is broken, so do it the hard way. We don't bother trying to
* free the clutter we just allocated; it's not in the relcache so it
* won't hurt.
*/
@@ -4545,7 +4545,7 @@ write_relcache_init_file(bool shared)
}
/*
- * Write a magic number to serve as a file version identifier. We can
+ * Write a magic number to serve as a file version identifier. We can
* change the magic number whenever the relcache layout changes.
*/
magic = RELCACHE_INIT_FILEMAGIC;
@@ -4770,7 +4770,7 @@ RelationCacheInitFilePostInvalidate(void)
*
* We used to keep the init files across restarts, but that is unsafe in PITR
* scenarios, and even in simple crash-recovery cases there are windows for
- * the init files to become out-of-sync with the database. So now we just
+ * the init files to become out-of-sync with the database. So now we just
* remove them during startup and expect the first backend launch to rebuild
* them. Of course, this has to happen in each database of the cluster.
*/
diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c
index 2c7d9f3287b..57ff7de4dbb 100644
--- a/src/backend/utils/cache/relmapper.c
+++ b/src/backend/utils/cache/relmapper.c
@@ -23,7 +23,7 @@
* mapped catalogs can only be relocated by operations such as VACUUM FULL
* and CLUSTER, which make no transactionally-significant changes: it must be
* safe for the new file to replace the old, even if the transaction itself
- * aborts. An important factor here is that the indexes and toast table of
+ * aborts. An important factor here is that the indexes and toast table of
* a mapped catalog must also be mapped, so that the rewrites/relocations of
* all these files commit in a single map file update rather than being tied
* to transaction commit.
@@ -57,13 +57,13 @@
/*
* The map file is critical data: we have no automatic method for recovering
* from loss or corruption of it. We use a CRC so that we can detect
- * corruption. To minimize the risk of failed updates, the map file should
+ * corruption. To minimize the risk of failed updates, the map file should
* be kept to no more than one standard-size disk sector (ie 512 bytes),
* and we use overwrite-in-place rather than playing renaming games.
* The struct layout below is designed to occupy exactly 512 bytes, which
* might make filesystem updates a bit more efficient.
*
- * Entries in the mappings[] array are in no particular order. We could
+ * Entries in the mappings[] array are in no particular order. We could
* speed searching by insisting on OID order, but it really shouldn't be
* worth the trouble given the intended size of the mapping sets.
*/
@@ -90,7 +90,7 @@ typedef struct RelMapFile
/*
* The currently known contents of the shared map file and our database's
- * local map file are stored here. These can be reloaded from disk
+ * local map file are stored here. These can be reloaded from disk
* immediately whenever we receive an update sinval message.
*/
static RelMapFile shared_map;
@@ -293,7 +293,7 @@ merge_map_updates(RelMapFile *map, const RelMapFile *updates, bool add_okay)
* RelationMapRemoveMapping
*
* Remove a relation's entry in the map. This is only allowed for "active"
- * (but not committed) local mappings. We need it so we can back out the
+ * (but not committed) local mappings. We need it so we can back out the
* entry for the transient target file when doing VACUUM FULL/CLUSTER on
* a mapped relation.
*/
@@ -321,7 +321,7 @@ RelationMapRemoveMapping(Oid relationId)
* RelationMapInvalidate
*
* This routine is invoked for SI cache flush messages. We must re-read
- * the indicated map file. However, we might receive a SI message in a
+ * the indicated map file. However, we might receive a SI message in a
* process that hasn't yet, and might never, load the mapping files;
* for example the autovacuum launcher, which *must not* try to read
* a local map since it is attached to no particular database.
@@ -389,7 +389,7 @@ AtCCI_RelationMap(void)
*
* During commit, this must be called as late as possible before the actual
* transaction commit, so as to minimize the window where the transaction
- * could still roll back after committing map changes. Although nothing
+ * could still roll back after committing map changes. Although nothing
* critically bad happens in such a case, we still would prefer that it
* not happen, since we'd possibly be losing useful updates to the relations'
* pg_class row(s).
@@ -456,7 +456,7 @@ AtPrepare_RelationMap(void)
/*
* CheckPointRelationMap
*
- * This is called during a checkpoint. It must ensure that any relation map
+ * This is called during a checkpoint. It must ensure that any relation map
* updates that were WAL-logged before the start of the checkpoint are
* securely flushed to disk and will not need to be replayed later. This
* seems unlikely to be a performance-critical issue, so we use a simple
@@ -647,7 +647,7 @@ load_relmap_file(bool shared)
*
* Because this may be called during WAL replay when MyDatabaseId,
* DatabasePath, etc aren't valid, we require the caller to pass in suitable
- * values. The caller is also responsible for being sure no concurrent
+ * values. The caller is also responsible for being sure no concurrent
* map update could be happening.
*/
static void
@@ -767,7 +767,7 @@ write_relmap_file(bool shared, RelMapFile *newmap,
/*
* Make sure that the files listed in the map are not deleted if the outer
- * transaction aborts. This had better be within the critical section
+ * transaction aborts. This had better be within the critical section
* too: it's not likely to fail, but if it did, we'd arrive at transaction
* abort with the files still vulnerable. PANICing will leave things in a
* good state on-disk.
diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c
index e689291b2ec..5f257749a1e 100644
--- a/src/backend/utils/cache/spccache.c
+++ b/src/backend/utils/cache/spccache.c
@@ -4,7 +4,7 @@
* Tablespace cache management.
*
* We cache the parsed version of spcoptions for each tablespace to avoid
- * needing to reparse on every lookup. Right now, there doesn't appear to
+ * needing to reparse on every lookup. Right now, there doesn't appear to
* be a measurable performance gain from doing this, but that might change
* in the future as we add more options.
*
@@ -128,7 +128,7 @@ get_tablespace(Oid spcid)
return spc;
/*
- * Not found in TableSpace cache. Check catcache. If we don't find a
+ * Not found in TableSpace cache. Check catcache. If we don't find a
* valid HeapTuple, it must mean someone has managed to request tablespace
* details for a non-existent tablespace. We'll just treat that case as
* if no options were specified.
@@ -158,7 +158,7 @@ get_tablespace(Oid spcid)
}
/*
- * Now create the cache entry. It's important to do this only after
+ * Now create the cache entry. It's important to do this only after
* reading the pg_tablespace entry, since doing so could cause a cache
* flush.
*/
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index ecb0f96d467..9aa5ee5ac05 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -801,7 +801,7 @@ static bool CacheInitialized = false;
* InitCatalogCache - initialize the caches
*
* Note that no database access is done here; we only allocate memory
- * and initialize the cache structure. Interrogation of the database
+ * and initialize the cache structure. Interrogation of the database
* to complete initialization of a cache happens upon first use
* of that cache.
*/
@@ -1038,7 +1038,7 @@ SearchSysCacheExistsAttName(Oid relid, const char *attname)
* extract a specific attribute.
*
* This is equivalent to using heap_getattr() on a tuple fetched
- * from a non-cached relation. Usually, this is only used for attributes
+ * from a non-cached relation. Usually, this is only used for attributes
* that could be NULL or variable length; the fixed-size attributes in
* a system table are accessed just by mapping the tuple onto the C struct
* declarations from include/catalog/.
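As an illustration of the nullable/variable-length case, a sketch of the usual syscache access pattern; get_prosrc is a hypothetical helper name, and pg_proc.prosrc is chosen only because it is a familiar varlena attribute:

	static char *
	get_prosrc(Oid funcoid)
	{
		HeapTuple	tup;
		Datum		datum;
		bool		isnull;
		char	   *prosrc = NULL;

		tup = SearchSysCache1(PROCOID, ObjectIdGetDatum(funcoid));
		if (!HeapTupleIsValid(tup))
			elog(ERROR, "cache lookup failed for function %u", funcoid);

		/* prosrc could be NULL and is variable length, so use SysCacheGetAttr */
		datum = SysCacheGetAttr(PROCOID, tup, Anum_pg_proc_prosrc, &isnull);
		if (!isnull)
			prosrc = TextDatumGetCString(datum);

		ReleaseSysCache(tup);
		return prosrc;
	}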
diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c
index 65a8ad7be1a..f6b61a2c3ed 100644
--- a/src/backend/utils/cache/ts_cache.c
+++ b/src/backend/utils/cache/ts_cache.c
@@ -605,7 +605,7 @@ check_TSCurrentConfig(char **newval, void **extra, GucSource source)
/*
* When source == PGC_S_TEST, we are checking the argument of an ALTER
- * DATABASE SET or ALTER USER SET command. It could be that the
+ * DATABASE SET or ALTER USER SET command. It could be that the
* intended use of the setting is for some other database, so we
* should not error out if the text search configuration is not
* present in the current database. We issue a NOTICE instead.
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index 2fa6d335350..fdda169cf38 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -11,7 +11,7 @@
*
* Several seemingly-odd choices have been made to support use of the type
* cache by generic array and record handling routines, such as array_eq(),
- * record_cmp(), and hash_array(). Because those routines are used as index
+ * record_cmp(), and hash_array(). Because those routines are used as index
* support operations, they cannot leak memory. To allow them to execute
* efficiently, all information that they would like to re-use across calls
* is kept in the type cache.
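A hedged sketch of how an array_eq-style routine leans on the cache: one lookup_type_cache() call, after which the cached FmgrInfo is reused for every element comparison without further lookups or per-call leaks (element_type, collation, elem1, elem2 are placeholders for the caller's values):

	TypeCacheEntry *typentry;
	Datum		cmp;

	typentry = lookup_type_cache(element_type, TYPECACHE_CMP_PROC_FINFO);
	if (!OidIsValid(typentry->cmp_proc_finfo.fn_oid))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_FUNCTION),
				 errmsg("could not identify a comparison function for type %s",
						format_type_be(element_type))));

	/* the cached FmgrInfo can be invoked repeatedly at little cost */
	cmp = FunctionCall2Coll(&typentry->cmp_proc_finfo, collation, elem1, elem2);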
@@ -101,7 +101,7 @@ typedef struct TypeCacheEnumData
*
* Stored record types are remembered in a linear array of TupleDescs,
* which can be indexed quickly with the assigned typmod. There is also
- * a hash table to speed searches for matching TupleDescs. The hash key
+ * a hash table to speed searches for matching TupleDescs. The hash key
* uses just the first N columns' type OIDs, and so we may have multiple
* entries with the same hash key.
*/
@@ -482,7 +482,7 @@ load_typcache_tupdesc(TypeCacheEntry *typentry)
/*
* Link to the tupdesc and increment its refcount (we assert it's a
- * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
+ * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
* because the reference mustn't be entered in the current resource owner;
* it can outlive the current query.
*/
@@ -1074,7 +1074,7 @@ load_enum_cache_data(TypeCacheEntry *tcache)
/*
* Read all the information for members of the enum type. We collect the
* info in working memory in the caller's context, and then transfer it to
- * permanent memory in CacheMemoryContext. This minimizes the risk of
+ * permanent memory in CacheMemoryContext. This minimizes the risk of
* leaking memory from CacheMemoryContext in the event of an error partway
* through.
*/
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 59415f698d2..c82707165fa 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -5,7 +5,7 @@
*
* Because of the extremely high rate at which log messages can be generated,
* we need to be mindful of the performance cost of obtaining any information
- * that may be logged. Also, it's important to keep in mind that this code may
+ * that may be logged. Also, it's important to keep in mind that this code may
* get called from within an aborted transaction, in which case operations
* such as syscache lookups are unsafe.
*
@@ -15,23 +15,23 @@
* if we run out of memory, it's important to be able to report that fact.
* There are a number of considerations that go into this.
*
- * First, distinguish between re-entrant use and actual recursion. It
+ * First, distinguish between re-entrant use and actual recursion. It
* is possible for an error or warning message to be emitted while the
- * parameters for an error message are being computed. In this case
+ * parameters for an error message are being computed. In this case
* errstart has been called for the outer message, and some field values
- * may have already been saved, but we are not actually recursing. We handle
- * this by providing a (small) stack of ErrorData records. The inner message
+ * may have already been saved, but we are not actually recursing. We handle
+ * this by providing a (small) stack of ErrorData records. The inner message
* can be computed and sent without disturbing the state of the outer message.
* (If the inner message is actually an error, this isn't very interesting
* because control won't come back to the outer message generator ... but
* if the inner message is only debug or log data, this is critical.)
*
* Second, actual recursion will occur if an error is reported by one of
- * the elog.c routines or something they call. By far the most probable
+ * the elog.c routines or something they call. By far the most probable
* scenario of this sort is "out of memory"; and it's also the nastiest
* to handle because we'd likely also run out of memory while trying to
* report this error! Our escape hatch for this case is to reset the
- * ErrorContext to empty before trying to process the inner error. Since
+ * ErrorContext to empty before trying to process the inner error. Since
* ErrorContext is guaranteed to have at least 8K of space in it (see mcxt.c),
* we should be able to process an "out of memory" message successfully.
* Since we lose the prior error state due to the reset, we won't be able
@@ -116,7 +116,7 @@ char *Log_destination_string = NULL;
/*
* Max string length to send to syslog(). Note that this doesn't count the
* sequence-number prefix we add, and of course it doesn't count the prefix
- * added by syslog itself. Solaris and sysklogd truncate the final message
+ * added by syslog itself. Solaris and sysklogd truncate the final message
* at 1024 bytes, so this value leaves 124 bytes for those prefixes. (Most
* other syslog implementations seem to have limits of 2KB or so.)
*/
@@ -243,7 +243,7 @@ errstart(int elevel, const char *filename, int lineno,
{
/*
* If we are inside a critical section, all errors become PANIC
- * errors. See miscadmin.h.
+ * errors. See miscadmin.h.
*/
if (CritSectionCount > 0)
elevel = PANIC;
@@ -256,7 +256,7 @@ errstart(int elevel, const char *filename, int lineno,
*
* 2. ExitOnAnyError mode switch is set (initdb uses this).
*
- * 3. the error occurred after proc_exit has begun to run. (It's
+ * 3. the error occurred after proc_exit has begun to run. (It's
* proc_exit's responsibility to see that this doesn't turn into
* infinite recursion!)
*/
@@ -349,7 +349,7 @@ errstart(int elevel, const char *filename, int lineno,
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery.
*/
@@ -451,7 +451,7 @@ errfinish(int dummy,...)
*
* Reset InterruptHoldoffCount in case we ereport'd from inside an
* interrupt holdoff section. (We assume here that no handler will
- * itself be inside a holdoff section. If necessary, such a handler
+ * itself be inside a holdoff section. If necessary, such a handler
* could save and restore InterruptHoldoffCount for itself, but this
* should make life easier for most.)
*
@@ -477,7 +477,7 @@ errfinish(int dummy,...)
* progress, so that we can report the message before dying. (Without
* this, pq_putmessage will refuse to send the message at all, which is
* what we want for NOTICE messages, but not for fatal exits.) This hack
- * is necessary because of poor design of old-style copy protocol. Note
+ * is necessary because of poor design of old-style copy protocol. Note
* we must do this even if client is fool enough to have set
* client_min_messages above FATAL, so don't look at output_to_client.
*/
@@ -599,7 +599,7 @@ errcode(int sqlerrcode)
/*
* errcode_for_file_access --- add SQLSTATE error code to the current error
*
- * The SQLSTATE code is chosen based on the saved errno value. We assume
+ * The SQLSTATE code is chosen based on the saved errno value. We assume
* that the failing operation was some type of disk file access.
*
* NOTE: the primary error message string should generally include %m
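A short usage sketch consistent with that NOTE: the SQLSTATE is derived from errno while %m puts the strerror() text into the primary message (fd and path are placeholders for the caller's variables):

	fd = open(path, O_RDONLY | PG_BINARY, 0);
	if (fd < 0)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not open file \"%s\": %m", path)));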
@@ -670,7 +670,7 @@ errcode_for_file_access(void)
/*
* errcode_for_socket_access --- add SQLSTATE error code to the current error
*
- * The SQLSTATE code is chosen based on the saved errno value. We assume
+ * The SQLSTATE code is chosen based on the saved errno value. We assume
* that the failing operation was some type of socket access.
*
* NOTE: the primary error message string should generally include %m
@@ -708,7 +708,7 @@ errcode_for_socket_access(void)
* This macro handles expansion of a format string and associated parameters;
* it's common code for errmsg(), errdetail(), etc. Must be called inside
* a routine that is declared like "const char *fmt, ..." and has an edata
- * pointer set up. The message is assigned to edata->targetfield, or
+ * pointer set up. The message is assigned to edata->targetfield, or
* appended to it if appendval is true. The message is subject to translation
* if translateit is true.
*
@@ -1267,7 +1267,7 @@ elog_start(const char *filename, int lineno, const char *funcname)
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery. Note that the message is intentionally not localized,
* else failure to convert it to client encoding could cause further
@@ -1435,7 +1435,7 @@ EmitErrorReport(void)
/*
* CopyErrorData --- obtain a copy of the topmost error stack entry
*
- * This is only for use in error handler code. The data is copied into the
+ * This is only for use in error handler code. The data is copied into the
* current memory context, so callers should always switch away from
* ErrorContext first; otherwise it will be lost when FlushErrorState is done.
*/
@@ -1546,7 +1546,7 @@ FlushErrorState(void)
*
* A handler can do CopyErrorData/FlushErrorState to get out of the error
* subsystem, then do some processing, and finally ReThrowError to re-throw
- * the original error. This is slower than just PG_RE_THROW() but should
+ * the original error. This is slower than just PG_RE_THROW() but should
* be used if the "some processing" is likely to incur another error.
*/
void
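A hedged sketch of the handler sequence that comment describes; risky_operation and do_cleanup_work are hypothetical stand-ins for the caller's own code:

	MemoryContext oldcontext = CurrentMemoryContext;
	ErrorData  *edata;

	PG_TRY();
	{
		risky_operation();			/* hypothetical */
	}
	PG_CATCH();
	{
		/* switch back before copying, or the copy vanishes with ErrorContext */
		MemoryContextSwitchTo(oldcontext);
		edata = CopyErrorData();
		FlushErrorState();

		do_cleanup_work();			/* hypothetical; may itself throw */

		ReThrowError(edata);
	}
	PG_END_TRY();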
@@ -1563,7 +1563,7 @@ ReThrowError(ErrorData *edata)
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery.
*/
@@ -1720,7 +1720,7 @@ set_syslog_parameters(const char *ident, int facility)
{
/*
* guc.c is likely to call us repeatedly with same parameters, so don't
- * thrash the syslog connection unnecessarily. Also, we do not re-open
+ * thrash the syslog connection unnecessarily. Also, we do not re-open
* the connection until needed, since this routine will get called whether
* or not Log_destination actually mentions syslog.
*
@@ -2654,7 +2654,7 @@ send_message_to_server_log(ErrorData *edata)
*
* Note: when there are multiple backends writing into the syslogger pipe,
* it's critical that each write go into the pipe indivisibly, and not
- * get interleaved with data from other processes. Fortunately, the POSIX
+ * get interleaved with data from other processes. Fortunately, the POSIX
* spec requires that writes to pipes be atomic so long as they are not
* more than PIPE_BUF bytes long. So we divide long messages into chunks
* that are no more than that length, and send one chunk per write() call.
@@ -2974,7 +2974,7 @@ useful_strerror(int errnum)
str = strerror(errnum);
/*
- * Some strerror()s return an empty string for out-of-range errno. This
+ * Some strerror()s return an empty string for out-of-range errno. This
* is ANSI C spec compliant, but not exactly useful. Also, we may get
* back strings of question marks if libc cannot transcode the message to
* the codeset specified by LC_CTYPE. If we get nothing useful, first try
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index 562a7c9ab0c..5528eded636 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -131,7 +131,7 @@ load_external_function(char *filename, char *funcname,
/*
* This function loads a shlib file without looking up any particular
- * function in it. If the same shlib has previously been loaded,
+ * function in it. If the same shlib has previously been loaded,
* unload and reload it.
*
* When 'restricted' is true, only libraries in the presumed-secure
@@ -171,7 +171,7 @@ lookup_external_function(void *filehandle, char *funcname)
/*
* Load the specified dynamic-link library file, unless it already is
- * loaded. Return the pg_dl* handle for the file.
+ * loaded. Return the pg_dl* handle for the file.
*
* Note: libname is expected to be an exact name for the library file.
*/
@@ -473,7 +473,7 @@ file_exists(const char *name)
* If name contains a slash, check if the file exists, if so return
* the name. Else (no slash) try to expand using search path (see
* find_in_dynamic_libpath below); if that works, return the fully
- * expanded file name. If the previous failed, append DLSUFFIX and
+ * expanded file name. If the previous failed, append DLSUFFIX and
* try again. If all fails, just return the original name.
*
* The result will always be freshly palloc'd.
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index 778f75aefcd..0e804c19116 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -96,7 +96,7 @@ static Datum fmgr_security_definer(PG_FUNCTION_ARGS);
/*
- * Lookup routines for builtin-function table. We can search by either Oid
+ * Lookup routines for builtin-function table. We can search by either Oid
* or name, but search by Oid is much faster.
*/
@@ -581,7 +581,7 @@ clear_external_function_hash(void *filehandle)
* Copy an FmgrInfo struct
*
* This is inherently somewhat bogus since we can't reliably duplicate
- * language-dependent subsidiary info. We cheat by zeroing fn_extra,
+ * language-dependent subsidiary info. We cheat by zeroing fn_extra,
* instead, meaning that subsidiary info will have to be recomputed.
*/
void
@@ -861,7 +861,7 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
/*
- * Support for security-definer and proconfig-using functions. We support
+ * Support for security-definer and proconfig-using functions. We support
* both of these features using the same call handler, because they are
* often used together and it would be inefficient (as well as notationally
* messy) to have two levels of call handler involved.
@@ -881,7 +881,7 @@ struct fmgr_security_definer_cache
* (All this info is cached for the duration of the current query.)
* To execute a call, we temporarily replace the flinfo with the cached
* and looked-up one, while keeping the outer fcinfo (which contains all
- * the actual arguments, etc.) intact. This is not re-entrant, but then
+ * the actual arguments, etc.) intact. This is not re-entrant, but then
* the fcinfo itself can't be used re-entrantly anyway.
*/
static Datum
@@ -961,7 +961,7 @@ fmgr_security_definer(PG_FUNCTION_ARGS)
/*
* We don't need to restore GUC or userid settings on error, because the
- * ensuing xact or subxact abort will do that. The PG_TRY block is only
+ * ensuing xact or subxact abort will do that. The PG_TRY block is only
* needed to clean up the flinfo link.
*/
save_flinfo = fcinfo->flinfo;
@@ -1014,7 +1014,7 @@ fmgr_security_definer(PG_FUNCTION_ARGS)
/*
* These are for invocation of a specifically named function with a
* directly-computed parameter list. Note that neither arguments nor result
- * are allowed to be NULL. Also, the function cannot be one that needs to
+ * are allowed to be NULL. Also, the function cannot be one that needs to
* look at FmgrInfo, since there won't be any.
*/
Datum
@@ -1559,8 +1559,8 @@ FunctionCall9Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2,
/*
* These are for invocation of a function identified by OID with a
* directly-computed parameter list. Note that neither arguments nor result
- * are allowed to be NULL. These are essentially fmgr_info() followed
- * by FunctionCallN(). If the same function is to be invoked repeatedly,
+ * are allowed to be NULL. These are essentially fmgr_info() followed
+ * by FunctionCallN(). If the same function is to be invoked repeatedly,
* do the fmgr_info() once and then use FunctionCallN().
*/
Datum
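To make the contrast concrete, a sketch of the two calling styles mentioned above; eqfuncoid, d1, d2, keys, probe, matches and nkeys are placeholders for the caller's data:

	FmgrInfo	flinfo;
	Datum		result;
	int			i;

	/* one-off invocation: lookup and call bundled together */
	result = OidFunctionCall2(eqfuncoid, d1, d2);

	/* repeated invocation: do fmgr_info() once, then reuse the FmgrInfo */
	fmgr_info(eqfuncoid, &flinfo);
	for (i = 0; i < nkeys; i++)
		matches[i] = FunctionCall2(&flinfo, keys[i], probe);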
@@ -1889,7 +1889,7 @@ OidFunctionCall9Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2,
*
* One important difference from the bare function call is that we will
* push any active SPI context, allowing SPI-using I/O functions to be
- * called from other SPI functions without extra notation. This is a hack,
+ * called from other SPI functions without extra notation. This is a hack,
* but the alternative of expecting all SPI functions to do SPI_push/SPI_pop
* around I/O calls seems worse.
*/
diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c
index 6347a8f1ac3..5341923fce8 100644
--- a/src/backend/utils/fmgr/funcapi.c
+++ b/src/backend/utils/fmgr/funcapi.c
@@ -136,7 +136,7 @@ per_MultiFuncCall(PG_FUNCTION_ARGS)
* FuncCallContext is pointing to it), but in most usage patterns the
* tuples stored in it will be in the function's per-tuple context. So at
* the beginning of each call, the Slot will hold a dangling pointer to an
- * already-recycled tuple. We clear it out here.
+ * already-recycled tuple. We clear it out here.
*
* Note: use of retval->slot is obsolete as of 8.0, and we expect that it
* will always be NULL. This is just here for backwards compatibility in
@@ -192,13 +192,13 @@ shutdown_MultiFuncCall(Datum arg)
* Given a function's call info record, determine the kind of datatype
* it is supposed to return. If resultTypeId isn't NULL, *resultTypeId
* receives the actual datatype OID (this is mainly useful for scalar
- * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
+ * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
* receives a pointer to a TupleDesc when the result is of a composite
* type, or NULL when it's a scalar result.
*
* One hard case that this handles is resolution of actual rowtypes for
* functions returning RECORD (from either the function's OUT parameter
- * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned
+ * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned
* only when we couldn't resolve the actual rowtype for lack of information.
*
* The other hard case that this handles is resolution of polymorphism.
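A sketch of how a RECORD-returning C function typically consumes this resolution via get_call_result_type(), erroring out when the rowtype cannot be deduced:

	TupleDesc	tupdesc;

	if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("function returning record called in context "
						"that cannot accept type record")));
	tupdesc = BlessTupleDesc(tupdesc);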
@@ -281,7 +281,7 @@ get_func_result_type(Oid functionId,
/*
* internal_get_result_type -- workhorse code implementing all the above
*
- * funcid must always be supplied. call_expr and rsinfo can be NULL if not
+ * funcid must always be supplied. call_expr and rsinfo can be NULL if not
* available. We will return TYPEFUNC_RECORD, and store NULL into
* *resultTupleDesc, if we cannot deduce the complete result rowtype from
* the available information.
@@ -448,7 +448,7 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args,
return true;
/*
- * Otherwise, extract actual datatype(s) from input arguments. (We assume
+ * Otherwise, extract actual datatype(s) from input arguments. (We assume
* the parser already validated consistency of the arguments.)
*/
if (!call_expr)
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 7c3f9206e5e..a3db973afe1 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -5,19 +5,19 @@
*
* dynahash.c supports both local-to-a-backend hash tables and hash tables in
* shared memory. For shared hash tables, it is the caller's responsibility
- * to provide appropriate access interlocking. The simplest convention is
- * that a single LWLock protects the whole hash table. Searches (HASH_FIND or
+ * to provide appropriate access interlocking. The simplest convention is
+ * that a single LWLock protects the whole hash table. Searches (HASH_FIND or
* hash_seq_search) need only shared lock, but any update requires exclusive
* lock. For heavily-used shared tables, the single-lock approach creates a
* concurrency bottleneck, so we also support "partitioned" locking wherein
* there are multiple LWLocks guarding distinct subsets of the table. To use
* a hash table in partitioned mode, the HASH_PARTITION flag must be given
- * to hash_create. This prevents any attempt to split buckets on-the-fly.
+ * to hash_create. This prevents any attempt to split buckets on-the-fly.
* Therefore, each hash bucket chain operates independently, and no fields
* of the hash header change after init except nentries and freeList.
* A partitioned table uses a spinlock to guard changes of those two fields.
* This lets any subset of the hash buckets be treated as a separately
- * lockable partition. We expect callers to use the low-order bits of a
+ * lockable partition. We expect callers to use the low-order bits of a
* lookup key's hash value as a partition number --- this will work because
* of the way calc_bucket() maps hash values to bucket numbers.
*
@@ -81,7 +81,7 @@
* Constants
*
* A hash table has a top-level "directory", each of whose entries points
- * to a "segment" of ssize bucket headers. The maximum number of hash
+ * to a "segment" of ssize bucket headers. The maximum number of hash
* buckets is thus dsize * ssize (but dsize may be expansible). Of course,
* the number of records in the table can be larger, but we don't want a
* whole lot of records per bucket or performance goes down.
@@ -89,7 +89,7 @@
* In a hash table allocated in shared memory, the directory cannot be
* expanded because it must stay at a fixed address. The directory size
* should be selected using hash_select_dirsize (and you'd better have
- * a good idea of the maximum number of entries!). For non-shared hash
+ * a good idea of the maximum number of entries!). For non-shared hash
* tables, the initial directory size can be left at the default.
*/
#define DEF_SEGSIZE 256
@@ -341,7 +341,7 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
{
/*
* ctl structure and directory are preallocated for shared memory
- * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
+ * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
* well.
*/
hashp->hctl = info->hctl;
@@ -790,7 +790,7 @@ calc_bucket(HASHHDR *hctl, uint32 hash_val)
* the result is a dangling pointer that shouldn't be dereferenced!)
*
* HASH_ENTER will normally ereport a generic "out of memory" error if
- * it is unable to create a new entry. The HASH_ENTER_NULL operation is
+ * it is unable to create a new entry. The HASH_ENTER_NULL operation is
* the same except it will return NULL if out of memory. Note that
* HASH_ENTER_NULL cannot be used with the default palloc-based allocator,
* since palloc internally ereports on out-of-memory.
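A hedged sketch of a local (non-shared) table using HASH_ENTER; MyEntry is a hypothetical struct whose first field is the Oid key, and lookup_oid stands in for the caller's key:

	HASHCTL		ctl;
	HTAB	   *htab;
	MyEntry    *entry;
	bool		found;

	MemSet(&ctl, 0, sizeof(ctl));
	ctl.keysize = sizeof(Oid);
	ctl.entrysize = sizeof(MyEntry);
	ctl.hash = oid_hash;
	htab = hash_create("example cache", 128, &ctl, HASH_ELEM | HASH_FUNCTION);

	/* HASH_ENTER returns the existing entry, or a new one with only the key set */
	entry = (MyEntry *) hash_search(htab, &lookup_oid, HASH_ENTER, &found);
	if (!found)
		entry->count = 0;		/* initialize everything except the key */
	entry->count++;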
@@ -1042,7 +1042,7 @@ hash_update_hash_key(HTAB *hashp,
hashp->tabname);
/*
- * Lookup the existing element using its saved hash value. We need to do
+ * Lookup the existing element using its saved hash value. We need to do
* this to be able to unlink it from its hash chain, but as a side benefit
* we can verify the validity of the passed existingEntry pointer.
*/
@@ -1119,7 +1119,7 @@ hash_update_hash_key(HTAB *hashp,
/*
* If old and new hash values belong to the same bucket, we need not
* change any chain links, and indeed should not since this simplistic
- * update will corrupt the list if currBucket is the last element. (We
+ * update will corrupt the list if currBucket is the last element. (We
* cannot fall out earlier, however, since we need to scan the bucket to
* check for duplicate keys.)
*/
@@ -1405,7 +1405,7 @@ expand_table(HTAB *hashp)
}
/*
- * Relocate records to the new bucket. NOTE: because of the way the hash
+ * Relocate records to the new bucket. NOTE: because of the way the hash
* masking is done in calc_bucket, only one old bucket can need to be
* split at this point. With a different way of reducing the hash value,
* that might not be true!
@@ -1554,7 +1554,7 @@ hash_corrupted(HTAB *hashp)
{
/*
* If the corruption is in a shared hashtable, we'd better force a
- * systemwide restart. Otherwise, just shut down this one backend.
+ * systemwide restart. Otherwise, just shut down this one backend.
*/
if (hashp->isshared)
elog(PANIC, "hash table \"%s\" corrupted", hashp->tabname);
@@ -1599,7 +1599,7 @@ next_pow2_int(long num)
/************************* SEQ SCAN TRACKING ************************/
/*
- * We track active hash_seq_search scans here. The need for this mechanism
+ * We track active hash_seq_search scans here. The need for this mechanism
* comes from the fact that a scan will get confused if a bucket split occurs
* while it's in progress: it might visit entries twice, or even miss some
* entirely (if it's partway through the same bucket that splits). Hence
@@ -1619,7 +1619,7 @@ next_pow2_int(long num)
*
* This arrangement is reasonably robust if a transient hashtable is deleted
* without notifying us. The absolute worst case is we might inhibit splits
- * in another table created later at exactly the same address. We will give
+ * in another table created later at exactly the same address. We will give
* a warning at transaction end for reference leaks, so any bugs leading to
* lack of notification should be easy to catch.
*/
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index cb78caf8ebd..cf566f1283e 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -59,7 +59,7 @@ static List *lock_files = NIL;
*
* NOTE: "ignoring system indexes" means we do not use the system indexes
* for lookups (either in hardwired catalog accesses or in planner-generated
- * plans). We do, however, still update the indexes when a catalog
+ * plans). We do, however, still update the indexes when a catalog
* modification is made.
* ----------------------------------------------------------------
*/
@@ -301,7 +301,7 @@ SetSessionUserId(Oid userid, bool is_superuser)
* Currently there are two valid bits in SecurityRestrictionContext:
*
* SECURITY_LOCAL_USERID_CHANGE indicates that we are inside an operation
- * that is temporarily changing CurrentUserId via these functions. This is
+ * that is temporarily changing CurrentUserId via these functions. This is
* needed to indicate that the actual value of CurrentUserId is not in sync
* with guc.c's internal state, so SET ROLE has to be disallowed.
*
@@ -322,7 +322,7 @@ SetSessionUserId(Oid userid, bool is_superuser)
* ever throw any kind of error. This is because they are used by
* StartTransaction and AbortTransaction to save/restore the settings,
* and during the first transaction within a backend, the value to be saved
- * and perhaps restored is indeed invalid. We have to be able to get
+ * and perhaps restored is indeed invalid. We have to be able to get
* through AbortTransaction without asserting in case InitPostgres fails.
*/
void
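A sketch of the save/temporarily-change/restore pattern these functions support, as used when running code under another identity; target_userid is a placeholder for whatever role the caller needs to impersonate:

	Oid			save_userid;
	int			save_sec_context;

	GetUserIdAndSecContext(&save_userid, &save_sec_context);
	SetUserIdAndSecContext(target_userid,
						   save_sec_context | SECURITY_LOCAL_USERID_CHANGE);

	/* ... run code as target_userid; SET ROLE is disallowed meanwhile ... */

	SetUserIdAndSecContext(save_userid, save_sec_context);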
@@ -362,7 +362,7 @@ InSecurityRestrictedOperation(void)
/*
* These are obsolete versions of Get/SetUserIdAndSecContext that are
* only provided for bug-compatibility with some rather dubious code in
- * pljava. We allow the userid to be set, but only when not inside a
+ * pljava. We allow the userid to be set, but only when not inside a
* security restriction context.
*/
void
@@ -465,7 +465,7 @@ InitializeSessionUserId(const char *rolename)
* Check connection limit for this role.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs. If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
* exactly seems more trouble than it is worth, however; instead we
@@ -564,7 +564,7 @@ GetCurrentRoleId(void)
* Change Role ID while running (SET ROLE)
*
* If roleid is InvalidOid, we are doing SET ROLE NONE: revert to the
- * session user authorization. In this case the is_superuser argument
+ * session user authorization. In this case the is_superuser argument
* is ignored.
*
* When roleid is not InvalidOid, the caller must have checked whether
@@ -632,7 +632,7 @@ GetUserNameFromId(Oid roleid)
* ($DATADIR/postmaster.pid) and Unix-socket-file lockfiles ($SOCKFILE.lock).
* Both kinds of files contain the same info initially, although we can add
* more information to a data-directory lockfile after it's created, using
- * AddToDataDirLockFile(). See miscadmin.h for documentation of the contents
+ * AddToDataDirLockFile(). See miscadmin.h for documentation of the contents
* of these lockfiles.
*
* On successful lockfile creation, a proc_exit callback to remove the
@@ -721,7 +721,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
my_gp_pid = 0;
/*
- * We need a loop here because of race conditions. But don't loop forever
+ * We need a loop here because of race conditions. But don't loop forever
* (for example, a non-writable $PGDATA directory might cause a failure
* that won't go away). 100 tries seems like plenty.
*/
@@ -730,7 +730,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Try to create the lock file --- O_EXCL makes this atomic.
*
- * Think not to make the file protection weaker than 0600. See
+ * Think not to make the file protection weaker than 0600. See
* comments below.
*/
fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
@@ -798,7 +798,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
* implies that the existing process has a different userid than we
* do, which means it cannot be a competing postmaster. A postmaster
* cannot successfully attach to a data directory owned by a userid
- * other than its own. (This is now checked directly in
+ * other than its own. (This is now checked directly in
* checkDataDir(), but has been true for a long time because of the
* restriction that the data directory isn't group- or
* world-accessible.) Also, since we create the lockfiles mode 600,
@@ -836,9 +836,9 @@ CreateLockFile(const char *filename, bool amPostmaster,
}
/*
- * No, the creating process did not exist. However, it could be that
+ * No, the creating process did not exist. However, it could be that
* the postmaster crashed (or more likely was kill -9'd by a clueless
- * admin) but has left orphan backends behind. Check for this by
+ * admin) but has left orphan backends behind. Check for this by
* looking to see if there is an associated shmem segment that is
* still in use.
*
@@ -879,7 +879,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Looks like nobody's home. Unlink the file and try again to create
- * it. Need a loop because of possible race condition against other
+ * it. Need a loop because of possible race condition against other
* would-be creators.
*/
if (unlink(filename) < 0)
@@ -893,8 +893,8 @@ CreateLockFile(const char *filename, bool amPostmaster,
}
/*
- * Successfully created the file, now fill it. See comment in miscadmin.h
- * about the contents. Note that we write the same first five lines into
+ * Successfully created the file, now fill it. See comment in miscadmin.h
+ * about the contents. Note that we write the same first five lines into
* both datadir and socket lockfiles; although more stuff may get added to
* the datadir lockfile later.
*/
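
As a rough illustration of the retry pattern the CreateLockFile() comments above describe --- O_EXCL for atomic creation, file mode 0600, a bounded number of attempts, and unlink-and-retry when the previous owner is gone --- here is a hedged sketch. It is not PostgreSQL's implementation; in particular the stale-owner check (reading the PID and looking for an associated shmem segment) is only hinted at in a comment.

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static int
create_lockfile_sketch(const char *filename)
{
	/* Don't loop forever; 100 tries, in the spirit of the comment above. */
	for (int ntries = 0; ntries < 100; ntries++)
	{
		/* O_EXCL makes creation atomic; keep the protection at 0600. */
		int			fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);

		if (fd >= 0)
			return fd;			/* success: we own the lock file */
		if (errno != EEXIST)
			return -1;			/* unexpected failure, give up */

		/*
		 * The file already exists.  A real implementation would read the
		 * PID recorded in it and verify that the process (and any shared
		 * memory segment it left behind) is really gone before removing
		 * the file and retrying.
		 */
		if (unlink(filename) < 0)
			return -1;
	}
	return -1;
}
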
@@ -1263,7 +1263,7 @@ load_libraries(const char *libraries, const char *gucname, bool restricted)
/*
* Choose notice level: avoid repeat messages when re-loading a library
- * that was preloaded into the postmaster. (Only possible in EXEC_BACKEND
+ * that was preloaded into the postmaster. (Only possible in EXEC_BACKEND
* configurations)
*/
#ifdef EXEC_BACKEND
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index e0abff1145a..da5638173fd 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -80,7 +80,7 @@ static void process_settings(Oid databaseid, Oid roleid);
* GetDatabaseTuple -- fetch the pg_database row for a database
*
* This is used during backend startup when we don't yet have any access to
- * system catalogs in general. In the worst case, we can seqscan pg_database
+ * system catalogs in general. In the worst case, we can seqscan pg_database
* using nothing but the hard-wired descriptor that relcache.c creates for
* pg_database. In more typical cases, relcache.c was able to load
* descriptors for both pg_database and its indexes from the shared relcache
@@ -104,7 +104,7 @@ GetDatabaseTuple(const char *dbname)
CStringGetDatum(dbname));
/*
- * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
+ * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
* built the critical shared relcache entries (i.e., we're starting up
* without a shared relcache cache file).
*/
@@ -147,7 +147,7 @@ GetDatabaseTupleByOid(Oid dboid)
ObjectIdGetDatum(dboid));
/*
- * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
+ * Open pg_database and fetch a tuple. Force heap scan if we haven't yet
* built the critical shared relcache entries (i.e., we're starting up
* without a shared relcache cache file).
*/
@@ -186,7 +186,7 @@ PerformAuthentication(Port *port)
* In EXEC_BACKEND case, we didn't inherit the contents of pg_hba.conf
* etcetera from the postmaster, and have to load them ourselves.
*
- * FIXME: [fork/exec] Ugh. Is there a way around this overhead?
+ * FIXME: [fork/exec] Ugh. Is there a way around this overhead?
*/
#ifdef EXEC_BACKEND
if (!load_hba())
@@ -292,7 +292,7 @@ CheckMyDatabase(const char *name, bool am_superuser)
name)));
/*
- * Check privilege to connect to the database. (The am_superuser test
+ * Check privilege to connect to the database. (The am_superuser test
* is redundant, but since we have the flag, might as well check it
* and save a few cycles.)
*/
@@ -308,7 +308,7 @@ CheckMyDatabase(const char *name, bool am_superuser)
* Check connection limit for this database.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs. If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
* exactly seems more trouble than it is worth, however; instead we
@@ -478,7 +478,7 @@ BaseInit(void)
* Initialize POSTGRES.
*
* The database can be specified by name, using the in_dbname parameter, or by
- * OID, using the dboid parameter. In the latter case, the actual database
+ * OID, using the dboid parameter. In the latter case, the actual database
* name can be returned to the caller in out_dbname. If out_dbname isn't
* NULL, it must point to a buffer of size NAMEDATALEN.
*
@@ -895,7 +895,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
/*
* Now process any command-line switches and any additional GUC variable
- * settings passed in the startup packet. We couldn't do this before
+ * settings passed in the startup packet. We couldn't do this before
* because we didn't know if client is a superuser.
*/
if (MyProcPort != NULL)
diff --git a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
index ca965390557..68615726552 100644
--- a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
+++ b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
@@ -231,7 +231,7 @@ static unsigned short BinarySearchRange
/*
* NOTE: big5 high_byte: 0xa1-0xfe, low_byte: 0x40-0x7e,
* 0xa1-0xfe (radicals: 0x00-0x3e, 0x3f-0x9c) big5 radix is
- * 0x9d. [region_low, region_high] We
+ * 0x9d. [region_low, region_high] We
* should remember big5 has two different regions (above).
* There is a bias for the distance between these regions.
* 0xa1 - 0x7e + bias = 1 (Distance between 0xa1 and 0x7e is
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index 4582219af73..96f7c74d2d1 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -29,7 +29,7 @@
/*
* We maintain a simple linked list caching the fmgr lookup info for the
* currently selected conversion functions, as well as any that have been
- * selected previously in the current session. (We remember previous
+ * selected previously in the current session. (We remember previous
* settings because we must be able to restore a previous setting during
* transaction rollback, without doing any fresh catalog accesses.)
*
@@ -76,7 +76,7 @@ static int cliplen(const char *str, int len, int limit);
/*
- * Prepare for a future call to SetClientEncoding. Success should mean
+ * Prepare for a future call to SetClientEncoding. Success should mean
* that SetClientEncoding is guaranteed to succeed for this encoding request.
*
* (But note that success before backend_startup_complete does not guarantee
@@ -148,7 +148,7 @@ PrepareClientEncoding(int encoding)
/*
* We cannot yet remove any older entry for the same encoding pair,
- * since it could still be in use. SetClientEncoding will clean up.
+ * since it could still be in use. SetClientEncoding will clean up.
*/
return 0; /* success */
@@ -157,8 +157,8 @@ PrepareClientEncoding(int encoding)
{
/*
* If we're not in a live transaction, the only thing we can do is
- * restore a previous setting using the cache. This covers all
- * transaction-rollback cases. The only case it might not work for is
+ * restore a previous setting using the cache. This covers all
+ * transaction-rollback cases. The only case it might not work for is
* trying to change client_encoding on the fly by editing
* postgresql.conf and SIGHUP'ing. Which would probably be a stupid
* thing to do anyway.
@@ -316,7 +316,7 @@ pg_get_client_encoding_name(void)
*
* CAUTION: although the presence of a length argument means that callers
* can pass non-null-terminated strings, care is required because the same
- * string will be passed back if no conversion occurs. Such callers *must*
+ * string will be passed back if no conversion occurs. Such callers *must*
* check whether result == src and handle that case differently.
*
* Note: we try to avoid raising error, since that could get us into
@@ -572,7 +572,7 @@ pg_any_to_server(const char *s, int len, int encoding)
* the selected client_encoding. If the client encoding is ASCII-safe
* then we just do a straight validation under that encoding. For an
* ASCII-unsafe encoding we have a problem: we dare not pass such data
- * to the parser but we have no way to convert it. We compromise by
+ * to the parser but we have no way to convert it. We compromise by
* rejecting the data if it contains any non-ASCII characters.
*/
if (PG_VALID_BE_ENCODING(encoding))
diff --git a/src/backend/utils/mb/wchar.c b/src/backend/utils/mb/wchar.c
index 45bc3c1604b..dbb7b77d867 100644
--- a/src/backend/utils/mb/wchar.c
+++ b/src/backend/utils/mb/wchar.c
@@ -1512,7 +1512,7 @@ pg_utf8_islegal(const unsigned char *source, int length)
*
* Not knowing anything about the properties of the encoding in use, we just
* keep incrementing the last byte until we get a validly-encoded result,
- * or we run out of values to try. We don't bother to try incrementing
+ * or we run out of values to try. We don't bother to try incrementing
* higher-order bytes, so there's no growth in runtime for wider characters.
* (If we did try to do that, we'd need to consider the likelihood that 255
* is not a valid final byte in the encoding.)
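
A sketch of the generic incrementer strategy just described, assuming a caller-supplied validity check; the callback type and names are stand-ins, not PostgreSQL's actual incrementer API.

#include <stdbool.h>

typedef bool (*charseq_verifier) (const unsigned char *s, int len);

static bool
generic_charinc_sketch(unsigned char *charptr, int len, charseq_verifier verify)
{
	unsigned char *lastbyte = charptr + len - 1;

	/* Keep bumping the last byte until the encoding accepts the result. */
	while (*lastbyte < (unsigned char) 255)
	{
		(*lastbyte)++;
		if (verify(charptr, len))
			return true;		/* found a validly-encoded successor */
	}
	/* A real implementation would restore the original byte before failing. */
	return false;				/* ran out of values to try */
}
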
@@ -1542,7 +1542,7 @@ pg_generic_charinc(unsigned char *charptr, int len)
* For a one-byte character less than 0x7F, we just increment the byte.
*
* For a multibyte character, every byte but the first must fall between 0x80
- * and 0xBF; and the first byte must be between 0xC0 and 0xF4. We increment
+ * and 0xBF; and the first byte must be between 0xC0 and 0xF4. We increment
* the last byte that's not already at its maximum value. If we can't find a
* byte that's less than the maximum allowable value, we simply fail. We also
* need some special-case logic to skip regions used for surrogate pair
diff --git a/src/backend/utils/mb/wstrcmp.c b/src/backend/utils/mb/wstrcmp.c
index 64a9cf848e2..dad3ae023a3 100644
--- a/src/backend/utils/mb/wstrcmp.c
+++ b/src/backend/utils/mb/wstrcmp.c
@@ -23,7 +23,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/utils/mb/wstrncmp.c b/src/backend/utils/mb/wstrncmp.c
index 87c1f5afdaa..ea4823fc6f8 100644
--- a/src/backend/utils/mb/wstrncmp.c
+++ b/src/backend/utils/mb/wstrncmp.c
@@ -22,7 +22,7 @@
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index a73f022d1a0..2b6527f012a 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -3640,7 +3640,7 @@ get_guc_variables(void)
/*
- * Build the sorted array. This is split out so that it could be
+ * Build the sorted array. This is split out so that it could be
* re-executed after startup (eg, we could allow loadable modules to
* add vars, and then we'd need to re-sort).
*/
@@ -3797,7 +3797,7 @@ add_placeholder_variable(const char *name, int elevel)
/*
* The char* is allocated at the end of the struct since we have no
- * 'static' place to point to. Note that the current value, as well as
+ * 'static' place to point to. Note that the current value, as well as
* the boot and reset values, start out NULL.
*/
var->variable = (char **) (var + 1);
@@ -3839,7 +3839,7 @@ find_option(const char *name, bool create_placeholders, int elevel)
return *res;
/*
- * See if the name is an obsolete name for a variable. We assume that the
+ * See if the name is an obsolete name for a variable. We assume that the
* set of supported old names is short enough that a brute-force search is
* the best way.
*/
@@ -4495,7 +4495,7 @@ NewGUCNestLevel(void)
/*
* Do GUC processing at transaction or subtransaction commit or abort, or
* when exiting a function that has proconfig settings, or when undoing a
- * transient assignment to some GUC variables. (The name is thus a bit of
+ * transient assignment to some GUC variables. (The name is thus a bit of
* a misnomer; perhaps it should be ExitGUCNestLevel or some such.)
* During abort, we discard all GUC settings that were applied at nesting
* levels >= nestLevel. nestLevel == 1 corresponds to the main transaction.
@@ -5296,7 +5296,7 @@ set_config_option(const char *name, const char *value,
* If a PGC_BACKEND parameter is changed in the config file,
* we want to accept the new value in the postmaster (whence
* it will propagate to subsequently-started backends), but
- * ignore it in existing backends. This is a tad klugy, but
+ * ignore it in existing backends. This is a tad klugy, but
* necessary because we don't re-read the config file during
* backend start.
*
@@ -5353,7 +5353,7 @@ set_config_option(const char *name, const char *value,
* An exception might be made if the reset value is assumed to be "safe".
*
* Note: this flag is currently used for "session_authorization" and
- * "role". We need to prohibit changing these inside a local userid
+ * "role". We need to prohibit changing these inside a local userid
* context because when we exit it, GUC won't be notified, leaving things
* out of sync. (This could be fixed by forcing a new GUC nesting level,
* but that would change behavior in possibly-undesirable ways.) Also, we
@@ -6218,7 +6218,7 @@ flatten_set_variable_args(const char *name, List *args)
else
{
/*
- * Plain string literal or identifier. For quote mode,
+ * Plain string literal or identifier. For quote mode,
* quote it if it's not a vanilla identifier.
*/
if (flags & GUC_LIST_QUOTE)
@@ -6535,7 +6535,7 @@ define_custom_variable(struct config_generic * variable)
* variable. Essentially, we need to duplicate all the active and stacked
* values, but with appropriate validation and datatype adjustment.
*
- * If an assignment fails, we report a WARNING and keep going. We don't
+ * If an assignment fails, we report a WARNING and keep going. We don't
* want to throw ERROR for bad values, because it'd bollix the add-on
* module that's presumably halfway through getting loaded. In such cases
* the default or previous state will become active instead.
@@ -6563,7 +6563,7 @@ define_custom_variable(struct config_generic * variable)
/*
* Free up as much as we conveniently can of the placeholder structure.
* (This neglects any stack items, so it's possible for some memory to be
- * leaked. Since this can only happen once per session per variable, it
+ * leaked. Since this can only happen once per session per variable, it
* doesn't seem worth spending much code on.)
*/
set_string_field(pHolder, pHolder->variable, NULL);
@@ -6636,7 +6636,7 @@ reapply_stacked_values(struct config_generic * variable,
else
{
/*
- * We are at the end of the stack. If the active/previous value is
+ * We are at the end of the stack. If the active/previous value is
* different from the reset value, it must represent a previously
* committed session value. Apply it, and then drop the stack entry
* that set_config_option will have created under the impression that
@@ -7823,7 +7823,7 @@ ParseLongOption(const char *string, char **name, char **value)
/*
* Handle options fetched from pg_db_role_setting.setconfig,
- * pg_proc.proconfig, etc. Caller must specify proper context/source/action.
+ * pg_proc.proconfig, etc. Caller must specify proper context/source/action.
*
* The array parameter must be an array of TEXT (it must not be NULL).
*/
@@ -8105,7 +8105,7 @@ GUCArrayReset(ArrayType *array)
* Validate a proposed option setting for GUCArrayAdd/Delete/Reset.
*
* name is the option name. value is the proposed value for the Add case,
- * or NULL for the Delete/Reset cases. If skipIfNoPermissions is true, it's
+ * or NULL for the Delete/Reset cases. If skipIfNoPermissions is true, it's
* not an error to have no permissions to set the option.
*
* Returns TRUE if OK, FALSE if skipIfNoPermissions is true and user does not
@@ -8186,7 +8186,7 @@ validate_option_array_item(const char *name, const char *value,
* ERRCODE_INVALID_PARAMETER_VALUE SQLSTATE for check hook failures.
*
* Note that GUC_check_errmsg() etc are just macros that result in a direct
- * assignment to the associated variables. That is ugly, but forced by the
+ * assignment to the associated variables. That is ugly, but forced by the
* limitations of C's macro mechanisms.
*/
void
diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c
index 57aa38435ac..33d275a999c 100644
--- a/src/backend/utils/misc/ps_status.c
+++ b/src/backend/utils/misc/ps_status.c
@@ -109,7 +109,7 @@ static char **save_argv;
* from being clobbered by subsequent ps_display actions.
*
* (The original argv[] will not be overwritten by this routine, but may be
- * overwritten during init_ps_display. Also, the physical location of the
+ * overwritten during init_ps_display. Also, the physical location of the
* environment strings may be moved, so this should be called before any code
* that might try to hang onto a getenv() result.)
*/
@@ -210,7 +210,7 @@ save_ps_display_args(int argc, char **argv)
/*
* Call this once during subprocess startup to set the identification
- * values. At this point, the original argv[] array may be overwritten.
+ * values. At this point, the original argv[] array may be overwritten.
*/
void
init_ps_display(const char *username, const char *dbname,
@@ -360,7 +360,7 @@ set_ps_display(const char *activity, bool force)
/*
* Returns what's currently in the ps display, in case someone needs
- * it. Note that only the activity part is returned. On some platforms
+ * it. Note that only the activity part is returned. On some platforms
* the string will not be null-terminated, so return the effective
* length into *displen.
*/
diff --git a/src/backend/utils/misc/rbtree.c b/src/backend/utils/misc/rbtree.c
index 58e797ce1ee..9d545e74d3e 100644
--- a/src/backend/utils/misc/rbtree.c
+++ b/src/backend/utils/misc/rbtree.c
@@ -13,7 +13,7 @@
*
* Red-black trees are a type of balanced binary tree wherein (1) any child of
* a red node is always black, and (2) every path from root to leaf traverses
- * an equal number of black nodes. From these properties, it follows that the
+ * an equal number of black nodes. From these properties, it follows that the
* longest path from root to leaf is only about twice as long as the shortest,
* so lookups are guaranteed to run in O(lg n) time.
*
@@ -102,7 +102,7 @@ static RBNode sentinel = {InitialState, RBBLACK, RBNIL, RBNIL, NULL};
* valid data! freefunc can be NULL if caller doesn't require retail
* space reclamation.
*
- * The RBTree node is palloc'd in the caller's memory context. Note that
+ * The RBTree node is palloc'd in the caller's memory context. Note that
* all contents of the tree are actually allocated by the caller, not here.
*
* Since tree contents are managed by the caller, there is currently not
@@ -282,10 +282,10 @@ rb_rotate_right(RBTree *rb, RBNode *x)
/*
* Maintain Red-Black tree balance after inserting node x.
*
- * The newly inserted node is always initially marked red. That may lead to
+ * The newly inserted node is always initially marked red. That may lead to
* a situation where a red node has a red child, which is prohibited. We can
* always fix the problem by a series of color changes and/or "rotations",
- * which move the problem progressively higher up in the tree. If one of the
+ * which move the problem progressively higher up in the tree. If one of the
* two red nodes is the root, we can always fix the problem by changing the
* root from red to black.
*
@@ -296,7 +296,7 @@ static void
rb_insert_fixup(RBTree *rb, RBNode *x)
{
/*
- * x is always a red node. Initially, it is the newly inserted node. Each
+ * x is always a red node. Initially, it is the newly inserted node. Each
* iteration of this loop moves it higher up in the tree.
*/
while (x != rb->root && x->parent->color == RBRED)
@@ -481,7 +481,7 @@ rb_delete_fixup(RBTree *rb, RBNode *x)
while (x != rb->root && x->color == RBBLACK)
{
/*
- * Left and right cases are symmetric. Any nodes that are children of
+ * Left and right cases are symmetric. Any nodes that are children of
* x have a black-height one less than the remainder of the nodes in
* the tree. We rotate and recolor nodes to move the problem up the
* tree: at some stage we'll either fix the problem, or reach the root
diff --git a/src/backend/utils/misc/timeout.c b/src/backend/utils/misc/timeout.c
index 05925390438..51f5df1c13b 100644
--- a/src/backend/utils/misc/timeout.c
+++ b/src/backend/utils/misc/timeout.c
@@ -57,7 +57,7 @@ static timeout_params *volatile active_timeouts[MAX_TIMEOUTS];
* Note that we don't bother to reset any pending timer interrupt when we
* disable the signal handler; it's not really worth the cycles to do so,
* since the probability of the interrupt actually occurring while we have
- * it disabled is low. See comments in schedule_alarm() about that.
+ * it disabled is low. See comments in schedule_alarm() about that.
*/
static volatile sig_atomic_t alarm_enabled = false;
@@ -69,7 +69,7 @@ static volatile sig_atomic_t alarm_enabled = false;
* Internal helper functions
*
* For all of these, it is caller's responsibility to protect them from
- * interruption by the signal handler. Generally, call disable_alarm()
+ * interruption by the signal handler. Generally, call disable_alarm()
* first to prevent interruption, then update state, and last call
* schedule_alarm(), which will re-enable the signal handler if needed.
*****************************************************************************/
@@ -144,7 +144,7 @@ enable_timeout(TimeoutId id, TimestampTz now, TimestampTz fin_time)
Assert(all_timeouts[id].timeout_handler != NULL);
/*
- * If this timeout was already active, momentarily disable it. We
+ * If this timeout was already active, momentarily disable it. We
* interpret the call as a directive to reschedule the timeout.
*/
i = find_active_timeout(id);
@@ -152,7 +152,7 @@ enable_timeout(TimeoutId id, TimestampTz now, TimestampTz fin_time)
remove_timeout_index(i);
/*
- * Find out the index where to insert the new timeout. We sort by
+ * Find out the index where to insert the new timeout. We sort by
* fin_time, and for equal fin_time by priority.
*/
for (i = 0; i < num_active_timeouts; i++)
@@ -214,18 +214,18 @@ schedule_alarm(TimestampTz now)
*
* Because we didn't bother to reset the timer in disable_alarm(),
* it's possible that a previously-set interrupt will fire between
- * enable_alarm() and setitimer(). This is safe, however. There are
+ * enable_alarm() and setitimer(). This is safe, however. There are
* two possible outcomes:
*
* 1. The signal handler finds nothing to do (because the nearest
* timeout event is still in the future). It will re-set the timer
- * and return. Then we'll overwrite the timer value with a new one.
+ * and return. Then we'll overwrite the timer value with a new one.
* This will mean that the timer fires a little later than we
* intended, but only by the amount of time it takes for the signal
* handler to do nothing useful, which shouldn't be much.
*
* 2. The signal handler executes and removes one or more timeout
- * events. When it returns, either the queue is now empty or the
+ * events. When it returns, either the queue is now empty or the
* frontmost event is later than the one we looked at above. So we'll
* overwrite the timer value with one that is too soon (plus or minus
* the signal handler's execution time), causing a useless interrupt
@@ -266,14 +266,14 @@ handle_sig_alarm(SIGNAL_ARGS)
* mainline is waiting for a lock). If SIGINT or similar arrives while
* this code is running, we'd lose control and perhaps leave our data
* structures in an inconsistent state. Disable immediate interrupts, and
- * just to be real sure, bump the holdoff counter as well. (The reason
+ * just to be real sure, bump the holdoff counter as well. (The reason
* for this belt-and-suspenders-too approach is to make sure that nothing
* bad happens if a timeout handler calls code that manipulates
* ImmediateInterruptOK.)
*
* Note: it's possible for a SIGINT to interrupt handle_sig_alarm before
* we manage to do this; the net effect would be as if the SIGALRM event
- * had been silently lost. Therefore error recovery must include some
+ * had been silently lost. Therefore error recovery must include some
* action that will allow any lost interrupt to be rescheduled. Disabling
* some or all timeouts is sufficient, or if that's not appropriate,
* reschedule_timeouts() can be called. Also, the signal blocking hazard
@@ -434,7 +434,7 @@ RegisterTimeout(TimeoutId id, timeout_handler_proc handler)
*
* This can be used during error recovery in case query cancel resulted in loss
* of a SIGALRM event (due to longjmp'ing out of handle_sig_alarm before it
- * could do anything). But note it's not necessary if any of the public
+ * could do anything). But note it's not necessary if any of the public
* enable_ or disable_timeout functions are called in the same area, since
* those all do schedule_alarm() internally if needed.
*/
@@ -503,7 +503,7 @@ enable_timeout_at(TimeoutId id, TimestampTz fin_time)
* Enable multiple timeouts at once.
*
* This works like calling enable_timeout_after() and/or enable_timeout_at()
- * multiple times. Use this to reduce the number of GetCurrentTimestamp()
+ * multiple times. Use this to reduce the number of GetCurrentTimestamp()
* and setitimer() calls needed to establish multiple timeouts.
*/
void
diff --git a/src/backend/utils/misc/tzparser.c b/src/backend/utils/misc/tzparser.c
index 93e42db7a51..8638cd92546 100644
--- a/src/backend/utils/misc/tzparser.c
+++ b/src/backend/utils/misc/tzparser.c
@@ -4,7 +4,7 @@
* Functions for parsing timezone offset files
*
* Note: this code is invoked from the check_hook for the GUC variable
- * timezone_abbreviations. Therefore, it should report problems using
+ * timezone_abbreviations. Therefore, it should report problems using
* GUC_check_errmsg() and related functions, and try to avoid throwing
* elog(ERROR). This is not completely bulletproof at present --- in
* particular out-of-memory will throw an error. Could probably fix with
@@ -179,7 +179,7 @@ addToArray(tzEntry **base, int *arraysize, int n,
/*
* Search the array for a duplicate; as a useful side effect, the array is
- * maintained in sorted order. We use strcmp() to ensure we match the
+ * maintained in sorted order. We use strcmp() to ensure we match the
* sort order datetime.c expects.
*/
arrayptr = *base;
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index 6a111c78329..94cc8117133 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -38,7 +38,7 @@
* request, even if it was much larger than necessary. This led to more
* and more wasted space in allocated chunks over time. To fix, get rid
* of the midrange behavior: we now handle only "small" power-of-2-size
- * chunks as chunks. Anything "large" is passed off to malloc(). Change
+ * chunks as chunks. Anything "large" is passed off to malloc(). Change
* the number of freelists to change the small/large boundary.
*
*
@@ -54,7 +54,7 @@
* Thus, if someone makes the common error of writing past what they've
* requested, the problem is likely to go unnoticed ... until the day when
* there *isn't* any wasted space, perhaps because of different memory
- * alignment on a new platform, or some other effect. To catch this sort
+ * alignment on a new platform, or some other effect. To catch this sort
* of problem, the MEMORY_CONTEXT_CHECKING option stores 0x7E just beyond
* the requested space whenever the request is less than the actual chunk
* size, and verifies that the byte is undamaged when the chunk is freed.
@@ -153,7 +153,7 @@ typedef AllocSetContext *AllocSet;
/*
* AllocBlock
* An AllocBlock is the unit of memory that is obtained by aset.c
- * from malloc(). It contains one or more AllocChunks, which are
+ * from malloc(). It contains one or more AllocChunks, which are
* the units requested by palloc() and freed by pfree(). AllocChunks
* cannot be returned to malloc() individually, instead they are put
* on freelists by pfree() and re-used by the next palloc() that has
@@ -290,7 +290,7 @@ AllocSetFreeIndex(Size size)
/*
* At this point we need to obtain log2(tsize)+1, ie, the number of
- * not-all-zero bits at the right. We used to do this with a
+ * not-all-zero bits at the right. We used to do this with a
* shift-and-count loop, but this function is enough of a hotspot to
* justify micro-optimization effort. The best approach seems to be
* to use a lookup table. Note that this code assumes that
@@ -457,7 +457,7 @@ AllocSetInit(MemoryContext context)
* Actually, this routine has some discretion about what to do.
* It should mark all allocated chunks freed, but it need not necessarily
* give back all the resources the set owns. Our actual implementation is
- * that we hang onto any "keeper" block specified for the set. In this way,
+ * that we hang onto any "keeper" block specified for the set. In this way,
* we don't thrash malloc() when a context is repeatedly reset after small
* allocations, which is typical behavior for per-tuple contexts.
*/
@@ -690,7 +690,7 @@ AllocSetAlloc(MemoryContext context, Size size)
/*
* In most cases, we'll get back the index of the next larger
- * freelist than the one we need to put this chunk on. The
+ * freelist than the one we need to put this chunk on. The
* exception is when availchunk is exactly a power of 2.
*/
if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS)))
@@ -836,7 +836,7 @@ AllocSetFree(MemoryContext context, void *pointer)
{
/*
* Big chunks are certain to have been allocated as single-chunk
- * blocks. Find the containing block and return it to malloc().
+ * blocks. Find the containing block and return it to malloc().
*/
AllocBlock block = set->blocks;
AllocBlock prevblock = NULL;
@@ -932,7 +932,7 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
if (oldsize > set->allocChunkLimit)
{
/*
- * The chunk must have been allocated as a single-chunk block. Find
+ * The chunk must have been allocated as a single-chunk block. Find
* the containing block and use realloc() to make it bigger with
* minimum space wastage.
*/
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index 6118e12eaf8..7369ec36232 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -166,7 +166,7 @@ MemoryContextResetChildren(MemoryContext context)
*
* The type-specific delete routine removes all subsidiary storage
* for the context, but we have to delete the context node itself,
- * as well as recurse to get the children. We must also delink the
+ * as well as recurse to get the children. We must also delink the
* node from its parent, if it has one.
*/
void
@@ -476,22 +476,22 @@ MemoryContextContains(MemoryContext context, void *pointer)
* we want to be sure that we don't leave the context tree invalid
* in case of failure (such as insufficient memory to allocate the
* context node itself). The procedure goes like this:
- * 1. Context-type-specific routine first calls MemoryContextCreate(),
+ * 1. Context-type-specific routine first calls MemoryContextCreate(),
* passing the appropriate tag/size/methods values (the methods
* pointer will ordinarily point to statically allocated data).
* The parent and name parameters usually come from the caller.
- * 2. MemoryContextCreate() attempts to allocate the context node,
+ * 2. MemoryContextCreate() attempts to allocate the context node,
* plus space for the name. If this fails we can ereport() with no
* damage done.
- * 3. We fill in all of the type-independent MemoryContext fields.
- * 4. We call the type-specific init routine (using the methods pointer).
+ * 3. We fill in all of the type-independent MemoryContext fields.
+ * 4. We call the type-specific init routine (using the methods pointer).
* The init routine is required to make the node minimally valid
* with zero chance of failure --- it can't allocate more memory,
* for example.
- * 5. Now we have a minimally valid node that can behave correctly
+ * 5. Now we have a minimally valid node that can behave correctly
* when told to reset or delete itself. We link the node to its
* parent (if any), making the node part of the context tree.
- * 6. We return to the context-type-specific routine, which finishes
+ * 6. We return to the context-type-specific routine, which finishes
* up type-specific initialization. This routine can now do things
* that might fail (like allocate more memory), so long as it's
* sure the node is left in a state that delete will handle.
@@ -503,7 +503,7 @@ MemoryContextContains(MemoryContext context, void *pointer)
*
* Normally, the context node and the name are allocated from
* TopMemoryContext (NOT from the parent context, since the node must
- * survive resets of its parent context!). However, this routine is itself
+ * survive resets of its parent context!). However, this routine is itself
* used to create TopMemoryContext! If we see that TopMemoryContext is NULL,
* we assume we are creating TopMemoryContext and use malloc() to allocate
* the node.
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 54bf16ee2f7..e7cc11fa0ea 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -144,14 +144,14 @@ GetPortalByName(const char *name)
* Get the "primary" stmt within a portal, ie, the one marked canSetTag.
*
* Returns NULL if no such stmt. If multiple PlannedStmt structs within the
- * portal are marked canSetTag, returns the first one. Neither of these
+ * portal are marked canSetTag, returns the first one. Neither of these
* cases should occur in present usages of this function.
*
* Copes if given a list of Querys --- can't happen in a portal, but this
* code also supports plancache.c, which needs both cases.
*
* Note: the reason this is just handed a List is so that plancache.c
- * can share the code. For use with a portal, use PortalGetPrimaryStmt
+ * can share the code. For use with a portal, use PortalGetPrimaryStmt
* rather than calling this directly.
*/
Node *
@@ -277,7 +277,7 @@ CreateNewPortal(void)
* you can pass a constant string, perhaps "(query not available)".)
*
* commandTag shall be NULL if and only if the original query string
- * (before rewriting) was an empty string. Also, the passed commandTag must
+ * (before rewriting) was an empty string. Also, the passed commandTag must
* be a pointer to a constant string, since it is not copied.
*
* If cplan is provided, then it is a cached plan containing the stmts, and
@@ -480,14 +480,14 @@ PortalDrop(Portal portal, bool isTopCommit)
/*
* Allow portalcmds.c to clean up the state it knows about, in particular
- * shutting down the executor if still active. This step potentially runs
+ * shutting down the executor if still active. This step potentially runs
* user-defined code so failure has to be expected. It's the cleanup
* hook's responsibility to not try to do that more than once, in the case
* that failure occurs and then we come back to drop the portal again
* during transaction abort.
*
* Note: in most paths of control, this will have been done already in
- * MarkPortalDone or MarkPortalFailed. We're just making sure.
+ * MarkPortalDone or MarkPortalFailed. We're just making sure.
*/
if (PointerIsValid(portal->cleanup))
{
@@ -507,12 +507,12 @@ PortalDrop(Portal portal, bool isTopCommit)
PortalReleaseCachedPlan(portal);
/*
- * Release any resources still attached to the portal. There are several
+ * Release any resources still attached to the portal. There are several
* cases being covered here:
*
* Top transaction commit (indicated by isTopCommit): normally we should
* do nothing here and let the regular end-of-transaction resource
- * releasing mechanism handle these resources too. However, if we have a
+ * releasing mechanism handle these resources too. However, if we have a
* FAILED portal (eg, a cursor that got an error), we'd better clean up
* its resources to avoid resource-leakage warning messages.
*
@@ -524,7 +524,7 @@ PortalDrop(Portal portal, bool isTopCommit)
* cleaned up in transaction abort.
*
* Ordinary portal drop: must release resources. However, if the portal
- * is not FAILED then we do not release its locks. The locks become the
+ * is not FAILED then we do not release its locks. The locks become the
* responsibility of the transaction's ResourceOwner (since it is the
* parent of the portal's owner) and will be released when the transaction
* eventually ends.
@@ -611,7 +611,7 @@ PortalHashTableDeleteAll(void)
* Holdable cursors created in this transaction need to be converted to
* materialized form, since we are going to close down the executor and
* release locks. Non-holdable portals created in this transaction are
- * simply removed. Portals remaining from prior transactions should be
+ * simply removed. Portals remaining from prior transactions should be
* left untouched.
*
* Returns TRUE if any portals changed state (possibly causing user-defined
diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c
index e7ec3931f12..fd2926d083a 100644
--- a/src/backend/utils/resowner/resowner.c
+++ b/src/backend/utils/resowner/resowner.c
@@ -171,7 +171,7 @@ ResourceOwnerCreate(ResourceOwner parent, const char *name)
* but don't delete the owner objects themselves.
*
* Note that this executes just one phase of release, and so typically
- * must be called three times. We do it this way because (a) we want to
+ * must be called three times. We do it this way because (a) we want to
* do all the recursion separately for each phase, thereby preserving
* the needed order of operations; and (b) xact.c may have other operations
* to do between the phases.
@@ -245,7 +245,7 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
*
* During a commit, there shouldn't be any remaining pins --- that
* would indicate failure to clean up the executor correctly --- so
- * issue warnings. In the abort case, just clean up quietly.
+ * issue warnings. In the abort case, just clean up quietly.
*
* We are careful to do the releasing back-to-front, so as to avoid
* O(N^2) behavior in ResourceOwnerForgetBuffer().
@@ -417,7 +417,7 @@ ResourceOwnerDelete(ResourceOwner owner)
/*
* We delink the owner from its parent before deleting it, so that if
* there's an error we won't have deleted/busted owners still attached to
- * the owner tree. Better a leak than a crash.
+ * the owner tree. Better a leak than a crash.
*/
ResourceOwnerNewParent(owner, NULL);
@@ -609,7 +609,7 @@ ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
/*
* Scan back-to-front because it's more likely we are releasing a
- * recently pinned buffer. This isn't always the case of course, but
+ * recently pinned buffer. This isn't always the case of course, but
* it's the way to bet.
*/
for (i = nb1; i >= 0; i--)
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index feaabe2f698..f84dc12f797 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -7,14 +7,14 @@
* tuplesort.c). Merging is an ideal algorithm for tape devices, but if
* we implement it on disk by creating a separate file for each "tape",
* there is an annoying problem: the peak space usage is at least twice
- * the volume of actual data to be sorted. (This must be so because each
+ * the volume of actual data to be sorted. (This must be so because each
* datum will appear in both the input and output tapes of the final
- * merge pass. For seven-tape polyphase merge, which is otherwise a
+ * merge pass. For seven-tape polyphase merge, which is otherwise a
* pretty good algorithm, peak usage is more like 4x actual data volume.)
*
* We can work around this problem by recognizing that any one tape
* dataset (with the possible exception of the final output) is written
- * and read exactly once in a perfectly sequential manner. Therefore,
+ * and read exactly once in a perfectly sequential manner. Therefore,
* a datum once read will not be required again, and we can recycle its
* space for use by the new tape dataset(s) being generated. In this way,
* the total space usage is essentially just the actual data volume, plus
@@ -55,7 +55,7 @@
* To support the above policy of writing to the lowest free block,
* ltsGetFreeBlock sorts the list of free block numbers into decreasing
* order each time it is asked for a block and the list isn't currently
- * sorted. This is an efficient way to handle it because we expect cycles
+ * sorted. This is an efficient way to handle it because we expect cycles
* of releasing many blocks followed by re-using many blocks, due to
* tuplesort.c's "preread" behavior.
*
@@ -117,7 +117,7 @@ typedef struct LogicalTape
/*
* The total data volume in the logical tape is numFullBlocks * BLCKSZ +
- * lastBlockBytes. BUT: we do not update lastBlockBytes during writing,
+ * lastBlockBytes. BUT: we do not update lastBlockBytes during writing,
* only at completion of a write phase.
*/
long numFullBlocks; /* number of complete blocks in log tape */
@@ -157,7 +157,7 @@ struct LogicalTapeSet
*
* If blocksSorted is true then the block numbers in freeBlocks are in
* *decreasing* order, so that removing the last entry gives us the lowest
- * free block. We re-sort the blocks whenever a block is demanded; this
+ * free block. We re-sort the blocks whenever a block is demanded; this
* should be reasonably efficient given the expected usage pattern.
*/
bool forgetFreeSpace; /* are we remembering free blocks? */
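
A small sketch of the free-block policy described just above, assuming simplified field names rather than logtape.c's own: the list is lazily sorted into decreasing order so that popping the last element always yields the lowest free block, and when no free block exists the underlying file is simply extended.

#include <stdlib.h>

typedef struct
{
	long	   *freeBlocks;		/* caller-managed array of free block numbers */
	int			nFreeBlocks;
	int			blocksSorted;	/* is freeBlocks in decreasing order? */
	long		nFileBlocks;	/* number of blocks allocated so far */
} FreeSpaceSketch;

static int
cmp_block_desc(const void *a, const void *b)
{
	long		la = *(const long *) a;
	long		lb = *(const long *) b;

	return (la < lb) ? 1 : (la > lb) ? -1 : 0;
}

static long
get_free_block_sketch(FreeSpaceSketch *fs)
{
	if (fs->nFreeBlocks > 0)
	{
		if (!fs->blocksSorted)
		{
			qsort(fs->freeBlocks, fs->nFreeBlocks, sizeof(long),
				  cmp_block_desc);
			fs->blocksSorted = 1;
		}
		return fs->freeBlocks[--fs->nFreeBlocks];	/* lowest free block */
	}
	return fs->nFileBlocks++;	/* no free block: extend the file */
}
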
@@ -218,7 +218,7 @@ ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
/*
* Read a block-sized buffer from the specified block of the underlying file.
*
- * No need for an error return convention; we ereport() on any error. This
+ * No need for an error return convention; we ereport() on any error. This
* module should never attempt to read a block it doesn't know is there.
*/
static void
@@ -353,7 +353,7 @@ ltsRecordBlockNum(LogicalTapeSet *lts, IndirectBlock *indirect,
/*
* Reset a logical tape's indirect-block hierarchy after a write pass
- * to prepare for reading. We dump out partly-filled blocks except
+ * to prepare for reading. We dump out partly-filled blocks except
* at the top of the hierarchy, and we rewind each level to the start.
* This call returns the first data block number, or -1L if the tape
* is empty.
@@ -540,7 +540,7 @@ LogicalTapeSetCreate(int ntapes)
/*
* Initialize per-tape structs. Note we allocate the I/O buffer and
* first-level indirect block for a tape only when it is first actually
- * written to. This avoids wasting memory space when tuplesort.c
+ * written to. This avoids wasting memory space when tuplesort.c
* overestimates the number of tapes needed.
*/
for (i = 0; i < ntapes; i++)
@@ -591,7 +591,7 @@ LogicalTapeSetClose(LogicalTapeSet *lts)
* Mark a logical tape set as not needing management of free space anymore.
*
* This should be called if the caller does not intend to write any more data
- * into the tape set, but is reading from un-frozen tapes. Since no more
+ * into the tape set, but is reading from un-frozen tapes. Since no more
* writes are planned, remembering free blocks is no longer useful. Setting
* this flag lets us avoid wasting time and space in ltsReleaseBlock(), which
* is not designed to handle large numbers of free blocks.
@@ -732,7 +732,7 @@ LogicalTapeRewind(LogicalTapeSet *lts, int tapenum, bool forWrite)
else
{
/*
- * Completion of a read phase. Rewind and prepare for write.
+ * Completion of a read phase. Rewind and prepare for write.
*
* NOTE: we assume the caller has read the tape to the end; otherwise
* untouched data and indirect blocks will not have been freed. We
@@ -826,7 +826,7 @@ LogicalTapeRead(LogicalTapeSet *lts, int tapenum,
*
* This *must* be called just at the end of a write pass, before the
* tape is rewound (after rewind is too late!). It performs a rewind
- * and switch to read mode "for free". An immediately following rewind-
+ * and switch to read mode "for free". An immediately following rewind-
* for-read call is OK but not necessary.
*/
void
@@ -862,7 +862,7 @@ LogicalTapeFreeze(LogicalTapeSet *lts, int tapenum)
}
/*
- * Backspace the tape a given number of bytes. (We also support a more
+ * Backspace the tape a given number of bytes. (We also support a more
* general seek interface, see below.)
*
* *Only* a frozen-for-read tape can be backed up; we don't support
@@ -966,7 +966,7 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
return false;
/*
- * OK, advance or back up to the target block. This implementation would
+ * OK, advance or back up to the target block. This implementation would
* be pretty inefficient for long seeks, but we really aren't expecting
* that (a seek over one tuple is typical).
*/
@@ -999,7 +999,7 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
* Obtain current position in a form suitable for a later LogicalTapeSeek.
*
* NOTE: it'd be OK to do this during write phase with intention of using
- * the position for a seek after freezing. Not clear if anyone needs that.
+ * the position for a seek after freezing. Not clear if anyone needs that.
*/
void
LogicalTapeTell(LogicalTapeSet *lts, int tapenum,
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 5d42480e604..58b3896af73 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -6,7 +6,7 @@
* This module handles sorting of heap tuples, index tuples, or single
* Datums (and could easily support other kinds of sortable objects,
* if necessary). It works efficiently for both small and large amounts
- * of data. Small amounts are sorted in-memory using qsort(). Large
+ * of data. Small amounts are sorted in-memory using qsort(). Large
* amounts are sorted using temporary files and a standard external sort
* algorithm.
*
@@ -40,7 +40,7 @@
* into sorted runs in temporary tapes, emitting just enough tuples at each
* step to get back within the workMem limit. Whenever the run number at
* the top of the heap changes, we begin a new run with a new output tape
- * (selected per Algorithm D). After the end of the input is reached,
+ * (selected per Algorithm D). After the end of the input is reached,
* we dump out remaining tuples in memory into a final run (or two),
* then merge the runs using Algorithm D.
*
@@ -57,17 +57,17 @@
* access at all, defeating the read-ahead methods used by most Unix kernels.
* Worse, the output tape gets written into a very random sequence of blocks
* of the temp file, ensuring that things will be even worse when it comes
- * time to read that tape. A straightforward merge pass thus ends up doing a
+ * time to read that tape. A straightforward merge pass thus ends up doing a
* lot of waiting for disk seeks. We can improve matters by prereading from
* each source tape sequentially, loading about workMem/M bytes from each tape
* in turn. Then we run the merge algorithm, writing but not reading until
- * one of the preloaded tuple series runs out. Then we switch back to preread
+ * one of the preloaded tuple series runs out. Then we switch back to preread
* mode, fill memory again, and repeat. This approach helps to localize both
* read and write accesses.
*
* When the caller requests random access to the sort result, we form
* the final sorted run on a logical tape which is then "frozen", so
- * that we can access it randomly. When the caller does not need random
+ * that we can access it randomly. When the caller does not need random
* access, we return from tuplesort_performsort() as soon as we are down
* to one run per logical tape. The final merge is then performed
* on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
@@ -77,7 +77,7 @@
* grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
* to Knuth's figure 70 (section 5.4.2). However, Knuth is assuming that
* tape drives are expensive beasts, and in particular that there will always
- * be many more runs than tape drives. In our implementation a "tape drive"
+ * be many more runs than tape drives. In our implementation a "tape drive"
* doesn't cost much more than a few Kb of memory buffers, so we can afford
* to have lots of them. In particular, if we can have as many tape drives
* as sorted runs, we can eliminate any repeated I/O at all. In the current
@@ -134,28 +134,28 @@ bool optimize_bounded_sort = true;
/*
- * The objects we actually sort are SortTuple structs. These contain
+ * The objects we actually sort are SortTuple structs. These contain
* a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
* which is a separate palloc chunk --- we assume it is just one chunk and
* can be freed by a simple pfree(). SortTuples also contain the tuple's
* first key column in Datum/nullflag format, and an index integer.
*
* Storing the first key column lets us save heap_getattr or index_getattr
- * calls during tuple comparisons. We could extract and save all the key
+ * calls during tuple comparisons. We could extract and save all the key
* columns not just the first, but this would increase code complexity and
* overhead, and wouldn't actually save any comparison cycles in the common
* case where the first key determines the comparison result. Note that
* for a pass-by-reference datatype, datum1 points into the "tuple" storage.
*
* When sorting single Datums, the data value is represented directly by
- * datum1/isnull1. If the datatype is pass-by-reference and isnull1 is false,
+ * datum1/isnull1. If the datatype is pass-by-reference and isnull1 is false,
* then datum1 points to a separately palloc'd data value that is also pointed
* to by the "tuple" pointer; otherwise "tuple" is NULL.
*
* While building initial runs, tupindex holds the tuple's run number. During
* merge passes, we re-use it to hold the input tape number that each tuple in
* the heap was read from, or to hold the index of the next tuple pre-read
- * from the same tape in the case of pre-read entries. tupindex goes unused
+ * from the same tape in the case of pre-read entries. tupindex goes unused
* if the sort occurs entirely in memory.
*/
typedef struct
@@ -238,7 +238,7 @@ struct Tuplesortstate
void (*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of the
+ * Function to write a stored tuple onto tape. The representation of the
* tuple on tape need not be the same as it is in memory; requirements on
* the tape representation are given below. After writing the tuple,
* pfree() the out-of-line data (not the SortTuple struct!), and increase
@@ -264,7 +264,7 @@ struct Tuplesortstate
void (*reversedirection) (Tuplesortstate *state);
/*
- * This array holds the tuples now in sort memory. If we are in state
+ * This array holds the tuples now in sort memory. If we are in state
* INITIAL, the tuples are in no particular order; if we are in state
* SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
* and FINALMERGE, the tuples are organized in "heap" order per Algorithm
@@ -412,7 +412,7 @@ struct Tuplesortstate
* If state->randomAccess is true, then the stored representation of the
* tuple must be followed by another "unsigned int" that is a copy of the
* length --- so the total tape space used is actually sizeof(unsigned int)
- * more than the stored length value. This allows read-backwards. When
+ * more than the stored length value. This allows read-backwards. When
* randomAccess is not true, the write/read routines may omit the extra
* length word.
*
@@ -422,7 +422,7 @@ struct Tuplesortstate
* the back length word (if present).
*
* The write/read routines can make use of the tuple description data
- * stored in the Tuplesortstate record, if needed. They are also expected
+ * stored in the Tuplesortstate record, if needed. They are also expected
* to adjust state->availMem by the amount of memory space (not tape space!)
* released or consumed. There is no error return from either writetup
* or readtup; they should ereport() on failure.
@@ -519,7 +519,7 @@ static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup);
*
* After calling tuplesort_begin, the caller should call tuplesort_putXXX
* zero or more times, then call tuplesort_performsort when all the tuples
- * have been supplied. After performsort, retrieve the tuples in sorted
+ * have been supplied. After performsort, retrieve the tuples in sorted
* order by calling tuplesort_getXXX until it returns false/NULL. (If random
* access was requested, rescan, markpos, and restorepos can also be called.)
* Call tuplesort_end to terminate the operation and release memory/disk space.
@@ -859,7 +859,7 @@ tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
*
* Advise tuplesort that at most the first N result tuples are required.
*
- * Must be called before inserting any tuples. (Actually, we could allow it
+ * Must be called before inserting any tuples. (Actually, we could allow it
* as long as the sort hasn't spilled to disk, but there seems no need for
* delayed calls at the moment.)
*
@@ -1005,7 +1005,7 @@ grow_memtuples(Tuplesortstate *state)
* strategy and instead increase as much as we safely can.
*
* To stay within allowedMem, we can't increase memtupsize by more
- * than availMem / sizeof(SortTuple) elements. In practice, we want
+ * than availMem / sizeof(SortTuple) elements. In practice, we want
* to increase it by considerably less, because we need to leave some
* space for the tuples to which the new array slots will refer. We
* assume the new tuples will be about the same size as the tuples
@@ -1053,9 +1053,9 @@ grow_memtuples(Tuplesortstate *state)
* We need to be sure that we do not cause LACKMEM to become true, else
* the space management algorithm will go nuts. The code above should
* never generate a dangerous request, but to be safe, check explicitly
- * that the array growth fits within availMem. (We could still cause
+ * that the array growth fits within availMem. (We could still cause
* LACKMEM if the memory chunk overhead associated with the memtuples
- * array were to increase. That shouldn't happen with any sane value of
+ * array were to increase. That shouldn't happen with any sane value of
* allowedMem, because at any array size large enough to risk LACKMEM,
* palloc would be treating both old and new arrays as separate chunks.
* But we'll check LACKMEM explicitly below just in case.)
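
Numerically, the cap described above is just availMem divided by sizeof(SortTuple); a hedged sketch of the clamp, where desiredSlots and maxSlots are illustrative names rather than the variables grow_memtuples actually uses:

/* Hedged sketch: never request more slots than remaining memory can hold. */
long	maxSlots = state->availMem / (long) sizeof(SortTuple);
int		newmemtupsize = (int) Min((long) desiredSlots, maxSlots);

if (newmemtupsize <= state->memtupsize)
	return false;				/* no useful growth is possible */
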
@@ -1191,7 +1191,7 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple)
case TSS_INITIAL:
/*
- * Save the tuple into the unsorted array. First, grow the array
+ * Save the tuple into the unsorted array. First, grow the array
* as needed. Note that we try to grow the array when there is
* still one free slot remaining --- if we fail, there'll still be
* room to store the incoming tuple, and then we'll switch to
@@ -1212,7 +1212,7 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple)
* enough tuples to meet the bound.
*
* Note that once we enter TSS_BOUNDED state we will always try to
- * complete the sort that way. In the worst case, if later input
+ * complete the sort that way. In the worst case, if later input
* tuples are larger than earlier ones, this might cause us to
* exceed workMem significantly.
*/
@@ -1350,7 +1350,7 @@ tuplesort_performsort(Tuplesortstate *state)
/*
* We were able to accumulate all the tuples required for output
- * in memory, using a heap to eliminate excess tuples. Now we
+ * in memory, using a heap to eliminate excess tuples. Now we
* have to transform the heap to a properly-sorted array.
*/
sort_bounded_heap(state);
@@ -1364,7 +1364,7 @@ tuplesort_performsort(Tuplesortstate *state)
case TSS_BUILDRUNS:
/*
- * Finish tape-based sort. First, flush all tuples remaining in
+ * Finish tape-based sort. First, flush all tuples remaining in
* memory out to tape; then merge until we have a single remaining
* run (or, if !randomAccess, one run per tape). Note that
* mergeruns sets the correct state->status.
@@ -1425,7 +1425,7 @@ tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
/*
* Complain if caller tries to retrieve more tuples than
- * originally asked for in a bounded sort. This is because
+ * originally asked for in a bounded sort. This is because
* returning EOF here might be the wrong thing.
*/
if (state->bounded && state->current >= state->bound)
@@ -1631,7 +1631,7 @@ tuplesort_gettupleslot(Tuplesortstate *state, bool forward,
/*
* Fetch the next tuple in either forward or back direction.
- * Returns NULL if no more tuples. If *should_free is set, the
+ * Returns NULL if no more tuples. If *should_free is set, the
* caller must pfree the returned tuple when done with it.
*/
HeapTuple
@@ -1650,7 +1650,7 @@ tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free)
/*
* Fetch the next index tuple in either forward or back direction.
- * Returns NULL if no more tuples. If *should_free is set, the
+ * Returns NULL if no more tuples. If *should_free is set, the
* caller must pfree the returned tuple when done with it.
*/
IndexTuple
@@ -1721,7 +1721,7 @@ tuplesort_merge_order(long allowedMem)
/*
* We need one tape for each merge input, plus another one for the output,
- * and each of these tapes needs buffer space. In addition we want
+ * and each of these tapes needs buffer space. In addition we want
* MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
* count).
*
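
Under that budget the maximum merge order works out roughly as below (a sketch restating the arithmetic; TAPE_BUFFER_OVERHEAD, MERGE_BUFFER_SIZE and MINORDER are the constants this file defines for exactly this purpose):

/* Hedged sketch: the output tape needs only its buffer, while each of the
 * mOrder input tapes needs a buffer plus MERGE_BUFFER_SIZE of preread space. */
mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
	(MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);

/* even in minimum memory, merge at least MINORDER runs at a time */
mOrder = Max(mOrder, MINORDER);
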
@@ -1775,7 +1775,7 @@ inittapes(Tuplesortstate *state)
* don't decrease it to the point that we have no room for tuples. (That
* case is only likely to occur if sorting pass-by-value Datums; in all
* other scenarios the memtuples[] array is unlikely to occupy more than
- * half of allowedMem. In the pass-by-value case it's not important to
+ * half of allowedMem. In the pass-by-value case it's not important to
* account for tuple space, so we don't care if LACKMEM becomes
* inaccurate.)
*/
@@ -1899,7 +1899,7 @@ mergeruns(Tuplesortstate *state)
/*
* If we produced only one initial run (quite likely if the total data
* volume is between 1X and 2X workMem), we can just use that tape as the
- * finished output, rather than doing a useless merge. (This obvious
+ * finished output, rather than doing a useless merge. (This obvious
* optimization is not in Knuth's algorithm.)
*/
if (state->currentRun == 1)
@@ -2005,7 +2005,7 @@ mergeruns(Tuplesortstate *state)
* the loop without performing the last iteration of step D6, we have not
* rearranged the tape unit assignment, and therefore the result is on
* TAPE[T]. We need to do it this way so that we can freeze the final
- * output tape while rewinding it. The last iteration of step D6 would be
+ * output tape while rewinding it. The last iteration of step D6 would be
* a waste of cycles anyway...
*/
state->result_tape = state->tp_tapenum[state->tapeRange];
@@ -2089,7 +2089,7 @@ mergeonerun(Tuplesortstate *state)
* beginmerge - initialize for a merge pass
*
* We decrease the counts of real and dummy runs for each tape, and mark
- * which tapes contain active input runs in mergeactive[]. Then, load
+ * which tapes contain active input runs in mergeactive[]. Then, load
* as many tuples as we can from each active input tape, and finally
* fill the merge heap with the first tuple from each active tape.
*/
@@ -2182,7 +2182,7 @@ beginmerge(Tuplesortstate *state)
* This routine exists to improve sequentiality of reads during a merge pass,
* as explained in the header comments of this file. Load tuples from each
* active source tape until the tape's run is exhausted or it has used up
- * its fair share of available memory. In any case, we guarantee that there
+ * its fair share of available memory. In any case, we guarantee that there
* is at least one preread tuple available from each unexhausted input tape.
*
* We invoke this routine at the start of a merge pass for initial load,
@@ -2445,7 +2445,7 @@ tuplesort_get_stats(Tuplesortstate *state,
* accurately once we have begun to return tuples to the caller (since we
* don't account for pfree's the caller is expected to do), so we cannot
* rely on availMem in a disk sort. This does not seem worth the overhead
- * to fix. Is it worth creating an API for the memory context code to
+ * to fix. Is it worth creating an API for the memory context code to
* tell us how much is actually used in sortcontext?
*/
if (state->tapeset)
@@ -2483,7 +2483,7 @@ tuplesort_get_stats(Tuplesortstate *state,
/*
* Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
*
- * Compare two SortTuples. If checkIndex is true, use the tuple index
+ * Compare two SortTuples. If checkIndex is true, use the tuple index
* as the front of the sort key; otherwise, no.
*/
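
With checkIndex true, tuples from lower-numbered runs compare as smaller regardless of their sort keys, and only ties on tupindex fall through to the real comparator; a hedged sketch of that comparison macro (checkIndex is a local of the calling routine, COMPARETUP the per-format comparator this file already defines):

/* Hedged sketch of the heap comparison: tupindex first, then the sort key. */
#define HEAPCOMPARE(tup1,tup2) \
	(checkIndex && (tup1)->tupindex != (tup2)->tupindex ? \
	 (tup1)->tupindex - (tup2)->tupindex : \
	 COMPARETUP(state, tup1, tup2))
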
@@ -2588,7 +2588,7 @@ sort_bounded_heap(Tuplesortstate *state)
/*
* Insert a new tuple into an empty or existing heap, maintaining the
- * heap invariant. Caller is responsible for ensuring there's room.
+ * heap invariant. Caller is responsible for ensuring there's room.
*
* Note: we assume *tuple is a temporary variable that can be scribbled on.
* For some callers, tuple actually points to a memtuples[] entry above the
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index ea9bc04823d..549175c885c 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -8,7 +8,7 @@
* a dumbed-down version of tuplesort.c; it does no sorting of tuples
* but can only store and regurgitate a sequence of tuples. However,
* because no sort is required, it is allowed to start reading the sequence
- * before it has all been written. This is particularly useful for cursors,
+ * before it has all been written. This is particularly useful for cursors,
* because it allows random access within the already-scanned portion of
* a query without having to process the underlying scan to completion.
* Also, it is possible to support multiple independent read pointers.
@@ -17,7 +17,7 @@
* space limit specified by the caller.
*
* The (approximate) amount of memory allowed to the tuplestore is specified
- * in kilobytes by the caller. We absorb tuples and simply store them in an
+ * in kilobytes by the caller. We absorb tuples and simply store them in an
* in-memory array as long as we haven't exceeded maxKBytes. If we do exceed
* maxKBytes, we dump all the tuples into a temp file and then read from that
* when needed.
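
Driving a tuplestore looks much like driving a simplified tuplesort; a minimal sketch, with slot supplied by the caller and argument lists abbreviated (exact signatures vary slightly across versions):

/* Hedged sketch of typical tuplestore usage. */
Tuplestorestate *ts;

ts = tuplestore_begin_heap(true /* randomAccess */ ,
						   false /* interXact */ ,
						   work_mem /* maxKBytes */ );

tuplestore_puttupleslot(ts, slot);		/* absorb tuples as they arrive */

/* reading may start before all the tuples have been written */
while (tuplestore_gettupleslot(ts, true /* forward */ ,
							   false /* copy */ , slot))
{
	/* ... process the tuple now stored in slot ... */
}

tuplestore_end(ts);
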
@@ -29,7 +29,7 @@
* When the caller requests backward-scan capability, we write the temp file
* in a format that allows either forward or backward scan. Otherwise, only
* forward scan is allowed. A request for backward scan must be made before
- * putting any tuples into the tuplestore. Rewind is normally allowed but
+ * putting any tuples into the tuplestore. Rewind is normally allowed but
* can be turned off via tuplestore_set_eflags; turning off rewind for all
* read pointers enables truncation of the tuplestore at the oldest read point
* for minimal memory usage. (The caller must explicitly call tuplestore_trim
@@ -63,7 +63,7 @@
/*
- * Possible states of a Tuplestore object. These denote the states that
+ * Possible states of a Tuplestore object. These denote the states that
* persist between calls of Tuplestore routines.
*/
typedef enum
@@ -82,7 +82,7 @@ typedef enum
*
* Special case: if eof_reached is true, then the pointer's read position is
* implicitly equal to the write position, and current/file/offset aren't
- * maintained. This way we need not update all the read pointers each time
+ * maintained. This way we need not update all the read pointers each time
* we write.
*/
typedef struct
@@ -127,7 +127,7 @@ struct Tuplestorestate
void *(*copytup) (Tuplestorestate *state, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of the
+ * Function to write a stored tuple onto tape. The representation of the
* tuple on tape need not be the same as it is in memory; requirements on
* the tape representation are given below. After writing the tuple,
* pfree() it, and increase state->availMem by the amount of memory space
@@ -196,7 +196,7 @@ struct Tuplestorestate
* If state->backward is true, then the stored representation of
* the tuple must be followed by another "unsigned int" that is a copy of the
* length --- so the total tape space used is actually sizeof(unsigned int)
- * more than the stored length value. This allows read-backwards. When
+ * more than the stored length value. This allows read-backwards. When
* state->backward is not set, the write/read routines may omit the extra
* length word.
*
@@ -294,7 +294,7 @@ tuplestore_begin_common(int eflags, bool interXact, int maxKBytes)
* tuple store are allowed.
*
* interXact: if true, the files used for on-disk storage persist beyond the
- * end of the current transaction. NOTE: It's the caller's responsibility to
+ * end of the current transaction. NOTE: It's the caller's responsibility to
* create such a tuplestore in a memory context and resource owner that will
* also survive transaction boundaries, and to ensure the tuplestore is closed
* when it's no longer wanted.
@@ -333,7 +333,7 @@ tuplestore_begin_heap(bool randomAccess, bool interXact, int maxKBytes)
* any data into the tuplestore.
*
* eflags is a bitmask following the meanings used for executor node
- * startup flags (see executor.h). tuplestore pays attention to these bits:
+ * startup flags (see executor.h). tuplestore pays attention to these bits:
* EXEC_FLAG_REWIND need rewind to start
* EXEC_FLAG_BACKWARD need backward fetch
* If tuplestore_set_eflags is not called, REWIND is allowed, and BACKWARD
@@ -623,9 +623,9 @@ grow_memtuples(Tuplestorestate *state)
* We need to be sure that we do not cause LACKMEM to become true, else
* the space management algorithm will go nuts. The code above should
* never generate a dangerous request, but to be safe, check explicitly
- * that the array growth fits within availMem. (We could still cause
+ * that the array growth fits within availMem. (We could still cause
* LACKMEM if the memory chunk overhead associated with the memtuples
- * array were to increase. That shouldn't happen with any sane value of
+ * array were to increase. That shouldn't happen with any sane value of
* allowedMem, because at any array size large enough to risk LACKMEM,
* palloc would be treating both old and new arrays as separate chunks.
* But we'll check LACKMEM explicitly below just in case.)
@@ -694,7 +694,7 @@ tuplestore_puttuple(Tuplestorestate *state, HeapTuple tuple)
MemoryContext oldcxt = MemoryContextSwitchTo(state->context);
/*
- * Copy the tuple. (Must do this even in WRITEFILE case. Note that
+ * Copy the tuple. (Must do this even in WRITEFILE case. Note that
* COPYTUP includes USEMEM, so we needn't do that here.)
*/
tuple = COPYTUP(state, tuple);
@@ -851,7 +851,7 @@ tuplestore_puttuple_common(Tuplestorestate *state, void *tuple)
/*
* Fetch the next tuple in either forward or back direction.
- * Returns NULL if no more tuples. If should_free is set, the
+ * Returns NULL if no more tuples. If should_free is set, the
* caller must pfree the returned tuple when done with it.
*
* Backward scan is only allowed if randomAccess was set true or
diff --git a/src/backend/utils/time/combocid.c b/src/backend/utils/time/combocid.c
index 923355d3ceb..44f7ef1bbb8 100644
--- a/src/backend/utils/time/combocid.c
+++ b/src/backend/utils/time/combocid.c
@@ -15,7 +15,7 @@
* this module.
*
* To allow reusing existing combo cids, we also keep a hash table that
- * maps cmin,cmax pairs to combo cids. This keeps the data structure size
+ * maps cmin,cmax pairs to combo cids. This keeps the data structure size
* reasonable in most cases, since the number of unique pairs used by any
* one transaction is likely to be small.
*
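
That reuse table is keyed on the pair itself, so each distinct (cmin, cmax) combination maps to at most one combo cid; a hedged sketch of the key structure (field comments illustrative):

/* Hedged sketch of the lookup key for reusing combo cids. */
typedef struct
{
	CommandId	cmin;			/* inserting command's id */
	CommandId	cmax;			/* deleting command's id */
} ComboCidKeyData;
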
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index e739d2d192b..3bf42423313 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -11,7 +11,7 @@
* regd_count and count it in RegisteredSnapshots, but this reference is not
* tracked by a resource owner. We used to use the TopTransactionResourceOwner
* to track this snapshot reference, but that introduces logical circularity
- * and thus makes it impossible to clean up in a sane fashion. It's better to
+ * and thus makes it impossible to clean up in a sane fashion. It's better to
* handle this reference as an internally-tracked registration, so that this
* module is entirely lower-level than ResourceOwners.
*
@@ -20,9 +20,9 @@
* tracked by any resource owner.
*
* These arrangements let us reset MyPgXact->xmin when there are no snapshots
- * referenced by this transaction. (One possible improvement would be to be
+ * referenced by this transaction. (One possible improvement would be to be
* able to advance Xmin when the snapshot with the earliest Xmin is no longer
- * referenced. That's a bit harder though, it requires more locking, and
+ * referenced. That's a bit harder though, it requires more locking, and
* anyway it should be rather uncommon to keep temporary snapshots referenced
* for too long.)
*
@@ -57,7 +57,7 @@
* CurrentSnapshot points to the only snapshot taken in transaction-snapshot
* mode, and to the latest one taken in a read-committed transaction.
* SecondarySnapshot is a snapshot that's always up-to-date as of the current
- * instant, even in transaction-snapshot mode. It should only be used for
+ * instant, even in transaction-snapshot mode. It should only be used for
* special-purpose code (say, RI checking.)
*
* These SnapshotData structs are static to simplify memory allocation
@@ -76,7 +76,7 @@ static Snapshot SecondarySnapshot = NULL;
* mode, we don't want it to say that BootstrapTransactionId is in progress.
*
* RecentGlobalXmin is initialized to InvalidTransactionId, to ensure that no
- * one tries to use a stale value. Readers should ensure that it has been set
+ * one tries to use a stale value. Readers should ensure that it has been set
* to something else before using it.
*/
TransactionId TransactionXmin = FirstNormalTransactionId;
@@ -114,7 +114,7 @@ static int RegisteredSnapshots = 0;
bool FirstSnapshotSet = false;
/*
- * Remember the serializable transaction snapshot, if any. We cannot trust
+ * Remember the serializable transaction snapshot, if any. We cannot trust
* FirstSnapshotSet in combination with IsolationUsesXactSnapshot(), because
* GUC may be reset before us, changing the value of IsolationUsesXactSnapshot.
*/
@@ -286,7 +286,7 @@ SetTransactionSnapshot(Snapshot sourcesnap, TransactionId sourcexid)
/*
* In transaction-snapshot mode, the first snapshot must live until end of
- * xact, so we must make a copy of it. Furthermore, if we're running in
+ * xact, so we must make a copy of it. Furthermore, if we're running in
* serializable mode, predicate.c needs to do its own processing.
*/
if (IsolationUsesXactSnapshot())
@@ -382,7 +382,7 @@ FreeSnapshot(Snapshot snapshot)
*
* If the passed snapshot is a statically-allocated one, or it is possibly
* subject to a future command counter update, create a new long-lived copy
- * with active refcount=1. Otherwise, only increment the refcount.
+ * with active refcount=1. Otherwise, only increment the refcount.
*/
void
PushActiveSnapshot(Snapshot snap)
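
Callers normally bracket the work that needs the snapshot with a matching PopActiveSnapshot, along the lines of this minimal sketch:

/* Hedged sketch: make a snapshot active for the duration of some work. */
PushActiveSnapshot(GetTransactionSnapshot());

/* ... execute queries or scans that rely on the active snapshot ... */

PopActiveSnapshot();
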
@@ -751,7 +751,7 @@ ExportSnapshot(Snapshot snapshot)
* However, we haven't got enough information to do that, since we don't
* know if we're at top level or not. For example, we could be inside a
* plpgsql function that is going to fire off other transactions via
- * dblink. Rather than disallow perfectly legitimate usages, don't make a
+ * dblink. Rather than disallow perfectly legitimate usages, don't make a
* check.
*
* Also note that we don't make any restriction on the transaction's
@@ -964,7 +964,7 @@ parseXidFromText(const char *prefix, char **s, const char *filename)
/*
* ImportSnapshot
- * Import a previously exported snapshot. The argument should be a
+ * Import a previously exported snapshot. The argument should be a
* filename in SNAPSHOT_EXPORT_DIR. Load the snapshot from that file.
* This is called by "SET TRANSACTION SNAPSHOT 'foo'".
*/
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index 44b4ddcb02f..82e1a4feb03 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -20,7 +20,7 @@
* TransactionIdDidCommit will both return true. If we check only
* TransactionIdDidCommit, we could consider a tuple committed when a
* later GetSnapshotData call will still think the originating transaction
- * is in progress, which leads to application-level inconsistency. The
+ * is in progress, which leads to application-level inconsistency. The
* upshot is that we gotta check TransactionIdIsInProgress first in all
* code paths, except for a few cases where we are looking at
* subtransactions of our own main transaction and so there can't be any
@@ -90,13 +90,13 @@ static bool XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot);
* just refrain from setting the hint bit until some future re-examination
* of the tuple.
*
- * We can always set hint bits when marking a transaction aborted. (Some
+ * We can always set hint bits when marking a transaction aborted. (Some
* code in heapam.c relies on that!)
*
* Also, if we are cleaning up HEAP_MOVED_IN or HEAP_MOVED_OFF entries, then
* we can always set the hint bits, since pre-9.0 VACUUM FULL always used
* synchronous commits and didn't move tuples that weren't previously
- * hinted. (This is not known by this subroutine, but is applied by its
+ * hinted. (This is not known by this subroutine, but is applied by its
* callers.) Note: old-style VACUUM FULL is gone, but we have to keep this
* module's support for MOVED_OFF/MOVED_IN flag bits for as long as we
* support in-place update from pre-9.0 databases.
@@ -542,7 +542,7 @@ HeapTupleSatisfiesAny(HeapTupleHeader tuple, Snapshot snapshot, Buffer buffer)
* This is a simplified version that only checks for VACUUM moving conditions.
* It's appropriate for TOAST usage because TOAST really doesn't want to do
* its own time qual checks; if you can see the main table row that contains
- * a TOAST reference, you should be able to see the TOASTed value. However,
+ * a TOAST reference, you should be able to see the TOASTed value. However,
* vacuuming a TOAST table is independent of the main table, and in case such
* a vacuum fails partway through, we'd better do this much checking.
*
@@ -791,7 +791,7 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
{
/*
* If it's only locked but neither EXCL_LOCK nor KEYSHR_LOCK is
- * set, it cannot possibly be running. Otherwise need to check.
+ * set, it cannot possibly be running. Otherwise need to check.
*/
if ((tuple->t_infomask & (HEAP_XMAX_EXCL_LOCK |
HEAP_XMAX_KEYSHR_LOCK)) &&
@@ -1286,7 +1286,7 @@ HeapTupleSatisfiesMVCC(HeapTupleHeader tuple, Snapshot snapshot,
* we mainly want to know is if a tuple is potentially visible to *any*
* running transaction. If so, it can't be removed yet by VACUUM.
*
- * OldestXmin is a cutoff XID (obtained from GetOldestXmin()). Tuples
+ * OldestXmin is a cutoff XID (obtained from GetOldestXmin()). Tuples
* deleted by XIDs >= OldestXmin are deemed "recently dead"; they might
* still be visible to some open transaction, so we can't remove them,
* even if we see that the deleting transaction has committed.
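
For a tuple whose deleter is known committed, that cutoff reduces to a single comparison, sketched below with xmax already extracted from the tuple header (the real function also handles locked and updated tuples):

/* Hedged sketch of the OldestXmin cutoff for a committed deleter. */
if (!TransactionIdPrecedes(xmax, OldestXmin))
	return HEAPTUPLE_RECENTLY_DEAD;		/* some snapshot may still see it */
return HEAPTUPLE_DEAD;					/* older than every open snapshot */
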
@@ -1374,7 +1374,7 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
}
/*
- * Okay, the inserter committed, so it was good at some point. Now what
+ * Okay, the inserter committed, so it was good at some point. Now what
* about the deleting transaction?
*/
if (tuple->t_infomask & HEAP_XMAX_INVALID)
@@ -1514,7 +1514,7 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
* in lieu of HeapTupleSatisfiesVacuum when the tuple has just been
* tested by HeapTupleSatisfiesMVCC and, therefore, any hint bits that
* can be set should already be set. We assume that if no hint bits
- * either for xmin or xmax, the transaction is still running. This is
+ * either for xmin or xmax, the transaction is still running. This is
* therefore faster than HeapTupleSatisfiesVacuum, because we don't
* consult CLOG (and also because we don't need to give an exact answer,
* just whether or not the tuple is surely dead).
@@ -1575,7 +1575,7 @@ XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot)
/*
* Make a quick range check to eliminate most XIDs without looking at the
- * xip arrays. Note that this is OK even if we convert a subxact XID to
+ * xip arrays. Note that this is OK even if we convert a subxact XID to
* its parent below, because a subxact with XID < xmin has surely also got
* a parent with XID < xmin, while one with XID >= xmax must belong to a
* parent that was not yet committed at the time of this snapshot.
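
The fast path of that check is just two comparisons against the snapshot bounds, roughly as sketched here:

/* Hedged sketch of the quick range check before consulting the xip arrays. */
if (TransactionIdPrecedes(xid, snapshot->xmin))
	return false;			/* committed before the snapshot was taken */
if (TransactionIdFollowsOrEquals(xid, snapshot->xmax))
	return true;			/* still running as far as this snapshot knows */
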