| field | value | date |
|---|---|---|
| author | Bruce Momjian <bruce@momjian.us> | 2014-05-06 11:26:26 -0400 |
| committer | Bruce Momjian <bruce@momjian.us> | 2014-05-06 11:26:26 -0400 |
| commit | 2616a5d300e5bb5a2838d2a065afa3740e08727f (patch) | |
| tree | 5939408c63409abda810217fe812749a5da7345b /src/backend/utils/adt | |
| parent | e0070a6858cfcd2c4129dfa93bc042d6d86732c8 (diff) | |
| download | postgresql-2616a5d300e5bb5a2838d2a065afa3740e08727f.tar.gz, postgresql-2616a5d300e5bb5a2838d2a065afa3740e08727f.zip | |
Remove tabs after spaces in C comments
This was not changed in HEAD, but will be done later as part of a
pgindent run. Future pgindent runs will also do this.
Report by Tom Lane
Backpatch through all supported branches, but not HEAD
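To make the whitespace rule concrete, the first hunk in acl.c can be read as the before/after pair below. This is an illustration reconstructed from the commit title rather than a quotation of the raw patch bytes: in the "before" text a hard tab (marked <TAB>) follows the space after the period, and the patch leaves only ordinary spaces there.

```c
/*
 * Before the patch (a hard tab, marked <TAB>, follows the sentence-ending space):
 *
 *     Consumes the first alphanumeric string (identifier) found in string
 *     's', ignoring any leading white space. <TAB>If it finds a double quote
 *     it returns the word inside the quotes.
 *
 * After the patch (only spaces follow the period):
 *
 *     Consumes the first alphanumeric string (identifier) found in string
 *     's', ignoring any leading white space.  If it finds a double quote
 *     it returns the word inside the quotes.
 */
```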
Diffstat (limited to 'src/backend/utils/adt')
37 files changed, 264 insertions, 264 deletions
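pgindent itself (a Perl wrapper around BSD indent in the PostgreSQL tree) is not reproduced here. Purely as an illustration of the transformation this commit applies, the standalone C sketch below expands any tab that immediately follows a space into spaces up to the next tab stop, assuming the 4-column tab stops used in the PostgreSQL sources; the function name and test string are made up for this example and are not part of the patch.

```c
#include <stdio.h>

#define TABSTOP 4				/* PostgreSQL sources use 4-column tab stops */

/*
 * Copy src into dst, expanding any tab that immediately follows a space
 * into spaces up to the next tab stop.  Tabs not preceded by a space are
 * copied through unchanged.  Illustrative only; this is not pgindent.
 */
static void
remove_tabs_after_spaces(const char *src, char *dst, size_t dstlen)
{
	size_t		i;
	size_t		j = 0;
	size_t		col = 0;		/* current output column (0-based) */

	for (i = 0; src[i] != '\0' && j + TABSTOP < dstlen; i++)
	{
		if (src[i] == '\t' && i > 0 && src[i - 1] == ' ')
		{
			/* replace the offending tab with spaces, keeping alignment */
			do
			{
				dst[j++] = ' ';
				col++;
			} while (col % TABSTOP != 0);
		}
		else
		{
			dst[j++] = src[i];
			col = (src[i] == '\t') ? (col / TABSTOP + 1) * TABSTOP : col + 1;
		}
	}
	dst[j] = '\0';
}

int
main(void)
{
	char		fixed[256];

	remove_tabs_after_spaces(" * leading white space. \tIf it finds a double quote",
							 fixed, sizeof(fixed));
	puts(fixed);				/* the space-then-tab sequence becomes spaces only */
	return 0;
}
```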
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c index 7c4eaaca8a3..e32e081482e 100644 --- a/src/backend/utils/adt/acl.c +++ b/src/backend/utils/adt/acl.c @@ -119,7 +119,7 @@ static Oid get_role_oid_or_public(const char *rolname); /* * getid * Consumes the first alphanumeric string (identifier) found in string - * 's', ignoring any leading white space. If it finds a double quote + * 's', ignoring any leading white space. If it finds a double quote * it returns the word inside the quotes. * * RETURNS: @@ -225,7 +225,7 @@ putid(char *p, const char *s) * * RETURNS: * the string position in 's' immediately following the ACL - * specification. Also: + * specification. Also: * - loads the structure pointed to by 'aip' with the appropriate * UID/GID, id type identifier and mode type values. */ @@ -939,7 +939,7 @@ aclupdate(const Acl *old_acl, const AclItem *mod_aip, } /* - * Remove abandoned privileges (cascading revoke). Currently we can only + * Remove abandoned privileges (cascading revoke). Currently we can only * handle this when the grantee is not PUBLIC. */ if ((old_goptions & ~new_goptions) != 0) @@ -1005,7 +1005,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId) /* * If the old ACL contained any references to the new owner, then we may - * now have generated an ACL containing duplicate entries. Find them and + * now have generated an ACL containing duplicate entries. Find them and * merge them so that there are not duplicates. (This is relatively * expensive since we use a stupid O(N^2) algorithm, but it's unlikely to * be the normal case.) @@ -1016,7 +1016,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId) * remove privilege-free entries, should there be any in the input.) dst * is the next output slot, targ is the currently considered input slot * (always >= dst), and src scans entries to the right of targ looking for - * duplicates. Once an entry has been emitted to dst it is known + * duplicates. Once an entry has been emitted to dst it is known * duplicate-free and need not be considered anymore. */ if (newpresent) @@ -2400,7 +2400,7 @@ column_privilege_check(Oid tableoid, AttrNumber attnum, * existence of the pg_class row before risking calling pg_class_aclcheck. * Note: it might seem there's a race condition against concurrent DROP, * but really it's safe because there will be no syscache flush between - * here and there. So if we see the row in the syscache, so will + * here and there. So if we see the row in the syscache, so will * pg_class_aclcheck. */ if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(tableoid))) @@ -4747,14 +4747,14 @@ count_one_bits(AclMode mask) * The grantor must always be either the object owner or some role that has * been explicitly granted grant options. This ensures that all granted * privileges appear to flow from the object owner, and there are never - * multiple "original sources" of a privilege. Therefore, if the would-be + * multiple "original sources" of a privilege. Therefore, if the would-be * grantor is a member of a role that has the needed grant options, we have * to do the grant as that role instead. * * It is possible that the would-be grantor is a member of several roles * that have different subsets of the desired grant options, but no one * role has 'em all. In this case we pick a role with the largest number - * of desired options. Ties are broken in favor of closer ancestors. + * of desired options. Ties are broken in favor of closer ancestors. 
* * roleId: the role attempting to do the GRANT/REVOKE * privileges: the privileges to be granted/revoked diff --git a/src/backend/utils/adt/array_userfuncs.c b/src/backend/utils/adt/array_userfuncs.c index 274e867fdd1..bd186cb6cfc 100644 --- a/src/backend/utils/adt/array_userfuncs.c +++ b/src/backend/utils/adt/array_userfuncs.c @@ -502,7 +502,7 @@ array_agg_transfn(PG_FUNCTION_ARGS) /* * The transition type for array_agg() is declared to be "internal", which - * is a pass-by-value type the same size as a pointer. So we can safely + * is a pass-by-value type the same size as a pointer. So we can safely * pass the ArrayBuildState pointer through nodeAgg.c's machinations. */ PG_RETURN_POINTER(state); @@ -517,7 +517,7 @@ array_agg_finalfn(PG_FUNCTION_ARGS) int lbs[1]; /* - * Test for null before Asserting we are in right context. This is to + * Test for null before Asserting we are in right context. This is to * avoid possible Assert failure in 8.4beta installations, where it is * possible for users to create NULL constants of type internal. */ diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c index 3e43e951e10..e8621ff8751 100644 --- a/src/backend/utils/adt/arrayfuncs.c +++ b/src/backend/utils/adt/arrayfuncs.c @@ -689,7 +689,7 @@ ReadArrayStr(char *arrayStr, /* * We have to remove " and \ characters to create a clean item value to - * pass to the datatype input routine. We overwrite each item value + * pass to the datatype input routine. We overwrite each item value * in-place within arrayStr to do this. srcptr is the current scan point, * and dstptr is where we are copying to. * @@ -889,7 +889,7 @@ ReadArrayStr(char *arrayStr, * referenced by Datums after copying them. * * If the input data is of varlena type, the caller must have ensured that - * the values are not toasted. (Doing it here doesn't work since the + * the values are not toasted. (Doing it here doesn't work since the * caller has already allocated space for the array...) */ static void @@ -1985,7 +1985,7 @@ array_get_slice(ArrayType *array, memcpy(ARR_DIMS(newarray), span, ndim * sizeof(int)); /* - * Lower bounds of the new array are set to 1. Formerly (before 7.3) we + * Lower bounds of the new array are set to 1. Formerly (before 7.3) we * copied the given lowerIndx values ... but that seems confusing. */ newlb = ARR_LBOUND(newarray); @@ -2617,7 +2617,7 @@ array_set_slice(ArrayType *array, /* * array_map() * - * Map an array through an arbitrary function. Return a new array with + * Map an array through an arbitrary function. Return a new array with * same dimensions and each source element transformed by fn(). Each * source element is passed as the first argument to fn(); additional * arguments to be passed to fn() can be specified by the caller. @@ -2632,9 +2632,9 @@ array_set_slice(ArrayType *array, * first argument position initially holds the input array value. * * inpType: OID of element type of input array. This must be the same as, * or binary-compatible with, the first argument type of fn(). - * * retType: OID of element type of output array. This must be the same as, + * * retType: OID of element type of output array. This must be the same as, * or binary-compatible with, the result type of fn(). - * * amstate: workspace for array_map. Must be zeroed by caller before + * * amstate: workspace for array_map. Must be zeroed by caller before * first call, and not touched after that. 
* * It is legitimate to pass a freshly-zeroed ArrayMapState on each call, @@ -3488,7 +3488,7 @@ array_cmp(FunctionCallInfo fcinfo) /* * If arrays contain same data (up to end of shorter one), apply - * additional rules to sort by dimensionality. The relative significance + * additional rules to sort by dimensionality. The relative significance * of the different bits of information is historical; mainly we just care * that we don't say "equal" for arrays of different dimensionality. */ @@ -3750,7 +3750,7 @@ array_contain_compare(ArrayType *array1, ArrayType *array2, Oid collation, /* * We assume that the comparison operator is strict, so a NULL can't - * match anything. XXX this diverges from the "NULL=NULL" behavior of + * match anything. XXX this diverges from the "NULL=NULL" behavior of * array_eq, should we act like that? */ if (isnull1) @@ -4241,7 +4241,7 @@ array_copy(char *destptr, int nitems, * * Note: this could certainly be optimized using standard bitblt methods. * However, it's not clear that the typical Postgres array has enough elements - * to make it worth worrying too much. For the moment, KISS. + * to make it worth worrying too much. For the moment, KISS. */ void array_bitmap_copy(bits8 *destbitmap, int destoffset, @@ -4438,7 +4438,7 @@ array_extract_slice(ArrayType *newarray, * Insert a slice into an array. * * ndim/dim[]/lb[] are dimensions of the original array. A new array with - * those same dimensions is to be constructed. destArray must already + * those same dimensions is to be constructed. destArray must already * have been allocated and its header initialized. * * st[]/endp[] identify the slice to be replaced. Elements within the slice @@ -5106,7 +5106,7 @@ array_unnest(PG_FUNCTION_ARGS) * Get the array value and detoast if needed. We can't do this * earlier because if we have to detoast, we want the detoasted copy * to be in multi_call_memory_ctx, so it will go away when we're done - * and not before. (If no detoast happens, we assume the originally + * and not before. (If no detoast happens, we assume the originally * passed array will stick around till then.) */ arr = PG_GETARG_ARRAYTYPE_P(0); diff --git a/src/backend/utils/adt/arrayutils.c b/src/backend/utils/adt/arrayutils.c index af7359ca4e3..cf248916000 100644 --- a/src/backend/utils/adt/arrayutils.c +++ b/src/backend/utils/adt/arrayutils.c @@ -193,7 +193,7 @@ mda_next_tuple(int n, int *curr, const int *span) /* * ArrayGetIntegerTypmods: verify that argument is a 1-D cstring array, - * and get the contents converted to integers. Returns a palloc'd array + * and get the contents converted to integers. Returns a palloc'd array * and places the length at *n. */ int32 * diff --git a/src/backend/utils/adt/char.c b/src/backend/utils/adt/char.c index 974cb223589..5a5f83a9aaf 100644 --- a/src/backend/utils/adt/char.c +++ b/src/backend/utils/adt/char.c @@ -59,7 +59,7 @@ charout(PG_FUNCTION_ARGS) * charrecv - converts external binary format to char * * The external representation is one byte, with no character set - * conversion. This is somewhat dubious, perhaps, but in many + * conversion. This is somewhat dubious, perhaps, but in many * cases people use char for a 1-byte binary type. 
*/ Datum diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c index 51c6b6bacbf..714250b9d0f 100644 --- a/src/backend/utils/adt/date.c +++ b/src/backend/utils/adt/date.c @@ -1257,7 +1257,7 @@ AdjustTimeForTypmod(TimeADT *time, int32 typmod) * Note: this round-to-nearest code is not completely consistent about * rounding values that are exactly halfway between integral values. * On most platforms, rint() will implement round-to-nearest-even, but - * the integer code always rounds up (away from zero). Is it worth + * the integer code always rounds up (away from zero). Is it worth * trying to be consistent? */ #ifdef HAVE_INT64_TIMESTAMP @@ -1606,7 +1606,7 @@ time_interval(PG_FUNCTION_ARGS) * Convert interval to time data type. * * This is defined as producing the fractional-day portion of the interval. - * Therefore, we can just ignore the months field. It is not real clear + * Therefore, we can just ignore the months field. It is not real clear * what to do with negative intervals, but we choose to subtract the floor, * so that, say, '-2 hours' becomes '22:00:00'. */ @@ -2596,7 +2596,7 @@ timetz_zone(PG_FUNCTION_ARGS) pg_tz *tzp; /* - * Look up the requested timezone. First we look in the date token table + * Look up the requested timezone. First we look in the date token table * (to handle cases like "EST"), and if that fails, we look in the * timezone database (to handle cases like "America/New_York"). (This * matches the order in which timestamp input checks the cases; it's diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c index 05e1730c881..59e06bb569d 100644 --- a/src/backend/utils/adt/datetime.c +++ b/src/backend/utils/adt/datetime.c @@ -352,7 +352,7 @@ j2date(int jd, int *year, int *month, int *day) * j2day - convert Julian date to day-of-week (0..6 == Sun..Sat) * * Note: various places use the locution j2day(date - 1) to produce a - * result according to the convention 0..6 = Mon..Sun. This is a bit of + * result according to the convention 0..6 = Mon..Sun. This is a bit of * a crock, but will work as long as the computation here is just a modulo. */ int @@ -2472,7 +2472,7 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask, /* * Nothing so far; make a decision about what we think the input - * is. There used to be lots of heuristics here, but the + * is. There used to be lots of heuristics here, but the * consensus now is to be paranoid. It *must* be either * YYYY-MM-DD (with a more-than-two-digit year field), or the * field order defined by DateOrder. @@ -2505,9 +2505,9 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask, { /* * We are at the first numeric field of a date that included a - * textual month name. We want to support the variants + * textual month name. We want to support the variants * MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as unambiguous - * inputs. We will also accept MON-DD-YY or DD-MON-YY in + * inputs. We will also accept MON-DD-YY or DD-MON-YY in * either DMY or MDY modes, as well as YY-MON-DD in YMD mode. */ if (flen >= 3 || DateOrder == DATEORDER_YMD) @@ -3315,7 +3315,7 @@ DecodeISO8601Interval(char *str, return dterr; /* - * Note: we could step off the end of the string here. Code below + * Note: we could step off the end of the string here. Code below * *must* exit the loop if unit == '\0'. 
*/ unit = *str++; @@ -4109,7 +4109,7 @@ EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str) /* * We've been burnt by stupid errors in the ordering of the datetkn tables - * once too often. Arrange to check them during postmaster start. + * once too often. Arrange to check them during postmaster start. */ static bool CheckDateTokenTable(const char *tablename, const datetkn *base, int nel) diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c index 1fd7ff777ae..f29778a7f12 100644 --- a/src/backend/utils/adt/datum.c +++ b/src/backend/utils/adt/datum.c @@ -181,7 +181,7 @@ datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen) /* * just compare the two datums. NOTE: just comparing "len" bytes will * not do the work, because we do not know how these bytes are aligned - * inside the "Datum". We assume instead that any given datatype is + * inside the "Datum". We assume instead that any given datatype is * consistent about how it fills extraneous bits in the Datum. */ res = (value1 == value2); diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c index 7381f217251..f73e4cd5c6e 100644 --- a/src/backend/utils/adt/dbsize.c +++ b/src/backend/utils/adt/dbsize.c @@ -510,7 +510,7 @@ pg_size_pretty(PG_FUNCTION_ARGS) * This is expected to be used in queries like * SELECT pg_relation_filenode(oid) FROM pg_class; * That leads to a couple of choices. We work from the pg_class row alone - * rather than actually opening each relation, for efficiency. We don't + * rather than actually opening each relation, for efficiency. We don't * fail if we can't find the relation --- some rows might be visible in * the query's MVCC snapshot but already dead according to SnapshotNow. * (Note: we could avoid using the catcache, but there's little point diff --git a/src/backend/utils/adt/domains.c b/src/backend/utils/adt/domains.c index c178fd0bddb..acf8c0cc5bb 100644 --- a/src/backend/utils/adt/domains.c +++ b/src/backend/utils/adt/domains.c @@ -12,11 +12,11 @@ * The overhead required for constraint checking can be high, since examining * the catalogs to discover the constraints for a given domain is not cheap. * We have three mechanisms for minimizing this cost: - * 1. In a nest of domains, we flatten the checking of all the levels + * 1. In a nest of domains, we flatten the checking of all the levels * into just one operation. - * 2. We cache the list of constraint items in the FmgrInfo struct + * 2. We cache the list of constraint items in the FmgrInfo struct * passed by the caller. - * 3. If there are CHECK constraints, we cache a standalone ExprContext + * 3. If there are CHECK constraints, we cache a standalone ExprContext * to evaluate them in. * * @@ -305,7 +305,7 @@ domain_recv(PG_FUNCTION_ARGS) /* * domain_check - check that a datum satisfies the constraints of a - * domain. extra and mcxt can be passed if they are available from, + * domain. extra and mcxt can be passed if they are available from, * say, a FmgrInfo structure, or they can be NULL, in which case the * setup is repeated for each call. */ diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c index b929abeba78..61efe0d8a5f 100644 --- a/src/backend/utils/adt/float.c +++ b/src/backend/utils/adt/float.c @@ -287,7 +287,7 @@ float4in(PG_FUNCTION_ARGS) /* * In some IRIX versions, strtod() recognizes only "inf", so if the input - * is "infinity" we have to skip over "inity". Also, it may return + * is "infinity" we have to skip over "inity". 
Also, it may return * positive infinity for "-inf". */ if (isinf(val)) @@ -508,7 +508,7 @@ float8in(PG_FUNCTION_ARGS) /* * In some IRIX versions, strtod() recognizes only "inf", so if the input - * is "infinity" we have to skip over "inity". Also, it may return + * is "infinity" we have to skip over "inity". Also, it may return * positive infinity for "-inf". */ if (isinf(val)) @@ -2050,7 +2050,7 @@ float8_stddev_samp(PG_FUNCTION_ARGS) * in that order. Note that Y is the first argument to the aggregates! * * It might seem attractive to optimize this by having multiple accumulator - * functions that only calculate the sums actually needed. But on most + * functions that only calculate the sums actually needed. But on most * modern machines, a couple of extra floating-point multiplies will be * insignificant compared to the other per-tuple overhead, so I've chosen * to minimize code space instead. diff --git a/src/backend/utils/adt/format_type.c b/src/backend/utils/adt/format_type.c index 7c51ee7ecac..5037f090ca1 100644 --- a/src/backend/utils/adt/format_type.c +++ b/src/backend/utils/adt/format_type.c @@ -46,14 +46,14 @@ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); * double quoted if it contains funny characters or matches a keyword. * * If typemod is NULL then we are formatting a type name in a context where - * no typemod is available, eg a function argument or result type. This + * no typemod is available, eg a function argument or result type. This * yields a slightly different result from specifying typemod = -1 in some * cases. Given typemod = -1 we feel compelled to produce an output that * the parser will interpret as having typemod -1, so that pg_dump will - * produce CREATE TABLE commands that recreate the original state. But + * produce CREATE TABLE commands that recreate the original state. But * given NULL typemod, we assume that the parser's interpretation of * typemod doesn't matter, and so we are willing to output a slightly - * "prettier" representation of the same type. For example, type = bpchar + * "prettier" representation of the same type. For example, type = bpchar * and typemod = NULL gets you "character", whereas typemod = -1 gets you * "bpchar" --- the former will be interpreted as character(1) by the * parser, which does not yield typemod -1. diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c index 6f267db0256..a54b215afb9 100644 --- a/src/backend/utils/adt/formatting.c +++ b/src/backend/utils/adt/formatting.c @@ -1816,7 +1816,7 @@ str_initcap(const char *buff, size_t nbytes, Oid collid) /* * Note: we assume that toupper_l()/tolower_l() will not be so broken - * as to need guard tests. When using the default collation, we apply + * as to need guard tests. When using the default collation, we apply * the traditional Postgres behavior that forces ASCII-style treatment * of I/i, but in non-default collations you get exactly what the * collation says. @@ -3579,7 +3579,7 @@ do_to_timestamp(text *date_txt, text *fmt, { /* * The month and day field have not been set, so we use the - * day-of-year field to populate them. Depending on the date mode, + * day-of-year field to populate them. Depending on the date mode, * this field may be interpreted as a Gregorian day-of-year, or an ISO * week date day-of-year. 
*/ diff --git a/src/backend/utils/adt/geo_selfuncs.c b/src/backend/utils/adt/geo_selfuncs.c index c2e6bc1794e..c576b7d6ff9 100644 --- a/src/backend/utils/adt/geo_selfuncs.c +++ b/src/backend/utils/adt/geo_selfuncs.c @@ -22,7 +22,7 @@ /* - * Selectivity functions for geometric operators. These are bogus -- unless + * Selectivity functions for geometric operators. These are bogus -- unless * we know the actual key distribution in the index, we can't make a good * prediction of the selectivity of these operators. * @@ -34,7 +34,7 @@ * In general, GiST needs to search multiple subtrees in order to guarantee * that all occurrences of the same key have been found. Because of this, * the estimated cost for scanning the index ought to be higher than the - * output selectivity would indicate. gistcostestimate(), over in selfuncs.c, + * output selectivity would indicate. gistcostestimate(), over in selfuncs.c, * ought to be adjusted accordingly --- but until we can generate somewhat * realistic numbers here, it hardly matters... */ diff --git a/src/backend/utils/adt/inet_cidr_ntop.c b/src/backend/utils/adt/inet_cidr_ntop.c index 5f2a3d361d9..d33534ec173 100644 --- a/src/backend/utils/adt/inet_cidr_ntop.c +++ b/src/backend/utils/adt/inet_cidr_ntop.c @@ -196,7 +196,7 @@ inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size) } else { - /* Copy src to private buffer. Zero host part. */ + /* Copy src to private buffer. Zero host part. */ p = (bits + 7) / 8; memcpy(inbuf, src, p); memset(inbuf + p, 0, 16 - p); diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c index 07493e3c3f8..3ca243fb0ad 100644 --- a/src/backend/utils/adt/int.c +++ b/src/backend/utils/adt/int.c @@ -642,7 +642,7 @@ int4pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -663,8 +663,8 @@ int4mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -684,7 +684,7 @@ int4mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg2 gives arg1 + * Overflow check. We basically check to see if result / arg2 gives arg1 * again. There are two cases where this fails: arg2 = 0 (which cannot * overflow) and arg1 = INT_MIN, arg2 = -1 (where the division itself will * overflow and thus incorrectly match). @@ -794,7 +794,7 @@ int2pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -815,8 +815,8 @@ int2mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. 
If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -897,7 +897,7 @@ int24pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -918,8 +918,8 @@ int24mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -939,7 +939,7 @@ int24mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg2 gives arg1 + * Overflow check. We basically check to see if result / arg2 gives arg1 * again. There is one case where this fails: arg2 = 0 (which cannot * overflow). * @@ -985,7 +985,7 @@ int42pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -1006,8 +1006,8 @@ int42mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -1027,7 +1027,7 @@ int42mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg1 gives arg2 + * Overflow check. We basically check to see if result / arg1 gives arg2 * again. There is one case where this fails: arg1 = 0 (which cannot * overflow). * diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c index b28b6f03e7e..a5723af3043 100644 --- a/src/backend/utils/adt/int8.c +++ b/src/backend/utils/adt/int8.c @@ -73,7 +73,7 @@ scanint8(const char *str, bool errorOK, int64 *result) ptr++; /* - * Do an explicit check for INT64_MIN. Ugly though this is, it's + * Do an explicit check for INT64_MIN. Ugly though this is, it's * cleaner than trying to get the loop below to handle it portably. */ if (strncmp(ptr, "9223372036854775808", 19) == 0) @@ -519,7 +519,7 @@ int8pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -540,8 +540,8 @@ int8mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. 
*/ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -561,7 +561,7 @@ int8mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg2 gives arg1 + * Overflow check. We basically check to see if result / arg2 gives arg1 * again. There are two cases where this fails: arg2 = 0 (which cannot * overflow) and arg1 = INT64_MIN, arg2 = -1 (where the division itself * will overflow and thus incorrectly match). @@ -719,7 +719,7 @@ int8inc(PG_FUNCTION_ARGS) /* * These functions are exactly like int8inc but are used for aggregates that - * count only non-null values. Since the functions are declared strict, + * count only non-null values. Since the functions are declared strict, * the null checks happen before we ever get here, and all we need do is * increment the state value. We could actually make these pg_proc entries * point right at int8inc, but then the opr_sanity regression test would @@ -773,7 +773,7 @@ int84pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -794,8 +794,8 @@ int84mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -815,7 +815,7 @@ int84mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg1 gives arg2 + * Overflow check. We basically check to see if result / arg1 gives arg2 * again. There is one case where this fails: arg1 = 0 (which cannot * overflow). * @@ -882,7 +882,7 @@ int48pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -903,8 +903,8 @@ int48mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -924,7 +924,7 @@ int48mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg2 gives arg1 + * Overflow check. We basically check to see if result / arg2 gives arg1 * again. There is one case where this fails: arg2 = 0 (which cannot * overflow). * @@ -970,7 +970,7 @@ int82pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -991,8 +991,8 @@ int82mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. 
If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -1012,7 +1012,7 @@ int82mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg1 gives arg2 + * Overflow check. We basically check to see if result / arg1 gives arg2 * again. There is one case where this fails: arg1 = 0 (which cannot * overflow). * @@ -1079,7 +1079,7 @@ int28pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -1100,8 +1100,8 @@ int28mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -1121,7 +1121,7 @@ int28mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg2 gives arg1 + * Overflow check. We basically check to see if result / arg2 gives arg1 * again. There is one case where this fails: arg2 = 0 (which cannot * overflow). * diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c index 09e8698af2c..3137711c384 100644 --- a/src/backend/utils/adt/like.c +++ b/src/backend/utils/adt/like.c @@ -76,12 +76,12 @@ wchareq(char *p1, char *p2) /* * Formerly we had a routine iwchareq() here that tried to do case-insensitive - * comparison of multibyte characters. It did not work at all, however, + * comparison of multibyte characters. It did not work at all, however, * because it relied on tolower() which has a single-byte API ... and * towlower() wouldn't be much better since we have no suitably cheap way * of getting a single character transformed to the system's wchar_t format. * So now, we just downcase the strings using lower() and apply regular LIKE - * comparison. This should be revisited when we install better locale support. + * comparison. This should be revisited when we install better locale support. */ /* diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c index 5bda4af50fd..3b6300dc40f 100644 --- a/src/backend/utils/adt/misc.c +++ b/src/backend/utils/adt/misc.c @@ -278,7 +278,7 @@ pg_sleep(PG_FUNCTION_ARGS) * pg_usleep's upper bound on allowed delays. * * By computing the intended stop time initially, we avoid accumulation of - * extra delay across multiple sleeps. This also ensures we won't delay + * extra delay across multiple sleeps. This also ensures we won't delay * less than the specified time if pg_usleep is interrupted by other * signals such as SIGHUP. 
*/ diff --git a/src/backend/utils/adt/nabstime.c b/src/backend/utils/adt/nabstime.c index 6771e78af8e..bdd98d98d54 100644 --- a/src/backend/utils/adt/nabstime.c +++ b/src/backend/utils/adt/nabstime.c @@ -198,7 +198,7 @@ tm2abstime(struct pg_tm * tm, int tz) sec = tm->tm_sec + tz + (tm->tm_min + (day * HOURS_PER_DAY + tm->tm_hour) * MINS_PER_HOUR) * SECS_PER_MINUTE; /* - * check for overflow. We need a little slop here because the H/M/S plus + * check for overflow. We need a little slop here because the H/M/S plus * TZ offset could add up to more than 1 day. */ if ((day >= MAX_DAYNUM - 10 && sec < 0) || @@ -1163,7 +1163,7 @@ tintervalsame(PG_FUNCTION_ARGS) * 1. The interval length computations overflow at 2^31 seconds, causing * intervals longer than that to sort oddly compared to those shorter. * 2. infinity and minus infinity (NOEND_ABSTIME and NOSTART_ABSTIME) are - * just ordinary integers. Since this code doesn't handle them specially, + * just ordinary integers. Since this code doesn't handle them specially, * it's possible for [a b] to be considered longer than [c infinity] for * finite abstimes a, b, c. In combination with the previous point, the * interval [-infinity infinity] is treated as being shorter than many finite diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c index a968d707994..7ebae765400 100644 --- a/src/backend/utils/adt/network.c +++ b/src/backend/utils/adt/network.c @@ -29,7 +29,7 @@ static int ip_addrsize(inet *inetptr); static inet *internal_inetpl(inet *ip, int64 addend); /* - * Access macros. We use VARDATA_ANY so that we can process short-header + * Access macros. We use VARDATA_ANY so that we can process short-header * varlena values without detoasting them. This requires a trick: * VARDATA_ANY assumes the varlena header is already filled in, which is * not the case when constructing a new value (until SET_INET_VARSIZE is @@ -88,7 +88,7 @@ network_in(char *src, bool is_cidr) dst = (inet *) palloc0(sizeof(inet)); /* - * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses + * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses * will have a : somewhere in them (several, in fact) so if there is one * present, assume it's V6, otherwise assume it's V4. */ @@ -193,7 +193,7 @@ cidr_out(PG_FUNCTION_ARGS) * family, bits, is_cidr, address length, address in network byte order. * * Presence of is_cidr is largely for historical reasons, though it might - * allow some code-sharing on the client side. We send it correctly on + * allow some code-sharing on the client side. We send it correctly on * output, but ignore the value on input. */ static inet * @@ -1392,7 +1392,7 @@ inetmi(PG_FUNCTION_ARGS) /* * We form the difference using the traditional complement, increment, * and add rule, with the increment part being handled by starting the - * carry off at 1. If you don't think integer arithmetic is done in + * carry off at 1. If you don't think integer arithmetic is done in * two's complement, too bad. */ int nb = ip_addrsize(ip); @@ -1414,7 +1414,7 @@ inetmi(PG_FUNCTION_ARGS) else { /* - * Input wider than int64: check for overflow. All bytes to + * Input wider than int64: check for overflow. All bytes to * the left of what will fit should be 0 or 0xFF, depending on * sign of the now-complete result. */ @@ -1445,9 +1445,9 @@ inetmi(PG_FUNCTION_ARGS) * XXX This should go away someday! * * This is a kluge needed because we don't yet support zones in stored inet - * values. 
Since the result of getnameinfo() might include a zone spec, + * values. Since the result of getnameinfo() might include a zone spec, * call this to remove it anywhere we want to feed getnameinfo's output to - * network_in. Beats failing entirely. + * network_in. Beats failing entirely. * * An alternative approach would be to let network_in ignore %-parts for * itself, but that would mean we'd silently drop zone specs in user input, diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index 6b60a5c1c78..5091306b0e9 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -49,7 +49,7 @@ * Numeric values are represented in a base-NBASE floating point format. * Each "digit" ranges from 0 to NBASE-1. The type NumericDigit is signed * and wide enough to store a digit. We assume that NBASE*NBASE can fit in - * an int. Although the purely calculational routines could handle any even + * an int. Although the purely calculational routines could handle any even * NBASE that's less than sqrt(INT_MAX), in practice we are only interested * in NBASE a power of ten, so that I/O conversions and decimal rounding * are easy. Also, it's actually more efficient if NBASE is rather less than @@ -94,11 +94,11 @@ typedef int16 NumericDigit; * If the high bits of the first word of a NumericChoice (n_header, or * n_short.n_header, or n_long.n_sign_dscale) are NUMERIC_SHORT, then the * numeric follows the NumericShort format; if they are NUMERIC_POS or - * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN, + * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN, * it is a NaN. We currently always store a NaN using just two bytes (i.e. * only n_header), but previous releases used only the NumericLong format, * so we might find 4-byte NaNs on disk if a database has been migrated using - * pg_upgrade. In either case, when the high bits indicate a NaN, the + * pg_upgrade. In either case, when the high bits indicate a NaN, the * remaining bits are never examined. Currently, we always initialize these * to zero, but it might be possible to use them for some other purpose in * the future. @@ -206,19 +206,19 @@ struct NumericData : ((n)->choice.n_long.n_weight)) /* ---------- - * NumericVar is the format we use for arithmetic. The digit-array part + * NumericVar is the format we use for arithmetic. The digit-array part * is the same as the NumericData storage format, but the header is more * complex. * * The value represented by a NumericVar is determined by the sign, weight, * ndigits, and digits[] array. * Note: the first digit of a NumericVar's value is assumed to be multiplied - * by NBASE ** weight. Another way to say it is that there are weight+1 + * by NBASE ** weight. Another way to say it is that there are weight+1 * digits before the decimal point. It is possible to have weight < 0. * * buf points at the physical start of the palloc'd digit buffer for the - * NumericVar. digits points at the first digit in actual use (the one - * with the specified weight). We normally leave an unused digit or two + * NumericVar. digits points at the first digit in actual use (the one + * with the specified weight). We normally leave an unused digit or two * (preset to zeroes) between buf and digits, so that there is room to store * a carry out of the top digit without reallocating space. We just need to * decrement digits (and increment weight) to make room for the carry digit. 
@@ -592,7 +592,7 @@ numeric_maximum_size(int32 typmod) * In most cases, the size of a numeric will be smaller than the value * computed below, because the varlena header will typically get toasted * down to a single byte before being stored on disk, and it may also be - * possible to use a short numeric header. But our job here is to compute + * possible to use a short numeric header. But our job here is to compute * the worst case. */ return NUMERIC_HDRSZ + (numeric_digits * sizeof(NumericDigit)); @@ -913,7 +913,7 @@ numeric_uminus(PG_FUNCTION_ARGS) /* * The packed format is known to be totally zero digit trimmed always. So - * we can identify a ZERO by the fact that there are no digits at all. Do + * we can identify a ZERO by the fact that there are no digits at all. Do * nothing to a zero. */ if (NUMERIC_NDIGITS(num) != 0) @@ -1926,7 +1926,7 @@ numeric_sqrt(PG_FUNCTION_ARGS) PG_RETURN_NUMERIC(make_result(&const_nan)); /* - * Unpack the argument and determine the result scale. We choose a scale + * Unpack the argument and determine the result scale. We choose a scale * to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any * case not less than the input's dscale. */ @@ -1979,7 +1979,7 @@ numeric_exp(PG_FUNCTION_ARGS) PG_RETURN_NUMERIC(make_result(&const_nan)); /* - * Unpack the argument and determine the result scale. We choose a scale + * Unpack the argument and determine the result scale. We choose a scale * to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any * case not less than the input's dscale. */ @@ -2585,9 +2585,9 @@ numeric_avg_accum(PG_FUNCTION_ARGS) /* * Integer data types all use Numeric accumulators to share code and - * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation + * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation * is overkill for the N and sum(X) values, but definitely not overkill - * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only + * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only * for stddev/variance --- there are faster special-purpose accumulator * routines for SUM and AVG of these datatypes. */ @@ -2850,7 +2850,7 @@ numeric_stddev_pop(PG_FUNCTION_ARGS) * the initial condition of the transition data value needs to be NULL. This * means we can't rely on ExecAgg to automatically insert the first non-null * data value into the transition data: it doesn't know how to do the type - * conversion. The upshot is that these routines have to be marked non-strict + * conversion. The upshot is that these routines have to be marked non-strict * and handle substitution of the first non-null input themselves. */ @@ -3248,7 +3248,7 @@ set_var_from_str(const char *str, const char *cp, NumericVar *dest) /* * We first parse the string to extract decimal digits and determine the - * correct decimal weight. Then convert to NBASE representation. + * correct decimal weight. Then convert to NBASE representation. */ switch (*cp) { @@ -3838,7 +3838,7 @@ apply_typmod(NumericVar *var, int32 typmod) /* * Convert numeric to int8, rounding if needed. * - * If overflow, return FALSE (no error is raised). Return TRUE if okay. + * If overflow, return FALSE (no error is raised). Return TRUE if okay. * * CAUTION: var's contents may be modified by rounding! */ @@ -4302,7 +4302,7 @@ sub_var(NumericVar *var1, NumericVar *var2, NumericVar *result) * mul_var() - * * Multiplication on variable level. Product of var1 * var2 is stored - * in result. 
Result is rounded to no more than rscale fractional digits. + * in result. Result is rounded to no more than rscale fractional digits. */ static void mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result, @@ -4346,7 +4346,7 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result, /* * Determine number of result digits to compute. If the exact result * would have more than rscale fractional digits, truncate the computation - * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one + * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one * or both inputs have fewer digits than they really do. */ res_ndigits = var1ndigits + var2ndigits + 1; @@ -4589,7 +4589,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result, * * We need the first divisor digit to be >= NBASE/2. If it isn't, * make it so by scaling up both the divisor and dividend by the - * factor "d". (The reason for allocating dividend[0] above is to + * factor "d". (The reason for allocating dividend[0] above is to * leave room for possible carry here.) */ if (divisor[1] < HALF_NBASE) @@ -4633,7 +4633,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result, /* * If next2digits are 0, then quotient digit must be 0 and there's - * no need to adjust the working dividend. It's worth testing + * no need to adjust the working dividend. It's worth testing * here to fall out ASAP when processing trailing zeroes in a * dividend. */ @@ -4651,7 +4651,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result, /* * Adjust quotient digit if it's too large. Knuth proves that * after this step, the quotient digit will be either correct or - * just one too large. (Note: it's OK to use dividend[j+2] here + * just one too large. (Note: it's OK to use dividend[j+2] here * because we know the divisor length is at least 2.) */ while (divisor2 * qhat > @@ -4826,7 +4826,7 @@ div_var_fast(NumericVar *var1, NumericVar *var2, NumericVar *result, * dividend's digits (plus appended zeroes to reach the desired precision * including guard digits). Each step of the main loop computes an * (approximate) quotient digit and stores it into div[], removing one - * position of dividend space. A final pass of carry propagation takes + * position of dividend space. A final pass of carry propagation takes * care of any mistaken quotient digits. */ div = (int *) palloc0((div_ndigits + 1) * sizeof(int)); @@ -5683,7 +5683,7 @@ power_var_int(NumericVar *base, int exp, NumericVar *result, int rscale) /* * The general case repeatedly multiplies base according to the bit - * pattern of exp. We do the multiplications with some extra precision. + * pattern of exp. We do the multiplications with some extra precision. */ neg = (exp < 0); exp = Abs(exp); diff --git a/src/backend/utils/adt/oid.c b/src/backend/utils/adt/oid.c index 495b6261e62..4510acd17a1 100644 --- a/src/backend/utils/adt/oid.c +++ b/src/backend/utils/adt/oid.c @@ -318,7 +318,7 @@ oidparse(Node *node) /* * Values too large for int4 will be represented as Float - * constants by the lexer. Accept these if they are valid OID + * constants by the lexer. Accept these if they are valid OID * strings. 
*/ return oidin_subr(strVal(node), NULL); diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c index c3181be0d2f..82c77ed768d 100644 --- a/src/backend/utils/adt/pg_locale.c +++ b/src/backend/utils/adt/pg_locale.c @@ -20,12 +20,12 @@ * * The other categories, LC_MONETARY, LC_NUMERIC, and LC_TIME are also * settable at run-time. However, we don't actually set those locale - * categories permanently. This would have bizarre effects like no + * categories permanently. This would have bizarre effects like no * longer accepting standard floating-point literals in some locales. * Instead, we only set the locales briefly when needed, cache the * required information obtained from localeconv(), and set them back. * The cached information is only used by the formatting functions - * (to_char, etc.) and the money type. For the user, this should all be + * (to_char, etc.) and the money type. For the user, this should all be * transparent. * * !!! NOW HEAR THIS !!! @@ -39,7 +39,7 @@ * fail = true; * setlocale(category, save); * DOES NOT WORK RELIABLY: on some platforms the second setlocale() call - * will change the memory save is pointing at. To do this sort of thing + * will change the memory save is pointing at. To do this sort of thing * safely, you *must* pstrdup what setlocale returns the first time. * * FYI, The Open Group locale standard is defined here: @@ -253,7 +253,7 @@ check_locale(int category, const char *value) * * For most locale categories, the assign hook doesn't actually set the locale * permanently, just reset flags so that the next use will cache the - * appropriate values. (See explanation at the top of this file.) + * appropriate values. (See explanation at the top of this file.) * * Note: we accept value = "" as selecting the postmaster's environment * value, whatever it was (so long as the environment setting is legal). @@ -766,7 +766,7 @@ IsoLocaleName(const char *winlocname) * could fail if the locale is C, so str_tolower() shouldn't call it * in that case. * - * Note that we currently lack any way to flush the cache. Since we don't + * Note that we currently lack any way to flush the cache. Since we don't * support ALTER COLLATION, this is OK. The worst case is that someone * drops a collation, and a useless cache entry hangs around in existing * backends. @@ -960,7 +960,7 @@ report_newlocale_failure(const char *localename) /* - * Create a locale_t from a collation OID. Results are cached for the + * Create a locale_t from a collation OID. Results are cached for the * lifetime of the backend. Thus, do not free the result with freelocale(). * * As a special optimization, the default/database collation returns 0. @@ -1143,7 +1143,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale) * This has almost the API of mbstowcs_l(), except that *from need not be * null-terminated; instead, the number of input bytes is specified as * fromlen. Also, we ereport() rather than returning -1 for invalid - * input encoding. tolen is the maximum number of wchar_t's to store at *to. + * input encoding. tolen is the maximum number of wchar_t's to store at *to. * The output will be zero-terminated iff there is room. 
*/ size_t diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c index 6b0fd364e44..f5c8321a44e 100644 --- a/src/backend/utils/adt/pg_lzcompress.c +++ b/src/backend/utils/adt/pg_lzcompress.c @@ -578,7 +578,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest, /* * If we've emitted more than first_success_by bytes without finding - * anything compressible at all, fail. This lets us fall out + * anything compressible at all, fail. This lets us fall out * reasonably quickly when looking at incompressible input (such as * pre-compressed data). */ @@ -602,7 +602,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest, hist_next, hist_recycle, dp, dend); dp++; /* Do not do this ++ in the line above! */ - /* The macro would do it four times - Jan. */ + /* The macro would do it four times - Jan. */ } found_match = true; } @@ -616,7 +616,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest, hist_next, hist_recycle, dp, dend); dp++; /* Do not do this ++ in the line above! */ - /* The macro would do it four times - Jan. */ + /* The macro would do it four times - Jan. */ } } diff --git a/src/backend/utils/adt/pseudotypes.c b/src/backend/utils/adt/pseudotypes.c index ddb1bd2b71c..638618db89d 100644 --- a/src/backend/utils/adt/pseudotypes.c +++ b/src/backend/utils/adt/pseudotypes.c @@ -6,7 +6,7 @@ * A pseudo-type isn't really a type and never has any operations, but * we do need to supply input and output functions to satisfy the links * in the pseudo-type's entry in pg_type. In most cases the functions - * just throw an error if invoked. (XXX the error messages here cover + * just throw an error if invoked. (XXX the error messages here cover * the most common case, but might be confusing in some contexts. Can * we do better?) * @@ -138,7 +138,7 @@ anyarray_out(PG_FUNCTION_ARGS) * anyarray_recv - binary input routine for pseudo-type ANYARRAY. * * XXX this could actually be made to work, since the incoming array - * data will contain the element type OID. Need to think through + * data will contain the element type OID. Need to think through * type-safety issues before allowing it, however. */ Datum @@ -192,7 +192,7 @@ anyenum_out(PG_FUNCTION_ARGS) * void_in - input routine for pseudo-type VOID. * * We allow this so that PL functions can return VOID without any special - * hack in the PL handler. Whatever value the PL thinks it's returning + * hack in the PL handler. Whatever value the PL thinks it's returning * will just be ignored. */ Datum diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c index 7e05a53810e..3f8a812a492 100644 --- a/src/backend/utils/adt/regexp.c +++ b/src/backend/utils/adt/regexp.c @@ -142,7 +142,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation) char errMsg[100]; /* - * Look for a match among previously compiled REs. Since the data + * Look for a match among previously compiled REs. Since the data * structure is self-organizing with most-used entries at the front, our * search strategy can just be to scan from the front. */ @@ -192,7 +192,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation) /* * Here and in other places in this file, do CHECK_FOR_INTERRUPTS - * before reporting a regex error. This is so that if the regex + * before reporting a regex error. This is so that if the regex * library aborts and returns REG_CANCEL, we don't print an error * message that implies the regex was invalid. 
*/ @@ -298,7 +298,7 @@ RE_wchar_execute(regex_t *re, pg_wchar *data, int data_len, * dat_len --- the length of the data string * nmatch, pmatch --- optional return area for match details * - * Data is given in the database encoding. We internally + * Data is given in the database encoding. We internally * convert to array of pg_wchar which is what Spencer's regex package wants. */ static bool diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c index 6716c0204c1..6d06d415cc9 100644 --- a/src/backend/utils/adt/regproc.c +++ b/src/backend/utils/adt/regproc.c @@ -81,7 +81,7 @@ regprocin(PG_FUNCTION_ARGS) /* * In bootstrap mode we assume the given name is not schema-qualified, and - * just search pg_proc for a unique match. This is needed for + * just search pg_proc for a unique match. This is needed for * initializing other system catalogs (pg_namespace may not exist yet, and * certainly there are no schemas other than pg_catalog). */ @@ -266,7 +266,7 @@ regprocedurein(PG_FUNCTION_ARGS) /* * Else it's a name and arguments. Parse the name and arguments, look up * potential matches in the current namespace search list, and scan to see - * which one exactly matches the given argument types. (There will not be + * which one exactly matches the given argument types. (There will not be * more than one match.) * * XXX at present, this code will not work in bootstrap mode, hence this @@ -427,7 +427,7 @@ regoperin(PG_FUNCTION_ARGS) /* * In bootstrap mode we assume the given name is not schema-qualified, and - * just search pg_operator for a unique match. This is needed for + * just search pg_operator for a unique match. This is needed for * initializing other system catalogs (pg_namespace may not exist yet, and * certainly there are no schemas other than pg_catalog). */ @@ -616,7 +616,7 @@ regoperatorin(PG_FUNCTION_ARGS) /* * Else it's a name and arguments. Parse the name and arguments, look up * potential matches in the current namespace search list, and scan to see - * which one exactly matches the given argument types. (There will not be + * which one exactly matches the given argument types. (There will not be * more than one match.) * * XXX at present, this code will not work in bootstrap mode, hence this @@ -853,7 +853,7 @@ regclassout(PG_FUNCTION_ARGS) /* * In bootstrap mode, skip the fancy namespace stuff and just return - * the class name. (This path is only needed for debugging output + * the class name. (This path is only needed for debugging output * anyway.) */ if (IsBootstrapProcessingMode()) @@ -1343,7 +1343,7 @@ stringToQualifiedNameList(const char *string) /* * Given a C string, parse it into a qualified function or operator name - * followed by a parenthesized list of type names. Reduce the + * followed by a parenthesized list of type names. Reduce the * type names to an array of OIDs (returned into *nargs and *argtypes; * the argtypes array should be of size FUNC_MAX_ARGS). The function or * operator name is returned to *names as a List of Strings. diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c index f08af12a2bd..32621e0fc26 100644 --- a/src/backend/utils/adt/ri_triggers.c +++ b/src/backend/utils/adt/ri_triggers.c @@ -2006,11 +2006,11 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS) /* * "MATCH <unspecified>" only changes columns corresponding to the - * referenced columns that have changed in pk_rel. This means the + * referenced columns that have changed in pk_rel. 
This means the * "SET attrn=NULL [, attrn=NULL]" string will be change as well. * In this case, we need to build a temporary plan rather than use * our cached plan, unless the update happens to change all - * columns in the key. Fortunately, for the most common case of a + * columns in the key. Fortunately, for the most common case of a * single-column foreign key, this will be true. * * In case you're wondering, the inequality check works because we @@ -2768,7 +2768,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel) * Temporarily increase work_mem so that the check query can be executed * more efficiently. It seems okay to do this because the query is simple * enough to not use a multiple of work_mem, and one typically would not - * have many large foreign-key validations happening concurrently. So + * have many large foreign-key validations happening concurrently. So * this seems to meet the criteria for being considered a "maintenance" * operation, and accordingly we use maintenance_work_mem. * @@ -3506,7 +3506,7 @@ ri_ReportViolation(RI_QueryKey *qkey, const char *constrname, errhint("This is most likely due to a rule having rewritten the query."))); /* - * Determine which relation to complain about. If tupdesc wasn't passed + * Determine which relation to complain about. If tupdesc wasn't passed * by caller, assume the violator tuple came from there. */ onfk = (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK); diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c index c55c3d3902b..8372c7be772 100644 --- a/src/backend/utils/adt/rowtypes.c +++ b/src/backend/utils/adt/rowtypes.c @@ -277,7 +277,7 @@ record_in(PG_FUNCTION_ARGS) /* * We cannot return tuple->t_data because heap_form_tuple allocates it as * part of a larger chunk, and our caller may expect to be able to pfree - * our result. So must copy the info into a new palloc chunk. + * our result. So must copy the info into a new palloc chunk. */ result = (HeapTupleHeader) palloc(tuple->t_len); memcpy(result, tuple->t_data, tuple->t_len); @@ -635,7 +635,7 @@ record_recv(PG_FUNCTION_ARGS) /* * We cannot return tuple->t_data because heap_form_tuple allocates it as * part of a larger chunk, and our caller may expect to be able to pfree - * our result. So must copy the info into a new palloc chunk. + * our result. So must copy the info into a new palloc chunk. */ result = (HeapTupleHeader) palloc(tuple->t_len); memcpy(result, tuple->t_data, tuple->t_len); @@ -889,7 +889,7 @@ record_cmp(FunctionCallInfo fcinfo) /* * Scan corresponding columns, allowing for dropped columns in different - * places in the two rows. i1 and i2 are physical column indexes, j is + * places in the two rows. i1 and i2 are physical column indexes, j is * the logical column index. */ i1 = i2 = j = 0; @@ -1124,7 +1124,7 @@ record_eq(PG_FUNCTION_ARGS) /* * Scan corresponding columns, allowing for dropped columns in different - * places in the two rows. i1 and i2 are physical column indexes, j is + * places in the two rows. i1 and i2 are physical column indexes, j is * the logical column index. */ i1 = i2 = j = 0; diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index 8bf5a791375..059eeb3dcc0 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -877,7 +877,7 @@ pg_get_indexdef_worker(Oid indexrelid, int colno, context = deparse_context_for(get_relation_name(indrelid), indrelid); /* - * Start the index definition. 
Note that the index's name should never be + * Start the index definition. Note that the index's name should never be * schema-qualified, but the indexed rel's name may be. */ initStringInfo(&buf); @@ -1305,7 +1305,7 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, prettyFlags, 0); /* - * Now emit the constraint definition. There are cases where + * Now emit the constraint definition. There are cases where * the constraint expression will be fully parenthesized and * we don't need the outer parens ... but there are other * cases where we do need 'em. Be conservative for now. @@ -2126,7 +2126,7 @@ deparse_expression_pretty(Node *expr, List *dpcontext, * * Given the reference name (alias) and OID of a relation, build deparsing * context for an expression referencing only that relation (as varno 1, - * varlevelsup 0). This is sufficient for many uses of deparse_expression. + * varlevelsup 0). This is sufficient for many uses of deparse_expression. * ---------- */ List * @@ -2211,7 +2211,7 @@ set_deparse_planstate(deparse_namespace *dpns, PlanState *ps) * We special-case Append and MergeAppend to pretend that the first child * plan is the OUTER referent; we have to interpret OUTER Vars in their * tlists according to one of the children, and the first one is the most - * natural choice. Likewise special-case ModifyTable to pretend that the + * natural choice. Likewise special-case ModifyTable to pretend that the * first child plan is the OUTER referent; this is to support RETURNING * lists containing references to non-target relations. */ @@ -2251,7 +2251,7 @@ set_deparse_planstate(deparse_namespace *dpns, PlanState *ps) * push_child_plan: temporarily transfer deparsing attention to a child plan * * When expanding an OUTER or INNER reference, we must adjust the deparse - * context in case the referenced expression itself uses OUTER/INNER. We + * context in case the referenced expression itself uses OUTER/INNER. We * modify the top stack entry in-place to avoid affecting levelsup issues * (although in a Plan tree there really shouldn't be any). * @@ -2615,8 +2615,8 @@ get_query_def(Query *query, StringInfo buf, List *parentnamespace, /* * Before we begin to examine the query, acquire locks on referenced - * relations, and fix up deleted columns in JOIN RTEs. This ensures - * consistent results. Note we assume it's OK to scribble on the passed + * relations, and fix up deleted columns in JOIN RTEs. This ensures + * consistent results. Note we assume it's OK to scribble on the passed * querytree! * * We are only deparsing the query (we are not about to execute it), so we @@ -3036,7 +3036,7 @@ get_target_list(List *targetList, deparse_context *context, } /* - * Figure out what the result column should be called. In the context + * Figure out what the result column should be called. In the context * of a view, use the view's tuple descriptor (so as to pick up the * effects of any column RENAME that's been done on the view). * Otherwise, just use what we can find in the TLE. @@ -3176,7 +3176,7 @@ get_rule_sortgroupclause(SortGroupClause *srt, List *tlist, bool force_colno, * expression is a constant, force it to be dumped with an explicit cast * as decoration --- this is because a simple integer constant is * ambiguous (and will be misinterpreted by findTargetlistEntry()) if we - * dump it without any decoration. Otherwise, just dump the expression + * dump it without any decoration. Otherwise, just dump the expression * normally. 
*/ if (force_colno) @@ -4292,7 +4292,7 @@ get_name_for_var_field(Var *var, int fieldno, /* * We now have an expression we can't expand any more, so see if - * get_expr_result_type() can do anything with it. If not, pass to + * get_expr_result_type() can do anything with it. If not, pass to * lookup_rowtype_tupdesc() which will probably fail, but will give an * appropriate error message while failing. */ @@ -4920,10 +4920,10 @@ get_rule_expr(Node *node, deparse_context *context, /* * If there's a refassgnexpr, we want to print the node in the - * format "array[subscripts] := refassgnexpr". This is not + * format "array[subscripts] := refassgnexpr". This is not * legal SQL, so decompilation of INSERT or UPDATE statements * should always use processIndirection as part of the - * statement-level syntax. We should only see this when + * statement-level syntax. We should only see this when * EXPLAIN tries to print the targetlist of a plan resulting * from such a statement. */ @@ -5082,7 +5082,7 @@ get_rule_expr(Node *node, deparse_context *context, /* * We cannot see an already-planned subplan in rule deparsing, - * only while EXPLAINing a query plan. We don't try to + * only while EXPLAINing a query plan. We don't try to * reconstruct the original SQL, just reference the subplan * that appears elsewhere in EXPLAIN's result. */ @@ -5155,14 +5155,14 @@ get_rule_expr(Node *node, deparse_context *context, * There is no good way to represent a FieldStore as real SQL, * so decompilation of INSERT or UPDATE statements should * always use processIndirection as part of the - * statement-level syntax. We should only get here when + * statement-level syntax. We should only get here when * EXPLAIN tries to print the targetlist of a plan resulting * from such a statement. The plan case is even harder than * ordinary rules would be, because the planner tries to * collapse multiple assignments to the same field or subfield * into one FieldStore; so we can see a list of target fields * not just one, and the arguments could be FieldStores - * themselves. We don't bother to try to print the target + * themselves. We don't bother to try to print the target * field names; we just print the source arguments, with a * ROW() around them if there's more than one. This isn't * terribly complete, but it's probably good enough for @@ -6058,7 +6058,7 @@ get_coercion_expr(Node *arg, deparse_context *context, * Since parse_coerce.c doesn't immediately collapse application of * length-coercion functions to constants, what we'll typically see in * such cases is a Const with typmod -1 and a length-coercion function - * right above it. Avoid generating redundant output. However, beware of + * right above it. Avoid generating redundant output. However, beware of * suppressing casts when the user actually wrote something like * 'foo'::text::char(3). */ @@ -6140,7 +6140,7 @@ get_const_expr(Const *constval, deparse_context *context, int showtype) /* * These types are printed without quotes unless they contain * values that aren't accepted by the scanner unquoted (e.g., - * 'NaN'). Note that strtod() and friends might accept NaN, + * 'NaN'). Note that strtod() and friends might accept NaN, * so we can't use that to test. * * In reality we only need to defend against infinity and NaN, @@ -6795,7 +6795,7 @@ get_opclass_name(Oid opclass, Oid actual_datatype, if (!OidIsValid(actual_datatype) || GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass) { - /* Okay, we need the opclass name. Do we need to qualify it? 
*/ + /* Okay, we need the opclass name. Do we need to qualify it? */ opcname = NameStr(opcrec->opcname); if (OpclassIsVisible(opclass)) appendStringInfo(buf, " %s", quote_identifier(opcname)); @@ -7090,9 +7090,9 @@ generate_relation_name(Oid relid, List *namespaces) * generate_function_name * Compute the name to display for a function specified by OID, * given that it is being called with the specified actual arg names and - * types. (Those matter because of ambiguous-function resolution rules.) + * types. (Those matter because of ambiguous-function resolution rules.) * - * The result includes all necessary quoting and schema-prefixing. We can + * The result includes all necessary quoting and schema-prefixing. We can * also pass back an indication of whether the function is variadic. */ static char * @@ -7120,7 +7120,7 @@ generate_function_name(Oid funcid, int nargs, List *argnames, /* * The idea here is to schema-qualify only if the parser would fail to * resolve the correct function given the unqualified func name with the - * specified argtypes. If the function is variadic, we should presume + * specified argtypes. If the function is variadic, we should presume * that VARIADIC will be included in the call. */ p_result = func_get_detail(list_make1(makeString(proname)), diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index c71e952f8e5..b90d2d7412c 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -72,7 +72,7 @@ * float8 oprjoin (internal, oid, internal, int2, internal); * * (Before Postgres 8.4, join estimators had only the first four of these - * parameters. That signature is still allowed, but deprecated.) The + * parameters. That signature is still allowed, but deprecated.) The * relationship between jointype and sjinfo is explained in the comments for * clause_selectivity() --- the short version is that jointype is usually * best ignored in favor of examining sjinfo. @@ -201,7 +201,7 @@ static Const *string_to_bytea_const(const char *str, size_t str_len); * * Note: this routine is also used to estimate selectivity for some * operators that are not "=" but have comparable selectivity behavior, - * such as "~=" (geometric approximate-match). Even for "=", we must + * such as "~=" (geometric approximate-match). Even for "=", we must * keep in mind that the left and right datatypes may differ. */ Datum @@ -286,7 +286,7 @@ var_eq_const(VariableStatData *vardata, Oid operator, /* * Is the constant "=" to any of the column's most common values? * (Although the given operator may not really be "=", we will assume - * that seeing whether it returns TRUE is an appropriate test. If you + * that seeing whether it returns TRUE is an appropriate test. If you * don't like this, maybe you shouldn't be using eqsel for your * operator...) */ @@ -420,7 +420,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator, * result averaged over all possible values whether common or * uncommon. (Essentially, we are assuming that the not-yet-known * comparison value is equally likely to be any of the possible - * values, regardless of their frequency in the table. Is that a good + * values, regardless of their frequency in the table. Is that a good * idea?) */ selec = 1.0 - stats->stanullfrac; @@ -643,7 +643,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc, * essentially using the histogram just as a representative sample. 
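As an aside on the histogram_selectivity comment quoted above: the idea is to treat the stored histogram entries as a representative sample of the column and take the matching fraction as the selectivity estimate, falling back to some other approach when the histogram is too small. A minimal standalone sketch of that idea, with a hypothetical name and a plain double-valued histogram rather than PostgreSQL's Datum-based machinery:

```c
#include <stddef.h>
#include <stdio.h>

/*
 * Estimate the selectivity of "value OP const" by applying the operator to
 * every histogram entry and returning the matching fraction.  Returns -1.0
 * when the histogram is too small to trust, so the caller can fall back to
 * another estimation approach.
 */
static double
histogram_sample_selectivity(const double *hist, size_t nhist,
                             int (*op)(double entry, double constval),
                             double constval, size_t min_hist_size)
{
    size_t  nmatch = 0;

    if (nhist < min_hist_size)
        return -1.0;            /* too small to trust: caller falls back */

    for (size_t i = 0; i < nhist; i++)
    {
        if (op(hist[i], constval))
            nmatch++;
    }
    return (double) nmatch / (double) nhist;
}

/* example operator: "<" on doubles */
static int
lt_op(double entry, double constval)
{
    return entry < constval;
}

int
main(void)
{
    double  hist[] = {1, 3, 5, 7, 9, 11, 13, 15, 17, 19};

    /* fraction of sampled boundary values below 10 -> 0.5 */
    printf("estimated selectivity: %g\n",
           histogram_sample_selectivity(hist, 10, lt_op, 10.0, 5));
    return 0;
}
```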
However, * small histograms are unlikely to be all that representative, so the caller * should be prepared to fall back on some other estimation approach when the - * histogram is missing or very small. It may also be prudent to combine this + * histogram is missing or very small. It may also be prudent to combine this * approach with another one when the histogram is small. * * If the actual histogram size is not at least min_hist_size, we won't bother @@ -661,7 +661,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc, * * Note that the result disregards both the most-common-values (if any) and * null entries. The caller is expected to combine this result with - * statistics for those portions of the column population. It may also be + * statistics for those portions of the column population. It may also be * prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs. */ double @@ -774,7 +774,7 @@ ineq_histogram_selectivity(PlannerInfo *root, * * If the binary search accesses the first or last histogram * entry, we try to replace that endpoint with the true column min - * or max as found by get_actual_variable_range(). This + * or max as found by get_actual_variable_range(). This * ameliorates misestimates when the min or max is moving as a * result of changes since the last ANALYZE. Note that this could * result in effectively including MCVs into the histogram that @@ -878,7 +878,7 @@ ineq_histogram_selectivity(PlannerInfo *root, /* * Watch out for the possibility that we got a NaN or - * Infinity from the division. This can happen + * Infinity from the division. This can happen * despite the previous checks, if for example "low" * is -Infinity. */ @@ -893,7 +893,7 @@ ineq_histogram_selectivity(PlannerInfo *root, * Ideally we'd produce an error here, on the grounds that * the given operator shouldn't have scalarXXsel * registered as its selectivity func unless we can deal - * with its operand types. But currently, all manner of + * with its operand types. But currently, all manner of * stuff is invoking scalarXXsel, so give a default * estimate until that can be fixed. */ @@ -919,7 +919,7 @@ ineq_histogram_selectivity(PlannerInfo *root, /* * The histogram boundaries are only approximate to begin with, - * and may well be out of date anyway. Therefore, don't believe + * and may well be out of date anyway. Therefore, don't believe * extremely small or large selectivity estimates --- unless we * got actual current endpoint values from the table. */ @@ -1116,7 +1116,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate) /* * If this is for a NOT LIKE or similar operator, get the corresponding - * positive-match operator and work with that. Set result to the correct + * positive-match operator and work with that. Set result to the correct * default estimate, too. */ if (negate) @@ -1320,7 +1320,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate) /* * If we have most-common-values info, add up the fractions of the MCV * entries that satisfy MCV OP PATTERN. These fractions contribute - * directly to the result selectivity. Also add up the total fraction + * directly to the result selectivity. Also add up the total fraction * represented by MCV entries. */ mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true, @@ -2135,9 +2135,9 @@ eqjoinsel_inner(Oid operator, if (have_mcvs1 && have_mcvs2) { /* - * We have most-common-value lists for both relations. Run through + * We have most-common-value lists for both relations. 
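An illustrative aside on the eqjoinsel_inner comment here: the MCV lists of both join inputs are cross-matched and the products of the matched frequencies are summed, giving an exact selectivity contribution for the MCV-covered portion of the data. A simplified sketch under the assumption of plain double values and true equality (the backend compares values through the operator's fmgr entry and also tracks the unmatched fractions):

```c
#include <stdio.h>

/*
 * Sum the products of frequencies for MCVs that appear in both lists.
 * Mirrors the comment's assumption that each MCV matches at most one
 * entry of the other list, since the operator is (assumed) equality.
 */
static double
mcv_join_selectivity(const double *vals1, const double *freqs1, int n1,
                     const double *vals2, const double *freqs2, int n2)
{
    double  matchprodfreq = 0.0;

    for (int i = 0; i < n1; i++)
    {
        for (int j = 0; j < n2; j++)
        {
            if (vals1[i] == vals2[j])
            {
                matchprodfreq += freqs1[i] * freqs2[j];
                break;          /* at most one match per MCV */
            }
        }
    }
    return matchprodfreq;
}

int
main(void)
{
    double  v1[] = {1, 2, 3}, f1[] = {0.5, 0.3, 0.1};
    double  v2[] = {2, 4},    f2[] = {0.6, 0.2};

    /* only the value 2 joins: 0.3 * 0.6 = 0.18 */
    printf("MCV join selectivity: %g\n",
           mcv_join_selectivity(v1, f1, 3, v2, f2, 2));
    return 0;
}
```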
Run through * the lists to see which MCVs actually join to each other with the - * given operator. This allows us to determine the exact join + * given operator. This allows us to determine the exact join * selectivity for the portion of the relations represented by the MCV * lists. We still have to estimate for the remaining population, but * in a skewed distribution this gives us a big leg up in accuracy. @@ -2169,7 +2169,7 @@ eqjoinsel_inner(Oid operator, /* * Note we assume that each MCV will match at most one member of the - * other MCV list. If the operator isn't really equality, there could + * other MCV list. If the operator isn't really equality, there could * be multiple matches --- but we don't look for them, both for speed * and because the math wouldn't add up... */ @@ -2377,9 +2377,9 @@ eqjoinsel_semi(Oid operator, if (have_mcvs1 && have_mcvs2 && OidIsValid(operator)) { /* - * We have most-common-value lists for both relations. Run through + * We have most-common-value lists for both relations. Run through * the lists to see which MCVs actually join to each other with the - * given operator. This allows us to determine the exact join + * given operator. This allows us to determine the exact join * selectivity for the portion of the relations represented by the MCV * lists. We still have to estimate for the remaining population, but * in a skewed distribution this gives us a big leg up in accuracy. @@ -2410,7 +2410,7 @@ eqjoinsel_semi(Oid operator, /* * Note we assume that each MCV will match at most one member of the - * other MCV list. If the operator isn't really equality, there could + * other MCV list. If the operator isn't really equality, there could * be multiple matches --- but we don't look for them, both for speed * and because the math wouldn't add up... */ @@ -2447,7 +2447,7 @@ eqjoinsel_semi(Oid operator, /* * Now we need to estimate the fraction of relation 1 that has at - * least one join partner. We know for certain that the matched MCVs + * least one join partner. We know for certain that the matched MCVs * do, so that gives us a lower bound, but we're really in the dark * about everything else. Our crude approach is: if nd1 <= nd2 then * assume all non-null rel1 rows have join partners, else assume for @@ -3044,11 +3044,11 @@ add_unique_group_var(PlannerInfo *root, List *varinfos, * case (all possible cross-product terms actually appear as groups) since * very often the grouped-by Vars are highly correlated. Our current approach * is as follows: - * 1. Expressions yielding boolean are assumed to contribute two groups, + * 1. Expressions yielding boolean are assumed to contribute two groups, * independently of their content, and are ignored in the subsequent - * steps. This is mainly because tests like "col IS NULL" break the + * steps. This is mainly because tests like "col IS NULL" break the * heuristic used in step 2 especially badly. - * 2. Reduce the given expressions to a list of unique Vars used. For + * 2. Reduce the given expressions to a list of unique Vars used. For * example, GROUP BY a, a + b is treated the same as GROUP BY a, b. * It is clearly correct not to count the same Var more than once. * It is also reasonable to treat f(x) the same as x: f() cannot @@ -3058,14 +3058,14 @@ add_unique_group_var(PlannerInfo *root, List *varinfos, * As a special case, if a GROUP BY expression can be matched to an * expressional index for which we have statistics, then we treat the * whole expression as though it were just a Var. - * 3. 
If the list contains Vars of different relations that are known equal + * 3. If the list contains Vars of different relations that are known equal * due to equivalence classes, then drop all but one of the Vars from each * known-equal set, keeping the one with smallest estimated # of values * (since the extra values of the others can't appear in joined rows). * Note the reason we only consider Vars of different relations is that * if we considered ones of the same rel, we'd be double-counting the * restriction selectivity of the equality in the next step. - * 4. For Vars within a single source rel, we multiply together the numbers + * 4. For Vars within a single source rel, we multiply together the numbers * of values, clamp to the number of rows in the rel (divided by 10 if * more than one Var), and then multiply by the selectivity of the * restriction clauses for that rel. When there's more than one Var, @@ -3076,7 +3076,7 @@ add_unique_group_var(PlannerInfo *root, List *varinfos, * by the restriction selectivity is effectively assuming that the * restriction clauses are independent of the grouping, which is a crummy * assumption, but it's hard to do better. - * 5. If there are Vars from multiple rels, we repeat step 4 for each such + * 5. If there are Vars from multiple rels, we repeat step 4 for each such * rel, and multiply the results together. * Note that rels not containing grouped Vars are ignored completely, as are * join clauses. Such rels cannot increase the number of groups, and we @@ -3107,7 +3107,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows) return 1.0; /* - * Count groups derived from boolean grouping expressions. For other + * Count groups derived from boolean grouping expressions. For other * expressions, find the unique Vars used, treating an expression as a Var * if we can find stats for it. For each one, record the statistical * estimate of number of distinct values (total in its table, without @@ -3196,7 +3196,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows) * Group Vars by relation and estimate total numdistinct. * * For each iteration of the outer loop, we process the frontmost Var in - * varinfos, plus all other Vars in the same relation. We remove these + * varinfos, plus all other Vars in the same relation. We remove these * Vars from the newvarinfos list for the next iteration. This is the * easiest way to group Vars of same rel together. */ @@ -3297,11 +3297,11 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows) * distribution, so this will have to do for now. * * We are passed the number of buckets the executor will use for the given - * input relation. If the data were perfectly distributed, with the same + * input relation. If the data were perfectly distributed, with the same * number of tuples going into each available bucket, then the bucketsize * fraction would be 1/nbuckets. But this happy state of affairs will occur * only if (a) there are at least nbuckets distinct data values, and (b) - * we have a not-too-skewed data distribution. Otherwise the buckets will + * we have a not-too-skewed data distribution. Otherwise the buckets will * be nonuniformly occupied. If the other relation in the join has a key * distribution similar to this one's, then the most-loaded buckets are * exactly those that will be probed most often. 
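To make the group-count heuristic above (steps 1-5) concrete: per relation, the distinct-value estimates of the grouped Vars are multiplied together, clamped to the relation's row count, and scaled by the relation's restriction selectivity; the per-relation results are then multiplied. A toy sketch of the per-relation step, omitting the divide-by-ten adjustment the comment mentions for multi-Var clamping (hypothetical function, assumed-known ndistinct values):

```c
#include <stdio.h>

/*
 * Toy version of the per-relation part of the group-count heuristic:
 * multiply the distinct-value estimates of the grouped Vars, clamp the
 * product to the relation's row count, then scale by the fraction of
 * rows surviving the relation's restriction clauses.
 */
static double
groups_for_rel(const double *ndistinct, int nvars,
               double rel_rows, double restrict_selec)
{
    double  numdistinct = 1.0;

    for (int i = 0; i < nvars; i++)
        numdistinct *= ndistinct[i];

    if (numdistinct > rel_rows)
        numdistinct = rel_rows;     /* can't have more groups than rows */

    return numdistinct * restrict_selec;
}

int
main(void)
{
    double  nd[] = {100.0, 50.0};   /* GROUP BY a, b with 100 and 50 values */

    /* 5000 potential groups, clamped to 1000 rows, 20% of rows survive */
    printf("estimated groups: %g\n", groups_for_rel(nd, 2, 1000.0, 0.2));
    return 0;
}
```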
Therefore, the "average" @@ -3477,7 +3477,7 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue, * operators to estimate selectivity for the other's. This is outright * wrong in some cases --- in particular signed versus unsigned * interpretation could trip us up. But it's useful enough in the - * majority of cases that we do it anyway. Should think about more + * majority of cases that we do it anyway. Should think about more * rigorous ways to do it. */ switch (valuetypid) @@ -4061,7 +4061,7 @@ get_restriction_variable(PlannerInfo *root, List *args, int varRelid, right = (Node *) lsecond(args); /* - * Examine both sides. Note that when varRelid is nonzero, Vars of other + * Examine both sides. Note that when varRelid is nonzero, Vars of other * relations will be treated as pseudoconstants. */ examine_variable(root, left, varRelid, vardata); @@ -4224,7 +4224,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid, * XXX This means the Var comes from a JOIN or sub-SELECT. Later * add code to dig down into the join etc and see if we can trace * the variable to something with stats. (But beware of - * sub-SELECTs with DISTINCT/GROUP BY/etc. Perhaps there are no + * sub-SELECTs with DISTINCT/GROUP BY/etc. Perhaps there are no * cases where this would really be useful, because we'd have * flattened the subselect if it is??) */ @@ -4235,7 +4235,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid, /* * Okay, it's a more complicated expression. Determine variable - * membership. Note that when varRelid isn't zero, only vars of that + * membership. Note that when varRelid isn't zero, only vars of that * relation are considered "real" vars. */ varnos = pull_varnos(basenode); @@ -4284,13 +4284,13 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid, if (onerel) { /* - * We have an expression in vars of a single relation. Try to match + * We have an expression in vars of a single relation. Try to match * it to expressional index columns, in hopes of finding some * statistics. * * XXX it's conceivable that there are multiple matches with different * index opfamilies; if so, we need to pick one that matches the - * operator we are estimating for. FIXME later. + * operator we are estimating for. FIXME later. */ ListCell *ilist; @@ -4386,7 +4386,7 @@ get_variable_numdistinct(VariableStatData *vardata) double ntuples; /* - * Determine the stadistinct value to use. There are cases where we can + * Determine the stadistinct value to use. There are cases where we can * get an estimate even without a pg_statistic entry, or can get a better * value than is in pg_statistic. */ @@ -4502,7 +4502,7 @@ get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop, /* * XXX It's very tempting to try to use the actual column min and max, if - * we can get them relatively-cheaply with an index probe. However, since + * we can get them relatively-cheaply with an index probe. However, since * this function is called many times during join planning, that could * have unpleasant effects on planning speed. Need more investigation * before enabling this. @@ -4753,7 +4753,7 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata, * and it can be very expensive if a lot of uncommitted rows * exist at the end of the index (because we'll laboriously * fetch each one and reject it). What seems like a good - * compromise is to use SnapshotDirty. That will accept + * compromise is to use SnapshotDirty. 
That will accept * uncommitted rows, and thus avoid fetching multiple heap * tuples in this scenario. On the other hand, it will reject * known-dead rows, and thus not give a bogus answer when the @@ -4892,7 +4892,7 @@ find_join_input_rel(PlannerInfo *root, Relids relids) * Check whether char is a letter (and, hence, subject to case-folding) * * In multibyte character sets, we can't use isalpha, and it does not seem - * worth trying to convert to wchar_t to use iswalpha. Instead, just assume + * worth trying to convert to wchar_t to use iswalpha. Instead, just assume * any multibyte char is potentially case-varying. */ static int @@ -5144,7 +5144,7 @@ pattern_fixed_prefix(Const *patt, Pattern_Type ptype, Oid collation, * together with info about MCVs and NULLs. * * We use the >= and < operators from the specified btree opfamily to do the - * estimation. The given variable and Const must be of the associated + * estimation. The given variable and Const must be of the associated * datatype. * * XXX Note: we make use of the upper bound to estimate operator selectivity @@ -5203,7 +5203,7 @@ prefix_selectivity(PlannerInfo *root, VariableStatData *vardata, /* * Merge the two selectivities in the same way as for a range query - * (see clauselist_selectivity()). Note that we don't need to worry + * (see clauselist_selectivity()). Note that we don't need to worry * about double-exclusion of nulls, since ineq_histogram_selectivity * doesn't count those anyway. */ @@ -5427,7 +5427,7 @@ regex_selectivity(const char *patt, int pattlen, bool case_insensitive, * that is not a bulletproof guarantee that an extension of the string might * not sort after it; an example is that "foo " is less than "foo!", but it * is not clear that a "dictionary" sort ordering will consider "foo!" less - * than "foo bar". CAUTION: Therefore, this function should be used only for + * than "foo bar". CAUTION: Therefore, this function should be used only for * estimation purposes when working in a non-C collation. * * To try to catch most cases where an extended string might otherwise sort @@ -5813,9 +5813,9 @@ genericcostestimate(PlannerInfo *root, * The above calculations are all per-index-scan. However, if we are in a * nestloop inner scan, we can expect the scan to be repeated (with * different search keys) for each row of the outer relation. Likewise, - * ScalarArrayOpExpr quals result in multiple index scans. This creates + * ScalarArrayOpExpr quals result in multiple index scans. This creates * the potential for cache effects to reduce the number of disk page - * fetches needed. We want to estimate the average per-scan I/O cost in + * fetches needed. We want to estimate the average per-scan I/O cost in * the presence of caching. * * We use the Mackert-Lohman formula (see costsize.c for details) to @@ -5888,7 +5888,7 @@ genericcostestimate(PlannerInfo *root, * evaluated once at the start of the scan to reduce them to runtime keys * to pass to the index AM (see nodeIndexscan.c). We model the per-tuple * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per - * indexqual operator. Because we have numIndexTuples as a per-scan + * indexqual operator. Because we have numIndexTuples as a per-scan * number, we have to multiply by num_sa_scans to get the correct result * for ScalarArrayOpExpr cases. Similarly add in costs for any index * ORDER BY expressions. @@ -5965,7 +5965,7 @@ btcostestimate(PG_FUNCTION_ARGS) * the index scan). 
Additional quals can suppress visits to the heap, so * it's OK to count them in indexSelectivity, but they should not count * for estimating numIndexTuples. So we must examine the given indexQuals - * to find out which ones count as boundary quals. We rely on the + * to find out which ones count as boundary quals. We rely on the * knowledge that they are given in index column order. * * For a RowCompareExpr, we consider only the first column, just as @@ -6631,7 +6631,7 @@ gincostestimate(PG_FUNCTION_ARGS) /* * nPendingPages can be trusted, but the other fields are as of the last - * VACUUM. Scale them by the ratio numPages / nTotalPages to account for + * VACUUM. Scale them by the ratio numPages / nTotalPages to account for * growth since then. If the fields are zero (implying no VACUUM at all, * and an index created pre-9.1), assume all pages are entry pages. */ @@ -6776,7 +6776,7 @@ gincostestimate(PG_FUNCTION_ARGS) /* * Add an estimate of entry pages read by partial match algorithm. It's a - * scan over leaf pages in entry tree. We haven't any useful stats here, + * scan over leaf pages in entry tree. We haven't any useful stats here, * so estimate it as proportion. */ entryPagesFetched += ceil(numEntryPages * counts.partialEntries / numEntries); diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c index d11962802a7..b6b3478fc9a 100644 --- a/src/backend/utils/adt/timestamp.c +++ b/src/backend/utils/adt/timestamp.c @@ -374,7 +374,7 @@ AdjustTimestampForTypmod(Timestamp *time, int32 typmod) * Note: this round-to-nearest code is not completely consistent about * rounding values that are exactly halfway between integral values. * On most platforms, rint() will implement round-to-nearest-even, but - * the integer code always rounds up (away from zero). Is it worth + * the integer code always rounds up (away from zero). Is it worth * trying to be consistent? */ #ifdef HAVE_INT64_TIMESTAMP @@ -973,7 +973,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod) * that fields to the right of the last one specified are zeroed out, * but those to the left of it remain valid. Thus for example there * is no operational difference between INTERVAL YEAR TO MONTH and - * INTERVAL MONTH. In some cases we could meaningfully enforce that + * INTERVAL MONTH. In some cases we could meaningfully enforce that * higher-order fields are zero; for example INTERVAL DAY could reject * nonzero "month" field. However that seems a bit pointless when we * can't do it consistently. (We cannot enforce a range limit on the @@ -982,9 +982,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod) * * Note: before PG 8.4 we interpreted a limited set of fields as * actually causing a "modulo" operation on a given value, potentially - * losing high-order as well as low-order information. But there is + * losing high-order as well as low-order information. But there is * no support for such behavior in the standard, and it seems fairly - * undesirable on data consistency grounds anyway. Now we only + * undesirable on data consistency grounds anyway. Now we only * perform truncation or rounding of low-order fields. */ if (range == INTERVAL_FULL_RANGE) @@ -1104,7 +1104,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod) /* * Note: this round-to-nearest code is not completely consistent * about rounding values that are exactly halfway between integral - * values. On most platforms, rint() will implement + * values. 
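The rounding caveat quoted above (AdjustTimestampForTypmod, and again for intervals just below) is easy to demonstrate: rint() rounds halfway cases to even on most platforms, while the add-half-then-truncate integer path rounds them upward. A small standalone illustration, using an assumed table of microseconds-per-precision factors rather than the backend's actual scale arrays:

```c
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* assumed: microseconds discarded per typmod precision 0..6 */
static const int64_t scale[] =
    {1000000, 100000, 10000, 1000, 100, 10, 1};

/* integer path: add half the scale, then truncate toward zero
 * (for positive inputs this rounds halfway cases upward) */
static int64_t
round_int64(int64_t usec, int typmod)
{
    int64_t s = scale[typmod];

    return ((usec + s / 2) / s) * s;
}

/* float path: rint() rounds halfway cases to the nearest even multiple */
static double
round_float(double usec, int typmod)
{
    double  s = (double) scale[typmod];

    return rint(usec / s) * s;
}

int
main(void)
{
    /* 2.5 seconds' worth of microseconds, rounded to whole seconds */
    printf("integer rounding: %lld\n", (long long) round_int64(2500000, 0));
    printf("float rounding:   %.0f\n", round_float(2500000.0, 0));
    return 0;
}
```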
On most platforms, rint() will implement * round-to-nearest-even, but the integer code always rounds up * (away from zero). Is it worth trying to be consistent? */ @@ -1314,7 +1314,7 @@ timestamptz_to_time_t(TimestampTz t) * Produce a C-string representation of a TimestampTz. * * This is mostly for use in emitting messages. The primary difference - * from timestamptz_out is that we force the output format to ISO. Note + * from timestamptz_out is that we force the output format to ISO. Note * also that the result is in a static buffer, not pstrdup'd. */ const char * @@ -1484,7 +1484,7 @@ recalc_t: * * First, convert to an integral timestamp, avoiding possibly * platform-specific roundoff-in-wrong-direction errors, and adjust to - * Unix epoch. Then see if we can convert to pg_time_t without loss. This + * Unix epoch. Then see if we can convert to pg_time_t without loss. This * coding avoids hardwiring any assumptions about the width of pg_time_t, * so it should behave sanely on machines without int64. */ @@ -4407,7 +4407,7 @@ timestamp_zone(PG_FUNCTION_ARGS) PG_RETURN_TIMESTAMPTZ(timestamp); /* - * Look up the requested timezone. First we look in the date token table + * Look up the requested timezone. First we look in the date token table * (to handle cases like "EST"), and if that fails, we look in the * timezone database (to handle cases like "America/New_York"). (This * matches the order in which timestamp input checks the cases; it's @@ -4581,7 +4581,7 @@ timestamptz_zone(PG_FUNCTION_ARGS) PG_RETURN_TIMESTAMP(timestamp); /* - * Look up the requested timezone. First we look in the date token table + * Look up the requested timezone. First we look in the date token table * (to handle cases like "EST"), and if that fails, we look in the * timezone database (to handle cases like "America/New_York"). (This * matches the order in which timestamp input checks the cases; it's diff --git a/src/backend/utils/adt/tsginidx.c b/src/backend/utils/adt/tsginidx.c index 4cb961146b8..3ae552cb93d 100644 --- a/src/backend/utils/adt/tsginidx.c +++ b/src/backend/utils/adt/tsginidx.c @@ -237,7 +237,7 @@ gin_tsquery_consistent(PG_FUNCTION_ARGS) * Formerly, gin_extract_tsvector had only two arguments. Now it has three, * but we still need a pg_proc entry with two args to support reloading * pre-9.1 contrib/tsearch2 opclass declarations. This compatibility - * function should go away eventually. (Note: you might say "hey, but the + * function should go away eventually. (Note: you might say "hey, but the * code above is only *using* two args, so let's just declare it that way". * If you try that you'll find the opr_sanity regression test complains.) */ diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c index 7b84637f8cf..c9f0f9f7cfe 100644 --- a/src/backend/utils/adt/varchar.c +++ b/src/backend/utils/adt/varchar.c @@ -256,7 +256,7 @@ bpcharsend(PG_FUNCTION_ARGS) * * Truncation rules: for an explicit cast, silently truncate to the given * length; for an implicit cast, raise error unless extra characters are - * all spaces. (This is sort-of per SQL: the spec would actually have us + * all spaces. (This is sort-of per SQL: the spec would actually have us * raise a "completion condition" for the explicit cast case, but Postgres * hasn't got such a concept.) */ @@ -550,7 +550,7 @@ varcharsend(PG_FUNCTION_ARGS) * * Truncation rules: for an explicit cast, silently truncate to the given * length; for an implicit cast, raise error unless extra characters are - * all spaces. 
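As an aside on the bpchar/varchar truncation rule quoted above: an explicit cast silently truncates to the declared length, while an implicit cast may only drop trailing spaces and must otherwise raise an error. A minimal ASCII-only sketch of that rule (hypothetical helper; the real code is multibyte-aware and reports the error through ereport):

```c
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/*
 * Truncate s in place to maxlen characters.  For an explicit cast the
 * excess is silently discarded; for an implicit cast the excess must be
 * all spaces, otherwise report failure.
 */
static bool
truncate_varchar(char *s, size_t maxlen, bool explicit_cast)
{
    size_t  len = strlen(s);

    if (len <= maxlen)
        return true;            /* nothing to do */

    if (!explicit_cast)
    {
        for (size_t i = maxlen; i < len; i++)
        {
            if (s[i] != ' ')
                return false;   /* would lose non-space data: error */
        }
    }
    s[maxlen] = '\0';
    return true;
}

int
main(void)
{
    char    a[] = "abc   ";
    char    b[] = "abcdef";

    printf("implicit, spaces only: %d\n", truncate_varchar(a, 3, false)); /* 1 */
    printf("implicit, real data:   %d\n", truncate_varchar(b, 3, false)); /* 0 */
    return 0;
}
```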
(This is sort-of per SQL: the spec would actually have us + * all spaces. (This is sort-of per SQL: the spec would actually have us * raise a "completion condition" for the explicit cast case, but Postgres * hasn't got such a concept.) */ diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index e9ea6288048..dd115900d4b 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -528,7 +528,7 @@ textlen(PG_FUNCTION_ARGS) * Does the real work for textlen() * * This is broken out so it can be called directly by other string processing - * functions. Note that the argument is passed as a Datum, to indicate that + * functions. Note that the argument is passed as a Datum, to indicate that * it may still be in compressed form. We can avoid decompressing it at all * in some cases. */ @@ -700,7 +700,7 @@ text_substr_no_len(PG_FUNCTION_ARGS) * Does the real work for text_substr() and text_substr_no_len() * * This is broken out so it can be called directly by other string processing - * functions. Note that the argument is passed as a Datum, to indicate that + * functions. Note that the argument is passed as a Datum, to indicate that * it may still be in compressed/toasted form. We can avoid detoasting all * of it in some cases. * @@ -1050,7 +1050,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state) * searched (t1) and the "needle" is the pattern being sought (t2). * * If the needle is empty or bigger than the haystack then there is no - * point in wasting cycles initializing the table. We also choose not to + * point in wasting cycles initializing the table. We also choose not to * use B-M-H for needles of length 1, since the skip table can't possibly * save anything in that case. */ @@ -1066,7 +1066,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state) * declaration of TextPositionState allows up to 256 elements, but for * short search problems we don't really want to have to initialize so * many elements --- it would take too long in comparison to the - * actual search time. So we choose a useful skip table size based on + * actual search time. So we choose a useful skip table size based on * the haystack length minus the needle length. The closer the needle * length is to the haystack length the less useful skipping becomes. * @@ -1098,7 +1098,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state) state->skiptable[i] = len2; /* - * Now examine the needle. For each character except the last one, + * Now examine the needle. For each character except the last one, * set the corresponding table element to the appropriate skip * distance. Note that when two characters share the same skip table * entry, the one later in the needle must determine the skip @@ -1186,11 +1186,11 @@ text_position_next(int start_pos, TextPositionState *state) /* * No match, so use the haystack char at hptr to decide how - * far to advance. If the needle had any occurrence of that + * far to advance. If the needle had any occurrence of that * character (or more precisely, one sharing the same * skiptable entry) before its last character, then we advance * far enough to align the last such needle character with - * that haystack position. Otherwise we can advance by the + * that haystack position. Otherwise we can advance by the * whole needle length. 
*/ hptr += state->skiptable[(unsigned char) *hptr & skiptablemask]; @@ -1242,11 +1242,11 @@ text_position_next(int start_pos, TextPositionState *state) /* * No match, so use the haystack char at hptr to decide how - * far to advance. If the needle had any occurrence of that + * far to advance. If the needle had any occurrence of that * character (or more precisely, one sharing the same * skiptable entry) before its last character, then we advance * far enough to align the last such needle character with - * that haystack position. Otherwise we can advance by the + * that haystack position. Otherwise we can advance by the * whole needle length. */ hptr += state->skiptable[*hptr & skiptablemask]; @@ -1281,7 +1281,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid) /* * Unfortunately, there is no strncoll(), so in the non-C locale case we - * have to do some memory copying. This turns out to be significantly + * have to do some memory copying. This turns out to be significantly * slower, so we optimize the case where LC_COLLATE is C. We also try to * optimize relatively-short strings by avoiding palloc/pfree overhead. */ @@ -2266,7 +2266,7 @@ textToQualifiedNameList(text *textval) * SplitIdentifierString --- parse a string containing identifiers * * This is the guts of textToQualifiedNameList, and is exported for use in - * other situations such as parsing GUC variables. In the GUC case, it's + * other situations such as parsing GUC variables. In the GUC case, it's * important to avoid memory leaks, so the API is designed to minimize the * amount of stuff that needs to be allocated and freed. * @@ -2274,7 +2274,7 @@ textToQualifiedNameList(text *textval) * rawstring: the input string; must be overwritable! On return, it's * been modified to contain the separated identifiers. * separator: the separator punctuation expected between identifiers - * (typically '.' or ','). Whitespace may also appear around + * (typically '.' or ','). Whitespace may also appear around * identifiers. * Outputs: * namelist: filled with a palloc'd list of pointers to identifiers within @@ -2343,7 +2343,7 @@ SplitIdentifierString(char *rawstring, char separator, * * XXX because we want to overwrite the input in-place, we cannot * support a downcasing transformation that increases the string - * length. This is not a problem given the current implementation + * length. This is not a problem given the current implementation * of downcase_truncate_identifier, but we'll probably have to do * something about this someday. */ @@ -2694,7 +2694,7 @@ check_replace_text_has_escape_char(const text *replace_text) * appendStringInfoRegexpSubstr * * Append replace_text to str, substituting regexp back references for - * \n escapes. start_ptr is the start of the match in the source string, + * \n escapes. start_ptr is the start of the match in the source string, * at logical character position data_pos. */ static void @@ -2777,7 +2777,7 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text, if (so != -1 && eo != -1) { /* - * Copy the text that is back reference of regexp. Note so and eo + * Copy the text that is back reference of regexp. Note so and eo * are counted in characters not bytes. */ char *chunk_start; diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c index 3261eac1ce8..bfaf0a0be70 100644 --- a/src/backend/utils/adt/xml.c +++ b/src/backend/utils/adt/xml.c @@ -19,7 +19,7 @@ * fail. For one thing, this avoids having to manage variant catalog * installations. 
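The text_position_setup/text_position_next comments quoted above describe a Boyer-Moore-Horspool search: a skip table is built from the needle, and on a mismatch the search advances by the skip distance of the haystack byte aligned with the needle's last position. A compact byte-oriented sketch of the algorithm (illustrative only; the backend also sizes the skip table from the string lengths and has a separate multibyte path):

```c
#include <stdio.h>
#include <string.h>

/* Boyer-Moore-Horspool: return pointer to first match of needle, or NULL */
static const char *
bmh_search(const char *haystack, size_t hlen, const char *needle, size_t nlen)
{
    size_t  skip[256];

    if (nlen == 0 || nlen > hlen)
        return NULL;

    /* default skip: the whole needle length */
    for (size_t i = 0; i < 256; i++)
        skip[i] = nlen;

    /* for every needle byte except the last, record its distance to the end */
    for (size_t i = 0; i < nlen - 1; i++)
        skip[(unsigned char) needle[i]] = nlen - 1 - i;

    for (size_t pos = 0; pos + nlen <= hlen; )
    {
        if (memcmp(haystack + pos, needle, nlen) == 0)
            return haystack + pos;
        /* advance by the skip distance of the byte under the needle's last slot */
        pos += skip[(unsigned char) haystack[pos + nlen - 1]];
    }
    return NULL;
}

int
main(void)
{
    const char *hay = "most-common-value lists for both relations";
    const char *hit = bmh_search(hay, strlen(hay), "both", 4);

    printf("found at offset %ld\n", hit ? (long) (hit - hay) : -1L);
    return 0;
}
```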
But it also has nice effects such as that you can * dump a database containing XML type data even if the server is not - * linked with libxml. Thus, make sure xml_out() works even if nothing + * linked with libxml. Thus, make sure xml_out() works even if nothing * else does. */ @@ -254,7 +254,7 @@ xml_out(PG_FUNCTION_ARGS) xmltype *x = PG_GETARG_XML_P(0); /* - * xml_out removes the encoding property in all cases. This is because we + * xml_out removes the encoding property in all cases. This is because we * cannot control from here whether the datum will be converted to a * different client encoding, so we'd do more harm than good by including * it. @@ -425,7 +425,7 @@ xmlcomment(PG_FUNCTION_ARGS) /* * TODO: xmlconcat needs to merge the notations and unparsed entities - * of the argument values. Not very important in practice, though. + * of the argument values. Not very important in practice, though. */ xmltype * xmlconcat(List *args) @@ -559,7 +559,7 @@ xmlelement(XmlExprState *xmlExpr, ExprContext *econtext) /* * We first evaluate all the arguments, then start up libxml and create - * the result. This avoids issues if one of the arguments involves a call + * the result. This avoids issues if one of the arguments involves a call * to some other function or subsystem that wants to use libxml on its own * terms. */ @@ -849,7 +849,7 @@ xml_is_document(xmltype *arg) * pg_xml_init --- set up for use of libxml * * This should be called by each function that is about to use libxml - * facilities. It has two responsibilities: verify compatibility with the + * facilities. It has two responsibilities: verify compatibility with the * loaded libxml version (done on first call in a session) and establish * or re-establish our libxml error handler. The latter needs to be done * anytime we might have passed control to add-on modules (eg libperl) which @@ -909,7 +909,7 @@ pg_xml_init(void) resetStringInfo(xml_err_buf); /* - * We re-establish the error callback function every time. This makes + * We re-establish the error callback function every time. This makes * it safe for other subsystems (PL/Perl, say) to also use libxml with * their own callbacks ... so long as they likewise set up the * callbacks on every use. It's cheap enough to not be worth worrying @@ -1116,7 +1116,7 @@ finished: /* * Write an XML declaration. On output, we adjust the XML declaration - * as follows. (These rules are the moral equivalent of the clause + * as follows. (These rules are the moral equivalent of the clause * "Serialization of an XML value" in the SQL standard.) * * We try to avoid generating an XML declaration if possible. This is @@ -1638,8 +1638,8 @@ map_xml_name_to_sql_identifier(char *name) * * When xml_escape_strings is true, then certain characters in string * values are replaced by entity references (< etc.), as specified - * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is - * wanted. The false case is mainly useful when the resulting value + * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is + * wanted. The false case is mainly useful when the resulting value * is used with xmlTextWriterWriteAttribute() to write out an * attribute, because that function does the escaping itself. */ @@ -1909,13 +1909,13 @@ _SPI_strdup(const char *s) * * There are two kinds of mappings: Mapping SQL data (table contents) * to XML documents, and mapping SQL structure (the "schema") to XML - * Schema. And there are functions that do both at the same time. + * Schema. 
And there are functions that do both at the same time. * * Then you can map a database, a schema, or a table, each in both * ways. This breaks down recursively: Mapping a database invokes * mapping schemas, which invokes mapping tables, which invokes * mapping rows, which invokes mapping columns, although you can't - * call the last two from the outside. Because of this, there are a + * call the last two from the outside. Because of this, there are a * number of xyz_internal() functions which are to be called both from * the function manager wrapper and from some upper layer in a * recursive call. @@ -1924,7 +1924,7 @@ _SPI_strdup(const char *s) * nulls, tableforest, and targetns mean. * * Some style guidelines for XML output: Use double quotes for quoting - * XML attributes. Indent XML elements by two spaces, but remember + * XML attributes. Indent XML elements by two spaces, but remember * that a lot of code is called recursively at different levels, so * it's better not to indent rather than create output that indents * and outdents weirdly. Add newlines to make the output look nice. @@ -2088,12 +2088,12 @@ cursor_to_xml(PG_FUNCTION_ARGS) * Write the start tag of the root element of a data mapping. * * top_level means that this is the very top level of the eventual - * output. For example, when the user calls table_to_xml, then a call + * output. For example, when the user calls table_to_xml, then a call * with a table name to this function is the top level. When the user * calls database_to_xml, then a call with a schema name to this * function is not the top level. If top_level is false, then the XML * namespace declarations are omitted, because they supposedly already - * appeared earlier in the output. Repeating them is not wrong, but + * appeared earlier in the output. Repeating them is not wrong, but * it looks ugly. */ static void @@ -3031,7 +3031,7 @@ map_sql_typecoll_to_xmlschema_types(List *tupdesc_list) * SQL/XML:2008 sections 9.5 and 9.6. * * (The distinction between 9.5 and 9.6 is basically that 9.6 adds - * a name attribute, which this function does. The name-less version + * a name attribute, which this function does. The name-less version * 9.5 doesn't appear to be required anywhere.) */ static const char * @@ -3209,7 +3209,7 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod) /* * Map an SQL row to an XML element, taking the row from the active - * SPI cursor. See also SQL/XML:2008 section 9.10. + * SPI cursor. See also SQL/XML:2008 section 9.10. */ static void SPI_sql_row_to_xmlelement(int rownum, StringInfo result, char *tablename, |
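Finally, an aside on the RE_compile_and_cache comment earlier in this diff (regexp.c): compiled patterns live in a self-organizing cache with the most recently used entries at the front, so a lookup is a scan from the front followed by a move-to-front on a hit. A small generic sketch of that scheme with a hypothetical linked-list structure (the backend actually keeps a fixed-size array of cached regexps and evicts the oldest entry when it is full):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* one cached entry: the key (pattern text); a real cache would also hold
 * the compiled form */
typedef struct cache_entry
{
    char               *pattern;
    struct cache_entry *next;
} cache_entry;

static cache_entry *cache_head = NULL;

/*
 * Scan from the front; on a hit, move the entry to the front so frequently
 * used patterns stay cheap to find.  On a miss, store a new entry at the
 * front (error checking omitted for brevity).
 */
static cache_entry *
cache_lookup(const char *pattern)
{
    cache_entry **link = &cache_head;

    for (cache_entry *e = cache_head; e != NULL; link = &e->next, e = e->next)
    {
        if (strcmp(e->pattern, pattern) == 0)
        {
            *link = e->next;        /* unlink ... */
            e->next = cache_head;   /* ... and move to front */
            cache_head = e;
            return e;
        }
    }

    cache_entry *e = malloc(sizeof(cache_entry));
    e->pattern = strdup(pattern);
    e->next = cache_head;
    cache_head = e;
    return e;
}

int
main(void)
{
    cache_lookup("^foo");
    cache_lookup("bar$");
    cache_lookup("^foo");           /* hit: moved back to the front */
    printf("front of cache: %s\n", cache_head->pattern);
    return 0;
}
```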