Diffstat (limited to 'src/backend/utils')
72 files changed, 555 insertions, 555 deletions
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c index 7c4eaaca8a3..e32e081482e 100644 --- a/src/backend/utils/adt/acl.c +++ b/src/backend/utils/adt/acl.c @@ -119,7 +119,7 @@ static Oid get_role_oid_or_public(const char *rolname); /* * getid * Consumes the first alphanumeric string (identifier) found in string - * 's', ignoring any leading white space. If it finds a double quote + * 's', ignoring any leading white space. If it finds a double quote * it returns the word inside the quotes. * * RETURNS: @@ -225,7 +225,7 @@ putid(char *p, const char *s) * * RETURNS: * the string position in 's' immediately following the ACL - * specification. Also: + * specification. Also: * - loads the structure pointed to by 'aip' with the appropriate * UID/GID, id type identifier and mode type values. */ @@ -939,7 +939,7 @@ aclupdate(const Acl *old_acl, const AclItem *mod_aip, } /* - * Remove abandoned privileges (cascading revoke). Currently we can only + * Remove abandoned privileges (cascading revoke). Currently we can only * handle this when the grantee is not PUBLIC. */ if ((old_goptions & ~new_goptions) != 0) @@ -1005,7 +1005,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId) /* * If the old ACL contained any references to the new owner, then we may - * now have generated an ACL containing duplicate entries. Find them and + * now have generated an ACL containing duplicate entries. Find them and * merge them so that there are not duplicates. (This is relatively * expensive since we use a stupid O(N^2) algorithm, but it's unlikely to * be the normal case.) @@ -1016,7 +1016,7 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId) * remove privilege-free entries, should there be any in the input.) dst * is the next output slot, targ is the currently considered input slot * (always >= dst), and src scans entries to the right of targ looking for - * duplicates. Once an entry has been emitted to dst it is known + * duplicates. Once an entry has been emitted to dst it is known * duplicate-free and need not be considered anymore. */ if (newpresent) @@ -2400,7 +2400,7 @@ column_privilege_check(Oid tableoid, AttrNumber attnum, * existence of the pg_class row before risking calling pg_class_aclcheck. * Note: it might seem there's a race condition against concurrent DROP, * but really it's safe because there will be no syscache flush between - * here and there. So if we see the row in the syscache, so will + * here and there. So if we see the row in the syscache, so will * pg_class_aclcheck. */ if (!SearchSysCacheExists1(RELOID, ObjectIdGetDatum(tableoid))) @@ -4747,14 +4747,14 @@ count_one_bits(AclMode mask) * The grantor must always be either the object owner or some role that has * been explicitly granted grant options. This ensures that all granted * privileges appear to flow from the object owner, and there are never - * multiple "original sources" of a privilege. Therefore, if the would-be + * multiple "original sources" of a privilege. Therefore, if the would-be * grantor is a member of a role that has the needed grant options, we have * to do the grant as that role instead. * * It is possible that the would-be grantor is a member of several roles * that have different subsets of the desired grant options, but no one * role has 'em all. In this case we pick a role with the largest number - * of desired options. Ties are broken in favor of closer ancestors. + * of desired options. Ties are broken in favor of closer ancestors. 
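Aside: the select_best_grantor() comment above says ties among candidate roles are resolved by how many of the desired grant options each role holds, which is what the count_one_bits() helper named in the hunk header is for. A standalone sketch of one way such a bit-counting helper can be written (the AclMode typedef width is an assumption here):

#include <stdint.h>

typedef uint32_t AclMode;       /* assumption: privilege bits fit in 32 bits */

/* Count the set bits in a privilege mask. */
static int
count_one_bits(AclMode mask)
{
    int nbits = 0;

    while (mask != 0)
    {
        mask &= mask - 1;       /* Kernighan's trick: clear the lowest set bit */
        nbits++;
    }
    return nbits;
}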
* * roleId: the role attempting to do the GRANT/REVOKE * privileges: the privileges to be granted/revoked diff --git a/src/backend/utils/adt/array_userfuncs.c b/src/backend/utils/adt/array_userfuncs.c index 274e867fdd1..bd186cb6cfc 100644 --- a/src/backend/utils/adt/array_userfuncs.c +++ b/src/backend/utils/adt/array_userfuncs.c @@ -502,7 +502,7 @@ array_agg_transfn(PG_FUNCTION_ARGS) /* * The transition type for array_agg() is declared to be "internal", which - * is a pass-by-value type the same size as a pointer. So we can safely + * is a pass-by-value type the same size as a pointer. So we can safely * pass the ArrayBuildState pointer through nodeAgg.c's machinations. */ PG_RETURN_POINTER(state); @@ -517,7 +517,7 @@ array_agg_finalfn(PG_FUNCTION_ARGS) int lbs[1]; /* - * Test for null before Asserting we are in right context. This is to + * Test for null before Asserting we are in right context. This is to * avoid possible Assert failure in 8.4beta installations, where it is * possible for users to create NULL constants of type internal. */ diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c index 3e43e951e10..e8621ff8751 100644 --- a/src/backend/utils/adt/arrayfuncs.c +++ b/src/backend/utils/adt/arrayfuncs.c @@ -689,7 +689,7 @@ ReadArrayStr(char *arrayStr, /* * We have to remove " and \ characters to create a clean item value to - * pass to the datatype input routine. We overwrite each item value + * pass to the datatype input routine. We overwrite each item value * in-place within arrayStr to do this. srcptr is the current scan point, * and dstptr is where we are copying to. * @@ -889,7 +889,7 @@ ReadArrayStr(char *arrayStr, * referenced by Datums after copying them. * * If the input data is of varlena type, the caller must have ensured that - * the values are not toasted. (Doing it here doesn't work since the + * the values are not toasted. (Doing it here doesn't work since the * caller has already allocated space for the array...) */ static void @@ -1985,7 +1985,7 @@ array_get_slice(ArrayType *array, memcpy(ARR_DIMS(newarray), span, ndim * sizeof(int)); /* - * Lower bounds of the new array are set to 1. Formerly (before 7.3) we + * Lower bounds of the new array are set to 1. Formerly (before 7.3) we * copied the given lowerIndx values ... but that seems confusing. */ newlb = ARR_LBOUND(newarray); @@ -2617,7 +2617,7 @@ array_set_slice(ArrayType *array, /* * array_map() * - * Map an array through an arbitrary function. Return a new array with + * Map an array through an arbitrary function. Return a new array with * same dimensions and each source element transformed by fn(). Each * source element is passed as the first argument to fn(); additional * arguments to be passed to fn() can be specified by the caller. @@ -2632,9 +2632,9 @@ array_set_slice(ArrayType *array, * first argument position initially holds the input array value. * * inpType: OID of element type of input array. This must be the same as, * or binary-compatible with, the first argument type of fn(). - * * retType: OID of element type of output array. This must be the same as, + * * retType: OID of element type of output array. This must be the same as, * or binary-compatible with, the result type of fn(). - * * amstate: workspace for array_map. Must be zeroed by caller before + * * amstate: workspace for array_map. Must be zeroed by caller before * first call, and not touched after that. 
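Aside: the ReadArrayStr hunk above describes stripping quotes and backslashes from each item in place: a read pointer (srcptr) scans ahead of a write pointer (dstptr), so no second buffer is needed. A simplified standalone sketch of the technique (single-byte encoding, no error checking):

/* Remove double quotes and backslash escapes from s, rewriting the
 * cleaned item value into the same buffer; dst never outruns src. */
static void
dequote_in_place(char *s)
{
    char   *src = s;
    char   *dst = s;

    while (*src != '\0')
    {
        if (*src == '\\' && src[1] != '\0')
        {
            src++;              /* drop the backslash ... */
            *dst++ = *src++;    /* ... keep the escaped character */
        }
        else if (*src == '"')
            src++;              /* quotes delimit; they are not data */
        else
            *dst++ = *src++;
    }
    *dst = '\0';
}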
* * It is legitimate to pass a freshly-zeroed ArrayMapState on each call, @@ -3488,7 +3488,7 @@ array_cmp(FunctionCallInfo fcinfo) /* * If arrays contain same data (up to end of shorter one), apply - * additional rules to sort by dimensionality. The relative significance + * additional rules to sort by dimensionality. The relative significance * of the different bits of information is historical; mainly we just care * that we don't say "equal" for arrays of different dimensionality. */ @@ -3750,7 +3750,7 @@ array_contain_compare(ArrayType *array1, ArrayType *array2, Oid collation, /* * We assume that the comparison operator is strict, so a NULL can't - * match anything. XXX this diverges from the "NULL=NULL" behavior of + * match anything. XXX this diverges from the "NULL=NULL" behavior of * array_eq, should we act like that? */ if (isnull1) @@ -4241,7 +4241,7 @@ array_copy(char *destptr, int nitems, * * Note: this could certainly be optimized using standard bitblt methods. * However, it's not clear that the typical Postgres array has enough elements - * to make it worth worrying too much. For the moment, KISS. + * to make it worth worrying too much. For the moment, KISS. */ void array_bitmap_copy(bits8 *destbitmap, int destoffset, @@ -4438,7 +4438,7 @@ array_extract_slice(ArrayType *newarray, * Insert a slice into an array. * * ndim/dim[]/lb[] are dimensions of the original array. A new array with - * those same dimensions is to be constructed. destArray must already + * those same dimensions is to be constructed. destArray must already * have been allocated and its header initialized. * * st[]/endp[] identify the slice to be replaced. Elements within the slice @@ -5106,7 +5106,7 @@ array_unnest(PG_FUNCTION_ARGS) * Get the array value and detoast if needed. We can't do this * earlier because if we have to detoast, we want the detoasted copy * to be in multi_call_memory_ctx, so it will go away when we're done - * and not before. (If no detoast happens, we assume the originally + * and not before. (If no detoast happens, we assume the originally * passed array will stick around till then.) */ arr = PG_GETARG_ARRAYTYPE_P(0); diff --git a/src/backend/utils/adt/arrayutils.c b/src/backend/utils/adt/arrayutils.c index af7359ca4e3..cf248916000 100644 --- a/src/backend/utils/adt/arrayutils.c +++ b/src/backend/utils/adt/arrayutils.c @@ -193,7 +193,7 @@ mda_next_tuple(int n, int *curr, const int *span) /* * ArrayGetIntegerTypmods: verify that argument is a 1-D cstring array, - * and get the contents converted to integers. Returns a palloc'd array + * and get the contents converted to integers. Returns a palloc'd array * and places the length at *n. */ int32 * diff --git a/src/backend/utils/adt/char.c b/src/backend/utils/adt/char.c index 974cb223589..5a5f83a9aaf 100644 --- a/src/backend/utils/adt/char.c +++ b/src/backend/utils/adt/char.c @@ -59,7 +59,7 @@ charout(PG_FUNCTION_ARGS) * charrecv - converts external binary format to char * * The external representation is one byte, with no character set - * conversion. This is somewhat dubious, perhaps, but in many + * conversion. This is somewhat dubious, perhaps, but in many * cases people use char for a 1-byte binary type. 
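Aside: the array_unnest hunk above relies on the standard set-returning-function discipline: anything that must survive across calls, including the possibly-detoasted array argument, is allocated in multi_call_memory_ctx during the first call. A skeleton of that pattern, assuming the usual funcapi.h machinery (my_unnest is a hypothetical name, and element extraction is elided):

#include "postgres.h"
#include "fmgr.h"
#include "funcapi.h"
#include "utils/array.h"

PG_FUNCTION_INFO_V1(my_unnest);

Datum
my_unnest(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;

    if (SRF_IS_FIRSTCALL())
    {
        MemoryContext oldcontext;
        ArrayType  *arr;

        funcctx = SRF_FIRSTCALL_INIT();
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);

        /* Any detoasting happens here, inside the long-lived per-query
         * context, so the copy survives until SRF_RETURN_DONE. */
        arr = PG_GETARG_ARRAYTYPE_P(0);
        funcctx->user_fctx = arr;
        funcctx->max_calls = ArrayGetNItems(ARR_NDIM(arr), ARR_DIMS(arr));

        MemoryContextSwitchTo(oldcontext);
    }

    funcctx = SRF_PERCALL_SETUP();

    if (funcctx->call_cntr < funcctx->max_calls)
    {
        /* ... fetch element funcctx->call_cntr from user_fctx ... */
        SRF_RETURN_NEXT(funcctx, (Datum) 0);    /* placeholder element */
    }

    SRF_RETURN_DONE(funcctx);
}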
*/ Datum diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c index 51c6b6bacbf..714250b9d0f 100644 --- a/src/backend/utils/adt/date.c +++ b/src/backend/utils/adt/date.c @@ -1257,7 +1257,7 @@ AdjustTimeForTypmod(TimeADT *time, int32 typmod) * Note: this round-to-nearest code is not completely consistent about * rounding values that are exactly halfway between integral values. * On most platforms, rint() will implement round-to-nearest-even, but - * the integer code always rounds up (away from zero). Is it worth + * the integer code always rounds up (away from zero). Is it worth * trying to be consistent? */ #ifdef HAVE_INT64_TIMESTAMP @@ -1606,7 +1606,7 @@ time_interval(PG_FUNCTION_ARGS) * Convert interval to time data type. * * This is defined as producing the fractional-day portion of the interval. - * Therefore, we can just ignore the months field. It is not real clear + * Therefore, we can just ignore the months field. It is not real clear * what to do with negative intervals, but we choose to subtract the floor, * so that, say, '-2 hours' becomes '22:00:00'. */ @@ -2596,7 +2596,7 @@ timetz_zone(PG_FUNCTION_ARGS) pg_tz *tzp; /* - * Look up the requested timezone. First we look in the date token table + * Look up the requested timezone. First we look in the date token table * (to handle cases like "EST"), and if that fails, we look in the * timezone database (to handle cases like "America/New_York"). (This * matches the order in which timestamp input checks the cases; it's diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c index 05e1730c881..59e06bb569d 100644 --- a/src/backend/utils/adt/datetime.c +++ b/src/backend/utils/adt/datetime.c @@ -352,7 +352,7 @@ j2date(int jd, int *year, int *month, int *day) * j2day - convert Julian date to day-of-week (0..6 == Sun..Sat) * * Note: various places use the locution j2day(date - 1) to produce a - * result according to the convention 0..6 = Mon..Sun. This is a bit of + * result according to the convention 0..6 = Mon..Sun. This is a bit of * a crock, but will work as long as the computation here is just a modulo. */ int @@ -2472,7 +2472,7 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask, /* * Nothing so far; make a decision about what we think the input - * is. There used to be lots of heuristics here, but the + * is. There used to be lots of heuristics here, but the * consensus now is to be paranoid. It *must* be either * YYYY-MM-DD (with a more-than-two-digit year field), or the * field order defined by DateOrder. @@ -2505,9 +2505,9 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask, { /* * We are at the first numeric field of a date that included a - * textual month name. We want to support the variants + * textual month name. We want to support the variants * MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as unambiguous - * inputs. We will also accept MON-DD-YY or DD-MON-YY in + * inputs. We will also accept MON-DD-YY or DD-MON-YY in * either DMY or MDY modes, as well as YY-MON-DD in YMD mode. */ if (flen >= 3 || DateOrder == DATEORDER_YMD) @@ -3315,7 +3315,7 @@ DecodeISO8601Interval(char *str, return dterr; /* - * Note: we could step off the end of the string here. Code below + * Note: we could step off the end of the string here. Code below * *must* exit the loop if unit == '\0'. 
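Aside: the AdjustTimeForTypmod hunk above flags an inconsistency: rint() typically rounds halfway cases to the nearest even value, while the integer path adds half the unit and truncates, rounding ties away from zero. A standalone illustration (assuming the default floating-point rounding mode, and non-negative inputs on the integer side):

#include <math.h>
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
    /* float path: halfway cases go to the nearest even value */
    printf("rint(2.5) = %.0f, rint(3.5) = %.0f\n", rint(2.5), rint(3.5));
    /* prints: rint(2.5) = 2, rint(3.5) = 4 */

    /* integer path: add half the unit and truncate -- ties round up */
    int64_t unit = 10, half = 5, v = 25;

    printf("integer rounding of 25 to nearest 10: %lld\n",
           (long long) ((v + half) / unit * unit));
    /* prints 30: the halfway case rounds away from zero */
    return 0;
}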
*/ unit = *str++; @@ -4109,7 +4109,7 @@ EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str) /* * We've been burnt by stupid errors in the ordering of the datetkn tables - * once too often. Arrange to check them during postmaster start. + * once too often. Arrange to check them during postmaster start. */ static bool CheckDateTokenTable(const char *tablename, const datetkn *base, int nel) diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c index 1fd7ff777ae..f29778a7f12 100644 --- a/src/backend/utils/adt/datum.c +++ b/src/backend/utils/adt/datum.c @@ -181,7 +181,7 @@ datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen) /* * just compare the two datums. NOTE: just comparing "len" bytes will * not do the work, because we do not know how these bytes are aligned - * inside the "Datum". We assume instead that any given datatype is + * inside the "Datum". We assume instead that any given datatype is * consistent about how it fills extraneous bits in the Datum. */ res = (value1 == value2); diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c index 7381f217251..f73e4cd5c6e 100644 --- a/src/backend/utils/adt/dbsize.c +++ b/src/backend/utils/adt/dbsize.c @@ -510,7 +510,7 @@ pg_size_pretty(PG_FUNCTION_ARGS) * This is expected to be used in queries like * SELECT pg_relation_filenode(oid) FROM pg_class; * That leads to a couple of choices. We work from the pg_class row alone - * rather than actually opening each relation, for efficiency. We don't + * rather than actually opening each relation, for efficiency. We don't * fail if we can't find the relation --- some rows might be visible in * the query's MVCC snapshot but already dead according to SnapshotNow. * (Note: we could avoid using the catcache, but there's little point diff --git a/src/backend/utils/adt/domains.c b/src/backend/utils/adt/domains.c index c178fd0bddb..acf8c0cc5bb 100644 --- a/src/backend/utils/adt/domains.c +++ b/src/backend/utils/adt/domains.c @@ -12,11 +12,11 @@ * The overhead required for constraint checking can be high, since examining * the catalogs to discover the constraints for a given domain is not cheap. * We have three mechanisms for minimizing this cost: - * 1. In a nest of domains, we flatten the checking of all the levels + * 1. In a nest of domains, we flatten the checking of all the levels * into just one operation. - * 2. We cache the list of constraint items in the FmgrInfo struct + * 2. We cache the list of constraint items in the FmgrInfo struct * passed by the caller. - * 3. If there are CHECK constraints, we cache a standalone ExprContext + * 3. If there are CHECK constraints, we cache a standalone ExprContext * to evaluate them in. * * @@ -305,7 +305,7 @@ domain_recv(PG_FUNCTION_ARGS) /* * domain_check - check that a datum satisfies the constraints of a - * domain. extra and mcxt can be passed if they are available from, + * domain. extra and mcxt can be passed if they are available from, * say, a FmgrInfo structure, or they can be NULL, in which case the * setup is repeated for each call. */ diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c index b929abeba78..61efe0d8a5f 100644 --- a/src/backend/utils/adt/float.c +++ b/src/backend/utils/adt/float.c @@ -287,7 +287,7 @@ float4in(PG_FUNCTION_ARGS) /* * In some IRIX versions, strtod() recognizes only "inf", so if the input - * is "infinity" we have to skip over "inity". Also, it may return + * is "infinity" we have to skip over "inity". 
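Aside: the float4in hunk here (float8in below gets the identical fix-up) works around two old strtod() quirks: an implementation may consume only "inf" out of "infinity", and may return positive infinity for "-inf". A hedged sketch of the endptr and sign repair, using POSIX strncasecmp in place of the backend's pg_strncasecmp:

#include <stdlib.h>
#include <strings.h>
#include <math.h>

static double
parse_double(const char *num)
{
    char   *endptr;
    double  val = strtod(num, &endptr);

    if (isinf(val))
    {
        /* If strtod() stopped after "inf", step endptr over "inity";
         * if "-inf" came back positive, repair the sign too. */
        if (strncasecmp(num, "infinity", 8) == 0)
            endptr = (char *) num + 8;
        else if (strncasecmp(num, "-infinity", 9) == 0)
        {
            endptr = (char *) num + 9;
            if (val > 0)
                val = -val;
        }
    }
    /* a real parser would now reject trailing junk at *endptr */
    (void) endptr;
    return val;
}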
Also, it may return * positive infinity for "-inf". */ if (isinf(val)) @@ -508,7 +508,7 @@ float8in(PG_FUNCTION_ARGS) /* * In some IRIX versions, strtod() recognizes only "inf", so if the input - * is "infinity" we have to skip over "inity". Also, it may return + * is "infinity" we have to skip over "inity". Also, it may return * positive infinity for "-inf". */ if (isinf(val)) @@ -2050,7 +2050,7 @@ float8_stddev_samp(PG_FUNCTION_ARGS) * in that order. Note that Y is the first argument to the aggregates! * * It might seem attractive to optimize this by having multiple accumulator - * functions that only calculate the sums actually needed. But on most + * functions that only calculate the sums actually needed. But on most * modern machines, a couple of extra floating-point multiplies will be * insignificant compared to the other per-tuple overhead, so I've chosen * to minimize code space instead. diff --git a/src/backend/utils/adt/format_type.c b/src/backend/utils/adt/format_type.c index 7c51ee7ecac..5037f090ca1 100644 --- a/src/backend/utils/adt/format_type.c +++ b/src/backend/utils/adt/format_type.c @@ -46,14 +46,14 @@ __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3))); * double quoted if it contains funny characters or matches a keyword. * * If typemod is NULL then we are formatting a type name in a context where - * no typemod is available, eg a function argument or result type. This + * no typemod is available, eg a function argument or result type. This * yields a slightly different result from specifying typemod = -1 in some * cases. Given typemod = -1 we feel compelled to produce an output that * the parser will interpret as having typemod -1, so that pg_dump will - * produce CREATE TABLE commands that recreate the original state. But + * produce CREATE TABLE commands that recreate the original state. But * given NULL typemod, we assume that the parser's interpretation of * typemod doesn't matter, and so we are willing to output a slightly - * "prettier" representation of the same type. For example, type = bpchar + * "prettier" representation of the same type. For example, type = bpchar * and typemod = NULL gets you "character", whereas typemod = -1 gets you * "bpchar" --- the former will be interpreted as character(1) by the * parser, which does not yield typemod -1. diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c index 6f267db0256..a54b215afb9 100644 --- a/src/backend/utils/adt/formatting.c +++ b/src/backend/utils/adt/formatting.c @@ -1816,7 +1816,7 @@ str_initcap(const char *buff, size_t nbytes, Oid collid) /* * Note: we assume that toupper_l()/tolower_l() will not be so broken - * as to need guard tests. When using the default collation, we apply + * as to need guard tests. When using the default collation, we apply * the traditional Postgres behavior that forces ASCII-style treatment * of I/i, but in non-default collations you get exactly what the * collation says. @@ -3579,7 +3579,7 @@ do_to_timestamp(text *date_txt, text *fmt, { /* * The month and day field have not been set, so we use the - * day-of-year field to populate them. Depending on the date mode, + * day-of-year field to populate them. Depending on the date mode, * this field may be interpreted as a Gregorian day-of-year, or an ISO * week date day-of-year. 
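Aside: the do_to_timestamp hunk here ends by noting that a bare day-of-year field may be read as a Gregorian day-of-year or an ISO week-date day-of-year. For the Gregorian interpretation, filling in month and day is a table walk; a standalone sketch (doy is 1-based and assumed already validated against the year's length):

/* Convert a Gregorian year + 1-based day-of-year to month and day. */
static const int month_len[2][12] = {
    {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
    {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31},
};

static int
is_leap(int year)
{
    return (year % 4 == 0 && year % 100 != 0) || year % 400 == 0;
}

static void
doy_to_month_day(int year, int doy, int *month, int *day)
{
    int leap = is_leap(year);
    int m = 0;

    while (doy > month_len[leap][m])
        doy -= month_len[leap][m++];
    *month = m + 1;             /* months are 1-based */
    *day = doy;
}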
*/ diff --git a/src/backend/utils/adt/geo_selfuncs.c b/src/backend/utils/adt/geo_selfuncs.c index c2e6bc1794e..c576b7d6ff9 100644 --- a/src/backend/utils/adt/geo_selfuncs.c +++ b/src/backend/utils/adt/geo_selfuncs.c @@ -22,7 +22,7 @@ /* - * Selectivity functions for geometric operators. These are bogus -- unless + * Selectivity functions for geometric operators. These are bogus -- unless * we know the actual key distribution in the index, we can't make a good * prediction of the selectivity of these operators. * @@ -34,7 +34,7 @@ * In general, GiST needs to search multiple subtrees in order to guarantee * that all occurrences of the same key have been found. Because of this, * the estimated cost for scanning the index ought to be higher than the - * output selectivity would indicate. gistcostestimate(), over in selfuncs.c, + * output selectivity would indicate. gistcostestimate(), over in selfuncs.c, * ought to be adjusted accordingly --- but until we can generate somewhat * realistic numbers here, it hardly matters... */ diff --git a/src/backend/utils/adt/inet_cidr_ntop.c b/src/backend/utils/adt/inet_cidr_ntop.c index 5f2a3d361d9..d33534ec173 100644 --- a/src/backend/utils/adt/inet_cidr_ntop.c +++ b/src/backend/utils/adt/inet_cidr_ntop.c @@ -196,7 +196,7 @@ inet_cidr_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size) } else { - /* Copy src to private buffer. Zero host part. */ + /* Copy src to private buffer. Zero host part. */ p = (bits + 7) / 8; memcpy(inbuf, src, p); memset(inbuf + p, 0, 16 - p); diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c index 07493e3c3f8..3ca243fb0ad 100644 --- a/src/backend/utils/adt/int.c +++ b/src/backend/utils/adt/int.c @@ -642,7 +642,7 @@ int4pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -663,8 +663,8 @@ int4mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -684,7 +684,7 @@ int4mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg2 gives arg1 + * Overflow check. We basically check to see if result / arg2 gives arg1 * again. There are two cases where this fails: arg2 = 0 (which cannot * overflow) and arg1 = INT_MIN, arg2 = -1 (where the division itself will * overflow and thus incorrectly match). @@ -794,7 +794,7 @@ int2pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -815,8 +815,8 @@ int2mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. 
If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -897,7 +897,7 @@ int24pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -918,8 +918,8 @@ int24mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -939,7 +939,7 @@ int24mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg2 gives arg1 + * Overflow check. We basically check to see if result / arg2 gives arg1 * again. There is one case where this fails: arg2 = 0 (which cannot * overflow). * @@ -985,7 +985,7 @@ int42pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -1006,8 +1006,8 @@ int42mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -1027,7 +1027,7 @@ int42mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg1 gives arg2 + * Overflow check. We basically check to see if result / arg1 gives arg2 * again. There is one case where this fails: arg1 = 0 (which cannot * overflow). * diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c index b28b6f03e7e..a5723af3043 100644 --- a/src/backend/utils/adt/int8.c +++ b/src/backend/utils/adt/int8.c @@ -73,7 +73,7 @@ scanint8(const char *str, bool errorOK, int64 *result) ptr++; /* - * Do an explicit check for INT64_MIN. Ugly though this is, it's + * Do an explicit check for INT64_MIN. Ugly though this is, it's * cleaner than trying to get the loop below to handle it portably. */ if (strncmp(ptr, "9223372036854775808", 19) == 0) @@ -519,7 +519,7 @@ int8pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -540,8 +540,8 @@ int8mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. 
*/ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -561,7 +561,7 @@ int8mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg2 gives arg1 + * Overflow check. We basically check to see if result / arg2 gives arg1 * again. There are two cases where this fails: arg2 = 0 (which cannot * overflow) and arg1 = INT64_MIN, arg2 = -1 (where the division itself * will overflow and thus incorrectly match). @@ -719,7 +719,7 @@ int8inc(PG_FUNCTION_ARGS) /* * These functions are exactly like int8inc but are used for aggregates that - * count only non-null values. Since the functions are declared strict, + * count only non-null values. Since the functions are declared strict, * the null checks happen before we ever get here, and all we need do is * increment the state value. We could actually make these pg_proc entries * point right at int8inc, but then the opr_sanity regression test would @@ -773,7 +773,7 @@ int84pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -794,8 +794,8 @@ int84mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -815,7 +815,7 @@ int84mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg1 gives arg2 + * Overflow check. We basically check to see if result / arg1 gives arg2 * again. There is one case where this fails: arg1 = 0 (which cannot * overflow). * @@ -882,7 +882,7 @@ int48pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -903,8 +903,8 @@ int48mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -924,7 +924,7 @@ int48mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg2 gives arg1 + * Overflow check. We basically check to see if result / arg2 gives arg1 * again. There is one case where this fails: arg2 = 0 (which cannot * overflow). * @@ -970,7 +970,7 @@ int82pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -991,8 +991,8 @@ int82mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. 
If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -1012,7 +1012,7 @@ int82mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg1 gives arg2 + * Overflow check. We basically check to see if result / arg1 gives arg2 * again. There is one case where this fails: arg1 = 0 (which cannot * overflow). * @@ -1079,7 +1079,7 @@ int28pl(PG_FUNCTION_ARGS) result = arg1 + arg2; /* - * Overflow check. If the inputs are of different signs then their sum + * Overflow check. If the inputs are of different signs then their sum * cannot overflow. If the inputs are of the same sign, their sum had * better be that sign too. */ @@ -1100,8 +1100,8 @@ int28mi(PG_FUNCTION_ARGS) result = arg1 - arg2; /* - * Overflow check. If the inputs are of the same sign then their - * difference cannot overflow. If they are of different signs then the + * Overflow check. If the inputs are of the same sign then their + * difference cannot overflow. If they are of different signs then the * result should be of the same sign as the first input. */ if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1)) @@ -1121,7 +1121,7 @@ int28mul(PG_FUNCTION_ARGS) result = arg1 * arg2; /* - * Overflow check. We basically check to see if result / arg2 gives arg1 + * Overflow check. We basically check to see if result / arg2 gives arg1 * again. There is one case where this fails: arg2 = 0 (which cannot * overflow). * diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c index 09e8698af2c..3137711c384 100644 --- a/src/backend/utils/adt/like.c +++ b/src/backend/utils/adt/like.c @@ -76,12 +76,12 @@ wchareq(char *p1, char *p2) /* * Formerly we had a routine iwchareq() here that tried to do case-insensitive - * comparison of multibyte characters. It did not work at all, however, + * comparison of multibyte characters. It did not work at all, however, * because it relied on tolower() which has a single-byte API ... and * towlower() wouldn't be much better since we have no suitably cheap way * of getting a single character transformed to the system's wchar_t format. * So now, we just downcase the strings using lower() and apply regular LIKE - * comparison. This should be revisited when we install better locale support. + * comparison. This should be revisited when we install better locale support. */ /* diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c index 5bda4af50fd..3b6300dc40f 100644 --- a/src/backend/utils/adt/misc.c +++ b/src/backend/utils/adt/misc.c @@ -278,7 +278,7 @@ pg_sleep(PG_FUNCTION_ARGS) * pg_usleep's upper bound on allowed delays. * * By computing the intended stop time initially, we avoid accumulation of - * extra delay across multiple sleeps. This also ensures we won't delay + * extra delay across multiple sleeps. This also ensures we won't delay * less than the specified time if pg_usleep is interrupted by other * signals such as SIGHUP. 
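Aside: nearly all of the int.c and int8.c hunks above touch the same three overflow-check comments. For reference, here is the technique they describe, as standalone C for int32 (the backend computes the possibly-overflowed result with plain signed arithmetic; this sketch routes the arithmetic through unsigned so it stays free of signed-overflow undefined behavior):

#include <stdint.h>

#define SAMESIGN(a,b)   (((a) < 0) == ((b) < 0))

/* Addition: different-sign inputs cannot overflow; same-sign inputs
 * must produce a sum of that same sign. */
static int
add_overflows(int32_t a, int32_t b, int32_t *r)
{
    *r = (int32_t) ((uint32_t) a + (uint32_t) b);
    return SAMESIGN(a, b) && !SAMESIGN(*r, a);
}

/* Subtraction: same-sign inputs cannot overflow; different-sign inputs
 * must produce a result with the sign of the first input. */
static int
sub_overflows(int32_t a, int32_t b, int32_t *r)
{
    *r = (int32_t) ((uint32_t) a - (uint32_t) b);
    return !SAMESIGN(a, b) && !SAMESIGN(*r, a);
}

/* Multiplication: check whether result / b gives back a.  The two cases
 * where that test lies are handled up front: b == 0 (cannot overflow)
 * and a == INT32_MIN, b == -1 (the division itself would overflow). */
static int
mul_overflows(int32_t a, int32_t b, int32_t *r)
{
    *r = (int32_t) ((uint32_t) a * (uint32_t) b);
    if (b == 0)
        return 0;
    if (b == -1 && a == INT32_MIN)
        return 1;
    return *r / b != a;
}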
*/ diff --git a/src/backend/utils/adt/nabstime.c b/src/backend/utils/adt/nabstime.c index 6771e78af8e..bdd98d98d54 100644 --- a/src/backend/utils/adt/nabstime.c +++ b/src/backend/utils/adt/nabstime.c @@ -198,7 +198,7 @@ tm2abstime(struct pg_tm * tm, int tz) sec = tm->tm_sec + tz + (tm->tm_min + (day * HOURS_PER_DAY + tm->tm_hour) * MINS_PER_HOUR) * SECS_PER_MINUTE; /* - * check for overflow. We need a little slop here because the H/M/S plus + * check for overflow. We need a little slop here because the H/M/S plus * TZ offset could add up to more than 1 day. */ if ((day >= MAX_DAYNUM - 10 && sec < 0) || @@ -1163,7 +1163,7 @@ tintervalsame(PG_FUNCTION_ARGS) * 1. The interval length computations overflow at 2^31 seconds, causing * intervals longer than that to sort oddly compared to those shorter. * 2. infinity and minus infinity (NOEND_ABSTIME and NOSTART_ABSTIME) are - * just ordinary integers. Since this code doesn't handle them specially, + * just ordinary integers. Since this code doesn't handle them specially, * it's possible for [a b] to be considered longer than [c infinity] for * finite abstimes a, b, c. In combination with the previous point, the * interval [-infinity infinity] is treated as being shorter than many finite diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c index a968d707994..7ebae765400 100644 --- a/src/backend/utils/adt/network.c +++ b/src/backend/utils/adt/network.c @@ -29,7 +29,7 @@ static int ip_addrsize(inet *inetptr); static inet *internal_inetpl(inet *ip, int64 addend); /* - * Access macros. We use VARDATA_ANY so that we can process short-header + * Access macros. We use VARDATA_ANY so that we can process short-header * varlena values without detoasting them. This requires a trick: * VARDATA_ANY assumes the varlena header is already filled in, which is * not the case when constructing a new value (until SET_INET_VARSIZE is @@ -88,7 +88,7 @@ network_in(char *src, bool is_cidr) dst = (inet *) palloc0(sizeof(inet)); /* - * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses + * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses * will have a : somewhere in them (several, in fact) so if there is one * present, assume it's V6, otherwise assume it's V4. */ @@ -193,7 +193,7 @@ cidr_out(PG_FUNCTION_ARGS) * family, bits, is_cidr, address length, address in network byte order. * * Presence of is_cidr is largely for historical reasons, though it might - * allow some code-sharing on the client side. We send it correctly on + * allow some code-sharing on the client side. We send it correctly on * output, but ignore the value on input. */ static inet * @@ -1392,7 +1392,7 @@ inetmi(PG_FUNCTION_ARGS) /* * We form the difference using the traditional complement, increment, * and add rule, with the increment part being handled by starting the - * carry off at 1. If you don't think integer arithmetic is done in + * carry off at 1. If you don't think integer arithmetic is done in * two's complement, too bad. */ int nb = ip_addrsize(ip); @@ -1414,7 +1414,7 @@ inetmi(PG_FUNCTION_ARGS) else { /* - * Input wider than int64: check for overflow. All bytes to + * Input wider than int64: check for overflow. All bytes to * the left of what will fit should be 0 or 0xFF, depending on * sign of the now-complete result. */ @@ -1445,9 +1445,9 @@ inetmi(PG_FUNCTION_ARGS) * XXX This should go away someday! * * This is a kluge needed because we don't yet support zones in stored inet - * values. 
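Aside: the inetmi hunk above forms ip1 - ip2 over raw address bytes using the classic complement-and-add identity a - b = a + ~b + 1, with the trailing "+1" folded into the initial carry. A standalone sketch over big-endian byte arrays of width nb:

/* Compute out = a - b, two's complement style, least-significant
 * (rightmost) byte first, with the increment folded into the carry. */
static void
bytes_sub(const unsigned char *a, const unsigned char *b,
          unsigned char *out, int nb)
{
    int carry = 1;
    int i;

    for (i = nb - 1; i >= 0; i--)
    {
        int sum = a[i] + (~b[i] & 0xFF) + carry;

        out[i] = (unsigned char) (sum & 0xFF);
        carry = sum >> 8;
    }
}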
Since the result of getnameinfo() might include a zone spec, + * values. Since the result of getnameinfo() might include a zone spec, * call this to remove it anywhere we want to feed getnameinfo's output to - * network_in. Beats failing entirely. + * network_in. Beats failing entirely. * * An alternative approach would be to let network_in ignore %-parts for * itself, but that would mean we'd silently drop zone specs in user input, diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index 6b60a5c1c78..5091306b0e9 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -49,7 +49,7 @@ * Numeric values are represented in a base-NBASE floating point format. * Each "digit" ranges from 0 to NBASE-1. The type NumericDigit is signed * and wide enough to store a digit. We assume that NBASE*NBASE can fit in - * an int. Although the purely calculational routines could handle any even + * an int. Although the purely calculational routines could handle any even * NBASE that's less than sqrt(INT_MAX), in practice we are only interested * in NBASE a power of ten, so that I/O conversions and decimal rounding * are easy. Also, it's actually more efficient if NBASE is rather less than @@ -94,11 +94,11 @@ typedef int16 NumericDigit; * If the high bits of the first word of a NumericChoice (n_header, or * n_short.n_header, or n_long.n_sign_dscale) are NUMERIC_SHORT, then the * numeric follows the NumericShort format; if they are NUMERIC_POS or - * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN, + * NUMERIC_NEG, it follows the NumericLong format. If they are NUMERIC_NAN, * it is a NaN. We currently always store a NaN using just two bytes (i.e. * only n_header), but previous releases used only the NumericLong format, * so we might find 4-byte NaNs on disk if a database has been migrated using - * pg_upgrade. In either case, when the high bits indicate a NaN, the + * pg_upgrade. In either case, when the high bits indicate a NaN, the * remaining bits are never examined. Currently, we always initialize these * to zero, but it might be possible to use them for some other purpose in * the future. @@ -206,19 +206,19 @@ struct NumericData : ((n)->choice.n_long.n_weight)) /* ---------- - * NumericVar is the format we use for arithmetic. The digit-array part + * NumericVar is the format we use for arithmetic. The digit-array part * is the same as the NumericData storage format, but the header is more * complex. * * The value represented by a NumericVar is determined by the sign, weight, * ndigits, and digits[] array. * Note: the first digit of a NumericVar's value is assumed to be multiplied - * by NBASE ** weight. Another way to say it is that there are weight+1 + * by NBASE ** weight. Another way to say it is that there are weight+1 * digits before the decimal point. It is possible to have weight < 0. * * buf points at the physical start of the palloc'd digit buffer for the - * NumericVar. digits points at the first digit in actual use (the one - * with the specified weight). We normally leave an unused digit or two + * NumericVar. digits points at the first digit in actual use (the one + * with the specified weight). We normally leave an unused digit or two * (preset to zeroes) between buf and digits, so that there is room to store * a carry out of the top digit without reallocating space. We just need to * decrement digits (and increment weight) to make room for the carry digit. 
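Aside: the NumericVar comments above pin down the representation: the value is the digits[] array scaled by NBASE ** weight, with weight+1 base-NBASE digits before the decimal point. A worked example with NBASE = 10000: the value 12345.678 is stored as digits {1, 2345, 6780} with weight 1, since 1*10000^1 + 2345*10000^0 + 6780*10000^-1 = 12345.678. A toy evaluator (the float round-trip is purely illustrative; the real code never evaluates a numeric this way):

#include <math.h>
#include <stdio.h>

#define NBASE 10000

static double
numeric_value(const int *digits, int ndigits, int weight)
{
    double v = 0.0;
    int i;

    for (i = 0; i < ndigits; i++)
        v += digits[i] * pow(NBASE, weight - i);
    return v;
}

int
main(void)
{
    int digits[] = {1, 2345, 6780};

    printf("%.3f\n", numeric_value(digits, 3, 1));  /* prints 12345.678 */
    return 0;
}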
@@ -592,7 +592,7 @@ numeric_maximum_size(int32 typmod) * In most cases, the size of a numeric will be smaller than the value * computed below, because the varlena header will typically get toasted * down to a single byte before being stored on disk, and it may also be - * possible to use a short numeric header. But our job here is to compute + * possible to use a short numeric header. But our job here is to compute * the worst case. */ return NUMERIC_HDRSZ + (numeric_digits * sizeof(NumericDigit)); @@ -913,7 +913,7 @@ numeric_uminus(PG_FUNCTION_ARGS) /* * The packed format is known to be totally zero digit trimmed always. So - * we can identify a ZERO by the fact that there are no digits at all. Do + * we can identify a ZERO by the fact that there are no digits at all. Do * nothing to a zero. */ if (NUMERIC_NDIGITS(num) != 0) @@ -1926,7 +1926,7 @@ numeric_sqrt(PG_FUNCTION_ARGS) PG_RETURN_NUMERIC(make_result(&const_nan)); /* - * Unpack the argument and determine the result scale. We choose a scale + * Unpack the argument and determine the result scale. We choose a scale * to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any * case not less than the input's dscale. */ @@ -1979,7 +1979,7 @@ numeric_exp(PG_FUNCTION_ARGS) PG_RETURN_NUMERIC(make_result(&const_nan)); /* - * Unpack the argument and determine the result scale. We choose a scale + * Unpack the argument and determine the result scale. We choose a scale * to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any * case not less than the input's dscale. */ @@ -2585,9 +2585,9 @@ numeric_avg_accum(PG_FUNCTION_ARGS) /* * Integer data types all use Numeric accumulators to share code and - * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation + * avoid risk of overflow. For int2 and int4 inputs, Numeric accumulation * is overkill for the N and sum(X) values, but definitely not overkill - * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only + * for the sum(X*X) value. Hence, we use int2_accum and int4_accum only * for stddev/variance --- there are faster special-purpose accumulator * routines for SUM and AVG of these datatypes. */ @@ -2850,7 +2850,7 @@ numeric_stddev_pop(PG_FUNCTION_ARGS) * the initial condition of the transition data value needs to be NULL. This * means we can't rely on ExecAgg to automatically insert the first non-null * data value into the transition data: it doesn't know how to do the type - * conversion. The upshot is that these routines have to be marked non-strict + * conversion. The upshot is that these routines have to be marked non-strict * and handle substitution of the first non-null input themselves. */ @@ -3248,7 +3248,7 @@ set_var_from_str(const char *str, const char *cp, NumericVar *dest) /* * We first parse the string to extract decimal digits and determine the - * correct decimal weight. Then convert to NBASE representation. + * correct decimal weight. Then convert to NBASE representation. */ switch (*cp) { @@ -3838,7 +3838,7 @@ apply_typmod(NumericVar *var, int32 typmod) /* * Convert numeric to int8, rounding if needed. * - * If overflow, return FALSE (no error is raised). Return TRUE if okay. + * If overflow, return FALSE (no error is raised). Return TRUE if okay. * * CAUTION: var's contents may be modified by rounding! */ @@ -4302,7 +4302,7 @@ sub_var(NumericVar *var1, NumericVar *var2, NumericVar *result) * mul_var() - * * Multiplication on variable level. Product of var1 * var2 is stored - * in result. 
Result is rounded to no more than rscale fractional digits. + * in result. Result is rounded to no more than rscale fractional digits. */ static void mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result, @@ -4346,7 +4346,7 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result, /* * Determine number of result digits to compute. If the exact result * would have more than rscale fractional digits, truncate the computation - * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one + * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one * or both inputs have fewer digits than they really do. */ res_ndigits = var1ndigits + var2ndigits + 1; @@ -4589,7 +4589,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result, * * We need the first divisor digit to be >= NBASE/2. If it isn't, * make it so by scaling up both the divisor and dividend by the - * factor "d". (The reason for allocating dividend[0] above is to + * factor "d". (The reason for allocating dividend[0] above is to * leave room for possible carry here.) */ if (divisor[1] < HALF_NBASE) @@ -4633,7 +4633,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result, /* * If next2digits are 0, then quotient digit must be 0 and there's - * no need to adjust the working dividend. It's worth testing + * no need to adjust the working dividend. It's worth testing * here to fall out ASAP when processing trailing zeroes in a * dividend. */ @@ -4651,7 +4651,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result, /* * Adjust quotient digit if it's too large. Knuth proves that * after this step, the quotient digit will be either correct or - * just one too large. (Note: it's OK to use dividend[j+2] here + * just one too large. (Note: it's OK to use dividend[j+2] here * because we know the divisor length is at least 2.) */ while (divisor2 * qhat > @@ -4826,7 +4826,7 @@ div_var_fast(NumericVar *var1, NumericVar *var2, NumericVar *result, * dividend's digits (plus appended zeroes to reach the desired precision * including guard digits). Each step of the main loop computes an * (approximate) quotient digit and stores it into div[], removing one - * position of dividend space. A final pass of carry propagation takes + * position of dividend space. A final pass of carry propagation takes * care of any mistaken quotient digits. */ div = (int *) palloc0((div_ndigits + 1) * sizeof(int)); @@ -5683,7 +5683,7 @@ power_var_int(NumericVar *base, int exp, NumericVar *result, int rscale) /* * The general case repeatedly multiplies base according to the bit - * pattern of exp. We do the multiplications with some extra precision. + * pattern of exp. We do the multiplications with some extra precision. */ neg = (exp < 0); exp = Abs(exp); diff --git a/src/backend/utils/adt/oid.c b/src/backend/utils/adt/oid.c index 495b6261e62..4510acd17a1 100644 --- a/src/backend/utils/adt/oid.c +++ b/src/backend/utils/adt/oid.c @@ -318,7 +318,7 @@ oidparse(Node *node) /* * Values too large for int4 will be represented as Float - * constants by the lexer. Accept these if they are valid OID + * constants by the lexer. Accept these if they are valid OID * strings. 
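Aside: the power_var_int hunk above multiplies according to the bit pattern of exp, i.e. square-and-multiply. The same technique in standalone form (double stands in for NumericVar; the real code also carries extra precision in the form of guard digits):

/* Raise base to a non-negative integer power by scanning exp's bits:
 * square base once per bit, multiply into the result for each 1 bit. */
static double
ipow(double base, unsigned int exp)
{
    double result = 1.0;

    while (exp > 0)
    {
        if (exp & 1)
            result *= base;
        base *= base;
        exp >>= 1;
    }
    return result;
}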
*/ return oidin_subr(strVal(node), NULL); diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c index c3181be0d2f..82c77ed768d 100644 --- a/src/backend/utils/adt/pg_locale.c +++ b/src/backend/utils/adt/pg_locale.c @@ -20,12 +20,12 @@ * * The other categories, LC_MONETARY, LC_NUMERIC, and LC_TIME are also * settable at run-time. However, we don't actually set those locale - * categories permanently. This would have bizarre effects like no + * categories permanently. This would have bizarre effects like no * longer accepting standard floating-point literals in some locales. * Instead, we only set the locales briefly when needed, cache the * required information obtained from localeconv(), and set them back. * The cached information is only used by the formatting functions - * (to_char, etc.) and the money type. For the user, this should all be + * (to_char, etc.) and the money type. For the user, this should all be * transparent. * * !!! NOW HEAR THIS !!! @@ -39,7 +39,7 @@ * fail = true; * setlocale(category, save); * DOES NOT WORK RELIABLY: on some platforms the second setlocale() call - * will change the memory save is pointing at. To do this sort of thing + * will change the memory save is pointing at. To do this sort of thing * safely, you *must* pstrdup what setlocale returns the first time. * * FYI, The Open Group locale standard is defined here: @@ -253,7 +253,7 @@ check_locale(int category, const char *value) * * For most locale categories, the assign hook doesn't actually set the locale * permanently, just reset flags so that the next use will cache the - * appropriate values. (See explanation at the top of this file.) + * appropriate values. (See explanation at the top of this file.) * * Note: we accept value = "" as selecting the postmaster's environment * value, whatever it was (so long as the environment setting is legal). @@ -766,7 +766,7 @@ IsoLocaleName(const char *winlocname) * could fail if the locale is C, so str_tolower() shouldn't call it * in that case. * - * Note that we currently lack any way to flush the cache. Since we don't + * Note that we currently lack any way to flush the cache. Since we don't * support ALTER COLLATION, this is OK. The worst case is that someone * drops a collation, and a useless cache entry hangs around in existing * backends. @@ -960,7 +960,7 @@ report_newlocale_failure(const char *localename) /* - * Create a locale_t from a collation OID. Results are cached for the + * Create a locale_t from a collation OID. Results are cached for the * lifetime of the backend. Thus, do not free the result with freelocale(). * * As a special optimization, the default/database collation returns 0. @@ -1143,7 +1143,7 @@ wchar2char(char *to, const wchar_t *from, size_t tolen, pg_locale_t locale) * This has almost the API of mbstowcs_l(), except that *from need not be * null-terminated; instead, the number of input bytes is specified as * fromlen. Also, we ereport() rather than returning -1 for invalid - * input encoding. tolen is the maximum number of wchar_t's to store at *to. + * input encoding. tolen is the maximum number of wchar_t's to store at *to. * The output will be zero-terminated iff there is room. 
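Aside: the pg_locale.c hunk above includes the file's standing warning: the string setlocale() returns may be clobbered by the next setlocale() call, so a save/restore must copy it first (pstrdup in the backend). A standalone sketch of the safe pattern using plain strdup:

#include <locale.h>
#include <stdlib.h>
#include <string.h>

/* Temporarily switch a locale category, run localeconv()-style work,
 * then restore.  Returns 0 on success, -1 on failure. */
static int
with_locale(int category, const char *locale)
{
    char *save = setlocale(category, NULL);

    if (save == NULL)
        return -1;
    save = strdup(save);        /* must copy: the next setlocale() call
                                 * may overwrite the returned string */
    if (save == NULL)
        return -1;
    if (setlocale(category, locale) == NULL)
    {
        free(save);
        return -1;
    }
    /* ... use localeconv() results here ... */
    setlocale(category, save);  /* restore the saved setting */
    free(save);
    return 0;
}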
*/ size_t diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c index 6b0fd364e44..f5c8321a44e 100644 --- a/src/backend/utils/adt/pg_lzcompress.c +++ b/src/backend/utils/adt/pg_lzcompress.c @@ -578,7 +578,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest, /* * If we've emitted more than first_success_by bytes without finding - * anything compressible at all, fail. This lets us fall out + * anything compressible at all, fail. This lets us fall out * reasonably quickly when looking at incompressible input (such as * pre-compressed data). */ @@ -602,7 +602,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest, hist_next, hist_recycle, dp, dend); dp++; /* Do not do this ++ in the line above! */ - /* The macro would do it four times - Jan. */ + /* The macro would do it four times - Jan. */ } found_match = true; } @@ -616,7 +616,7 @@ pglz_compress(const char *source, int32 slen, PGLZ_Header *dest, hist_next, hist_recycle, dp, dend); dp++; /* Do not do this ++ in the line above! */ - /* The macro would do it four times - Jan. */ + /* The macro would do it four times - Jan. */ } } diff --git a/src/backend/utils/adt/pseudotypes.c b/src/backend/utils/adt/pseudotypes.c index ddb1bd2b71c..638618db89d 100644 --- a/src/backend/utils/adt/pseudotypes.c +++ b/src/backend/utils/adt/pseudotypes.c @@ -6,7 +6,7 @@ * A pseudo-type isn't really a type and never has any operations, but * we do need to supply input and output functions to satisfy the links * in the pseudo-type's entry in pg_type. In most cases the functions - * just throw an error if invoked. (XXX the error messages here cover + * just throw an error if invoked. (XXX the error messages here cover * the most common case, but might be confusing in some contexts. Can * we do better?) * @@ -138,7 +138,7 @@ anyarray_out(PG_FUNCTION_ARGS) * anyarray_recv - binary input routine for pseudo-type ANYARRAY. * * XXX this could actually be made to work, since the incoming array - * data will contain the element type OID. Need to think through + * data will contain the element type OID. Need to think through * type-safety issues before allowing it, however. */ Datum @@ -192,7 +192,7 @@ anyenum_out(PG_FUNCTION_ARGS) * void_in - input routine for pseudo-type VOID. * * We allow this so that PL functions can return VOID without any special - * hack in the PL handler. Whatever value the PL thinks it's returning + * hack in the PL handler. Whatever value the PL thinks it's returning * will just be ignored. */ Datum diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c index 7e05a53810e..3f8a812a492 100644 --- a/src/backend/utils/adt/regexp.c +++ b/src/backend/utils/adt/regexp.c @@ -142,7 +142,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation) char errMsg[100]; /* - * Look for a match among previously compiled REs. Since the data + * Look for a match among previously compiled REs. Since the data * structure is self-organizing with most-used entries at the front, our * search strategy can just be to scan from the front. */ @@ -192,7 +192,7 @@ RE_compile_and_cache(text *text_re, int cflags, Oid collation) /* * Here and in other places in this file, do CHECK_FOR_INTERRUPTS - * before reporting a regex error. This is so that if the regex + * before reporting a regex error. This is so that if the regex * library aborts and returns REG_CANCEL, we don't print an error * message that implies the regex was invalid. 
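Aside: the RE_compile_and_cache hunk above depends on the cache being self-organizing: lookups scan from the front, and each hit is rotated to slot 0 so recently used entries stay cheap to find. A minimal sketch keyed by plain strings (the real cache keys on pattern text, flags, and collation; insertion of new entries is elided):

#include <string.h>

typedef struct
{
    char    key[64];            /* stand-in for pattern + flags + collation */
    /* ... compiled regex would live here ... */
} CacheEnt;

static CacheEnt cache[32];
static int  ncached = 0;

/* Scan front to back; on a hit, shift the preceding entries down one
 * slot and install the hit at the front (move-to-front). */
static CacheEnt *
cache_lookup(const char *key)
{
    int i;

    for (i = 0; i < ncached; i++)
    {
        if (strcmp(cache[i].key, key) == 0)
        {
            CacheEnt hit = cache[i];

            memmove(&cache[1], &cache[0], i * sizeof(CacheEnt));
            cache[0] = hit;
            return &cache[0];
        }
    }
    return NULL;                /* caller compiles and inserts at front */
}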
*/ @@ -298,7 +298,7 @@ RE_wchar_execute(regex_t *re, pg_wchar *data, int data_len, * dat_len --- the length of the data string * nmatch, pmatch --- optional return area for match details * - * Data is given in the database encoding. We internally + * Data is given in the database encoding. We internally * convert to array of pg_wchar which is what Spencer's regex package wants. */ static bool diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c index 6716c0204c1..6d06d415cc9 100644 --- a/src/backend/utils/adt/regproc.c +++ b/src/backend/utils/adt/regproc.c @@ -81,7 +81,7 @@ regprocin(PG_FUNCTION_ARGS) /* * In bootstrap mode we assume the given name is not schema-qualified, and - * just search pg_proc for a unique match. This is needed for + * just search pg_proc for a unique match. This is needed for * initializing other system catalogs (pg_namespace may not exist yet, and * certainly there are no schemas other than pg_catalog). */ @@ -266,7 +266,7 @@ regprocedurein(PG_FUNCTION_ARGS) /* * Else it's a name and arguments. Parse the name and arguments, look up * potential matches in the current namespace search list, and scan to see - * which one exactly matches the given argument types. (There will not be + * which one exactly matches the given argument types. (There will not be * more than one match.) * * XXX at present, this code will not work in bootstrap mode, hence this @@ -427,7 +427,7 @@ regoperin(PG_FUNCTION_ARGS) /* * In bootstrap mode we assume the given name is not schema-qualified, and - * just search pg_operator for a unique match. This is needed for + * just search pg_operator for a unique match. This is needed for * initializing other system catalogs (pg_namespace may not exist yet, and * certainly there are no schemas other than pg_catalog). */ @@ -616,7 +616,7 @@ regoperatorin(PG_FUNCTION_ARGS) /* * Else it's a name and arguments. Parse the name and arguments, look up * potential matches in the current namespace search list, and scan to see - * which one exactly matches the given argument types. (There will not be + * which one exactly matches the given argument types. (There will not be * more than one match.) * * XXX at present, this code will not work in bootstrap mode, hence this @@ -853,7 +853,7 @@ regclassout(PG_FUNCTION_ARGS) /* * In bootstrap mode, skip the fancy namespace stuff and just return - * the class name. (This path is only needed for debugging output + * the class name. (This path is only needed for debugging output * anyway.) */ if (IsBootstrapProcessingMode()) @@ -1343,7 +1343,7 @@ stringToQualifiedNameList(const char *string) /* * Given a C string, parse it into a qualified function or operator name - * followed by a parenthesized list of type names. Reduce the + * followed by a parenthesized list of type names. Reduce the * type names to an array of OIDs (returned into *nargs and *argtypes; * the argtypes array should be of size FUNC_MAX_ARGS). The function or * operator name is returned to *names as a List of Strings. diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c index f08af12a2bd..32621e0fc26 100644 --- a/src/backend/utils/adt/ri_triggers.c +++ b/src/backend/utils/adt/ri_triggers.c @@ -2006,11 +2006,11 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS) /* * "MATCH <unspecified>" only changes columns corresponding to the - * referenced columns that have changed in pk_rel. This means the + * referenced columns that have changed in pk_rel. 
This means the * "SET attrn=NULL [, attrn=NULL]" string will be changed as well. * In this case, we need to build a temporary plan rather than use * our cached plan, unless the update happens to change all - * columns in the key. Fortunately, for the most common case of a + * columns in the key. Fortunately, for the most common case of a * single-column foreign key, this will be true. * * In case you're wondering, the inequality check works because we * @@ -2768,7 +2768,7 @@ RI_Initial_Check(Trigger *trigger, Relation fk_rel, Relation pk_rel) * Temporarily increase work_mem so that the check query can be executed * more efficiently. It seems okay to do this because the query is simple * enough to not use a multiple of work_mem, and one typically would not - * have many large foreign-key validations happening concurrently. So + * have many large foreign-key validations happening concurrently. So * this seems to meet the criteria for being considered a "maintenance" * operation, and accordingly we use maintenance_work_mem. * * @@ -3506,7 +3506,7 @@ ri_ReportViolation(RI_QueryKey *qkey, const char *constrname, errhint("This is most likely due to a rule having rewritten the query."))); /* - * Determine which relation to complain about. If tupdesc wasn't passed + * Determine which relation to complain about. If tupdesc wasn't passed * by caller, assume the violator tuple came from there. */ onfk = (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK); diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c index c55c3d3902b..8372c7be772 100644 --- a/src/backend/utils/adt/rowtypes.c +++ b/src/backend/utils/adt/rowtypes.c @@ -277,7 +277,7 @@ record_in(PG_FUNCTION_ARGS) /* * We cannot return tuple->t_data because heap_form_tuple allocates it as * part of a larger chunk, and our caller may expect to be able to pfree - * our result. So must copy the info into a new palloc chunk. + * our result. So must copy the info into a new palloc chunk. */ result = (HeapTupleHeader) palloc(tuple->t_len); memcpy(result, tuple->t_data, tuple->t_len); @@ -635,7 +635,7 @@ record_recv(PG_FUNCTION_ARGS) /* * We cannot return tuple->t_data because heap_form_tuple allocates it as * part of a larger chunk, and our caller may expect to be able to pfree - * our result. So must copy the info into a new palloc chunk. + * our result. So must copy the info into a new palloc chunk. */ result = (HeapTupleHeader) palloc(tuple->t_len); memcpy(result, tuple->t_data, tuple->t_len); @@ -889,7 +889,7 @@ record_cmp(FunctionCallInfo fcinfo) /* * Scan corresponding columns, allowing for dropped columns in different - * places in the two rows. i1 and i2 are physical column indexes, j is + * places in the two rows. i1 and i2 are physical column indexes, j is * the logical column index. */ i1 = i2 = j = 0; @@ -1124,7 +1124,7 @@ record_eq(PG_FUNCTION_ARGS) /* * Scan corresponding columns, allowing for dropped columns in different - * places in the two rows. i1 and i2 are physical column indexes, j is + * places in the two rows. i1 and i2 are physical column indexes, j is * the logical column index. */ i1 = i2 = j = 0; diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index 8bf5a791375..059eeb3dcc0 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -877,7 +877,7 @@ pg_get_indexdef_worker(Oid indexrelid, int colno, context = deparse_context_for(get_relation_name(indrelid), indrelid); /* - * Start the index definition.
Note that the index's name should never be + * Start the index definition. Note that the index's name should never be * schema-qualified, but the indexed rel's name may be. */ initStringInfo(&buf); @@ -1305,7 +1305,7 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand, prettyFlags, 0); /* - * Now emit the constraint definition. There are cases where + * Now emit the constraint definition. There are cases where * the constraint expression will be fully parenthesized and * we don't need the outer parens ... but there are other * cases where we do need 'em. Be conservative for now. @@ -2126,7 +2126,7 @@ deparse_expression_pretty(Node *expr, List *dpcontext, * * Given the reference name (alias) and OID of a relation, build deparsing * context for an expression referencing only that relation (as varno 1, - * varlevelsup 0). This is sufficient for many uses of deparse_expression. + * varlevelsup 0). This is sufficient for many uses of deparse_expression. * ---------- */ List * @@ -2211,7 +2211,7 @@ set_deparse_planstate(deparse_namespace *dpns, PlanState *ps) * We special-case Append and MergeAppend to pretend that the first child * plan is the OUTER referent; we have to interpret OUTER Vars in their * tlists according to one of the children, and the first one is the most - * natural choice. Likewise special-case ModifyTable to pretend that the + * natural choice. Likewise special-case ModifyTable to pretend that the * first child plan is the OUTER referent; this is to support RETURNING * lists containing references to non-target relations. */ @@ -2251,7 +2251,7 @@ set_deparse_planstate(deparse_namespace *dpns, PlanState *ps) * push_child_plan: temporarily transfer deparsing attention to a child plan * * When expanding an OUTER or INNER reference, we must adjust the deparse - * context in case the referenced expression itself uses OUTER/INNER. We + * context in case the referenced expression itself uses OUTER/INNER. We * modify the top stack entry in-place to avoid affecting levelsup issues * (although in a Plan tree there really shouldn't be any). * @@ -2615,8 +2615,8 @@ get_query_def(Query *query, StringInfo buf, List *parentnamespace, /* * Before we begin to examine the query, acquire locks on referenced - * relations, and fix up deleted columns in JOIN RTEs. This ensures - * consistent results. Note we assume it's OK to scribble on the passed + * relations, and fix up deleted columns in JOIN RTEs. This ensures + * consistent results. Note we assume it's OK to scribble on the passed * querytree! * * We are only deparsing the query (we are not about to execute it), so we @@ -3036,7 +3036,7 @@ get_target_list(List *targetList, deparse_context *context, } /* - * Figure out what the result column should be called. In the context + * Figure out what the result column should be called. In the context * of a view, use the view's tuple descriptor (so as to pick up the * effects of any column RENAME that's been done on the view). * Otherwise, just use what we can find in the TLE. @@ -3176,7 +3176,7 @@ get_rule_sortgroupclause(SortGroupClause *srt, List *tlist, bool force_colno, * expression is a constant, force it to be dumped with an explicit cast * as decoration --- this is because a simple integer constant is * ambiguous (and will be misinterpreted by findTargetlistEntry()) if we - * dump it without any decoration. Otherwise, just dump the expression + * dump it without any decoration. Otherwise, just dump the expression * normally. 
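The get_rule_sortgroupclause() hunk above turns on a subtle re-parsing rule: in ORDER BY and GROUP BY, a bare integer literal means "output column N", so a deparsed constant must carry decoration to survive a round trip. A standalone sketch of just that decision (the helper and the example type name are illustrative):

    #include <stdio.h>

    /*
     * Emit a sort/group key: constants get an explicit cast so that
     * "ORDER BY 1" cannot be re-read as "order by the first column".
     */
    static void
    emit_sort_key(char *out, size_t outlen, const char *valstr,
                  const char *typname, int is_bare_constant)
    {
        if (is_bare_constant)
            snprintf(out, outlen, "%s::%s", valstr, typname);   /* 1::int4 */
        else
            snprintf(out, outlen, "%s", valstr);
    }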
*/ if (force_colno) @@ -4292,7 +4292,7 @@ get_name_for_var_field(Var *var, int fieldno, /* * We now have an expression we can't expand any more, so see if - * get_expr_result_type() can do anything with it. If not, pass to + * get_expr_result_type() can do anything with it. If not, pass to * lookup_rowtype_tupdesc() which will probably fail, but will give an * appropriate error message while failing. */ @@ -4920,10 +4920,10 @@ get_rule_expr(Node *node, deparse_context *context, /* * If there's a refassgnexpr, we want to print the node in the - * format "array[subscripts] := refassgnexpr". This is not + * format "array[subscripts] := refassgnexpr". This is not * legal SQL, so decompilation of INSERT or UPDATE statements * should always use processIndirection as part of the - * statement-level syntax. We should only see this when + * statement-level syntax. We should only see this when * EXPLAIN tries to print the targetlist of a plan resulting * from such a statement. */ @@ -5082,7 +5082,7 @@ get_rule_expr(Node *node, deparse_context *context, /* * We cannot see an already-planned subplan in rule deparsing, - * only while EXPLAINing a query plan. We don't try to + * only while EXPLAINing a query plan. We don't try to * reconstruct the original SQL, just reference the subplan * that appears elsewhere in EXPLAIN's result. */ @@ -5155,14 +5155,14 @@ get_rule_expr(Node *node, deparse_context *context, * There is no good way to represent a FieldStore as real SQL, * so decompilation of INSERT or UPDATE statements should * always use processIndirection as part of the - * statement-level syntax. We should only get here when + * statement-level syntax. We should only get here when * EXPLAIN tries to print the targetlist of a plan resulting * from such a statement. The plan case is even harder than * ordinary rules would be, because the planner tries to * collapse multiple assignments to the same field or subfield * into one FieldStore; so we can see a list of target fields * not just one, and the arguments could be FieldStores - * themselves. We don't bother to try to print the target + * themselves. We don't bother to try to print the target * field names; we just print the source arguments, with a * ROW() around them if there's more than one. This isn't * terribly complete, but it's probably good enough for @@ -6058,7 +6058,7 @@ get_coercion_expr(Node *arg, deparse_context *context, * Since parse_coerce.c doesn't immediately collapse application of * length-coercion functions to constants, what we'll typically see in * such cases is a Const with typmod -1 and a length-coercion function - * right above it. Avoid generating redundant output. However, beware of + * right above it. Avoid generating redundant output. However, beware of * suppressing casts when the user actually wrote something like * 'foo'::text::char(3). */ @@ -6140,7 +6140,7 @@ get_const_expr(Const *constval, deparse_context *context, int showtype) /* * These types are printed without quotes unless they contain * values that aren't accepted by the scanner unquoted (e.g., - * 'NaN'). Note that strtod() and friends might accept NaN, + * 'NaN'). Note that strtod() and friends might accept NaN, * so we can't use that to test. * * In reality we only need to defend against infinity and NaN, @@ -6795,7 +6795,7 @@ get_opclass_name(Oid opclass, Oid actual_datatype, if (!OidIsValid(actual_datatype) || GetDefaultOpClass(actual_datatype, opcrec->opcmethod) != opclass) { - /* Okay, we need the opclass name. Do we need to qualify it? 
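The opclass hunk above, and the generate_function_name() comments further on, both apply one rule: print the bare name when an unqualified lookup along the search path would find the right object, and schema-qualify otherwise. A simplified standalone sketch (the real visibility test also accounts for earlier schemas hiding the name, and for identifier quoting):

    #include <stdio.h>
    #include <string.h>

    static void
    qualify_if_needed(char *out, size_t outlen,
                      const char *schema, const char *name,
                      const char **search_path, int npath)
    {
        for (int i = 0; i < npath; i++)
        {
            if (strcmp(search_path[i], schema) == 0)
            {
                snprintf(out, outlen, "%s", name);      /* visible: bare name */
                return;
            }
        }
        snprintf(out, outlen, "%s.%s", schema, name);   /* must qualify */
    }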
*/ + /* Okay, we need the opclass name. Do we need to qualify it? */ opcname = NameStr(opcrec->opcname); if (OpclassIsVisible(opclass)) appendStringInfo(buf, " %s", quote_identifier(opcname)); @@ -7090,9 +7090,9 @@ generate_relation_name(Oid relid, List *namespaces) * generate_function_name * Compute the name to display for a function specified by OID, * given that it is being called with the specified actual arg names and - * types. (Those matter because of ambiguous-function resolution rules.) + * types. (Those matter because of ambiguous-function resolution rules.) * - * The result includes all necessary quoting and schema-prefixing. We can + * The result includes all necessary quoting and schema-prefixing. We can * also pass back an indication of whether the function is variadic. */ static char * @@ -7120,7 +7120,7 @@ generate_function_name(Oid funcid, int nargs, List *argnames, /* * The idea here is to schema-qualify only if the parser would fail to * resolve the correct function given the unqualified func name with the - * specified argtypes. If the function is variadic, we should presume + * specified argtypes. If the function is variadic, we should presume * that VARIADIC will be included in the call. */ p_result = func_get_detail(list_make1(makeString(proname)), diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index c71e952f8e5..b90d2d7412c 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -72,7 +72,7 @@ * float8 oprjoin (internal, oid, internal, int2, internal); * * (Before Postgres 8.4, join estimators had only the first four of these - * parameters. That signature is still allowed, but deprecated.) The + * parameters. That signature is still allowed, but deprecated.) The * relationship between jointype and sjinfo is explained in the comments for * clause_selectivity() --- the short version is that jointype is usually * best ignored in favor of examining sjinfo. @@ -201,7 +201,7 @@ static Const *string_to_bytea_const(const char *str, size_t str_len); * * Note: this routine is also used to estimate selectivity for some * operators that are not "=" but have comparable selectivity behavior, - * such as "~=" (geometric approximate-match). Even for "=", we must + * such as "~=" (geometric approximate-match). Even for "=", we must * keep in mind that the left and right datatypes may differ. */ Datum @@ -286,7 +286,7 @@ var_eq_const(VariableStatData *vardata, Oid operator, /* * Is the constant "=" to any of the column's most common values? * (Although the given operator may not really be "=", we will assume - * that seeing whether it returns TRUE is an appropriate test. If you + * that seeing whether it returns TRUE is an appropriate test. If you * don't like this, maybe you shouldn't be using eqsel for your * operator...) */ @@ -420,7 +420,7 @@ var_eq_non_const(VariableStatData *vardata, Oid operator, * result averaged over all possible values whether common or * uncommon. (Essentially, we are assuming that the not-yet-known * comparison value is equally likely to be any of the possible - * values, regardless of their frequency in the table. Is that a good + * values, regardless of their frequency in the table. Is that a good * idea?) */ selec = 1.0 - stats->stanullfrac; @@ -643,7 +643,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc, * essentially using the histogram just as a representative sample. 
However, * small histograms are unlikely to be all that representative, so the caller * should be prepared to fall back on some other estimation approach when the - * histogram is missing or very small. It may also be prudent to combine this + * histogram is missing or very small. It may also be prudent to combine this * approach with another one when the histogram is small. * * If the actual histogram size is not at least min_hist_size, we won't bother @@ -661,7 +661,7 @@ mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc, * * Note that the result disregards both the most-common-values (if any) and * null entries. The caller is expected to combine this result with - * statistics for those portions of the column population. It may also be + * statistics for those portions of the column population. It may also be * prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs. */ double @@ -774,7 +774,7 @@ ineq_histogram_selectivity(PlannerInfo *root, * * If the binary search accesses the first or last histogram * entry, we try to replace that endpoint with the true column min - * or max as found by get_actual_variable_range(). This + * or max as found by get_actual_variable_range(). This * ameliorates misestimates when the min or max is moving as a * result of changes since the last ANALYZE. Note that this could * result in effectively including MCVs into the histogram that @@ -878,7 +878,7 @@ ineq_histogram_selectivity(PlannerInfo *root, /* * Watch out for the possibility that we got a NaN or - * Infinity from the division. This can happen + * Infinity from the division. This can happen * despite the previous checks, if for example "low" * is -Infinity. */ @@ -893,7 +893,7 @@ ineq_histogram_selectivity(PlannerInfo *root, * Ideally we'd produce an error here, on the grounds that * the given operator shouldn't have scalarXXsel * registered as its selectivity func unless we can deal - * with its operand types. But currently, all manner of + * with its operand types. But currently, all manner of * stuff is invoking scalarXXsel, so give a default * estimate until that can be fixed. */ @@ -919,7 +919,7 @@ ineq_histogram_selectivity(PlannerInfo *root, /* * The histogram boundaries are only approximate to begin with, - * and may well be out of date anyway. Therefore, don't believe + * and may well be out of date anyway. Therefore, don't believe * extremely small or large selectivity estimates --- unless we * got actual current endpoint values from the table. */ @@ -1116,7 +1116,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate) /* * If this is for a NOT LIKE or similar operator, get the corresponding - * positive-match operator and work with that. Set result to the correct + * positive-match operator and work with that. Set result to the correct * default estimate, too. */ if (negate) @@ -1320,7 +1320,7 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype, bool negate) /* * If we have most-common-values info, add up the fractions of the MCV * entries that satisfy MCV OP PATTERN. These fractions contribute - * directly to the result selectivity. Also add up the total fraction + * directly to the result selectivity. Also add up the total fraction * represented by MCV entries. */ mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true, @@ -2135,9 +2135,9 @@ eqjoinsel_inner(Oid operator, if (have_mcvs1 && have_mcvs2) { /* - * We have most-common-value lists for both relations. Run through + * We have most-common-value lists for both relations. 
Run through * the lists to see which MCVs actually join to each other with the - * given operator. This allows us to determine the exact join + * given operator. This allows us to determine the exact join * selectivity for the portion of the relations represented by the MCV * lists. We still have to estimate for the remaining population, but * in a skewed distribution this gives us a big leg up in accuracy. @@ -2169,7 +2169,7 @@ eqjoinsel_inner(Oid operator, /* * Note we assume that each MCV will match at most one member of the - * other MCV list. If the operator isn't really equality, there could + * other MCV list. If the operator isn't really equality, there could * be multiple matches --- but we don't look for them, both for speed * and because the math wouldn't add up... */ @@ -2377,9 +2377,9 @@ eqjoinsel_semi(Oid operator, if (have_mcvs1 && have_mcvs2 && OidIsValid(operator)) { /* - * We have most-common-value lists for both relations. Run through + * We have most-common-value lists for both relations. Run through * the lists to see which MCVs actually join to each other with the - * given operator. This allows us to determine the exact join + * given operator. This allows us to determine the exact join * selectivity for the portion of the relations represented by the MCV * lists. We still have to estimate for the remaining population, but * in a skewed distribution this gives us a big leg up in accuracy. @@ -2410,7 +2410,7 @@ eqjoinsel_semi(Oid operator, /* * Note we assume that each MCV will match at most one member of the - * other MCV list. If the operator isn't really equality, there could + * other MCV list. If the operator isn't really equality, there could * be multiple matches --- but we don't look for them, both for speed * and because the math wouldn't add up... */ @@ -2447,7 +2447,7 @@ eqjoinsel_semi(Oid operator, /* * Now we need to estimate the fraction of relation 1 that has at - * least one join partner. We know for certain that the matched MCVs + * least one join partner. We know for certain that the matched MCVs * do, so that gives us a lower bound, but we're really in the dark * about everything else. Our crude approach is: if nd1 <= nd2 then * assume all non-null rel1 rows have join partners, else assume for @@ -3044,11 +3044,11 @@ add_unique_group_var(PlannerInfo *root, List *varinfos, * case (all possible cross-product terms actually appear as groups) since * very often the grouped-by Vars are highly correlated. Our current approach * is as follows: - * 1. Expressions yielding boolean are assumed to contribute two groups, + * 1. Expressions yielding boolean are assumed to contribute two groups, * independently of their content, and are ignored in the subsequent - * steps. This is mainly because tests like "col IS NULL" break the + * steps. This is mainly because tests like "col IS NULL" break the * heuristic used in step 2 especially badly. - * 2. Reduce the given expressions to a list of unique Vars used. For + * 2. Reduce the given expressions to a list of unique Vars used. For * example, GROUP BY a, a + b is treated the same as GROUP BY a, b. * It is clearly correct not to count the same Var more than once. * It is also reasonable to treat f(x) the same as x: f() cannot @@ -3058,14 +3058,14 @@ add_unique_group_var(PlannerInfo *root, List *varinfos, * As a special case, if a GROUP BY expression can be matched to an * expressional index for which we have statistics, then we treat the * whole expression as though it were just a Var. - * 3. 
If the list contains Vars of different relations that are known equal + * 3. If the list contains Vars of different relations that are known equal * due to equivalence classes, then drop all but one of the Vars from each * known-equal set, keeping the one with smallest estimated # of values * (since the extra values of the others can't appear in joined rows). * Note the reason we only consider Vars of different relations is that * if we considered ones of the same rel, we'd be double-counting the * restriction selectivity of the equality in the next step. - * 4. For Vars within a single source rel, we multiply together the numbers + * 4. For Vars within a single source rel, we multiply together the numbers * of values, clamp to the number of rows in the rel (divided by 10 if * more than one Var), and then multiply by the selectivity of the * restriction clauses for that rel. When there's more than one Var, @@ -3076,7 +3076,7 @@ add_unique_group_var(PlannerInfo *root, List *varinfos, * by the restriction selectivity is effectively assuming that the * restriction clauses are independent of the grouping, which is a crummy * assumption, but it's hard to do better. - * 5. If there are Vars from multiple rels, we repeat step 4 for each such + * 5. If there are Vars from multiple rels, we repeat step 4 for each such * rel, and multiply the results together. * Note that rels not containing grouped Vars are ignored completely, as are * join clauses. Such rels cannot increase the number of groups, and we @@ -3107,7 +3107,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows) return 1.0; /* - * Count groups derived from boolean grouping expressions. For other + * Count groups derived from boolean grouping expressions. For other * expressions, find the unique Vars used, treating an expression as a Var * if we can find stats for it. For each one, record the statistical * estimate of number of distinct values (total in its table, without @@ -3196,7 +3196,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows) * Group Vars by relation and estimate total numdistinct. * * For each iteration of the outer loop, we process the frontmost Var in - * varinfos, plus all other Vars in the same relation. We remove these + * varinfos, plus all other Vars in the same relation. We remove these * Vars from the newvarinfos list for the next iteration. This is the * easiest way to group Vars of same rel together. */ @@ -3297,11 +3297,11 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows) * distribution, so this will have to do for now. * * We are passed the number of buckets the executor will use for the given - * input relation. If the data were perfectly distributed, with the same + * input relation. If the data were perfectly distributed, with the same * number of tuples going into each available bucket, then the bucketsize * fraction would be 1/nbuckets. But this happy state of affairs will occur * only if (a) there are at least nbuckets distinct data values, and (b) - * we have a not-too-skewed data distribution. Otherwise the buckets will + * we have a not-too-skewed data distribution. Otherwise the buckets will * be nonuniformly occupied. If the other relation in the join has a key * distribution similar to this one's, then the most-loaded buckets are * exactly those that will be probed most often. 
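The bucket-size reasoning above boils down to a clamp: an even distribution gives 1/nbuckets per bucket, but a hash bucket never holds less than one whole key group, so with fewer distinct values than buckets the floor is 1/ndistinct. A hedged rendering of just that arithmetic (the real estimate_hash_bucketsize() also folds in MCV skew and null fractions):

    /* Assumes ndistinct > 0 and nbuckets > 0. */
    static double
    estimate_bucket_fraction(double ndistinct, double nbuckets)
    {
        if (ndistinct < nbuckets)
            return 1.0 / ndistinct;     /* skewed by necessity */
        return 1.0 / nbuckets;          /* best case: even spread */
    }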
Therefore, the "average" @@ -3477,7 +3477,7 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue, * operators to estimate selectivity for the other's. This is outright * wrong in some cases --- in particular signed versus unsigned * interpretation could trip us up. But it's useful enough in the - * majority of cases that we do it anyway. Should think about more + * majority of cases that we do it anyway. Should think about more * rigorous ways to do it. */ switch (valuetypid) @@ -4061,7 +4061,7 @@ get_restriction_variable(PlannerInfo *root, List *args, int varRelid, right = (Node *) lsecond(args); /* - * Examine both sides. Note that when varRelid is nonzero, Vars of other + * Examine both sides. Note that when varRelid is nonzero, Vars of other * relations will be treated as pseudoconstants. */ examine_variable(root, left, varRelid, vardata); @@ -4224,7 +4224,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid, * XXX This means the Var comes from a JOIN or sub-SELECT. Later * add code to dig down into the join etc and see if we can trace * the variable to something with stats. (But beware of - * sub-SELECTs with DISTINCT/GROUP BY/etc. Perhaps there are no + * sub-SELECTs with DISTINCT/GROUP BY/etc. Perhaps there are no * cases where this would really be useful, because we'd have * flattened the subselect if it is??) */ @@ -4235,7 +4235,7 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid, /* * Okay, it's a more complicated expression. Determine variable - * membership. Note that when varRelid isn't zero, only vars of that + * membership. Note that when varRelid isn't zero, only vars of that * relation are considered "real" vars. */ varnos = pull_varnos(basenode); @@ -4284,13 +4284,13 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid, if (onerel) { /* - * We have an expression in vars of a single relation. Try to match + * We have an expression in vars of a single relation. Try to match * it to expressional index columns, in hopes of finding some * statistics. * * XXX it's conceivable that there are multiple matches with different * index opfamilies; if so, we need to pick one that matches the - * operator we are estimating for. FIXME later. + * operator we are estimating for. FIXME later. */ ListCell *ilist; @@ -4386,7 +4386,7 @@ get_variable_numdistinct(VariableStatData *vardata) double ntuples; /* - * Determine the stadistinct value to use. There are cases where we can + * Determine the stadistinct value to use. There are cases where we can * get an estimate even without a pg_statistic entry, or can get a better * value than is in pg_statistic. */ @@ -4502,7 +4502,7 @@ get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop, /* * XXX It's very tempting to try to use the actual column min and max, if - * we can get them relatively-cheaply with an index probe. However, since + * we can get them relatively-cheaply with an index probe. However, since * this function is called many times during join planning, that could * have unpleasant effects on planning speed. Need more investigation * before enabling this. @@ -4753,7 +4753,7 @@ get_actual_variable_range(PlannerInfo *root, VariableStatData *vardata, * and it can be very expensive if a lot of uncommitted rows * exist at the end of the index (because we'll laboriously * fetch each one and reject it). What seems like a good - * compromise is to use SnapshotDirty. That will accept + * compromise is to use SnapshotDirty. 
That will accept * uncommitted rows, and thus avoid fetching multiple heap * tuples in this scenario. On the other hand, it will reject * known-dead rows, and thus not give a bogus answer when the @@ -4892,7 +4892,7 @@ find_join_input_rel(PlannerInfo *root, Relids relids) * Check whether char is a letter (and, hence, subject to case-folding) * * In multibyte character sets, we can't use isalpha, and it does not seem - * worth trying to convert to wchar_t to use iswalpha. Instead, just assume + * worth trying to convert to wchar_t to use iswalpha. Instead, just assume * any multibyte char is potentially case-varying. */ static int @@ -5144,7 +5144,7 @@ pattern_fixed_prefix(Const *patt, Pattern_Type ptype, Oid collation, * together with info about MCVs and NULLs. * * We use the >= and < operators from the specified btree opfamily to do the - * estimation. The given variable and Const must be of the associated + * estimation. The given variable and Const must be of the associated * datatype. * * XXX Note: we make use of the upper bound to estimate operator selectivity @@ -5203,7 +5203,7 @@ prefix_selectivity(PlannerInfo *root, VariableStatData *vardata, /* * Merge the two selectivities in the same way as for a range query - * (see clauselist_selectivity()). Note that we don't need to worry + * (see clauselist_selectivity()). Note that we don't need to worry * about double-exclusion of nulls, since ineq_histogram_selectivity * doesn't count those anyway. */ @@ -5427,7 +5427,7 @@ regex_selectivity(const char *patt, int pattlen, bool case_insensitive, * that is not a bulletproof guarantee that an extension of the string might * not sort after it; an example is that "foo " is less than "foo!", but it * is not clear that a "dictionary" sort ordering will consider "foo!" less - * than "foo bar". CAUTION: Therefore, this function should be used only for + * than "foo bar". CAUTION: Therefore, this function should be used only for * estimation purposes when working in a non-C collation. * * To try to catch most cases where an extended string might otherwise sort @@ -5813,9 +5813,9 @@ genericcostestimate(PlannerInfo *root, * The above calculations are all per-index-scan. However, if we are in a * nestloop inner scan, we can expect the scan to be repeated (with * different search keys) for each row of the outer relation. Likewise, - * ScalarArrayOpExpr quals result in multiple index scans. This creates + * ScalarArrayOpExpr quals result in multiple index scans. This creates * the potential for cache effects to reduce the number of disk page - * fetches needed. We want to estimate the average per-scan I/O cost in + * fetches needed. We want to estimate the average per-scan I/O cost in * the presence of caching. * * We use the Mackert-Lohman formula (see costsize.c for details) to @@ -5888,7 +5888,7 @@ genericcostestimate(PlannerInfo *root, * evaluated once at the start of the scan to reduce them to runtime keys * to pass to the index AM (see nodeIndexscan.c). We model the per-tuple * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per - * indexqual operator. Because we have numIndexTuples as a per-scan + * indexqual operator. Because we have numIndexTuples as a per-scan * number, we have to multiply by num_sa_scans to get the correct result * for ScalarArrayOpExpr cases. Similarly add in costs for any index * ORDER BY expressions. @@ -5965,7 +5965,7 @@ btcostestimate(PG_FUNCTION_ARGS) * the index scan). 
Additional quals can suppress visits to the heap, so * it's OK to count them in indexSelectivity, but they should not count * for estimating numIndexTuples. So we must examine the given indexQuals - * to find out which ones count as boundary quals. We rely on the + * to find out which ones count as boundary quals. We rely on the * knowledge that they are given in index column order. * * For a RowCompareExpr, we consider only the first column, just as @@ -6631,7 +6631,7 @@ gincostestimate(PG_FUNCTION_ARGS) /* * nPendingPages can be trusted, but the other fields are as of the last - * VACUUM. Scale them by the ratio numPages / nTotalPages to account for + * VACUUM. Scale them by the ratio numPages / nTotalPages to account for * growth since then. If the fields are zero (implying no VACUUM at all, * and an index created pre-9.1), assume all pages are entry pages. */ @@ -6776,7 +6776,7 @@ gincostestimate(PG_FUNCTION_ARGS) /* * Add an estimate of entry pages read by partial match algorithm. It's a - * scan over leaf pages in entry tree. We haven't any useful stats here, + * scan over leaf pages in entry tree. We haven't any useful stats here, * so estimate it as proportion. */ entryPagesFetched += ceil(numEntryPages * counts.partialEntries / numEntries); diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c index d11962802a7..b6b3478fc9a 100644 --- a/src/backend/utils/adt/timestamp.c +++ b/src/backend/utils/adt/timestamp.c @@ -374,7 +374,7 @@ AdjustTimestampForTypmod(Timestamp *time, int32 typmod) * Note: this round-to-nearest code is not completely consistent about * rounding values that are exactly halfway between integral values. * On most platforms, rint() will implement round-to-nearest-even, but - * the integer code always rounds up (away from zero). Is it worth + * the integer code always rounds up (away from zero). Is it worth * trying to be consistent? */ #ifdef HAVE_INT64_TIMESTAMP @@ -973,7 +973,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod) * that fields to the right of the last one specified are zeroed out, * but those to the left of it remain valid. Thus for example there * is no operational difference between INTERVAL YEAR TO MONTH and - * INTERVAL MONTH. In some cases we could meaningfully enforce that + * INTERVAL MONTH. In some cases we could meaningfully enforce that * higher-order fields are zero; for example INTERVAL DAY could reject * nonzero "month" field. However that seems a bit pointless when we * can't do it consistently. (We cannot enforce a range limit on the @@ -982,9 +982,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod) * * Note: before PG 8.4 we interpreted a limited set of fields as * actually causing a "modulo" operation on a given value, potentially - * losing high-order as well as low-order information. But there is + * losing high-order as well as low-order information. But there is * no support for such behavior in the standard, and it seems fairly - * undesirable on data consistency grounds anyway. Now we only + * undesirable on data consistency grounds anyway. Now we only * perform truncation or rounding of low-order fields. */ if (range == INTERVAL_FULL_RANGE) @@ -1104,7 +1104,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod) /* * Note: this round-to-nearest code is not completely consistent * about rounding values that are exactly halfway between integral - * values. On most platforms, rint() will implement + * values. 
On most platforms, rint() will implement * round-to-nearest-even, but the integer code always rounds up * (away from zero). Is it worth trying to be consistent? */ @@ -1314,7 +1314,7 @@ timestamptz_to_time_t(TimestampTz t) * Produce a C-string representation of a TimestampTz. * * This is mostly for use in emitting messages. The primary difference - * from timestamptz_out is that we force the output format to ISO. Note + * from timestamptz_out is that we force the output format to ISO. Note * also that the result is in a static buffer, not pstrdup'd. */ const char * @@ -1484,7 +1484,7 @@ recalc_t: * * First, convert to an integral timestamp, avoiding possibly * platform-specific roundoff-in-wrong-direction errors, and adjust to - * Unix epoch. Then see if we can convert to pg_time_t without loss. This + * Unix epoch. Then see if we can convert to pg_time_t without loss. This * coding avoids hardwiring any assumptions about the width of pg_time_t, * so it should behave sanely on machines without int64. */ @@ -4407,7 +4407,7 @@ timestamp_zone(PG_FUNCTION_ARGS) PG_RETURN_TIMESTAMPTZ(timestamp); /* - * Look up the requested timezone. First we look in the date token table + * Look up the requested timezone. First we look in the date token table * (to handle cases like "EST"), and if that fails, we look in the * timezone database (to handle cases like "America/New_York"). (This * matches the order in which timestamp input checks the cases; it's @@ -4581,7 +4581,7 @@ timestamptz_zone(PG_FUNCTION_ARGS) PG_RETURN_TIMESTAMP(timestamp); /* - * Look up the requested timezone. First we look in the date token table + * Look up the requested timezone. First we look in the date token table * (to handle cases like "EST"), and if that fails, we look in the * timezone database (to handle cases like "America/New_York"). (This * matches the order in which timestamp input checks the cases; it's diff --git a/src/backend/utils/adt/tsginidx.c b/src/backend/utils/adt/tsginidx.c index 4cb961146b8..3ae552cb93d 100644 --- a/src/backend/utils/adt/tsginidx.c +++ b/src/backend/utils/adt/tsginidx.c @@ -237,7 +237,7 @@ gin_tsquery_consistent(PG_FUNCTION_ARGS) * Formerly, gin_extract_tsvector had only two arguments. Now it has three, * but we still need a pg_proc entry with two args to support reloading * pre-9.1 contrib/tsearch2 opclass declarations. This compatibility - * function should go away eventually. (Note: you might say "hey, but the + * function should go away eventually. (Note: you might say "hey, but the * code above is only *using* two args, so let's just declare it that way". * If you try that you'll find the opr_sanity regression test complains.) */ diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c index 7b84637f8cf..c9f0f9f7cfe 100644 --- a/src/backend/utils/adt/varchar.c +++ b/src/backend/utils/adt/varchar.c @@ -256,7 +256,7 @@ bpcharsend(PG_FUNCTION_ARGS) * * Truncation rules: for an explicit cast, silently truncate to the given * length; for an implicit cast, raise error unless extra characters are - * all spaces. (This is sort-of per SQL: the spec would actually have us + * all spaces. (This is sort-of per SQL: the spec would actually have us * raise a "completion condition" for the explicit cast case, but Postgres * hasn't got such a concept.) */ @@ -550,7 +550,7 @@ varcharsend(PG_FUNCTION_ARGS) * * Truncation rules: for an explicit cast, silently truncate to the given * length; for an implicit cast, raise error unless extra characters are - * all spaces. 
(This is sort-of per SQL: the spec would actually have us + * all spaces. (This is sort-of per SQL: the spec would actually have us * raise a "completion condition" for the explicit cast case, but Postgres * hasn't got such a concept.) */ diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c index e9ea6288048..dd115900d4b 100644 --- a/src/backend/utils/adt/varlena.c +++ b/src/backend/utils/adt/varlena.c @@ -528,7 +528,7 @@ textlen(PG_FUNCTION_ARGS) * Does the real work for textlen() * * This is broken out so it can be called directly by other string processing - * functions. Note that the argument is passed as a Datum, to indicate that + * functions. Note that the argument is passed as a Datum, to indicate that * it may still be in compressed form. We can avoid decompressing it at all * in some cases. */ @@ -700,7 +700,7 @@ text_substr_no_len(PG_FUNCTION_ARGS) * Does the real work for text_substr() and text_substr_no_len() * * This is broken out so it can be called directly by other string processing - * functions. Note that the argument is passed as a Datum, to indicate that + * functions. Note that the argument is passed as a Datum, to indicate that * it may still be in compressed/toasted form. We can avoid detoasting all * of it in some cases. * @@ -1050,7 +1050,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state) * searched (t1) and the "needle" is the pattern being sought (t2). * * If the needle is empty or bigger than the haystack then there is no - * point in wasting cycles initializing the table. We also choose not to + * point in wasting cycles initializing the table. We also choose not to * use B-M-H for needles of length 1, since the skip table can't possibly * save anything in that case. */ @@ -1066,7 +1066,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state) * declaration of TextPositionState allows up to 256 elements, but for * short search problems we don't really want to have to initialize so * many elements --- it would take too long in comparison to the - * actual search time. So we choose a useful skip table size based on + * actual search time. So we choose a useful skip table size based on * the haystack length minus the needle length. The closer the needle * length is to the haystack length the less useful skipping becomes. * @@ -1098,7 +1098,7 @@ text_position_setup(text *t1, text *t2, TextPositionState *state) state->skiptable[i] = len2; /* - * Now examine the needle. For each character except the last one, + * Now examine the needle. For each character except the last one, * set the corresponding table element to the appropriate skip * distance. Note that when two characters share the same skip table * entry, the one later in the needle must determine the skip @@ -1186,11 +1186,11 @@ text_position_next(int start_pos, TextPositionState *state) /* * No match, so use the haystack char at hptr to decide how - * far to advance. If the needle had any occurrence of that + * far to advance. If the needle had any occurrence of that * character (or more precisely, one sharing the same * skiptable entry) before its last character, then we advance * far enough to align the last such needle character with - * that haystack position. Otherwise we can advance by the + * that haystack position. Otherwise we can advance by the * whole needle length. 
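The text_position_next() comments above are describing Boyer-Moore-Horspool. A self-contained sketch of the same idea, using a fixed 256-entry skip table rather than the adaptive table sizing those comments discuss:

    #include <stddef.h>
    #include <string.h>

    static const char *
    bmh_search(const char *haystack, size_t hlen,
               const char *needle, size_t nlen)
    {
        size_t      skip[256];

        if (nlen == 0 || nlen > hlen)
            return NULL;

        /* A byte that never appears in the needle allows a full-length skip. */
        for (int c = 0; c < 256; c++)
            skip[c] = nlen;

        /*
         * For each needle byte except the last, skip just far enough to
         * align its occurrence with the mismatched haystack byte; later
         * occurrences overwrite earlier ones, as the comments above
         * require.
         */
        for (size_t i = 0; i < nlen - 1; i++)
            skip[(unsigned char) needle[i]] = nlen - 1 - i;

        for (size_t pos = 0; pos + nlen <= hlen;)
        {
            if (memcmp(haystack + pos, needle, nlen) == 0)
                return haystack + pos;
            /* Advance by the skip distance of the window's last byte. */
            pos += skip[(unsigned char) haystack[pos + nlen - 1]];
        }
        return NULL;
    }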
*/ hptr += state->skiptable[(unsigned char) *hptr & skiptablemask]; @@ -1242,11 +1242,11 @@ text_position_next(int start_pos, TextPositionState *state) /* * No match, so use the haystack char at hptr to decide how - * far to advance. If the needle had any occurrence of that + * far to advance. If the needle had any occurrence of that * character (or more precisely, one sharing the same * skiptable entry) before its last character, then we advance * far enough to align the last such needle character with - * that haystack position. Otherwise we can advance by the + * that haystack position. Otherwise we can advance by the * whole needle length. */ hptr += state->skiptable[*hptr & skiptablemask]; @@ -1281,7 +1281,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2, Oid collid) /* * Unfortunately, there is no strncoll(), so in the non-C locale case we - * have to do some memory copying. This turns out to be significantly + * have to do some memory copying. This turns out to be significantly * slower, so we optimize the case where LC_COLLATE is C. We also try to * optimize relatively-short strings by avoiding palloc/pfree overhead. */ @@ -2266,7 +2266,7 @@ textToQualifiedNameList(text *textval) * SplitIdentifierString --- parse a string containing identifiers * * This is the guts of textToQualifiedNameList, and is exported for use in - * other situations such as parsing GUC variables. In the GUC case, it's + * other situations such as parsing GUC variables. In the GUC case, it's * important to avoid memory leaks, so the API is designed to minimize the * amount of stuff that needs to be allocated and freed. * @@ -2274,7 +2274,7 @@ textToQualifiedNameList(text *textval) * rawstring: the input string; must be overwritable! On return, it's * been modified to contain the separated identifiers. * separator: the separator punctuation expected between identifiers - * (typically '.' or ','). Whitespace may also appear around + * (typically '.' or ','). Whitespace may also appear around * identifiers. * Outputs: * namelist: filled with a palloc'd list of pointers to identifiers within @@ -2343,7 +2343,7 @@ SplitIdentifierString(char *rawstring, char separator, * * XXX because we want to overwrite the input in-place, we cannot * support a downcasing transformation that increases the string - * length. This is not a problem given the current implementation + * length. This is not a problem given the current implementation * of downcase_truncate_identifier, but we'll probably have to do * something about this someday. */ @@ -2694,7 +2694,7 @@ check_replace_text_has_escape_char(const text *replace_text) * appendStringInfoRegexpSubstr * * Append replace_text to str, substituting regexp back references for - * \n escapes. start_ptr is the start of the match in the source string, + * \n escapes. start_ptr is the start of the match in the source string, * at logical character position data_pos. */ static void @@ -2777,7 +2777,7 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text, if (so != -1 && eo != -1) { /* - * Copy the text that is back reference of regexp. Note so and eo + * Copy the text that is back reference of regexp. Note so and eo * are counted in characters not bytes. */ char *chunk_start; diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c index 3261eac1ce8..bfaf0a0be70 100644 --- a/src/backend/utils/adt/xml.c +++ b/src/backend/utils/adt/xml.c @@ -19,7 +19,7 @@ * fail. For one thing, this avoids having to manage variant catalog * installations. 
But it also has nice effects such as that you can * dump a database containing XML type data even if the server is not - * linked with libxml. Thus, make sure xml_out() works even if nothing + * linked with libxml. Thus, make sure xml_out() works even if nothing * else does. */ @@ -254,7 +254,7 @@ xml_out(PG_FUNCTION_ARGS) xmltype *x = PG_GETARG_XML_P(0); /* - * xml_out removes the encoding property in all cases. This is because we + * xml_out removes the encoding property in all cases. This is because we * cannot control from here whether the datum will be converted to a * different client encoding, so we'd do more harm than good by including * it. @@ -425,7 +425,7 @@ xmlcomment(PG_FUNCTION_ARGS) /* * TODO: xmlconcat needs to merge the notations and unparsed entities - * of the argument values. Not very important in practice, though. + * of the argument values. Not very important in practice, though. */ xmltype * xmlconcat(List *args) @@ -559,7 +559,7 @@ xmlelement(XmlExprState *xmlExpr, ExprContext *econtext) /* * We first evaluate all the arguments, then start up libxml and create - * the result. This avoids issues if one of the arguments involves a call + * the result. This avoids issues if one of the arguments involves a call * to some other function or subsystem that wants to use libxml on its own * terms. */ @@ -849,7 +849,7 @@ xml_is_document(xmltype *arg) * pg_xml_init --- set up for use of libxml * * This should be called by each function that is about to use libxml - * facilities. It has two responsibilities: verify compatibility with the + * facilities. It has two responsibilities: verify compatibility with the * loaded libxml version (done on first call in a session) and establish * or re-establish our libxml error handler. The latter needs to be done * anytime we might have passed control to add-on modules (eg libperl) which @@ -909,7 +909,7 @@ pg_xml_init(void) resetStringInfo(xml_err_buf); /* - * We re-establish the error callback function every time. This makes + * We re-establish the error callback function every time. This makes * it safe for other subsystems (PL/Perl, say) to also use libxml with * their own callbacks ... so long as they likewise set up the * callbacks on every use. It's cheap enough to not be worth worrying @@ -1116,7 +1116,7 @@ finished: /* * Write an XML declaration. On output, we adjust the XML declaration - * as follows. (These rules are the moral equivalent of the clause + * as follows. (These rules are the moral equivalent of the clause * "Serialization of an XML value" in the SQL standard.) * * We try to avoid generating an XML declaration if possible. This is @@ -1638,8 +1638,8 @@ map_xml_name_to_sql_identifier(char *name) * * When xml_escape_strings is true, then certain characters in string * values are replaced by entity references (< etc.), as specified - * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is - * wanted. The false case is mainly useful when the resulting value + * in SQL/XML:2008 section 9.8 GR 9) a) iii). This is normally what is + * wanted. The false case is mainly useful when the resulting value * is used with xmlTextWriterWriteAttribute() to write out an * attribute, because that function does the escaping itself. */ @@ -1909,13 +1909,13 @@ _SPI_strdup(const char *s) * * There are two kinds of mappings: Mapping SQL data (table contents) * to XML documents, and mapping SQL structure (the "schema") to XML - * Schema. And there are functions that do both at the same time. + * Schema. 
And there are functions that do both at the same time. * * Then you can map a database, a schema, or a table, each in both * ways. This breaks down recursively: Mapping a database invokes * mapping schemas, which invokes mapping tables, which invokes * mapping rows, which invokes mapping columns, although you can't - * call the last two from the outside. Because of this, there are a + * call the last two from the outside. Because of this, there are a * number of xyz_internal() functions which are to be called both from * the function manager wrapper and from some upper layer in a * recursive call. @@ -1924,7 +1924,7 @@ _SPI_strdup(const char *s) * nulls, tableforest, and targetns mean. * * Some style guidelines for XML output: Use double quotes for quoting - * XML attributes. Indent XML elements by two spaces, but remember + * XML attributes. Indent XML elements by two spaces, but remember * that a lot of code is called recursively at different levels, so * it's better not to indent rather than create output that indents * and outdents weirdly. Add newlines to make the output look nice. @@ -2088,12 +2088,12 @@ cursor_to_xml(PG_FUNCTION_ARGS) * Write the start tag of the root element of a data mapping. * * top_level means that this is the very top level of the eventual - * output. For example, when the user calls table_to_xml, then a call + * output. For example, when the user calls table_to_xml, then a call * with a table name to this function is the top level. When the user * calls database_to_xml, then a call with a schema name to this * function is not the top level. If top_level is false, then the XML * namespace declarations are omitted, because they supposedly already - * appeared earlier in the output. Repeating them is not wrong, but + * appeared earlier in the output. Repeating them is not wrong, but * it looks ugly. */ static void @@ -3031,7 +3031,7 @@ map_sql_typecoll_to_xmlschema_types(List *tupdesc_list) * SQL/XML:2008 sections 9.5 and 9.6. * * (The distinction between 9.5 and 9.6 is basically that 9.6 adds - * a name attribute, which this function does. The name-less version + * a name attribute, which this function does. The name-less version * 9.5 doesn't appear to be required anywhere.) */ static const char * @@ -3209,7 +3209,7 @@ map_sql_type_to_xmlschema_type(Oid typeoid, int typmod) /* * Map an SQL row to an XML element, taking the row from the active - * SPI cursor. See also SQL/XML:2008 section 9.10. + * SPI cursor. See also SQL/XML:2008 section 9.10. */ static void SPI_sql_row_to_xmlelement(int rownum, StringInfo result, char *tablename, diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c index 7018ccfe62a..b6f00478e9c 100644 --- a/src/backend/utils/cache/attoptcache.c +++ b/src/backend/utils/cache/attoptcache.c @@ -48,7 +48,7 @@ typedef struct * Flush all cache entries when pg_attribute is updated. * * When pg_attribute is updated, we must flush the cache entry at least - * for that attribute. Currently, we just flush them all. Since attribute + * for that attribute. Currently, we just flush them all. Since attribute * options are not currently used in performance-critical paths (such as * query execution), this seems OK. 
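The attoptcache hunk above ends by noting that the whole cache is flushed on any pg_attribute change. A deliberately crude standalone sketch of that flush-everything approach, with a fixed array standing in for the real hash table:

    #include <stdbool.h>

    #define MAX_ATTOPT_ENTRIES 128

    typedef struct
    {
        unsigned int key;       /* identifies (relation, attnum) */
        void       *options;    /* parsed attribute options */
        bool        valid;
    } AttoptEntry;

    static AttoptEntry attopt_cache[MAX_ATTOPT_ENTRIES];

    /*
     * Invalidation callback: something in pg_attribute changed. Finding
     * the one affected entry is possible but not worth the code, so mark
     * everything stale and let later lookups rebuild on demand.
     */
    static void
    attopt_cache_flush(void)
    {
        for (int i = 0; i < MAX_ATTOPT_ENTRIES; i++)
            attopt_cache[i].valid = false;
    }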
*/ diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c index 00f17d9edc6..81dcf3361f8 100644 --- a/src/backend/utils/cache/catcache.c +++ b/src/backend/utils/cache/catcache.c @@ -823,7 +823,7 @@ InitCatCache(int id, * CatalogCacheInitializeCache * * This function does final initialization of a catcache: obtain the tuple - * descriptor and set up the hash and equality function links. We assume + * descriptor and set up the hash and equality function links. We assume * that the relcache entry can be opened at this point! */ #ifdef CACHEDEBUG @@ -1048,7 +1048,7 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey) * if necessary (on the first access to a particular cache). * * The result is NULL if not found, or a pointer to a HeapTuple in - * the cache. The caller must not modify the tuple, and must call + * the cache. The caller must not modify the tuple, and must call * ReleaseCatCache() when done with it. * * The search key values should be expressed as Datums of the key columns' @@ -1175,8 +1175,8 @@ SearchCatCache(CatCache *cache, * the relation --- for example, due to shared-cache-inval messages being * processed during heap_open(). This is OK. It's even possible for one * of those lookups to find and enter the very same tuple we are trying to - * fetch here. If that happens, we will enter a second copy of the tuple - * into the cache. The first copy will never be referenced again, and + * fetch here. If that happens, we will enter a second copy of the tuple + * into the cache. The first copy will never be referenced again, and * will eventually age out of the cache, so there's no functional problem. * This case is rare enough that it's not worth expending extra cycles to * detect. @@ -1215,7 +1215,7 @@ SearchCatCache(CatCache *cache, * * In bootstrap mode, we don't build negative entries, because the cache * invalidation mechanism isn't alive and can't clear them if the tuple - * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need + * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need * cache inval for that.) */ if (ct == NULL) @@ -1502,7 +1502,7 @@ SearchCatCacheList(CatCache *cache, /* * We are now past the last thing that could trigger an elog before we * have finished building the CatCList and remembering it in the - * resource owner. So it's OK to fall out of the PG_TRY, and indeed + * resource owner. So it's OK to fall out of the PG_TRY, and indeed * we'd better do so before we start marking the members as belonging * to the list. */ @@ -1592,7 +1592,7 @@ ReleaseCatCacheList(CatCList *list) /* * CatalogCacheCreateEntry * Create a new CatCTup entry, copying the given HeapTuple and other - * supplied data into it. The new entry initially has refcount 0. + * supplied data into it. The new entry initially has refcount 0. */ static CatCTup * CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, @@ -1727,7 +1727,7 @@ build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys) * the specified relation, find all catcaches it could be in, compute the * correct hash value for each such catcache, and call the specified function * to record the cache id, hash value, and tuple ItemPointer in inval.c's - * lists. CatalogCacheIdInvalidate will be called later, if appropriate, + * lists. CatalogCacheIdInvalidate will be called later, if appropriate, * using the recorded information. 
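The PrepareToInvalidateCacheTuple() comment above describes a fan-out: one changed catalog tuple can live in several catcaches, and each needs its own invalidation record. An illustrative standalone sketch (in the real code the hash is computed per cache from that cache's key columns; a single precomputed hash is passed here for brevity):

    typedef void (*RegisterFunc)(int cacheid, unsigned int hashvalue);

    struct cache_desc
    {
        int         reloid;     /* catalog this cache is built on */
    };

    static struct cache_desc caches[64];
    static int      ncaches = 0;

    static void
    prepare_invalidate(int changed_reloid, unsigned int keyhash,
                       RegisterFunc record)
    {
        for (int i = 0; i < ncaches; i++)
        {
            /* Every cache built on the changed catalog gets a record. */
            if (caches[i].reloid == changed_reloid)
                record(i, keyhash);
        }
    }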
* * Note that it is irrelevant whether the given tuple is actually loaded diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c index 4249bd33765..396cc0bea69 100644 --- a/src/backend/utils/cache/inval.c +++ b/src/backend/utils/cache/inval.c @@ -29,23 +29,23 @@ * * If we successfully complete the transaction, we have to broadcast all * these invalidation events to other backends (via the SI message queue) - * so that they can flush obsolete entries from their caches. Note we have + * so that they can flush obsolete entries from their caches. Note we have * to record the transaction commit before sending SI messages, otherwise * the other backends won't see our updated tuples as good. * * When a subtransaction aborts, we can process and discard any events - * it has queued. When a subtransaction commits, we just add its events + * it has queued. When a subtransaction commits, we just add its events * to the pending lists of the parent transaction. * * In short, we need to remember until xact end every insert or delete - * of a tuple that might be in the system caches. Updates are treated as + * of a tuple that might be in the system caches. Updates are treated as * two events, delete + insert, for simplicity. (There are cases where * it'd be possible to record just one event, but we don't currently try.) * * We do not need to register EVERY tuple operation in this way, just those - * on tuples in relations that have associated catcaches. We do, however, + * on tuples in relations that have associated catcaches. We do, however, * have to register every operation on every tuple that *could* be in a - * catcache, whether or not it currently is in our cache. Also, if the + * catcache, whether or not it currently is in our cache. Also, if the * tuple is in a relation that has multiple catcaches, we need to register * an invalidation message for each such catcache. catcache.c's * PrepareToInvalidateCacheTuple() routine provides the knowledge of which @@ -103,7 +103,7 @@ /* * To minimize palloc traffic, we keep pending requests in successively- * larger chunks (a slightly more sophisticated version of an expansible - * array). All request types can be stored as SharedInvalidationMessage + * array). All request types can be stored as SharedInvalidationMessage * records. The ordering of requests within a list is never significant. */ typedef struct InvalidationChunk @@ -630,7 +630,7 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple) * This essentially means that only backends in this same database * will react to the relcache flush request. This is in fact * appropriate, since only those backends could see our pg_attribute - * change anyway. It looks a bit ugly though. (In practice, shared + * change anyway. It looks a bit ugly though. (In practice, shared * relations can't have schema changes after bootstrap, so we should * never come here for a shared rel anyway.) */ @@ -642,7 +642,7 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple) /* * When a pg_index row is updated, we should send out a relcache inval - * for the index relation. As above, we don't know the shared status + * for the index relation. As above, we don't know the shared status * of the index, but in practice it doesn't matter since indexes of * shared catalogs can't have such updates. */ @@ -688,7 +688,7 @@ AcceptInvalidationMessages(void) * * If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY. 
This * slows things by at least a factor of 10000, so I wouldn't suggest - * trying to run the entire regression tests that way. It's useful to try + * trying to run the entire regression tests that way. It's useful to try * a few simple tests, to make sure that cache reload isn't subject to * internal cache-flush hazards, but after you've done a few thousand * recursive reloads it's unlikely you'll learn more. @@ -901,12 +901,12 @@ ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs, * If isCommit, we must send out the messages in our PriorCmdInvalidMsgs list * to the shared invalidation message queue. Note that these will be read * not only by other backends, but also by our own backend at the next - * transaction start (via AcceptInvalidationMessages). This means that + * transaction start (via AcceptInvalidationMessages). This means that * we can skip immediate local processing of anything that's still in * CurrentCmdInvalidMsgs, and just send that list out too. * * If not isCommit, we are aborting, and must locally process the messages - * in PriorCmdInvalidMsgs. No messages need be sent to other backends, + * in PriorCmdInvalidMsgs. No messages need be sent to other backends, * since they'll not have seen our changed tuples anyway. We can forget * about CurrentCmdInvalidMsgs too, since those changes haven't touched * the caches yet. @@ -965,11 +965,11 @@ AtEOXact_Inval(bool isCommit) * parent's PriorCmdInvalidMsgs list. * * If not isCommit, we are aborting, and must locally process the messages - * in PriorCmdInvalidMsgs. No messages need be sent to other backends. + * in PriorCmdInvalidMsgs. No messages need be sent to other backends. * We can forget about CurrentCmdInvalidMsgs too, since those changes haven't * touched the caches yet. * - * In any case, pop the transaction stack. We need not physically free memory + * In any case, pop the transaction stack. We need not physically free memory * here, since CurTransactionContext is about to be emptied anyway * (if aborting). Beware of the possibility of aborting the same nesting * level twice, though. @@ -1025,7 +1025,7 @@ AtEOSubXact_Inval(bool isCommit) * in a transaction. * * Here, we send no messages to the shared queue, since we don't know yet if - * we will commit. We do need to locally process the CurrentCmdInvalidMsgs + * we will commit. We do need to locally process the CurrentCmdInvalidMsgs * list, so as to flush our caches of any entries we have outdated in the * current command. We then move the current-cmd list over to become part * of the prior-cmds list. @@ -1155,7 +1155,7 @@ CacheInvalidateRelcacheByRelid(Oid relid) * * Sending this type of invalidation msg forces other backends to close open * smgr entries for the rel. This should be done to flush dangling open-file - * references when the physical rel is being dropped or truncated. Because + * references when the physical rel is being dropped or truncated. Because * these are nontransactional (i.e., not-rollback-able) operations, we just * send the inval message immediately without any queuing. * diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c index 6435541bb63..c90909c95cd 100644 --- a/src/backend/utils/cache/lsyscache.c +++ b/src/backend/utils/cache/lsyscache.c @@ -182,13 +182,13 @@ get_opfamily_member(Oid opfamily, Oid lefttype, Oid righttype, * (This indicates that the operator is not a valid ordering operator.) 
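Condensing the end-of-transaction rules from the inval.c hunks above into pseudo-C (a restatement of the comments only; routine names are loosely borrowed and argument shapes simplified):

    if (isCommit)
    {
        /* record commit first, then let everyone (including our own next
         * transaction start) see the invalidations */
        SendSharedInvalidMessages(PriorCmdInvalidMsgs);
        SendSharedInvalidMessages(CurrentCmdInvalidMsgs);  /* no local pass */
    }
    else
    {
        /* aborting: nobody else ever saw our tuples, so fix only our caches */
        ProcessInvalidationMessages(PriorCmdInvalidMsgs);
        /* CurrentCmdInvalidMsgs never touched the caches; just forget it */
    }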
* * Note: the operator could be registered in multiple families, for example - * if someone were to build a "reverse sort" opfamily. This would result in + * if someone were to build a "reverse sort" opfamily. This would result in * uncertainty as to whether "ORDER BY USING op" would default to NULLS FIRST * or NULLS LAST, as well as inefficient planning due to failure to match up * pathkeys that should be the same. So we want a determinate result here. * Because of the way the syscache search works, we'll use the interpretation * associated with the opfamily with smallest OID, which is probably - * determinate enough. Since there is no longer any particularly good reason + * determinate enough. Since there is no longer any particularly good reason * to build reverse-sort opfamilies, it doesn't seem worth expending any * additional effort on ensuring consistency. */ @@ -386,7 +386,7 @@ get_ordering_op_for_equality_op(Oid opno, bool use_lhs_type) * * The planner currently uses simple equal() tests to compare the lists * returned by this function, which makes the list order relevant, though - * strictly speaking it should not be. Because of the way syscache list + * strictly speaking it should not be. Because of the way syscache list * searches are handled, in normal operation the result will be sorted by OID * so everything works fine. If running with system index usage disabled, * the result ordering is unspecified and hence the planner might fail to @@ -1195,7 +1195,7 @@ op_mergejoinable(Oid opno, Oid inputtype) * * In some cases (currently only array_eq), hashjoinability depends on the * specific input data type the operator is invoked for, so that must be - * passed as well. We currently assume that only one input's type is needed + * passed as well. We currently assume that only one input's type is needed * to check this --- by convention, pass the left input's data type. */ bool @@ -1825,7 +1825,7 @@ get_typbyval(Oid typid) * A two-fer: given the type OID, return both typlen and typbyval. * * Since both pieces of info are needed to know how to copy a Datum, - * many places need both. Might as well get them with one cache lookup + * many places need both. Might as well get them with one cache lookup * instead of two. Also, this routine raises an error instead of * returning a bogus value when given a bad type OID. */ diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index 5dcea3db33a..8894ff8076f 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -12,16 +12,16 @@ * * The plan cache manager itself is principally responsible for tracking * whether cached plans should be invalidated because of schema changes in - * the objects they depend on. When (and if) the next demand for a cached + * the objects they depend on. When (and if) the next demand for a cached * plan occurs, the query will be replanned. Note that this could result * in an error, for example if a column referenced by the query is no - * longer present. The creator of a cached plan can specify whether it + * longer present. The creator of a cached plan can specify whether it * is allowable for the query to change output tupdesc on replan (this * could happen with "SELECT *" for example) --- if so, it's up to the * caller to notice changes and cope with them. * * Currently, we track exactly the dependencies of plans on relations and - * user-defined functions. On relcache invalidation events or pg_proc + * user-defined functions. 
On relcache invalidation events or pg_proc * syscache invalidation events, we invalidate just those plans that depend * on the particular object being modified. (Note: this scheme assumes * that any table modification that requires replanning will generate a @@ -209,7 +209,7 @@ CreateCachedPlan(Node *raw_parse_tree, * avoids extra copy steps during plan construction. If the query ever does * need replanning, we'll generate a separate new CachedPlan at that time, but * the CachedPlanSource and the initial CachedPlan share the caller-provided - * context and go away together when neither is needed any longer. (Because + * context and go away together when neither is needed any longer. (Because * the parser and planner generate extra cruft in addition to their real * output, this approach means that the context probably contains a bunch of * useless junk as well as the useful trees. Hence, this method is a @@ -300,7 +300,7 @@ FastCreateCachedPlan(Node *raw_parse_tree, * CachedPlanSetParserHook: set up to use parser callback hooks * * Use this when a caller wants to manage parameter information via parser - * callbacks rather than a fixed parameter-types list. Beware that the + * callbacks rather than a fixed parameter-types list. Beware that the * information pointed to by parserSetupArg must be valid for as long as * the cached plan might be replanned! */ @@ -377,7 +377,7 @@ StoreCachedPlan(CachedPlanSource *plansource, { /* * Planner already extracted dependencies, we don't have to ... except - * in the case of EXPLAIN. We assume here that EXPLAIN can't appear + * in the case of EXPLAIN. We assume here that EXPLAIN can't appear * in a list with other commands. */ plan->relationOids = plan->invalItems = NIL; @@ -410,7 +410,7 @@ StoreCachedPlan(CachedPlanSource *plansource, * DropCachedPlan: destroy a cached plan. * * Actually this only destroys the CachedPlanSource: the referenced CachedPlan - * is released, but not destroyed until its refcount goes to zero. That + * is released, but not destroyed until its refcount goes to zero. That * handles the situation where DropCachedPlan is called while the plan is * still in use. */ @@ -586,7 +586,7 @@ RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner) } /* - * Check or update the result tupdesc. XXX should we use a weaker + * Check or update the result tupdesc. XXX should we use a weaker * condition than equalTupleDescs() here? */ resultDesc = PlanCacheComputeResultDesc(slist); @@ -651,7 +651,7 @@ RevalidateCachedPlan(CachedPlanSource *plansource, bool useResOwner) * * Note: useResOwner = false is used for releasing references that are in * persistent data structures, such as the parent CachedPlanSource or a - * Portal. Transient references should be protected by a resource owner. + * Portal. Transient references should be protected by a resource owner. */ void ReleaseCachedPlan(CachedPlan *plan, bool useResOwner) @@ -925,7 +925,7 @@ plan_list_is_transient(List *stmt_list) /* * PlanCacheComputeResultDesc: given a list of either fully-planned statements - * or Queries, determine the result tupledesc it will produce. Returns NULL + * or Queries, determine the result tupledesc it will produce. Returns NULL * if the execution will not return tuples. * * Note: the result is created or copied into current memory context. 
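Putting the plancache pieces together, a consumer follows a revalidate/use/release cycle; a minimal sketch using the two entry points whose signatures appear in the hunks above (setup of plansource elided):

    CachedPlan *cplan;

    /* replans here if an invalidation has marked the stored plan dead */
    cplan = RevalidateCachedPlan(plansource, true); /* true: resowner-tracked */

    /* ... execute the statements in cplan->stmt_list ... */

    /* drop our reference; the plan is freed when its refcount reaches zero */
    ReleaseCachedPlan(cplan, true);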
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 29ae3878ef3..6d5ac6c8564 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -124,7 +124,7 @@ bool criticalSharedRelcachesBuilt = false; /* * This counter counts relcache inval events received since backend startup - * (but only for rels that are actually in cache). Presently, we use it only + * (but only for rels that are actually in cache). Presently, we use it only * to detect whether data about to be written by write_relcache_init_file() * might already be obsolete. */ @@ -449,7 +449,7 @@ RelationBuildTupleDesc(Relation relation) Int16GetDatum(0)); /* - * Open pg_attribute and begin a scan. Force heap scan if we haven't yet + * Open pg_attribute and begin a scan. Force heap scan if we haven't yet * built the critical relcache entries (this includes initdb and startup * without a pg_internal.init file). */ @@ -512,7 +512,7 @@ RelationBuildTupleDesc(Relation relation) /* * The attcacheoff values we read from pg_attribute should all be -1 - * ("unknown"). Verify this if assert checking is on. They will be + * ("unknown"). Verify this if assert checking is on. They will be * computed when and if needed during tuple access. */ #ifdef USE_ASSERT_CHECKING @@ -526,7 +526,7 @@ RelationBuildTupleDesc(Relation relation) /* * However, we can easily set the attcacheoff value for the first - * attribute: it must be zero. This eliminates the need for special cases + * attribute: it must be zero. This eliminates the need for special cases * for attnum=1 that used to exist in fastgetattr() and index_getattr(). */ if (relation->rd_rel->relnatts > 0) @@ -582,7 +582,7 @@ RelationBuildTupleDesc(Relation relation) * each relcache entry that has associated rules. The context is used * just for rule info, not for any other subsidiary data of the relcache * entry, because that keeps the update logic in RelationClearRelation() - * manageable. The other subsidiary data structures are simple enough + * manageable. The other subsidiary data structures are simple enough * to be easy to free explicitly, anyway. */ static void @@ -691,9 +691,9 @@ RelationBuildRuleLock(Relation relation) /* * We want the rule's table references to be checked as though by the - * table owner, not the user referencing the rule. Therefore, scan + * table owner, not the user referencing the rule. Therefore, scan * through the rule's actions and set the checkAsUser field on all - * rtable entries. We have to look at the qual as well, in case it + * rtable entries. We have to look at the qual as well, in case it * contains sublinks. * * The reason for doing this when the rule is loaded, rather than when @@ -1036,7 +1036,7 @@ RelationInitIndexAccessInfo(Relation relation) amsupport = aform->amsupport; /* - * Make the private context to hold index access info. The reason we need + * Make the private context to hold index access info. The reason we need * a context, and not just a couple of pallocs, is so that we won't leak * any subsidiary info attached to fmgr lookup records. * @@ -1084,7 +1084,7 @@ RelationInitIndexAccessInfo(Relation relation) /* * indcollation cannot be referenced directly through the C struct, - * because it comes after the variable-width indkey field. Must extract + * because it comes after the variable-width indkey field. Must extract * the datum the hard way... 
*/ indcollDatum = fastgetattr(relation->rd_indextuple, @@ -1109,7 +1109,7 @@ RelationInitIndexAccessInfo(Relation relation) /* * Fill the support procedure OID array, as well as the info about - * opfamilies and opclass input types. (aminfo and supportinfo are left + * opfamilies and opclass input types. (aminfo and supportinfo are left * as zeroes, and are filled on-the-fly when used) */ IndexSupportInitialize(indclass, relation->rd_support, @@ -1197,7 +1197,7 @@ IndexSupportInitialize(oidvector *indclass, * Note there is no provision for flushing the cache. This is OK at the * moment because there is no way to ALTER any interesting properties of an * existing opclass --- all you can do is drop it, which will result in - * a useless but harmless dead entry in the cache. To support altering + * a useless but harmless dead entry in the cache. To support altering * opclass membership (not the same as opfamily membership!), we'd need to * be able to flush this cache as well as the contents of relcache entries * for indexes. @@ -1306,7 +1306,7 @@ LookupOpclassInfo(Oid operatorClassOid, heap_close(rel, AccessShareLock); /* - * Scan pg_amproc to obtain support procs for the opclass. We only fetch + * Scan pg_amproc to obtain support procs for the opclass. We only fetch * the default ones (those with lefttype = righttype = opcintype). */ if (numSupport > 0) @@ -1825,7 +1825,7 @@ RelationDestroyRelation(Relation relation) * * NB: when rebuilding, we'd better hold some lock on the relation, * else the catalog data we need to read could be changing under us. - * Also, a rel to be rebuilt had better have refcnt > 0. This is because + * Also, a rel to be rebuilt had better have refcnt > 0. This is because * an sinval reset could happen while we're accessing the catalogs, and * the rel would get blown away underneath us by RelationCacheInvalidate * if it has zero refcnt. @@ -1848,7 +1848,7 @@ RelationClearRelation(Relation relation, bool rebuild) /* * Make sure smgr and lower levels close the relation's files, if they * weren't closed already. If the relation is not getting deleted, the - * next smgr access should reopen the files automatically. This ensures + * next smgr access should reopen the files automatically. This ensures * that the low-level file access state is updated after, say, a vacuum * truncation. */ @@ -1860,7 +1860,7 @@ RelationClearRelation(Relation relation, bool rebuild) * in case it is a mapped relation whose mapping changed. * * If it's a nailed index, then we need to re-read the pg_class row to see - * if its relfilenode changed. We can't necessarily do that here, because + * if its relfilenode changed. We can't necessarily do that here, because * we might be in a failed transaction. We assume it's okay to do it if * there are open references to the relcache entry (cf notes for * AtEOXact_RelationCache). Otherwise just mark the entry as possibly @@ -1921,7 +1921,7 @@ RelationClearRelation(Relation relation, bool rebuild) * over from the old entry). This is to avoid trouble in case an * error causes us to lose control partway through. The old entry * will still be marked !rd_isvalid, so we'll try to rebuild it again - * on next access. Meanwhile it's not any less valid than it was + * on next access. Meanwhile it's not any less valid than it was * before, so any code that might expect to continue accessing it * isn't hurt by the rebuild failure. 
(Consider for example a * subtransaction that ALTERs a table and then gets canceled partway @@ -2110,7 +2110,7 @@ RelationCacheInvalidateEntry(Oid relationId) /* * RelationCacheInvalidate * Blow away cached relation descriptors that have zero reference counts, - * and rebuild those with positive reference counts. Also reset the smgr + * and rebuild those with positive reference counts. Also reset the smgr * relation cache and re-read relation mapping data. * * This is currently used only to recover from SI message buffer overflow, @@ -2123,7 +2123,7 @@ RelationCacheInvalidateEntry(Oid relationId) * We do this in two phases: the first pass deletes deletable items, and * the second one rebuilds the rebuildable items. This is essential for * safety, because hash_seq_search only copes with concurrent deletion of - * the element it is currently visiting. If a second SI overflow were to + * the element it is currently visiting. If a second SI overflow were to * occur while we are walking the table, resulting in recursive entry to * this routine, we could crash because the inner invocation blows away * the entry next to be visited by the outer scan. But this way is OK, @@ -2274,7 +2274,7 @@ AtEOXact_RelationCache(bool isCommit) * unless there is actually something for this routine to do. Other than * the debug-only Assert checks, most transactions don't create any work * for us to do here, so we keep a static flag that gets set if there is - * anything to do. (Currently, this means either a relation is created in + * anything to do. (Currently, this means either a relation is created in * the current xact, or one is given a new relfilenode, or an index list * is forced.) For simplicity, the flag remains set till end of top-level * transaction, even though we could clear it at subtransaction end in @@ -2570,7 +2570,7 @@ RelationBuildLocalRelation(const char *relname, /* * Insert relation physical and logical identifiers (OIDs) into the right - * places. Note that the physical ID (relfilenode) is initially the same + * places. Note that the physical ID (relfilenode) is initially the same * as the logical ID (OID); except that for a mapped relation, we set * relfilenode to zero and rely on RelationInitPhysicalAddr to consult the * map. @@ -2804,7 +2804,7 @@ RelationCacheInitializePhase2(void) oldcxt = MemoryContextSwitchTo(CacheMemoryContext); /* - * Try to load the shared relcache cache file. If unsuccessful, bootstrap + * Try to load the shared relcache cache file. If unsuccessful, bootstrap * the cache with pre-made descriptors for the critical shared catalogs. */ if (!load_relcache_init_file(true)) @@ -2884,9 +2884,9 @@ RelationCacheInitializePhase3(void) /* * If we didn't get the critical system indexes loaded into relcache, do - * so now. These are critical because the catcache and/or opclass cache + * so now. These are critical because the catcache and/or opclass cache * depend on them for fetches done during relcache load. Thus, we have an - * infinite-recursion problem. We can break the recursion by doing + * infinite-recursion problem. We can break the recursion by doing * heapscans instead of indexscans at certain key spots. To avoid hobbling * performance, we only want to do that until we have the critical indexes * loaded into relcache. 
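The two-phase requirement above exists because hash_seq_search copes only with deletion of the entry currently being visited; the safe shape of the scan, simplified from relcache.c's internals (RelIdCacheEnt, RelationIdCache, and RelationHasReferenceCountZero are the file's own names):

    HASH_SEQ_STATUS status;
    RelIdCacheEnt *idhentry;
    List       *rebuildList = NIL;
    ListCell   *l;

    /* phase 1: delete deletable entries, remember the rest */
    hash_seq_init(&status, RelationIdCache);
    while ((idhentry = (RelIdCacheEnt *) hash_seq_search(&status)) != NULL)
    {
        Relation    relation = idhentry->reldesc;

        if (RelationHasReferenceCountZero(relation))
            RelationClearRelation(relation, false);
        else
            rebuildList = lappend(rebuildList, relation);
    }

    /* phase 2: rebuild whatever is still referenced, outside the seq scan */
    foreach(l, rebuildList)
        RelationClearRelation((Relation) lfirst(l), true);
    list_free(rebuildList);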
Thus, the flag criticalRelcachesBuilt is used to @@ -2903,7 +2903,7 @@ RelationCacheInitializePhase3(void) * RewriteRelRulenameIndexId and TriggerRelidNameIndexId are not critical * in the same way as the others, because the critical catalogs don't * (currently) have any rules or triggers, and so these indexes can be - * rebuilt without inducing recursion. However they are used during + * rebuilt without inducing recursion. However they are used during * relcache load when a rel does have rules or triggers, so we choose to * nail them for performance reasons. */ @@ -2934,7 +2934,7 @@ RelationCacheInitializePhase3(void) * * DatabaseNameIndexId isn't critical for relcache loading, but rather for * initial lookup of MyDatabaseId, without which we'll never find any - * non-shared catalogs at all. Autovacuum calls InitPostgres with a + * non-shared catalogs at all. Autovacuum calls InitPostgres with a * database OID, so it instead depends on DatabaseOidIndexId. We also * need to nail up some indexes on pg_authid and pg_auth_members for use * during client authentication. @@ -3360,7 +3360,7 @@ RelationGetIndexList(Relation relation) /* * We build the list we intend to return (in the caller's context) while - * doing the scan. After successfully completing the scan, we copy that + * doing the scan. After successfully completing the scan, we copy that * list into the relcache entry. This avoids cache-context memory leakage * if we get some sort of error partway through. */ @@ -4260,7 +4260,7 @@ load_relcache_init_file(bool shared) return true; /* - * init file is broken, so do it the hard way. We don't bother trying to + * init file is broken, so do it the hard way. We don't bother trying to * free the clutter we just allocated; it's not in the relcache so it * won't hurt. */ @@ -4325,7 +4325,7 @@ write_relcache_init_file(bool shared) } /* - * Write a magic number to serve as a file version identifier. We can + * Write a magic number to serve as a file version identifier. We can * change the magic number whenever the relcache layout changes. */ magic = RELCACHE_INIT_FILEMAGIC; @@ -4550,7 +4550,7 @@ RelationCacheInitFilePostInvalidate(void) * * We used to keep the init files across restarts, but that is unsafe in PITR * scenarios, and even in simple crash-recovery cases there are windows for - * the init files to become out-of-sync with the database. So now we just + * the init files to become out-of-sync with the database. So now we just * remove them during startup and expect the first backend launch to rebuild * them. Of course, this has to happen in each database of the cluster. */ diff --git a/src/backend/utils/cache/relmapper.c b/src/backend/utils/cache/relmapper.c index a19ee28b53b..6d2a0198588 100644 --- a/src/backend/utils/cache/relmapper.c +++ b/src/backend/utils/cache/relmapper.c @@ -23,7 +23,7 @@ * mapped catalogs can only be relocated by operations such as VACUUM FULL * and CLUSTER, which make no transactionally-significant changes: it must be * safe for the new file to replace the old, even if the transaction itself - * aborts. An important factor here is that the indexes and toast table of + * aborts. An important factor here is that the indexes and toast table of * a mapped catalog must also be mapped, so that the rewrites/relocations of * all these files commit in a single map file update rather than being tied * to transaction commit. @@ -58,13 +58,13 @@ /* * The map file is critical data: we have no automatic method for recovering * from loss or corruption of it. 
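On the consumer side, the relation map is consulted through a single lookup; relcache's physical-address computation does essentially this for a relation whose stored relfilenode is zero (a sketch based on RelationMapOidToFilenode; error text abridged):

    /* resolve the physical filenode of a mapped relation */
    relation->rd_node.relNode =
        RelationMapOidToFilenode(relation->rd_id,
                                 relation->rd_rel->relisshared);
    if (!OidIsValid(relation->rd_node.relNode))
        elog(ERROR, "could not find relation mapping for \"%s\" (OID %u)",
             RelationGetRelationName(relation), relation->rd_id);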
We use a CRC so that we can detect - * corruption. To minimize the risk of failed updates, the map file should + * corruption. To minimize the risk of failed updates, the map file should * be kept to no more than one standard-size disk sector (ie 512 bytes), * and we use overwrite-in-place rather than playing renaming games. * The struct layout below is designed to occupy exactly 512 bytes, which * might make filesystem updates a bit more efficient. * - * Entries in the mappings[] array are in no particular order. We could + * Entries in the mappings[] array are in no particular order. We could * speed searching by insisting on OID order, but it really shouldn't be * worth the trouble given the intended size of the mapping sets. */ @@ -91,7 +91,7 @@ typedef struct RelMapFile /* * The currently known contents of the shared map file and our database's - * local map file are stored here. These can be reloaded from disk + * local map file are stored here. These can be reloaded from disk * immediately whenever we receive an update sinval message. */ static RelMapFile shared_map; @@ -294,7 +294,7 @@ merge_map_updates(RelMapFile *map, const RelMapFile *updates, bool add_okay) * RelationMapRemoveMapping * * Remove a relation's entry in the map. This is only allowed for "active" - * (but not committed) local mappings. We need it so we can back out the + * (but not committed) local mappings. We need it so we can back out the * entry for the transient target file when doing VACUUM FULL/CLUSTER on * a mapped relation. */ @@ -322,7 +322,7 @@ RelationMapRemoveMapping(Oid relationId) * RelationMapInvalidate * * This routine is invoked for SI cache flush messages. We must re-read - * the indicated map file. However, we might receive a SI message in a + * the indicated map file. However, we might receive a SI message in a * process that hasn't yet, and might never, load the mapping files; * for example the autovacuum launcher, which *must not* try to read * a local map since it is attached to no particular database. @@ -390,7 +390,7 @@ AtCCI_RelationMap(void) * * During commit, this must be called as late as possible before the actual * transaction commit, so as to minimize the window where the transaction - * could still roll back after committing map changes. Although nothing + * could still roll back after committing map changes. Although nothing * critically bad happens in such a case, we still would prefer that it * not happen, since we'd possibly be losing useful updates to the relations' * pg_class row(s). @@ -457,7 +457,7 @@ AtPrepare_RelationMap(void) /* * CheckPointRelationMap * - * This is called during a checkpoint. It must ensure that any relation map + * This is called during a checkpoint. It must ensure that any relation map * updates that were WAL-logged before the start of the checkpoint are * securely flushed to disk and will not need to be replayed later. This * seems unlikely to be a performance-critical issue, so we use a simple @@ -647,7 +647,7 @@ load_relmap_file(bool shared) * * Because this may be called during WAL replay when MyDatabaseId, * DatabasePath, etc aren't valid, we require the caller to pass in suitable - * values. The caller is also responsible for being sure no concurrent + * values. The caller is also responsible for being sure no concurrent * map update could be happening. */ static void @@ -675,7 +675,7 @@ write_relmap_file(bool shared, RelMapFile *newmap, * critical section, so that an open() failure need not force PANIC. 
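The CRC-then-overwrite-in-place discipline described above comes down to a short sequence; a sketch with the pre-9.5 pg_crc32 macros and plain POSIX writes (error handling reduced to comments):

    pg_crc32    crc;
    int         fd;

    /* checksum everything that precedes the crc field */
    INIT_CRC32(crc);
    COMP_CRC32(crc, (char *) newmap, offsetof(RelMapFile, crc));
    FIN_CRC32(crc);
    newmap->crc = crc;

    fd = BasicOpenFile(mapfilename, O_WRONLY | O_CREAT | PG_BINARY,
                       S_IRUSR | S_IWUSR);
    /* ereport on open failure (still outside the critical section) ... */

    if (write(fd, newmap, sizeof(RelMapFile)) != sizeof(RelMapFile))
    {
        /* short write: report an ENOSPC-style failure */
    }
    if (pg_fsync(fd) != 0)
    {
        /* inside the critical section this must escalate to PANIC */
    }
    close(fd);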
* * Note: since we use BasicOpenFile, we are nominally responsible for - * ensuring the fd is closed on error. In practice, this isn't important + * ensuring the fd is closed on error. In practice, this isn't important * because either an error happens inside the critical section, or we are * in bootstrap or WAL replay; so an error past this point is always fatal * anyway. @@ -773,7 +773,7 @@ write_relmap_file(bool shared, RelMapFile *newmap, /* * Make sure that the files listed in the map are not deleted if the outer - * transaction aborts. This had better be within the critical section + * transaction aborts. This had better be within the critical section * too: it's not likely to fail, but if it did, we'd arrive at transaction * abort with the files still vulnerable. PANICing will leave things in a * good state on-disk. diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c index 57e5d0342a6..a306c481db6 100644 --- a/src/backend/utils/cache/spccache.c +++ b/src/backend/utils/cache/spccache.c @@ -4,7 +4,7 @@ * Tablespace cache management. * * We cache the parsed version of spcoptions for each tablespace to avoid - * needing to reparse on every lookup. Right now, there doesn't appear to + * needing to reparse on every lookup. Right now, there doesn't appear to * be a measurable performance gain from doing this, but that might change * in the future as we add more options. * @@ -128,7 +128,7 @@ get_tablespace(Oid spcid) return spc; /* - * Not found in TableSpace cache. Check catcache. If we don't find a + * Not found in TableSpace cache. Check catcache. If we don't find a * valid HeapTuple, it must mean someone has managed to request tablespace * details for a non-existent tablespace. We'll just treat that case as * if no options were specified. @@ -158,7 +158,7 @@ get_tablespace(Oid spcid) } /* - * Now create the cache entry. It's important to do this only after + * Now create the cache entry. It's important to do this only after * reading the pg_tablespace entry, since doing so could cause a cache * flush. */ diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c index 99e5f1d9fe6..55d852e95e4 100644 --- a/src/backend/utils/cache/syscache.c +++ b/src/backend/utils/cache/syscache.c @@ -764,7 +764,7 @@ static bool CacheInitialized = false; * InitCatalogCache - initialize the caches * * Note that no database access is done here; we only allocate memory - * and initialize the cache structure. Interrogation of the database + * and initialize the cache structure. Interrogation of the database * to complete initialization of a cache happens upon first use * of that cache. */ @@ -1001,7 +1001,7 @@ SearchSysCacheExistsAttName(Oid relid, const char *attname) * extract a specific attribute. * * This is equivalent to using heap_getattr() on a tuple fetched - * from a non-cached relation. Usually, this is only used for attributes + * from a non-cached relation. Usually, this is only used for attributes * that could be NULL or variable length; the fixed-size attributes in * a system table are accessed just by mapping the tuple onto the C struct * declarations from include/catalog/. 
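A short illustration of the attribute extraction just described (placeholder variables; prosrc chosen only because it is a nullable varlena column):

    Datum       datum;
    bool        isnull;
    char       *prosrc;

    datum = SysCacheGetAttr(PROCOID, proctup,
                            Anum_pg_proc_prosrc, &isnull);
    if (isnull)
        elog(ERROR, "null prosrc for function %u", funcid);
    prosrc = TextDatumGetCString(datum);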
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c index 5fd02958cf1..06972e4da9d 100644 --- a/src/backend/utils/cache/typcache.c +++ b/src/backend/utils/cache/typcache.c @@ -11,7 +11,7 @@ * * Several seemingly-odd choices have been made to support use of the type * cache by generic array and record handling routines, such as array_eq(), - * record_cmp(), and hash_array(). Because those routines are used as index + * record_cmp(), and hash_array(). Because those routines are used as index * support operations, they cannot leak memory. To allow them to execute * efficiently, all information that they would like to re-use across calls * is kept in the type cache. @@ -99,7 +99,7 @@ typedef struct TypeCacheEnumData * * Stored record types are remembered in a linear array of TupleDescs, * which can be indexed quickly with the assigned typmod. There is also - * a hash table to speed searches for matching TupleDescs. The hash key + * a hash table to speed searches for matching TupleDescs. The hash key * uses just the first N columns' type OIDs, and so we may have multiple * entries with the same hash key. */ @@ -468,7 +468,7 @@ load_typcache_tupdesc(TypeCacheEntry *typentry) /* * Link to the tupdesc and increment its refcount (we assert it's a - * refcounted descriptor). We don't use IncrTupleDescRefCount() for this, + * refcounted descriptor). We don't use IncrTupleDescRefCount() for this, * because the reference mustn't be entered in the current resource owner; * it can outlive the current query. */ @@ -1004,7 +1004,7 @@ load_enum_cache_data(TypeCacheEntry *tcache) /* * Read all the information for members of the enum type. We collect the * info in working memory in the caller's context, and then transfer it to - * permanent memory in CacheMemoryContext. This minimizes the risk of + * permanent memory in CacheMemoryContext. This minimizes the risk of * leaking memory from CacheMemoryContext in the event of an error partway * through. */ diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c index 0f19bb45e40..fef2dd14776 100644 --- a/src/backend/utils/error/elog.c +++ b/src/backend/utils/error/elog.c @@ -5,7 +5,7 @@ * * Because of the extremely high rate at which log messages can be generated, * we need to be mindful of the performance cost of obtaining any information - * that may be logged. Also, it's important to keep in mind that this code may + * that may be logged. Also, it's important to keep in mind that this code may * get called from within an aborted transaction, in which case operations * such as syscache lookups are unsafe. * @@ -15,23 +15,23 @@ * if we run out of memory, it's important to be able to report that fact. * There are a number of considerations that go into this. * - * First, distinguish between re-entrant use and actual recursion. It + * First, distinguish between re-entrant use and actual recursion. It * is possible for an error or warning message to be emitted while the - * parameters for an error message are being computed. In this case + * parameters for an error message are being computed. In this case * errstart has been called for the outer message, and some field values - * may have already been saved, but we are not actually recursing. We handle - * this by providing a (small) stack of ErrorData records. The inner message + * may have already been saved, but we are not actually recursing. We handle + * this by providing a (small) stack of ErrorData records. 
The inner message * can be computed and sent without disturbing the state of the outer message. * (If the inner message is actually an error, this isn't very interesting * because control won't come back to the outer message generator ... but * if the inner message is only debug or log data, this is critical.) * * Second, actual recursion will occur if an error is reported by one of - * the elog.c routines or something they call. By far the most probable + * the elog.c routines or something they call. By far the most probable * scenario of this sort is "out of memory"; and it's also the nastiest * to handle because we'd likely also run out of memory while trying to * report this error! Our escape hatch for this case is to reset the - * ErrorContext to empty before trying to process the inner error. Since + * ErrorContext to empty before trying to process the inner error. Since * ErrorContext is guaranteed to have at least 8K of space in it (see mcxt.c), * we should be able to process an "out of memory" message successfully. * Since we lose the prior error state due to the reset, we won't be able @@ -231,7 +231,7 @@ errstart(int elevel, const char *filename, int lineno, { /* * If we are inside a critical section, all errors become PANIC - * errors. See miscadmin.h. + * errors. See miscadmin.h. */ if (CritSectionCount > 0) elevel = PANIC; @@ -244,7 +244,7 @@ errstart(int elevel, const char *filename, int lineno, * * 2. ExitOnAnyError mode switch is set (initdb uses this). * - * 3. the error occurred after proc_exit has begun to run. (It's + * 3. the error occurred after proc_exit has begun to run. (It's * proc_exit's responsibility to see that this doesn't turn into * infinite recursion!) */ @@ -341,7 +341,7 @@ errstart(int elevel, const char *filename, int lineno, if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE) { /* - * Wups, stack not big enough. We treat this as a PANIC condition + * Wups, stack not big enough. We treat this as a PANIC condition * because it suggests an infinite loop of errors during error * recovery. */ @@ -443,7 +443,7 @@ errfinish(int dummy,...) * * Reset InterruptHoldoffCount in case we ereport'd from inside an * interrupt holdoff section. (We assume here that no handler will - * itself be inside a holdoff section. If necessary, such a handler + * itself be inside a holdoff section. If necessary, such a handler * could save and restore InterruptHoldoffCount for itself, but this * should make life easier for most.) * @@ -469,7 +469,7 @@ errfinish(int dummy,...) * progress, so that we can report the message before dying. (Without * this, pq_putmessage will refuse to send the message at all, which is * what we want for NOTICE messages, but not for fatal exits.) This hack - * is necessary because of poor design of old-style copy protocol. Note + * is necessary because of poor design of old-style copy protocol. Note * we must do this even if client is fool enough to have set * client_min_messages above FATAL, so don't look at output_to_client. */ @@ -581,7 +581,7 @@ errcode(int sqlerrcode) /* * errcode_for_file_access --- add SQLSTATE error code to the current error * - * The SQLSTATE code is chosen based on the saved errno value. We assume + * The SQLSTATE code is chosen based on the saved errno value. We assume * that the failing operation was some type of disk file access. 
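The canonical call site for errcode_for_file_access() pairs it with %m in the message text, as the NOTE below says; for example:

    ereport(ERROR,
            (errcode_for_file_access(),
             errmsg("could not open file \"%s\": %m", filename)));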
* * NOTE: the primary error message string should generally include %m @@ -652,7 +652,7 @@ errcode_for_file_access(void) /* * errcode_for_socket_access --- add SQLSTATE error code to the current error * - * The SQLSTATE code is chosen based on the saved errno value. We assume + * The SQLSTATE code is chosen based on the saved errno value. We assume * that the failing operation was some type of socket access. * * NOTE: the primary error message string should generally include %m @@ -690,7 +690,7 @@ errcode_for_socket_access(void) * This macro handles expansion of a format string and associated parameters; * it's common code for errmsg(), errdetail(), etc. Must be called inside * a routine that is declared like "const char *fmt, ..." and has an edata - * pointer set up. The message is assigned to edata->targetfield, or + * pointer set up. The message is assigned to edata->targetfield, or * appended to it if appendval is true. The message is subject to translation * if translateit is true. * @@ -1173,7 +1173,7 @@ elog_start(const char *filename, int lineno, const char *funcname) if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE) { /* - * Wups, stack not big enough. We treat this as a PANIC condition + * Wups, stack not big enough. We treat this as a PANIC condition * because it suggests an infinite loop of errors during error * recovery. Note that the message is intentionally not localized, * else failure to convert it to client encoding could cause further @@ -1324,7 +1324,7 @@ EmitErrorReport(void) /* * CopyErrorData --- obtain a copy of the topmost error stack entry * - * This is only for use in error handler code. The data is copied into the + * This is only for use in error handler code. The data is copied into the * current memory context, so callers should always switch away from * ErrorContext first; otherwise it will be lost when FlushErrorState is done. */ @@ -1415,7 +1415,7 @@ FlushErrorState(void) * * A handler can do CopyErrorData/FlushErrorState to get out of the error * subsystem, then do some processing, and finally ReThrowError to re-throw - * the original error. This is slower than just PG_RE_THROW() but should + * the original error. This is slower than just PG_RE_THROW() but should * be used if the "some processing" is likely to incur another error. */ void @@ -1432,7 +1432,7 @@ ReThrowError(ErrorData *edata) if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE) { /* - * Wups, stack not big enough. We treat this as a PANIC condition + * Wups, stack not big enough. We treat this as a PANIC condition * because it suggests an infinite loop of errors during error * recovery. */ @@ -1585,7 +1585,7 @@ set_syslog_parameters(const char *ident, int facility) { /* * guc.c is likely to call us repeatedly with same parameters, so don't - * thrash the syslog connection unnecessarily. Also, we do not re-open + * thrash the syslog connection unnecessarily. Also, we do not re-open * the connection until needed, since this routine will get called whether * or not Log_destination actually mentions syslog. * @@ -2794,7 +2794,7 @@ useful_strerror(int errnum) str = strerror(errnum); /* - * Some strerror()s return an empty string for out-of-range errno. This + * Some strerror()s return an empty string for out-of-range errno. This * is ANSI C spec compliant, but not exactly useful. Also, we may get * back strings of question marks if libc cannot transcode the message to * the codeset specified by LC_CTYPE. 
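The handler-side pattern that CopyErrorData/FlushErrorState/ReThrowError support looks like this in calling code (oldcontext is assumed to be the caller's memory context, saved before entering PG_TRY):

    PG_TRY();
    {
        /* ... risky work that may ereport ... */
    }
    PG_CATCH();
    {
        ErrorData  *edata;

        /* leave ErrorContext before copying, or the copy is lost on flush */
        MemoryContextSwitchTo(oldcontext);
        edata = CopyErrorData();
        FlushErrorState();

        /* ... inspect or log edata, possibly ReThrowError(edata) ... */
        FreeErrorData(edata);
    }
    PG_END_TRY();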
If we get nothing useful, first try diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c index 734bd056891..d7634a98d90 100644 --- a/src/backend/utils/fmgr/dfmgr.c +++ b/src/backend/utils/fmgr/dfmgr.c @@ -131,7 +131,7 @@ load_external_function(char *filename, char *funcname, /* * This function loads a shlib file without looking up any particular - * function in it. If the same shlib has previously been loaded, + * function in it. If the same shlib has previously been loaded, * unload and reload it. * * When 'restricted' is true, only libraries in the presumed-secure @@ -171,7 +171,7 @@ lookup_external_function(void *filehandle, char *funcname) /* * Load the specified dynamic-link library file, unless it already is - * loaded. Return the pg_dl* handle for the file. + * loaded. Return the pg_dl* handle for the file. * * Note: libname is expected to be an exact name for the library file. */ @@ -473,7 +473,7 @@ file_exists(const char *name) * If name contains a slash, check if the file exists, if so return * the name. Else (no slash) try to expand using search path (see * find_in_dynamic_libpath below); if that works, return the fully - * expanded file name. If the previous failed, append DLSUFFIX and + * expanded file name. If the previous failed, append DLSUFFIX and * try again. If all fails, just return the original name. * * The result will always be freshly palloc'd. diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c index 0a2d4ef9565..12dcd7980a7 100644 --- a/src/backend/utils/fmgr/fmgr.c +++ b/src/backend/utils/fmgr/fmgr.c @@ -96,7 +96,7 @@ static Datum fmgr_security_definer(PG_FUNCTION_ARGS); /* - * Lookup routines for builtin-function table. We can search by either Oid + * Lookup routines for builtin-function table. We can search by either Oid * or name, but search by Oid is much faster. */ @@ -581,7 +581,7 @@ clear_external_function_hash(void *filehandle) * Copy an FmgrInfo struct * * This is inherently somewhat bogus since we can't reliably duplicate - * language-dependent subsidiary info. We cheat by zeroing fn_extra, + * language-dependent subsidiary info. We cheat by zeroing fn_extra, * instead, meaning that subsidiary info will have to be recomputed. */ void @@ -861,7 +861,7 @@ fmgr_oldstyle(PG_FUNCTION_ARGS) /* - * Support for security-definer and proconfig-using functions. We support + * Support for security-definer and proconfig-using functions. We support * both of these features using the same call handler, because they are * often used together and it would be inefficient (as well as notationally * messy) to have two levels of call handler involved. @@ -881,7 +881,7 @@ struct fmgr_security_definer_cache * (All this info is cached for the duration of the current query.) * To execute a call, we temporarily replace the flinfo with the cached * and looked-up one, while keeping the outer fcinfo (which contains all - * the actual arguments, etc.) intact. This is not re-entrant, but then + * the actual arguments, etc.) intact. This is not re-entrant, but then * the fcinfo itself can't be used re-entrantly anyway. */ static Datum @@ -961,7 +961,7 @@ fmgr_security_definer(PG_FUNCTION_ARGS) /* * We don't need to restore GUC or userid settings on error, because the - * ensuing xact or subxact abort will do that. The PG_TRY block is only + * ensuing xact or subxact abort will do that. The PG_TRY block is only * needed to clean up the flinfo link. 
*/ save_flinfo = fcinfo->flinfo; @@ -1014,7 +1014,7 @@ fmgr_security_definer(PG_FUNCTION_ARGS) /* * These are for invocation of a specifically named function with a * directly-computed parameter list. Note that neither arguments nor result - * are allowed to be NULL. Also, the function cannot be one that needs to + * are allowed to be NULL. Also, the function cannot be one that needs to * look at FmgrInfo, since there won't be any. */ Datum @@ -1559,8 +1559,8 @@ FunctionCall9Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2, /* * These are for invocation of a function identified by OID with a * directly-computed parameter list. Note that neither arguments nor result - * are allowed to be NULL. These are essentially fmgr_info() followed - * by FunctionCallN(). If the same function is to be invoked repeatedly, + * are allowed to be NULL. These are essentially fmgr_info() followed + * by FunctionCallN(). If the same function is to be invoked repeatedly, * do the fmgr_info() once and then use FunctionCallN(). */ Datum @@ -1889,7 +1889,7 @@ OidFunctionCall9Coll(Oid functionId, Oid collation, Datum arg1, Datum arg2, * * One important difference from the bare function call is that we will * push any active SPI context, allowing SPI-using I/O functions to be - * called from other SPI functions without extra notation. This is a hack, + * called from other SPI functions without extra notation. This is a hack, * but the alternative of expecting all SPI functions to do SPI_push/SPI_pop * around I/O calls seems worse. */ diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c index aa249fabfe8..85a17b010e9 100644 --- a/src/backend/utils/fmgr/funcapi.c +++ b/src/backend/utils/fmgr/funcapi.c @@ -135,7 +135,7 @@ per_MultiFuncCall(PG_FUNCTION_ARGS) * FuncCallContext is pointing to it), but in most usage patterns the * tuples stored in it will be in the function's per-tuple context. So at * the beginning of each call, the Slot will hold a dangling pointer to an - * already-recycled tuple. We clear it out here. + * already-recycled tuple. We clear it out here. * * Note: use of retval->slot is obsolete as of 8.0, and we expect that it * will always be NULL. This is just here for backwards compatibility in @@ -191,13 +191,13 @@ shutdown_MultiFuncCall(Datum arg) * Given a function's call info record, determine the kind of datatype * it is supposed to return. If resultTypeId isn't NULL, *resultTypeId * receives the actual datatype OID (this is mainly useful for scalar - * result types). If resultTupleDesc isn't NULL, *resultTupleDesc + * result types). If resultTupleDesc isn't NULL, *resultTupleDesc * receives a pointer to a TupleDesc when the result is of a composite * type, or NULL when it's a scalar result. * * One hard case that this handles is resolution of actual rowtypes for * functions returning RECORD (from either the function's OUT parameter - * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned + * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned * only when we couldn't resolve the actual rowtype for lack of information. * * The other hard case that this handles is resolution of polymorphism. @@ -280,7 +280,7 @@ get_func_result_type(Oid functionId, /* * internal_get_result_type -- workhorse code implementing all the above * - * funcid must always be supplied. call_expr and rsinfo can be NULL if not + * funcid must always be supplied. call_expr and rsinfo can be NULL if not * available. 
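For the rowtype-resolution machinery above, the caller-visible entry point is get_call_result_type(); a composite-returning C function conventionally opens with:

    TupleDesc   tupdesc;

    if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
        ereport(ERROR,
                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                 errmsg("function returning record called in context "
                        "that cannot accept type record")));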
We will return TYPEFUNC_RECORD, and store NULL into * *resultTupleDesc, if we cannot deduce the complete result rowtype from * the available information. @@ -441,7 +441,7 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args, return true; /* - * Otherwise, extract actual datatype(s) from input arguments. (We assume + * Otherwise, extract actual datatype(s) from input arguments. (We assume * the parser already validated consistency of the arguments.) */ if (!call_expr) diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c index 77d615cbda1..dcc4c91aa42 100644 --- a/src/backend/utils/hash/dynahash.c +++ b/src/backend/utils/hash/dynahash.c @@ -5,19 +5,19 @@ * * dynahash.c supports both local-to-a-backend hash tables and hash tables in * shared memory. For shared hash tables, it is the caller's responsibility - * to provide appropriate access interlocking. The simplest convention is - * that a single LWLock protects the whole hash table. Searches (HASH_FIND or + * to provide appropriate access interlocking. The simplest convention is + * that a single LWLock protects the whole hash table. Searches (HASH_FIND or * hash_seq_search) need only shared lock, but any update requires exclusive * lock. For heavily-used shared tables, the single-lock approach creates a * concurrency bottleneck, so we also support "partitioned" locking wherein * there are multiple LWLocks guarding distinct subsets of the table. To use * a hash table in partitioned mode, the HASH_PARTITION flag must be given - * to hash_create. This prevents any attempt to split buckets on-the-fly. + * to hash_create. This prevents any attempt to split buckets on-the-fly. * Therefore, each hash bucket chain operates independently, and no fields * of the hash header change after init except nentries and freeList. * A partitioned table uses a spinlock to guard changes of those two fields. * This lets any subset of the hash buckets be treated as a separately - * lockable partition. We expect callers to use the low-order bits of a + * lockable partition. We expect callers to use the low-order bits of a * lookup key's hash value as a partition number --- this will work because * of the way calc_bucket() maps hash values to bucket numbers. * @@ -76,7 +76,7 @@ * Constants * * A hash table has a top-level "directory", each of whose entries points - * to a "segment" of ssize bucket headers. The maximum number of hash + * to a "segment" of ssize bucket headers. The maximum number of hash * buckets is thus dsize * ssize (but dsize may be expansible). Of course, * the number of records in the table can be larger, but we don't want a * whole lot of records per bucket or performance goes down. @@ -84,7 +84,7 @@ * In a hash table allocated in shared memory, the directory cannot be * expanded because it must stay at a fixed address. The directory size * should be selected using hash_select_dirsize (and you'd better have - * a good idea of the maximum number of entries!). For non-shared hash + * a good idea of the maximum number of entries!). For non-shared hash * tables, the initial directory size can be left at the default. */ #define DEF_SEGSIZE 256 @@ -330,7 +330,7 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags) { /* * ctl structure and directory are preallocated for shared memory - * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as + * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as * well. 
*/ hashp->hctl = info->hctl; @@ -779,7 +779,7 @@ calc_bucket(HASHHDR *hctl, uint32 hash_val) * the result is a dangling pointer that shouldn't be dereferenced!) * * HASH_ENTER will normally ereport a generic "out of memory" error if - * it is unable to create a new entry. The HASH_ENTER_NULL operation is + * it is unable to create a new entry. The HASH_ENTER_NULL operation is * the same except it will return NULL if out of memory. Note that * HASH_ENTER_NULL cannot be used with the default palloc-based allocator, * since palloc internally ereports on out-of-memory. @@ -1245,7 +1245,7 @@ expand_table(HTAB *hashp) } /* - * Relocate records to the new bucket. NOTE: because of the way the hash + * Relocate records to the new bucket. NOTE: because of the way the hash * masking is done in calc_bucket, only one old bucket can need to be * split at this point. With a different way of reducing the hash value, * that might not be true! @@ -1394,7 +1394,7 @@ hash_corrupted(HTAB *hashp) { /* * If the corruption is in a shared hashtable, we'd better force a - * systemwide restart. Otherwise, just shut down this one backend. + * systemwide restart. Otherwise, just shut down this one backend. */ if (hashp->isshared) elog(PANIC, "hash table \"%s\" corrupted", hashp->tabname); @@ -1439,7 +1439,7 @@ next_pow2_int(long num) /************************* SEQ SCAN TRACKING ************************/ /* - * We track active hash_seq_search scans here. The need for this mechanism + * We track active hash_seq_search scans here. The need for this mechanism * comes from the fact that a scan will get confused if a bucket split occurs * while it's in progress: it might visit entries twice, or even miss some * entirely (if it's partway through the same bucket that splits). Hence @@ -1459,7 +1459,7 @@ next_pow2_int(long num) * * This arrangement is reasonably robust if a transient hashtable is deleted * without notifying us. The absolute worst case is we might inhibit splits - * in another table created later at exactly the same address. We will give + * in another table created later at exactly the same address. We will give * a warning at transaction end for reference leaks, so any bugs leading to * lack of notification should be easy to catch. */ diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c index 25cbf60e657..fc37bf6d467 100644 --- a/src/backend/utils/init/miscinit.c +++ b/src/backend/utils/init/miscinit.c @@ -58,7 +58,7 @@ static char socketLockFile[MAXPGPATH]; * * NOTE: "ignoring system indexes" means we do not use the system indexes * for lookups (either in hardwired catalog accesses or in planner-generated - * plans). We do, however, still update the indexes when a catalog + * plans). We do, however, still update the indexes when a catalog * modification is made. * ---------------------------------------------------------------- */ @@ -300,7 +300,7 @@ SetSessionUserId(Oid userid, bool is_superuser) * Currently there are two valid bits in SecurityRestrictionContext: * * SECURITY_LOCAL_USERID_CHANGE indicates that we are inside an operation - * that is temporarily changing CurrentUserId via these functions. This is + * that is temporarily changing CurrentUserId via these functions. This is * needed to indicate that the actual value of CurrentUserId is not in sync * with guc.c's internal state, so SET ROLE has to be disallowed. * @@ -321,7 +321,7 @@ SetSessionUserId(Oid userid, bool is_superuser) * ever throw any kind of error. 
This is because they are used by * StartTransaction and AbortTransaction to save/restore the settings, * and during the first transaction within a backend, the value to be saved - * and perhaps restored is indeed invalid. We have to be able to get + * and perhaps restored is indeed invalid. We have to be able to get * through AbortTransaction without asserting in case InitPostgres fails. */ void @@ -361,7 +361,7 @@ InSecurityRestrictedOperation(void) /* * These are obsolete versions of Get/SetUserIdAndSecContext that are * only provided for bug-compatibility with some rather dubious code in - * pljava. We allow the userid to be set, but only when not inside a + * pljava. We allow the userid to be set, but only when not inside a * security restriction context. */ void @@ -464,7 +464,7 @@ InitializeSessionUserId(const char *rolename) * Check connection limit for this role. * * There is a race condition here --- we create our PGPROC before - * checking for other PGPROCs. If two backends did this at about the + * checking for other PGPROCs. If two backends did this at about the * same time, they might both think they were over the limit, while * ideally one should succeed and one fail. Getting that to work * exactly seems more trouble than it is worth, however; instead we @@ -563,7 +563,7 @@ GetCurrentRoleId(void) * Change Role ID while running (SET ROLE) * * If roleid is InvalidOid, we are doing SET ROLE NONE: revert to the - * session user authorization. In this case the is_superuser argument + * session user authorization. In this case the is_superuser argument * is ignored. * * When roleid is not InvalidOid, the caller must have checked whether @@ -725,7 +725,7 @@ CreateLockFile(const char *filename, bool amPostmaster, my_gp_pid = 0; /* - * We need a loop here because of race conditions. But don't loop forever + * We need a loop here because of race conditions. But don't loop forever * (for example, a non-writable $PGDATA directory might cause a failure * that won't go away). 100 tries seems like plenty. */ @@ -734,7 +734,7 @@ CreateLockFile(const char *filename, bool amPostmaster, /* * Try to create the lock file --- O_EXCL makes this atomic. * - * Think not to make the file protection weaker than 0600. See + * Think not to make the file protection weaker than 0600. See * comments below. */ fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600); @@ -794,7 +794,7 @@ CreateLockFile(const char *filename, bool amPostmaster, * implies that the existing process has a different userid than we * do, which means it cannot be a competing postmaster. A postmaster * cannot successfully attach to a data directory owned by a userid - * other than its own. (This is now checked directly in + * other than its own. (This is now checked directly in * checkDataDir(), but has been true for a long time because of the * restriction that the data directory isn't group- or * world-accessible.) Also, since we create the lockfiles mode 600, @@ -832,9 +832,9 @@ CreateLockFile(const char *filename, bool amPostmaster, } /* - * No, the creating process did not exist. However, it could be that + * No, the creating process did not exist. However, it could be that * the postmaster crashed (or more likely was kill -9'd by a clueless - * admin) but has left orphan backends behind. Check for this by + * admin) but has left orphan backends behind. Check for this by * looking to see if there is an associated shmem segment that is * still in use. 
* @@ -875,7 +875,7 @@ CreateLockFile(const char *filename, bool amPostmaster, /* * Looks like nobody's home. Unlink the file and try again to create - * it. Need a loop because of possible race condition against other + * it. Need a loop because of possible race condition against other * would-be creators. */ if (unlink(filename) < 0) @@ -889,7 +889,7 @@ CreateLockFile(const char *filename, bool amPostmaster, } /* - * Successfully created the file, now fill it. See comment in miscadmin.h + * Successfully created the file, now fill it. See comment in miscadmin.h * about the contents. Note that we write the same first five lines into * both datadir and socket lockfiles; although more stuff may get added to * the datadir lockfile later. @@ -1232,7 +1232,7 @@ load_libraries(const char *libraries, const char *gucname, bool restricted) /* * Choose notice level: avoid repeat messages when re-loading a library - * that was preloaded into the postmaster. (Only possible in EXEC_BACKEND + * that was preloaded into the postmaster. (Only possible in EXEC_BACKEND * configurations) */ #ifdef EXEC_BACKEND diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c index f8d8626a92c..cfefe4b0a4f 100644 --- a/src/backend/utils/init/postinit.c +++ b/src/backend/utils/init/postinit.c @@ -76,7 +76,7 @@ static void process_settings(Oid databaseid, Oid roleid); * GetDatabaseTuple -- fetch the pg_database row for a database * * This is used during backend startup when we don't yet have any access to - * system catalogs in general. In the worst case, we can seqscan pg_database + * system catalogs in general. In the worst case, we can seqscan pg_database * using nothing but the hard-wired descriptor that relcache.c creates for * pg_database. In more typical cases, relcache.c was able to load * descriptors for both pg_database and its indexes from the shared relcache @@ -100,7 +100,7 @@ GetDatabaseTuple(const char *dbname) CStringGetDatum(dbname)); /* - * Open pg_database and fetch a tuple. Force heap scan if we haven't yet + * Open pg_database and fetch a tuple. Force heap scan if we haven't yet * built the critical shared relcache entries (i.e., we're starting up * without a shared relcache cache file). */ @@ -143,7 +143,7 @@ GetDatabaseTupleByOid(Oid dboid) ObjectIdGetDatum(dboid)); /* - * Open pg_database and fetch a tuple. Force heap scan if we haven't yet + * Open pg_database and fetch a tuple. Force heap scan if we haven't yet * built the critical shared relcache entries (i.e., we're starting up * without a shared relcache cache file). */ @@ -184,7 +184,7 @@ PerformAuthentication(Port *port) * are loading them into the startup transaction's memory context, not * PostmasterContext, but that shouldn't matter. * - * FIXME: [fork/exec] Ugh. Is there a way around this overhead? + * FIXME: [fork/exec] Ugh. Is there a way around this overhead? */ #ifdef EXEC_BACKEND if (!load_hba()) @@ -283,7 +283,7 @@ CheckMyDatabase(const char *name, bool am_superuser) name))); /* - * Check privilege to connect to the database. (The am_superuser test + * Check privilege to connect to the database. (The am_superuser test * is redundant, but since we have the flag, might as well check it * and save a few cycles.) */ @@ -299,7 +299,7 @@ CheckMyDatabase(const char *name, bool am_superuser) * Check connection limit for this database. * * There is a race condition here --- we create our PGPROC before - * checking for other PGPROCs. If two backends did this at about the + * checking for other PGPROCs. 
If two backends did this at about the * same time, they might both think they were over the limit, while * ideally one should succeed and one fail. Getting that to work * exactly seems more trouble than it is worth, however; instead we @@ -444,7 +444,7 @@ BaseInit(void) * Initialize POSTGRES. * * The database can be specified by name, using the in_dbname parameter, or by - * OID, using the dboid parameter. In the latter case, the actual database + * OID, using the dboid parameter. In the latter case, the actual database * name can be returned to the caller in out_dbname. If out_dbname isn't * NULL, it must point to a buffer of size NAMEDATALEN. * @@ -838,7 +838,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username, /* * Now process any command-line switches and any additional GUC variable - * settings passed in the startup packet. We couldn't do this before + * settings passed in the startup packet. We couldn't do this before * because we didn't know if client is a superuser. */ if (MyProcPort != NULL) diff --git a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c index ca965390557..68615726552 100644 --- a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c +++ b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c @@ -231,7 +231,7 @@ static unsigned short BinarySearchRange /* * NOTE: big5 high_byte: 0xa1-0xfe, low_byte: 0x40-0x7e, * 0xa1-0xfe (radicals: 0x00-0x3e, 0x3f-0x9c) big5 radix is - * 0x9d. [region_low, region_high] We + * 0x9d. [region_low, region_high] We * should remember big5 has two different regions (above). * There is a bias for the distance between these regions. * 0xa1 - 0x7e + bias = 1 (Distance between 0xa1 and 0x7e is diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c index 287ff808fc1..49826bad037 100644 --- a/src/backend/utils/mb/mbutils.c +++ b/src/backend/utils/mb/mbutils.c @@ -29,7 +29,7 @@ /* * We maintain a simple linked list caching the fmgr lookup info for the * currently selected conversion functions, as well as any that have been - * selected previously in the current session. (We remember previous + * selected previously in the current session. (We remember previous * settings because we must be able to restore a previous setting during * transaction rollback, without doing any fresh catalog accesses.) * @@ -76,7 +76,7 @@ static int cliplen(const char *str, int len, int limit); /* - * Prepare for a future call to SetClientEncoding. Success should mean + * Prepare for a future call to SetClientEncoding. Success should mean * that SetClientEncoding is guaranteed to succeed for this encoding request. * * (But note that success before backend_startup_complete does not guarantee @@ -148,7 +148,7 @@ PrepareClientEncoding(int encoding) /* * We cannot yet remove any older entry for the same encoding pair, - * since it could still be in use. SetClientEncoding will clean up. + * since it could still be in use. SetClientEncoding will clean up. */ return 0; /* success */ @@ -157,8 +157,8 @@ PrepareClientEncoding(int encoding) { /* * If we're not in a live transaction, the only thing we can do is - * restore a previous setting using the cache. This covers all - * transaction-rollback cases. The only case it might not work for is + * restore a previous setting using the cache. This covers all + * transaction-rollback cases. 
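The conversion cache mbutils.c describes reduces to a small sketch: every (source, destination) pair ever looked up stays on a list, so restoring a previous setting at rollback needs no fresh catalog access. ConvCacheEntry, find_cached_conversion, and remember_conversion are illustrative names, not PostgreSQL's.

#include <stdlib.h>

typedef struct ConvCacheEntry
{
    int src_encoding;
    int dst_encoding;
    void *conv_proc;                 /* stand-in for FmgrInfo lookup data */
    struct ConvCacheEntry *next;
} ConvCacheEntry;

static ConvCacheEntry *conv_cache = NULL;

/* Return a cached lookup, or NULL meaning "do a catalog lookup". */
static ConvCacheEntry *
find_cached_conversion(int src, int dst)
{
    ConvCacheEntry *e;

    for (e = conv_cache; e != NULL; e = e->next)
        if (e->src_encoding == src && e->dst_encoding == dst)
            return e;
    return NULL;
}

/* Remember a lookup forever; older duplicates may still be in use. */
static void
remember_conversion(int src, int dst, void *proc)
{
    ConvCacheEntry *e = malloc(sizeof(ConvCacheEntry));

    if (e == NULL)
        return;                      /* cache miss later; not fatal */
    e->src_encoding = src;
    e->dst_encoding = dst;
    e->conv_proc = proc;
    e->next = conv_cache;
    conv_cache = e;
}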
The only case it might not work for is * trying to change client_encoding on the fly by editing * postgresql.conf and SIGHUP'ing. Which would probably be a stupid * thing to do anyway. @@ -316,7 +316,7 @@ pg_get_client_encoding_name(void) * * CAUTION: although the presence of a length argument means that callers * can pass non-null-terminated strings, care is required because the same - * string will be passed back if no conversion occurs. Such callers *must* + * string will be passed back if no conversion occurs. Such callers *must* * check whether result == src and handle that case differently. * * Note: we try to avoid raising error, since that could get us into @@ -572,7 +572,7 @@ pg_any_to_server(const char *s, int len, int encoding) * the selected client_encoding. If the client encoding is ASCII-safe * then we just do a straight validation under that encoding. For an * ASCII-unsafe encoding we have a problem: we dare not pass such data - * to the parser but we have no way to convert it. We compromise by + * to the parser but we have no way to convert it. We compromise by * rejecting the data if it contains any non-ASCII characters. */ if (PG_VALID_BE_ENCODING(encoding)) diff --git a/src/backend/utils/mb/wstrcmp.c b/src/backend/utils/mb/wstrcmp.c index 64a9cf848e2..dad3ae023a3 100644 --- a/src/backend/utils/mb/wstrcmp.c +++ b/src/backend/utils/mb/wstrcmp.c @@ -23,7 +23,7 @@ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/src/backend/utils/mb/wstrncmp.c b/src/backend/utils/mb/wstrncmp.c index 87c1f5afdaa..ea4823fc6f8 100644 --- a/src/backend/utils/mb/wstrncmp.c +++ b/src/backend/utils/mb/wstrncmp.c @@ -22,7 +22,7 @@ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index f32cf90ac4a..a66a7d9198c 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -3499,7 +3499,7 @@ get_guc_variables(void) /* - * Build the sorted array. This is split out so that it could be + * Build the sorted array. This is split out so that it could be * re-executed after startup (eg, we could allow loadable modules to * add vars, and then we'd need to re-sort). */ @@ -3657,7 +3657,7 @@ add_placeholder_variable(const char *name, int elevel) /* * The char* is allocated at the end of the struct since we have no - * 'static' place to point to. Note that the current value, as well as + * 'static' place to point to. 
Note that the current value, as well as * the boot and reset values, start out NULL. */ var->variable = (char **) (var + 1); @@ -3735,7 +3735,7 @@ find_option(const char *name, bool create_placeholders, int elevel) return *res; /* - * See if the name is an obsolete name for a variable. We assume that the + * See if the name is an obsolete name for a variable. We assume that the * set of supported old names is short enough that a brute-force search is * the best way. */ @@ -5188,7 +5188,7 @@ set_config_option(const char *name, const char *value, * If a PGC_BACKEND parameter is changed in the config file, * we want to accept the new value in the postmaster (whence * it will propagate to subsequently-started backends), but - * ignore it in existing backends. This is a tad klugy, but + * ignore it in existing backends. This is a tad klugy, but * necessary because we don't re-read the config file during * backend start. * @@ -5245,7 +5245,7 @@ set_config_option(const char *name, const char *value, * An exception might be made if the reset value is assumed to be "safe". * * Note: this flag is currently used for "session_authorization" and - * "role". We need to prohibit changing these inside a local userid + * "role". We need to prohibit changing these inside a local userid * context because when we exit it, GUC won't be notified, leaving things * out of sync. (This could be fixed by forcing a new GUC nesting level, * but that would change behavior in possibly-undesirable ways.) Also, we @@ -5837,7 +5837,7 @@ set_config_sourcefile(const char *name, char *sourcefile, int sourceline) /* * Set a config option to the given value. See also set_config_option, - * this is just the wrapper to be called from outside GUC. NB: this + * this is just the wrapper to be called from outside GUC. NB: this * is used only for non-transactional operations. * * Note: there is no support here for setting source file/line, as it @@ -6073,7 +6073,7 @@ flatten_set_variable_args(const char *name, List *args) else { /* - * Plain string literal or identifier. For quote mode, + * Plain string literal or identifier. For quote mode, * quote it if it's not a vanilla identifier. */ if (flags & GUC_LIST_QUOTE) @@ -7625,7 +7625,7 @@ ParseLongOption(const char *string, char **name, char **value) /* * Handle options fetched from pg_db_role_setting.setconfig, - * pg_proc.proconfig, etc. Caller must specify proper context/source/action. + * pg_proc.proconfig, etc. Caller must specify proper context/source/action. * * The array parameter must be an array of TEXT (it must not be NULL). */ @@ -7905,7 +7905,7 @@ GUCArrayReset(ArrayType *array) * Validate a proposed option setting for GUCArrayAdd/Delete/Reset. * * name is the option name. value is the proposed value for the Add case, - * or NULL for the Delete/Reset cases. If skipIfNoPermissions is true, it's + * or NULL for the Delete/Reset cases. If skipIfNoPermissions is true, it's * not an error to have no permissions to set the option. * * Returns TRUE if OK, FALSE if skipIfNoPermissions is true and user does not @@ -7990,7 +7990,7 @@ validate_option_array_item(const char *name, const char *value, * ERRCODE_INVALID_PARAMETER_VALUE SQLSTATE for check hook failures. * * Note that GUC_check_errmsg() etc are just macros that result in a direct - * assignment to the associated variables. That is ugly, but forced by the + * assignment to the associated variables. That is ugly, but forced by the * limitations of C's macro mechanisms. 
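The sorted-array lookup that guc.c's comments describe amounts to qsort() once, bsearch() thereafter, over an array of pointers, with case-insensitive name comparison. A hedged sketch with hypothetical ConfigEntry/find_config_entry names:

#include <stdlib.h>

/* Case-insensitive comparison, since GUC names are case-insensitive. */
static int
config_name_compare(const char *a, const char *b)
{
    for (;; a++, b++)
    {
        char ca = (*a >= 'A' && *a <= 'Z') ? *a + ('a' - 'A') : *a;
        char cb = (*b >= 'A' && *b <= 'Z') ? *b + ('a' - 'A') : *b;

        if (ca != cb)
            return (unsigned char) ca - (unsigned char) cb;
        if (ca == '\0')
            return 0;
    }
}

typedef struct ConfigEntry
{
    const char *name;                /* option name */
    const char *value;               /* current setting, as a string */
} ConfigEntry;

/* qsort()/bsearch() comparator over an array of ConfigEntry pointers. */
static int
config_var_compare(const void *a, const void *b)
{
    const ConfigEntry *va = *(ConfigEntry *const *) a;
    const ConfigEntry *vb = *(ConfigEntry *const *) b;

    return config_name_compare(va->name, vb->name);
}

/* Binary search the sorted pointer array; NULL if not found. */
static ConfigEntry *
find_config_entry(ConfigEntry **vars, int num_vars, const char *name)
{
    ConfigEntry  key;
    ConfigEntry *keyptr = &key;
    ConfigEntry **res;

    key.name = name;
    res = (ConfigEntry **) bsearch(&keyptr, vars, num_vars,
                                   sizeof(ConfigEntry *),
                                   config_var_compare);
    return res ? *res : NULL;
}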
*/ void diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c index 66d03adaf90..190ec15ca13 100644 --- a/src/backend/utils/misc/ps_status.c +++ b/src/backend/utils/misc/ps_status.c @@ -109,7 +109,7 @@ static char **save_argv; * from being clobbered by subsequent ps_display actions. * * (The original argv[] will not be overwritten by this routine, but may be - * overwritten during init_ps_display. Also, the physical location of the + * overwritten during init_ps_display. Also, the physical location of the * environment strings may be moved, so this should be called before any code * that might try to hang onto a getenv() result.) */ @@ -210,7 +210,7 @@ save_ps_display_args(int argc, char **argv) /* * Call this once during subprocess startup to set the identification - * values. At this point, the original argv[] array may be overwritten. + * values. At this point, the original argv[] array may be overwritten. */ void init_ps_display(const char *username, const char *dbname, @@ -360,7 +360,7 @@ set_ps_display(const char *activity, bool force) /* * Returns what's currently in the ps display, in case someone needs - * it. Note that only the activity part is returned. On some platforms + * it. Note that only the activity part is returned. On some platforms * the string will not be null-terminated, so return the effective * length into *displen. */ diff --git a/src/backend/utils/misc/rbtree.c b/src/backend/utils/misc/rbtree.c index f8143724d0a..c52e5047bfc 100644 --- a/src/backend/utils/misc/rbtree.c +++ b/src/backend/utils/misc/rbtree.c @@ -13,7 +13,7 @@ * * Red-black trees are a type of balanced binary tree wherein (1) any child of * a red node is always black, and (2) every path from root to leaf traverses - * an equal number of black nodes. From these properties, it follows that the + * an equal number of black nodes. From these properties, it follows that the * longest path from root to leaf is only about twice as long as the shortest, * so lookups are guaranteed to run in O(lg n) time. * @@ -102,7 +102,7 @@ static RBNode sentinel = {InitialState, RBBLACK, RBNIL, RBNIL, NULL}; * valid data! freefunc can be NULL if caller doesn't require retail * space reclamation. * - * The RBTree node is palloc'd in the caller's memory context. Note that + * The RBTree node is palloc'd in the caller's memory context. Note that * all contents of the tree are actually allocated by the caller, not here. * * Since tree contents are managed by the caller, there is currently not @@ -282,10 +282,10 @@ rb_rotate_right(RBTree *rb, RBNode *x) /* * Maintain Red-Black tree balance after inserting node x. * - * The newly inserted node is always initially marked red. That may lead to + * The newly inserted node is always initially marked red. That may lead to * a situation where a red node has a red child, which is prohibited. We can * always fix the problem by a series of color changes and/or "rotations", - * which move the problem progressively higher up in the tree. If one of the + * which move the problem progressively higher up in the tree. If one of the * two red nodes is the root, we can always fix the problem by changing the * root from red to black. * @@ -296,7 +296,7 @@ static void rb_insert_fixup(RBTree *rb, RBNode *x) { /* - * x is always a red node. Initially, it is the newly inserted node. Each + * x is always a red node. Initially, it is the newly inserted node. Each * iteration of this loop moves it higher up in the tree. 
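The rotations that rb_insert_fixup relies on are the textbook ones. A sketch of the left rotation, using NULL children rather than rbtree.c's sentinel node, with illustrative field names:

typedef struct RBNodeSketch
{
    int  color;                      /* RED or BLACK */
    struct RBNodeSketch *left;
    struct RBNodeSketch *right;
    struct RBNodeSketch *parent;
} RBNodeSketch;

static void
rotate_left(RBNodeSketch **root, RBNodeSketch *x)
{
    RBNodeSketch *y = x->right;      /* y replaces x at the top */

    x->right = y->left;              /* y's left subtree hangs off x */
    if (y->left != NULL)
        y->left->parent = x;

    y->parent = x->parent;           /* splice y into x's old position */
    if (x->parent == NULL)
        *root = y;
    else if (x == x->parent->left)
        x->parent->left = y;
    else
        x->parent->right = y;

    y->left = x;                     /* x becomes y's left child */
    x->parent = y;
}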
*/ while (x != rb->root && x->parent->color == RBRED) @@ -481,7 +481,7 @@ rb_delete_fixup(RBTree *rb, RBNode *x) while (x != rb->root && x->color == RBBLACK) { /* - * Left and right cases are symmetric. Any nodes that are children of + * Left and right cases are symmetric. Any nodes that are children of * x have a black-height one less than the remainder of the nodes in * the tree. We rotate and recolor nodes to move the problem up the * tree: at some stage we'll either fix the problem, or reach the root diff --git a/src/backend/utils/misc/tzparser.c b/src/backend/utils/misc/tzparser.c index b52942db721..72ee2afea71 100644 --- a/src/backend/utils/misc/tzparser.c +++ b/src/backend/utils/misc/tzparser.c @@ -4,7 +4,7 @@ * Functions for parsing timezone offset files * * Note: this code is invoked from the check_hook for the GUC variable - * timezone_abbreviations. Therefore, it should report problems using + * timezone_abbreviations. Therefore, it should report problems using * GUC_check_errmsg() and related functions, and try to avoid throwing * elog(ERROR). This is not completely bulletproof at present --- in * particular out-of-memory will throw an error. Could probably fix with @@ -179,7 +179,7 @@ addToArray(tzEntry **base, int *arraysize, int n, /* * Search the array for a duplicate; as a useful side effect, the array is - * maintained in sorted order. We use strcmp() to ensure we match the + * maintained in sorted order. We use strcmp() to ensure we match the * sort order datetime.c expects. */ arrayptr = *base; diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c index e202acac934..23b345bb595 100644 --- a/src/backend/utils/mmgr/aset.c +++ b/src/backend/utils/mmgr/aset.c @@ -38,7 +38,7 @@ * request, even if it was much larger than necessary. This led to more * and more wasted space in allocated chunks over time. To fix, get rid * of the midrange behavior: we now handle only "small" power-of-2-size - * chunks as chunks. Anything "large" is passed off to malloc(). Change + * chunks as chunks. Anything "large" is passed off to malloc(). Change * the number of freelists to change the small/large boundary. * * @@ -54,7 +54,7 @@ * Thus, if someone makes the common error of writing past what they've * requested, the problem is likely to go unnoticed ... until the day when * there *isn't* any wasted space, perhaps because of different memory - * alignment on a new platform, or some other effect. To catch this sort + * alignment on a new platform, or some other effect. To catch this sort * of problem, the MEMORY_CONTEXT_CHECKING option stores 0x7E just beyond * the requested space whenever the request is less than the actual chunk * size, and verifies that the byte is undamaged when the chunk is freed. @@ -153,7 +153,7 @@ typedef AllocSetContext *AllocSet; /* * AllocBlock * An AllocBlock is the unit of memory that is obtained by aset.c - * from malloc(). It contains one or more AllocChunks, which are + * from malloc(). It contains one or more AllocChunks, which are * the units requested by palloc() and freed by pfree(). AllocChunks * cannot be returned to malloc() individually, instead they are put * on freelists by pfree() and re-used by the next palloc() that has @@ -290,7 +290,7 @@ AllocSetFreeIndex(Size size) /* * At this point we need to obtain log2(tsize)+1, ie, the number of - * not-all-zero bits at the right. We used to do this with a + * not-all-zero bits at the right. 
We used to do this with a * shift-and-count loop, but this function is enough of a hotspot to * justify micro-optimization effort. The best approach seems to be * to use a lookup table. Note that this code assumes that @@ -457,7 +457,7 @@ AllocSetInit(MemoryContext context) * Actually, this routine has some discretion about what to do. * It should mark all allocated chunks freed, but it need not necessarily * give back all the resources the set owns. Our actual implementation is - * that we hang onto any "keeper" block specified for the set. In this way, + * that we hang onto any "keeper" block specified for the set. In this way, * we don't thrash malloc() when a context is repeatedly reset after small * allocations, which is typical behavior for per-tuple contexts. */ @@ -690,7 +690,7 @@ AllocSetAlloc(MemoryContext context, Size size) /* * In most cases, we'll get back the index of the next larger - * freelist than the one we need to put this chunk on. The + * freelist than the one we need to put this chunk on. The * exception is when availchunk is exactly a power of 2. */ if (availchunk != ((Size) 1 << (a_fidx + ALLOC_MINBITS))) @@ -836,7 +836,7 @@ AllocSetFree(MemoryContext context, void *pointer) { /* * Big chunks are certain to have been allocated as single-chunk - * blocks. Find the containing block and return it to malloc(). + * blocks. Find the containing block and return it to malloc(). */ AllocBlock block = set->blocks; AllocBlock prevblock = NULL; @@ -932,7 +932,7 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size) if (oldsize > set->allocChunkLimit) { /* - * The chunk must have been allocated as a single-chunk block. Find + * The chunk must have been allocated as a single-chunk block. Find * the containing block and use realloc() to make it bigger with * minimum space wastage. */ diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c index 0e2d151a3ee..42c5ffba44b 100644 --- a/src/backend/utils/mmgr/mcxt.c +++ b/src/backend/utils/mmgr/mcxt.c @@ -163,7 +163,7 @@ MemoryContextResetChildren(MemoryContext context) * * The type-specific delete routine removes all subsidiary storage * for the context, but we have to delete the context node itself, - * as well as recurse to get the children. We must also delink the + * as well as recurse to get the children. We must also delink the * node from its parent, if it has one. */ void @@ -418,22 +418,22 @@ MemoryContextContains(MemoryContext context, void *pointer) * we want to be sure that we don't leave the context tree invalid * in case of failure (such as insufficient memory to allocate the * context node itself). The procedure goes like this: - * 1. Context-type-specific routine first calls MemoryContextCreate(), + * 1. Context-type-specific routine first calls MemoryContextCreate(), * passing the appropriate tag/size/methods values (the methods * pointer will ordinarily point to statically allocated data). * The parent and name parameters usually come from the caller. - * 2. MemoryContextCreate() attempts to allocate the context node, + * 2. MemoryContextCreate() attempts to allocate the context node, * plus space for the name. If this fails we can ereport() with no * damage done. - * 3. We fill in all of the type-independent MemoryContext fields. - * 4. We call the type-specific init routine (using the methods pointer). + * 3. We fill in all of the type-independent MemoryContext fields. + * 4. We call the type-specific init routine (using the methods pointer). 
* The init routine is required to make the node minimally valid * with zero chance of failure --- it can't allocate more memory, * for example. - * 5. Now we have a minimally valid node that can behave correctly + * 5. Now we have a minimally valid node that can behave correctly * when told to reset or delete itself. We link the node to its * parent (if any), making the node part of the context tree. - * 6. We return to the context-type-specific routine, which finishes + * 6. We return to the context-type-specific routine, which finishes * up type-specific initialization. This routine can now do things * that might fail (like allocate more memory), so long as it's * sure the node is left in a state that delete will handle. @@ -445,7 +445,7 @@ MemoryContextContains(MemoryContext context, void *pointer) * * Normally, the context node and the name are allocated from * TopMemoryContext (NOT from the parent context, since the node must - * survive resets of its parent context!). However, this routine is itself + * survive resets of its parent context!). However, this routine is itself * used to create TopMemoryContext! If we see that TopMemoryContext is NULL, * we assume we are creating TopMemoryContext and use malloc() to allocate * the node. diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c index 9a257e7351b..3dda7cff94c 100644 --- a/src/backend/utils/mmgr/portalmem.c +++ b/src/backend/utils/mmgr/portalmem.c @@ -143,14 +143,14 @@ GetPortalByName(const char *name) * Get the "primary" stmt within a portal, ie, the one marked canSetTag. * * Returns NULL if no such stmt. If multiple PlannedStmt structs within the - * portal are marked canSetTag, returns the first one. Neither of these + * portal are marked canSetTag, returns the first one. Neither of these * cases should occur in present usages of this function. * * Copes if given a list of Querys --- can't happen in a portal, but this * code also supports plancache.c, which needs both cases. * * Note: the reason this is just handed a List is so that plancache.c - * can share the code. For use with a portal, use PortalGetPrimaryStmt + * can share the code. For use with a portal, use PortalGetPrimaryStmt * rather than calling this directly. */ Node * @@ -276,7 +276,7 @@ CreateNewPortal(void) * you can pass a constant string, perhaps "(query not available)".) * * commandTag shall be NULL if and only if the original query string - * (before rewriting) was an empty string. Also, the passed commandTag must + * (before rewriting) was an empty string. Also, the passed commandTag must * be a pointer to a constant string, since it is not copied. * * If cplan is provided, then it is a cached plan containing the stmts, @@ -479,7 +479,7 @@ PortalDrop(Portal portal, bool isTopCommit) /* * Allow portalcmds.c to clean up the state it knows about, in particular - * shutting down the executor if still active. This step potentially runs + * shutting down the executor if still active. This step potentially runs * user-defined code so failure has to be expected. It's the cleanup * hook's responsibility to not try to do that more than once, in the case * that failure occurs and then we come back to drop the portal again @@ -506,12 +506,12 @@ PortalDrop(Portal portal, bool isTopCommit) PortalReleaseCachedPlan(portal); /* - * Release any resources still attached to the portal. There are several + * Release any resources still attached to the portal. 
There are several * cases being covered here: * * Top transaction commit (indicated by isTopCommit): normally we should * do nothing here and let the regular end-of-transaction resource - * releasing mechanism handle these resources too. However, if we have a + * releasing mechanism handle these resources too. However, if we have a * FAILED portal (eg, a cursor that got an error), we'd better clean up * its resources to avoid resource-leakage warning messages. * @@ -523,7 +523,7 @@ PortalDrop(Portal portal, bool isTopCommit) * cleaned up in transaction abort. * * Ordinary portal drop: must release resources. However, if the portal - * is not FAILED then we do not release its locks. The locks become the + * is not FAILED then we do not release its locks. The locks become the * responsibility of the transaction's ResourceOwner (since it is the * parent of the portal's owner) and will be released when the transaction * eventually ends. @@ -610,7 +610,7 @@ PortalHashTableDeleteAll(void) * Holdable cursors created in this transaction need to be converted to * materialized form, since we are going to close down the executor and * release locks. Non-holdable portals created in this transaction are - * simply removed. Portals remaining from prior transactions should be + * simply removed. Portals remaining from prior transactions should be * left untouched. * * Returns TRUE if any portals changed state (possibly causing user-defined diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c index e5461e660ef..f8507d8f856 100644 --- a/src/backend/utils/resowner/resowner.c +++ b/src/backend/utils/resowner/resowner.c @@ -151,7 +151,7 @@ ResourceOwnerCreate(ResourceOwner parent, const char *name) * but don't delete the owner objects themselves. * * Note that this executes just one phase of release, and so typically - * must be called three times. We do it this way because (a) we want to + * must be called three times. We do it this way because (a) we want to * do all the recursion separately for each phase, thereby preserving * the needed order of operations; and (b) xact.c may have other operations * to do between the phases. @@ -225,7 +225,7 @@ ResourceOwnerReleaseInternal(ResourceOwner owner, * * During a commit, there shouldn't be any remaining pins --- that * would indicate failure to clean up the executor correctly --- so - * issue warnings. In the abort case, just clean up quietly. + * issue warnings. In the abort case, just clean up quietly. * * We are careful to do the releasing back-to-front, so as to avoid * O(N^2) behavior in ResourceOwnerForgetBuffer(). @@ -377,7 +377,7 @@ ResourceOwnerDelete(ResourceOwner owner) /* * We delink the owner from its parent before deleting it, so that if * there's an error we won't have deleted/busted owners still attached to - * the owner tree. Better a leak than a crash. + * the owner tree. Better a leak than a crash. */ ResourceOwnerNewParent(owner, NULL); @@ -569,7 +569,7 @@ ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer) /* * Scan back-to-front because it's more likely we are releasing a - * recently pinned buffer. This isn't always the case of course, but + * recently pinned buffer. This isn't always the case of course, but * it's the way to bet. */ for (i = nb1; i >= 0; i--) diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c index 1f21b1dd542..1a5c5de7f8d 100644 --- a/src/backend/utils/sort/logtape.c +++ b/src/backend/utils/sort/logtape.c @@ -7,14 +7,14 @@ * tuplesort.c). 
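ResourceOwnerForgetBuffer's back-to-front scan, mentioned in the resowner.c hunks above, is easy to sketch in isolation: search from the end, where a recently pinned buffer is likeliest to sit, then close the hole while preserving order. Plain ints stand in for buffer IDs; forget_resource is an illustrative name.

/*
 * Scan from the most recently remembered entry; on a hit, slide the
 * tail down one slot so the remembered order is preserved.
 */
static int
forget_resource(int *items, int *nitems, int value)
{
    int i;

    for (i = *nitems - 1; i >= 0; i--)
    {
        if (items[i] == value)
        {
            while (i < *nitems - 1)
            {
                items[i] = items[i + 1];
                i++;
            }
            (*nitems)--;
            return 1;               /* found and forgotten */
        }
    }
    return 0;                       /* not tracked by this owner */
}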
Merging is an ideal algorithm for tape devices, but if * we implement it on disk by creating a separate file for each "tape", * there is an annoying problem: the peak space usage is at least twice - * the volume of actual data to be sorted. (This must be so because each + * the volume of actual data to be sorted. (This must be so because each * datum will appear in both the input and output tapes of the final - * merge pass. For seven-tape polyphase merge, which is otherwise a + * merge pass. For seven-tape polyphase merge, which is otherwise a * pretty good algorithm, peak usage is more like 4x actual data volume.) * * We can work around this problem by recognizing that any one tape * dataset (with the possible exception of the final output) is written - * and read exactly once in a perfectly sequential manner. Therefore, + * and read exactly once in a perfectly sequential manner. Therefore, * a datum once read will not be required again, and we can recycle its * space for use by the new tape dataset(s) being generated. In this way, * the total space usage is essentially just the actual data volume, plus @@ -55,7 +55,7 @@ * To support the above policy of writing to the lowest free block, * ltsGetFreeBlock sorts the list of free block numbers into decreasing * order each time it is asked for a block and the list isn't currently - * sorted. This is an efficient way to handle it because we expect cycles + * sorted. This is an efficient way to handle it because we expect cycles * of releasing many blocks followed by re-using many blocks, due to * tuplesort.c's "preread" behavior. * @@ -117,7 +117,7 @@ typedef struct LogicalTape /* * The total data volume in the logical tape is numFullBlocks * BLCKSZ + - * lastBlockBytes. BUT: we do not update lastBlockBytes during writing, + * lastBlockBytes. BUT: we do not update lastBlockBytes during writing, * only at completion of a write phase. */ long numFullBlocks; /* number of complete blocks in log tape */ @@ -157,7 +157,7 @@ struct LogicalTapeSet * * If blocksSorted is true then the block numbers in freeBlocks are in * *decreasing* order, so that removing the last entry gives us the lowest - * free block. We re-sort the blocks whenever a block is demanded; this + * free block. We re-sort the blocks whenever a block is demanded; this * should be reasonably efficient given the expected usage pattern. */ bool forgetFreeSpace; /* are we remembering free blocks? */ @@ -218,7 +218,7 @@ ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer) /* * Read a block-sized buffer from the specified block of the underlying file. * - * No need for an error return convention; we ereport() on any error. This + * No need for an error return convention; we ereport() on any error. This * module should never attempt to read a block it doesn't know is there. */ static void @@ -353,7 +353,7 @@ ltsRecordBlockNum(LogicalTapeSet *lts, IndirectBlock *indirect, /* * Reset a logical tape's indirect-block hierarchy after a write pass - * to prepare for reading. We dump out partly-filled blocks except + * to prepare for reading. We dump out partly-filled blocks except * at the top of the hierarchy, and we rewind each level to the start. * This call returns the first data block number, or -1L if the tape * is empty. @@ -540,7 +540,7 @@ LogicalTapeSetCreate(int ntapes) /* * Initialize per-tape structs. Note we allocate the I/O buffer and * first-level indirect block for a tape only when it is first actually - * written to. 
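The lazily sorted free list that ltsGetFreeBlock's comments describe can be sketched as follows: released block numbers accumulate unsorted, get qsort()ed into decreasing order on demand, and the lowest-numbered block is popped from the array's tail. Hypothetical names throughout; the real code grows the array and tracks more state.

#include <stdlib.h>

/* Sort comparator: descending, so the lowest block sits at the tail. */
static int
block_cmp_desc(const void *a, const void *b)
{
    long aa = *(const long *) a;
    long bb = *(const long *) b;

    return (aa < bb) ? 1 : (aa > bb) ? -1 : 0;
}

/* Hand out the lowest free block, or extend the file if none are free. */
static long
get_free_block(long *free_blocks, int *nfree, int *sorted, long *next_new)
{
    if (*nfree > 0)
    {
        if (!*sorted)
        {
            qsort(free_blocks, *nfree, sizeof(long), block_cmp_desc);
            *sorted = 1;
        }
        return free_blocks[--(*nfree)];
    }
    return (*next_new)++;
}

/* Returning a block just appends; the next get_free_block() re-sorts. */
static void
release_block(long *free_blocks, int *nfree, int *sorted, long blocknum)
{
    free_blocks[(*nfree)++] = blocknum;  /* caller ensures capacity */
    *sorted = 0;
}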
This avoids wasting memory space when tuplesort.c + * written to. This avoids wasting memory space when tuplesort.c * overestimates the number of tapes needed. */ for (i = 0; i < ntapes; i++) @@ -591,7 +591,7 @@ LogicalTapeSetClose(LogicalTapeSet *lts) * Mark a logical tape set as not needing management of free space anymore. * * This should be called if the caller does not intend to write any more data - * into the tape set, but is reading from un-frozen tapes. Since no more + * into the tape set, but is reading from un-frozen tapes. Since no more * writes are planned, remembering free blocks is no longer useful. Setting * this flag lets us avoid wasting time and space in ltsReleaseBlock(), which * is not designed to handle large numbers of free blocks. @@ -732,7 +732,7 @@ LogicalTapeRewind(LogicalTapeSet *lts, int tapenum, bool forWrite) else { /* - * Completion of a read phase. Rewind and prepare for write. + * Completion of a read phase. Rewind and prepare for write. * * NOTE: we assume the caller has read the tape to the end; otherwise * untouched data and indirect blocks will not have been freed. We @@ -826,7 +826,7 @@ LogicalTapeRead(LogicalTapeSet *lts, int tapenum, * * This *must* be called just at the end of a write pass, before the * tape is rewound (after rewind is too late!). It performs a rewind - * and switch to read mode "for free". An immediately following rewind- + * and switch to read mode "for free". An immediately following rewind- * for-read call is OK but not necessary. */ void @@ -862,7 +862,7 @@ LogicalTapeFreeze(LogicalTapeSet *lts, int tapenum) } /* - * Backspace the tape a given number of bytes. (We also support a more + * Backspace the tape a given number of bytes. (We also support a more * general seek interface, see below.) * * *Only* a frozen-for-read tape can be backed up; we don't support @@ -966,7 +966,7 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum, return false; /* - * OK, advance or back up to the target block. This implementation would + * OK, advance or back up to the target block. This implementation would * be pretty inefficient for long seeks, but we really aren't expecting * that (a seek over one tuple is typical). */ @@ -999,7 +999,7 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum, * Obtain current position in a form suitable for a later LogicalTapeSeek. * * NOTE: it'd be OK to do this during write phase with intention of using - * the position for a seek after freezing. Not clear if anyone needs that. + * the position for a seek after freezing. Not clear if anyone needs that. */ void LogicalTapeTell(LogicalTapeSet *lts, int tapenum, diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c index a7f760312a3..90ef0abca62 100644 --- a/src/backend/utils/sort/tuplesort.c +++ b/src/backend/utils/sort/tuplesort.c @@ -6,7 +6,7 @@ * This module handles sorting of heap tuples, index tuples, or single * Datums (and could easily support other kinds of sortable objects, * if necessary). It works efficiently for both small and large amounts - * of data. Small amounts are sorted in-memory using qsort(). Large + * of data. Small amounts are sorted in-memory using qsort(). Large * amounts are sorted using temporary files and a standard external sort * algorithm. * @@ -40,7 +40,7 @@ * into sorted runs in temporary tapes, emitting just enough tuples at each * step to get back within the workMem limit. 
Whenever the run number at * the top of the heap changes, we begin a new run with a new output tape - * (selected per Algorithm D). After the end of the input is reached, + * (selected per Algorithm D). After the end of the input is reached, * we dump out remaining tuples in memory into a final run (or two), * then merge the runs using Algorithm D. * @@ -57,17 +57,17 @@ * access at all, defeating the read-ahead methods used by most Unix kernels. * Worse, the output tape gets written into a very random sequence of blocks * of the temp file, ensuring that things will be even worse when it comes - * time to read that tape. A straightforward merge pass thus ends up doing a + * time to read that tape. A straightforward merge pass thus ends up doing a * lot of waiting for disk seeks. We can improve matters by prereading from * each source tape sequentially, loading about workMem/M bytes from each tape * in turn. Then we run the merge algorithm, writing but not reading until - * one of the preloaded tuple series runs out. Then we switch back to preread + * one of the preloaded tuple series runs out. Then we switch back to preread * mode, fill memory again, and repeat. This approach helps to localize both * read and write accesses. * * When the caller requests random access to the sort result, we form * the final sorted run on a logical tape which is then "frozen", so - * that we can access it randomly. When the caller does not need random + * that we can access it randomly. When the caller does not need random * access, we return from tuplesort_performsort() as soon as we are down * to one run per logical tape. The final merge is then performed * on-the-fly as the caller repeatedly calls tuplesort_getXXX; this @@ -77,7 +77,7 @@ * grounds that 7 is the "sweet spot" on the tapes-to-passes curve according * to Knuth's figure 70 (section 5.4.2). However, Knuth is assuming that * tape drives are expensive beasts, and in particular that there will always - * be many more runs than tape drives. In our implementation a "tape drive" + * be many more runs than tape drives. In our implementation a "tape drive" * doesn't cost much more than a few Kb of memory buffers, so we can afford * to have lots of them. In particular, if we can have as many tape drives * as sorted runs, we can eliminate any repeated I/O at all. In the current @@ -136,28 +136,28 @@ bool optimize_bounded_sort = true; /* - * The objects we actually sort are SortTuple structs. These contain + * The objects we actually sort are SortTuple structs. These contain * a pointer to the tuple proper (might be a MinimalTuple or IndexTuple), * which is a separate palloc chunk --- we assume it is just one chunk and * can be freed by a simple pfree(). SortTuples also contain the tuple's * first key column in Datum/nullflag format, and an index integer. * * Storing the first key column lets us save heap_getattr or index_getattr - * calls during tuple comparisons. We could extract and save all the key + * calls during tuple comparisons. We could extract and save all the key * columns not just the first, but this would increase code complexity and * overhead, and wouldn't actually save any comparison cycles in the common * case where the first key determines the comparison result. Note that * for a pass-by-reference datatype, datum1 points into the "tuple" storage. * * When sorting single Datums, the data value is represented directly by - * datum1/isnull1. If the datatype is pass-by-reference and isnull1 is false, + * datum1/isnull1. 
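The SortTuple layout described in the comment above, a tuple pointer plus a cached first key, is worth a sketch. Here uintptr_t stands in for Datum and the fast path assumes an unsigned by-value key, so this is illustrative only; real comparators go through the datatype's sort operator.

#include <stdbool.h>
#include <stdint.h>

typedef struct SortTupleSketch
{
    void      *tuple;       /* the palloc'd tuple proper */
    uintptr_t  datum1;      /* cached value of first key column */
    bool       isnull1;     /* is first key column NULL? */
    int        tupindex;    /* run number or source tape, per phase */
} SortTupleSketch;

/* Compare on the cached key; only ties need to examine the tuple. */
static int
compare_sorttuple(const SortTupleSketch *a, const SortTupleSketch *b,
                  int (*full_cmp) (const void *, const void *))
{
    if (a->isnull1 != b->isnull1)
        return a->isnull1 ? 1 : -1;      /* NULLs sort last here */
    if (!a->isnull1 && a->datum1 != b->datum1)
        return (a->datum1 < b->datum1) ? -1 : 1;
    return full_cmp(a->tuple, b->tuple);
}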
If the datatype is pass-by-reference and isnull1 is false, * then datum1 points to a separately palloc'd data value that is also pointed * to by the "tuple" pointer; otherwise "tuple" is NULL. * * While building initial runs, tupindex holds the tuple's run number. During * merge passes, we re-use it to hold the input tape number that each tuple in * the heap was read from, or to hold the index of the next tuple pre-read - * from the same tape in the case of pre-read entries. tupindex goes unused + * from the same tape in the case of pre-read entries. tupindex goes unused * if the sort occurs entirely in memory. */ typedef struct @@ -238,7 +238,7 @@ struct Tuplesortstate void (*copytup) (Tuplesortstate *state, SortTuple *stup, void *tup); /* - * Function to write a stored tuple onto tape. The representation of the + * Function to write a stored tuple onto tape. The representation of the * tuple on tape need not be the same as it is in memory; requirements on * the tape representation are given below. After writing the tuple, * pfree() the out-of-line data (not the SortTuple struct!), and increase @@ -264,7 +264,7 @@ struct Tuplesortstate void (*reversedirection) (Tuplesortstate *state); /* - * This array holds the tuples now in sort memory. If we are in state + * This array holds the tuples now in sort memory. If we are in state * INITIAL, the tuples are in no particular order; if we are in state * SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS * and FINALMERGE, the tuples are organized in "heap" order per Algorithm @@ -407,7 +407,7 @@ struct Tuplesortstate * If state->randomAccess is true, then the stored representation of the * tuple must be followed by another "unsigned int" that is a copy of the * length --- so the total tape space used is actually sizeof(unsigned int) - * more than the stored length value. This allows read-backwards. When + * more than the stored length value. This allows read-backwards. When * randomAccess is not true, the write/read routines may omit the extra * length word. * @@ -417,7 +417,7 @@ struct Tuplesortstate * the back length word (if present). * * The write/read routines can make use of the tuple description data - * stored in the Tuplesortstate record, if needed. They are also expected + * stored in the Tuplesortstate record, if needed. They are also expected * to adjust state->availMem by the amount of memory space (not tape space!) * released or consumed. There is no error return from either writetup * or readtup; they should ereport() on failure. @@ -505,7 +505,7 @@ static void free_sort_tuple(Tuplesortstate *state, SortTuple *stup); * * After calling tuplesort_begin, the caller should call tuplesort_putXXX * zero or more times, then call tuplesort_performsort when all the tuples - * have been supplied. After performsort, retrieve the tuples in sorted + * have been supplied. After performsort, retrieve the tuples in sorted * order by calling tuplesort_getXXX until it returns false/NULL. (If random * access was requested, rescan, markpos, and restorepos can also be called.) * Call tuplesort_end to terminate the operation and release memory/disk space. @@ -861,7 +861,7 @@ tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation, * * Advise tuplesort that at most the first N result tuples are required. * - * Must be called before inserting any tuples. (Actually, we could allow it + * Must be called before inserting any tuples. 
(Actually, we could allow it * as long as the sort hasn't spilled to disk, but there seems no need for * delayed calls at the moment.) * @@ -978,7 +978,7 @@ grow_memtuples(Tuplesortstate *state) * We need to be sure that we do not cause LACKMEM to become true, else * the space management algorithm will go nuts. We assume here that the * memory chunk overhead associated with the memtuples array is constant - * and so there will be no unexpected addition to what we ask for. (The + * and so there will be no unexpected addition to what we ask for. (The * minimum array size established in tuplesort_begin_common is large * enough to force palloc to treat it as a separate chunk, so this * assumption should be good. But let's check it.) @@ -1115,7 +1115,7 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple) case TSS_INITIAL: /* - * Save the tuple into the unsorted array. First, grow the array + * Save the tuple into the unsorted array. First, grow the array * as needed. Note that we try to grow the array when there is * still one free slot remaining --- if we fail, there'll still be * room to store the incoming tuple, and then we'll switch to @@ -1136,7 +1136,7 @@ puttuple_common(Tuplesortstate *state, SortTuple *tuple) * enough tuples to meet the bound. * * Note that once we enter TSS_BOUNDED state we will always try to - * complete the sort that way. In the worst case, if later input + * complete the sort that way. In the worst case, if later input * tuples are larger than earlier ones, this might cause us to * exceed workMem significantly. */ @@ -1267,7 +1267,7 @@ tuplesort_performsort(Tuplesortstate *state) /* * We were able to accumulate all the tuples required for output - * in memory, using a heap to eliminate excess tuples. Now we + * in memory, using a heap to eliminate excess tuples. Now we * have to transform the heap to a properly-sorted array. */ sort_bounded_heap(state); @@ -1281,7 +1281,7 @@ tuplesort_performsort(Tuplesortstate *state) case TSS_BUILDRUNS: /* - * Finish tape-based sort. First, flush all tuples remaining in + * Finish tape-based sort. First, flush all tuples remaining in * memory out to tape; then merge until we have a single remaining * run (or, if !randomAccess, one run per tape). Note that * mergeruns sets the correct state->status. @@ -1342,7 +1342,7 @@ tuplesort_gettuple_common(Tuplesortstate *state, bool forward, /* * Complain if caller tries to retrieve more tuples than - * originally asked for in a bounded sort. This is because + * originally asked for in a bounded sort. This is because * returning EOF here might be the wrong thing. */ if (state->bounded && state->current >= state->bound) @@ -1548,7 +1548,7 @@ tuplesort_gettupleslot(Tuplesortstate *state, bool forward, /* * Fetch the next tuple in either forward or back direction. - * Returns NULL if no more tuples. If *should_free is set, the + * Returns NULL if no more tuples. If *should_free is set, the * caller must pfree the returned tuple when done with it. */ HeapTuple @@ -1567,7 +1567,7 @@ tuplesort_getheaptuple(Tuplesortstate *state, bool forward, bool *should_free) /* * Fetch the next index tuple in either forward or back direction. - * Returns NULL if no more tuples. If *should_free is set, the + * Returns NULL if no more tuples. If *should_free is set, the * caller must pfree the returned tuple when done with it. 
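The bounded-sort behavior these comments describe, keeping at most N tuples in a max-heap and discarding or displacing as input arrives, reduces to a few lines over plain ints. bounded_put and sift_down are illustrative, not tuplesort.c's heap code.

/* Restore the max-heap property downward from slot i. */
static void
sift_down(int *heap, int n, int i)
{
    for (;;)
    {
        int l = 2 * i + 1, r = 2 * i + 2, biggest = i;

        if (l < n && heap[l] > heap[biggest]) biggest = l;
        if (r < n && heap[r] > heap[biggest]) biggest = r;
        if (biggest == i) break;
        { int t = heap[i]; heap[i] = heap[biggest]; heap[biggest] = t; }
        i = biggest;
    }
}

/* Keep the `bound` smallest values seen so far. */
static void
bounded_put(int *heap, int *n, int bound, int value)
{
    if (*n < bound)
    {
        int i = (*n)++;

        heap[i] = value;             /* sift up into place */
        while (i > 0 && heap[(i - 1) / 2] < heap[i])
        {
            int p = (i - 1) / 2;
            int t = heap[i]; heap[i] = heap[p]; heap[p] = t;
            i = p;
        }
    }
    else if (value < heap[0])
    {
        heap[0] = value;             /* displace the current maximum */
        sift_down(heap, *n, 0);
    }
    /* else: value can't be among the smallest `bound`; drop it */
}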
*/ IndexTuple @@ -1638,7 +1638,7 @@ tuplesort_merge_order(long allowedMem) /* * We need one tape for each merge input, plus another one for the output, - * and each of these tapes needs buffer space. In addition we want + * and each of these tapes needs buffer space. In addition we want * MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't * count). * @@ -1692,7 +1692,7 @@ inittapes(Tuplesortstate *state) * don't decrease it to the point that we have no room for tuples. (That * case is only likely to occur if sorting pass-by-value Datums; in all * other scenarios the memtuples[] array is unlikely to occupy more than - * half of allowedMem. In the pass-by-value case it's not important to + * half of allowedMem. In the pass-by-value case it's not important to * account for tuple space, so we don't care if LACKMEM becomes * inaccurate.) */ @@ -1816,7 +1816,7 @@ mergeruns(Tuplesortstate *state) /* * If we produced only one initial run (quite likely if the total data * volume is between 1X and 2X workMem), we can just use that tape as the - * finished output, rather than doing a useless merge. (This obvious + * finished output, rather than doing a useless merge. (This obvious * optimization is not in Knuth's algorithm.) */ if (state->currentRun == 1) @@ -1922,7 +1922,7 @@ mergeruns(Tuplesortstate *state) * the loop without performing the last iteration of step D6, we have not * rearranged the tape unit assignment, and therefore the result is on * TAPE[T]. We need to do it this way so that we can freeze the final - * output tape while rewinding it. The last iteration of step D6 would be + * output tape while rewinding it. The last iteration of step D6 would be * a waste of cycles anyway... */ state->result_tape = state->tp_tapenum[state->tapeRange]; @@ -2006,7 +2006,7 @@ mergeonerun(Tuplesortstate *state) * beginmerge - initialize for a merge pass * * We decrease the counts of real and dummy runs for each tape, and mark - * which tapes contain active input runs in mergeactive[]. Then, load + * which tapes contain active input runs in mergeactive[]. Then, load * as many tuples as we can from each active input tape, and finally * fill the merge heap with the first tuple from each active tape. */ @@ -2099,7 +2099,7 @@ beginmerge(Tuplesortstate *state) * This routine exists to improve sequentiality of reads during a merge pass, * as explained in the header comments of this file. Load tuples from each * active source tape until the tape's run is exhausted or it has used up - * its fair share of available memory. In any case, we guarantee that there + * its fair share of available memory. In any case, we guarantee that there * is at least one preread tuple available from each unexhausted input tape. * * We invoke this routine at the start of a merge pass for initial load, @@ -2362,7 +2362,7 @@ tuplesort_get_stats(Tuplesortstate *state, * accurately once we have begun to return tuples to the caller (since we * don't account for pfree's the caller is expected to do), so we cannot * rely on availMem in a disk sort. This does not seem worth the overhead - * to fix. Is it worth creating an API for the memory context code to + * to fix. Is it worth creating an API for the memory context code to * tell us how much is actually used in sortcontext? */ if (state->tapeset) @@ -2400,7 +2400,7 @@ tuplesort_get_stats(Tuplesortstate *state, /* * Heap manipulation routines, per Knuth's Algorithm 5.2.3H. * - * Compare two SortTuples. 
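The merge-order arithmetic quoted in the tuplesort_merge_order hunk divides the memory budget by the per-input-tape cost, after reserving one buffer for the output tape. A hedged sketch with made-up constants (tuplesort.c derives its own from BLCKSZ and MERGE_BUFFER_SIZE):

#define TAPE_BUFFER_BYTES   8192L
#define MERGE_BUFFER_BYTES  (8192L * 32)
#define MIN_MERGE_ORDER     6L

static int
merge_order(long allowed_mem)
{
    /* one buffer for the output tape, then per-input buffer + workspace */
    long m = (allowed_mem - TAPE_BUFFER_BYTES) /
             (MERGE_BUFFER_BYTES + TAPE_BUFFER_BYTES);

    return (int) ((m < MIN_MERGE_ORDER) ? MIN_MERGE_ORDER : m);
}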
If checkIndex is true, use the tuple index + * Compare two SortTuples. If checkIndex is true, use the tuple index * as the front of the sort key; otherwise, no. */ @@ -2504,7 +2504,7 @@ sort_bounded_heap(Tuplesortstate *state) /* * Insert a new tuple into an empty or existing heap, maintaining the - * heap invariant. Caller is responsible for ensuring there's room. + * heap invariant. Caller is responsible for ensuring there's room. * * Note: we assume *tuple is a temporary variable that can be scribbled on. * For some callers, tuple actually points to a memtuples[] entry above the @@ -2609,7 +2609,7 @@ markrunend(Tuplesortstate *state, int tapenum) /* - * Set up for an external caller of ApplySortFunction. This function + * Set up for an external caller of ApplySortFunction. This function * basically just exists to localize knowledge of the encoding of sk_flags * used in this module. */ diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c index 4d6e3aa0e4b..9739c6bbed6 100644 --- a/src/backend/utils/sort/tuplestore.c +++ b/src/backend/utils/sort/tuplestore.c @@ -8,7 +8,7 @@ * a dumbed-down version of tuplesort.c; it does no sorting of tuples * but can only store and regurgitate a sequence of tuples. However, * because no sort is required, it is allowed to start reading the sequence - * before it has all been written. This is particularly useful for cursors, + * before it has all been written. This is particularly useful for cursors, * because it allows random access within the already-scanned portion of * a query without having to process the underlying scan to completion. * Also, it is possible to support multiple independent read pointers. @@ -17,7 +17,7 @@ * space limit specified by the caller. * * The (approximate) amount of memory allowed to the tuplestore is specified - * in kilobytes by the caller. We absorb tuples and simply store them in an + * in kilobytes by the caller. We absorb tuples and simply store them in an * in-memory array as long as we haven't exceeded maxKBytes. If we do exceed * maxKBytes, we dump all the tuples into a temp file and then read from that * when needed. @@ -29,7 +29,7 @@ * When the caller requests backward-scan capability, we write the temp file * in a format that allows either forward or backward scan. Otherwise, only * forward scan is allowed. A request for backward scan must be made before - * putting any tuples into the tuplestore. Rewind is normally allowed but + * putting any tuples into the tuplestore. Rewind is normally allowed but * can be turned off via tuplestore_set_eflags; turning off rewind for all * read pointers enables truncation of the tuplestore at the oldest read point * for minimal memory usage. (The caller must explicitly call tuplestore_trim @@ -63,7 +63,7 @@ /* - * Possible states of a Tuplestore object. These denote the states that + * Possible states of a Tuplestore object. These denote the states that * persist between calls of Tuplestore routines. */ typedef enum @@ -82,7 +82,7 @@ typedef enum * * Special case: if eof_reached is true, then the pointer's read position is * implicitly equal to the write position, and current/file/offset aren't - * maintained. This way we need not update all the read pointers each time + * maintained. This way we need not update all the read pointers each time * we write. */ typedef struct @@ -126,7 +126,7 @@ struct Tuplestorestate void *(*copytup) (Tuplestorestate *state, void *tup); /* - * Function to write a stored tuple onto tape. 
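The eof_reached trick in tuplestore's read pointers can be shown in miniature: a pointer at EOF implicitly tracks the write position, so appending tuples never has to update it. Illustrative names only.

#include <stdbool.h>

typedef struct ReadPointerSketch
{
    bool eof_reached;   /* if set, position tracks the write side */
    int  current;       /* valid only when !eof_reached */
} ReadPointerSketch;

/* Appends never touch pointers that are at EOF. */
static int
read_pointer_position(const ReadPointerSketch *ptr, int write_pos)
{
    return ptr->eof_reached ? write_pos : ptr->current;
}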
The representation of the + * Function to write a stored tuple onto tape. The representation of the * tuple on tape need not be the same as it is in memory; requirements on * the tape representation are given below. After writing the tuple, * pfree() it, and increase state->availMem by the amount of memory space @@ -194,7 +194,7 @@ struct Tuplestorestate * If state->backward is true, then the stored representation of * the tuple must be followed by another "unsigned int" that is a copy of the * length --- so the total tape space used is actually sizeof(unsigned int) - * more than the stored length value. This allows read-backwards. When + * more than the stored length value. This allows read-backwards. When * state->backward is not set, the write/read routines may omit the extra * length word. * @@ -290,7 +290,7 @@ tuplestore_begin_common(int eflags, bool interXact, int maxKBytes) * tuple store are allowed. * * interXact: if true, the files used for on-disk storage persist beyond the - * end of the current transaction. NOTE: It's the caller's responsibility to + * end of the current transaction. NOTE: It's the caller's responsibility to * create such a tuplestore in a memory context and resource owner that will * also survive transaction boundaries, and to ensure the tuplestore is closed * when it's no longer wanted. @@ -329,7 +329,7 @@ tuplestore_begin_heap(bool randomAccess, bool interXact, int maxKBytes) * any data into the tuplestore. * * eflags is a bitmask following the meanings used for executor node - * startup flags (see executor.h). tuplestore pays attention to these bits: + * startup flags (see executor.h). tuplestore pays attention to these bits: * EXEC_FLAG_REWIND need rewind to start * EXEC_FLAG_BACKWARD need backward fetch * If tuplestore_set_eflags is not called, REWIND is allowed, and BACKWARD @@ -739,7 +739,7 @@ tuplestore_puttuple_common(Tuplestorestate *state, void *tuple) /* * Fetch the next tuple in either forward or back direction. - * Returns NULL if no more tuples. If should_free is set, the + * Returns NULL if no more tuples. If should_free is set, the * caller must pfree the returned tuple when done with it. * * Backward scan is only allowed if randomAccess was set true or diff --git a/src/backend/utils/time/combocid.c b/src/backend/utils/time/combocid.c index d9b37b2ba3f..772ae17ba43 100644 --- a/src/backend/utils/time/combocid.c +++ b/src/backend/utils/time/combocid.c @@ -15,7 +15,7 @@ * this module. * * To allow reusing existing combo cids, we also keep a hash table that - * maps cmin,cmax pairs to combo cids. This keeps the data structure size + * maps cmin,cmax pairs to combo cids. This keeps the data structure size * reasonable in most cases, since the number of unique pairs used by any * one transaction is likely to be small. * diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c index 5917b014fdc..2ce477f3991 100644 --- a/src/backend/utils/time/snapmgr.c +++ b/src/backend/utils/time/snapmgr.c @@ -8,9 +8,9 @@ * (tracked by separate refcounts on each snapshot), its memory can be freed. * * These arrangements let us reset MyProc->xmin when there are no snapshots - * referenced by this transaction. (One possible improvement would be to be + * referenced by this transaction. (One possible improvement would be to be * able to advance Xmin when the snapshot with the earliest Xmin is no longer - * referenced. That's a bit harder though, it requires more locking, and + * referenced. 
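The combo-CID mapping that combocid.c describes pairs an array (combo id to cmin/cmax) with a reverse lookup for reuse. A sketch using a linear reverse search where the real code keeps a hash table; all names and the fixed array size are illustrative.

typedef unsigned int CommandIdSketch;

typedef struct ComboCidPairSketch
{
    CommandIdSketch cmin;
    CommandIdSketch cmax;
} ComboCidPairSketch;

static ComboCidPairSketch combo_cids[1024];
static int combo_cids_used = 0;

/* Map a (cmin, cmax) pair to a combo id, reusing any existing entry. */
static int
get_combo_command_id(CommandIdSketch cmin, CommandIdSketch cmax)
{
    int i;

    for (i = 0; i < combo_cids_used; i++)
        if (combo_cids[i].cmin == cmin && combo_cids[i].cmax == cmax)
            return i;               /* reuse keeps the array small */

    if (combo_cids_used >= (int) (sizeof(combo_cids) / sizeof(combo_cids[0])))
        return -1;                  /* out of slots; the real code errors */

    combo_cids[combo_cids_used].cmin = cmin;
    combo_cids[combo_cids_used].cmax = cmax;
    return combo_cids_used++;
}

/* Decoding a combo id back to cmin is a plain array lookup. */
static CommandIdSketch
combo_get_cmin(int combo_id)
{
    return combo_cids[combo_id].cmin;
}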
diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c
index 5917b014fdc..2ce477f3991 100644
--- a/src/backend/utils/time/snapmgr.c
+++ b/src/backend/utils/time/snapmgr.c
@@ -8,9 +8,9 @@
  * (tracked by separate refcounts on each snapshot), its memory can be freed.
  *
  * These arrangements let us reset MyProc->xmin when there are no snapshots
- * referenced by this transaction. (One possible improvement would be to be
+ * referenced by this transaction.  (One possible improvement would be to be
  * able to advance Xmin when the snapshot with the earliest Xmin is no longer
- * referenced. That's a bit harder though, it requires more locking, and
+ * referenced.  That's a bit harder though, it requires more locking, and
  * anyway it should be rather uncommon to keep snapshots referenced for too
  * long.)
  *
@@ -41,7 +41,7 @@
  * CurrentSnapshot points to the only snapshot taken in transaction-snapshot
  * mode, and to the latest one taken in a read-committed transaction.
  * SecondarySnapshot is a snapshot that's always up-to-date as of the current
- * instant, even in transaction-snapshot mode. It should only be used for
+ * instant, even in transaction-snapshot mode.  It should only be used for
  * special-purpose code (say, RI checking.)
  *
  * These SnapshotData structs are static to simplify memory allocation
@@ -60,7 +60,7 @@ static Snapshot SecondarySnapshot = NULL;
  * mode, we don't want it to say that BootstrapTransactionId is in progress.
  *
  * RecentGlobalXmin is initialized to InvalidTransactionId, to ensure that no
- * one tries to use a stale value. Readers should ensure that it has been set
+ * one tries to use a stale value.  Readers should ensure that it has been set
  * to something else before using it.
  */
 TransactionId TransactionXmin = FirstNormalTransactionId;
@@ -270,7 +270,7 @@ FreeSnapshot(Snapshot snapshot)
  *
  * If the passed snapshot is a statically-allocated one, or it is possibly
  * subject to a future command counter update, create a new long-lived copy
- * with active refcount=1. Otherwise, only increment the refcount.
+ * with active refcount=1.  Otherwise, only increment the refcount.
  */
 void
 PushActiveSnapshot(Snapshot snap)
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index b396e7ecf78..d7cb7a4a9c5 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -20,7 +20,7 @@
  * TransactionIdDidCommit will both return true.  If we check only
  * TransactionIdDidCommit, we could consider a tuple committed when a
  * later GetSnapshotData call will still think the originating transaction
- * is in progress, which leads to application-level inconsistency. The
+ * is in progress, which leads to application-level inconsistency.  The
  * upshot is that we gotta check TransactionIdIsInProgress first in all
  * code paths, except for a few cases where we are looking at
  * subtransactions of our own main transaction and so there can't be any
@@ -87,13 +87,13 @@ static bool XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot);
  * buffer, so we can't use the LSN to interlock this; we have to just refrain
  * from setting the hint bit until some future re-examination of the tuple.
  *
- * We can always set hint bits when marking a transaction aborted. (Some
+ * We can always set hint bits when marking a transaction aborted.  (Some
  * code in heapam.c relies on that!)
  *
  * Also, if we are cleaning up HEAP_MOVED_IN or HEAP_MOVED_OFF entries, then
  * we can always set the hint bits, since pre-9.0 VACUUM FULL always used
  * synchronous commits and didn't move tuples that weren't previously
- * hinted. (This is not known by this subroutine, but is applied by its
+ * hinted.  (This is not known by this subroutine, but is applied by its
  * callers.)  Note: old-style VACUUM FULL is gone, but we have to keep this
  * module's support for MOVED_OFF/MOVED_IN flag bits for as long as we
  * support in-place update from pre-9.0 databases.
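The tqual.c hunk above pins down an ordering rule, not just punctuation: a
transaction that has committed but not yet disappeared from the proc array
answers true to both TransactionIdIsInProgress and TransactionIdDidCommit,
so visibility code must ask the in-progress question first.  A
self-contained sketch of that safe ordering, with fabricated stubs standing
in for the two real predicates (only the call order mirrors tqual.c):

#include <stdbool.h>

typedef unsigned int TransactionId;

/* Toy stand-ins: xid 100 has committed but still shows as in progress. */
static bool in_progress_stub(TransactionId xid) { return xid == 100; }
static bool did_commit_stub(TransactionId xid)  { return xid <= 100; }

static bool
xmin_looks_committed(TransactionId xmin)
{
	/*
	 * Ask "in progress?" before "did commit?".  For xid 100 this returns
	 * false, the safe answer: the caller keeps treating the tuple as not
	 * yet committed rather than exposing it too early.
	 */
	if (in_progress_stub(xmin))
		return false;
	return did_commit_stub(xmin);
}

Reversing the two tests is exactly the bug the comment warns about: xid 100
would be reported committed even though a concurrent GetSnapshotData still
counts it as running.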
@@ -490,7 +490,7 @@ HeapTupleSatisfiesAny(HeapTupleHeader tuple, Snapshot snapshot, Buffer buffer)
  * This is a simplified version that only checks for VACUUM moving conditions.
  * It's appropriate for TOAST usage because TOAST really doesn't want to do
  * its own time qual checks; if you can see the main table row that contains
- * a TOAST reference, you should be able to see the TOASTed value. However,
+ * a TOAST reference, you should be able to see the TOASTed value.  However,
  * vacuuming a TOAST table is independent of the main table, and in case such
  * a vacuum fails partway through, we'd better do this much checking.
  *
@@ -1060,7 +1060,7 @@ HeapTupleSatisfiesMVCC(HeapTupleHeader tuple, Snapshot snapshot,
  * we mainly want to know is if a tuple is potentially visible to *any*
  * running transaction.  If so, it can't be removed yet by VACUUM.
  *
- * OldestXmin is a cutoff XID (obtained from GetOldestXmin()). Tuples
+ * OldestXmin is a cutoff XID (obtained from GetOldestXmin()).  Tuples
  * deleted by XIDs >= OldestXmin are deemed "recently dead"; they might
  * still be visible to some open transaction, so we can't remove them,
  * even if we see that the deleting transaction has committed.
@@ -1146,7 +1146,7 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
 	}
 
 	/*
-	 * Okay, the inserter committed, so it was good at some point. Now what
+	 * Okay, the inserter committed, so it was good at some point.  Now what
 	 * about the deleting transaction?
 	 */
 	if (tuple->t_infomask & HEAP_XMAX_INVALID)
@@ -1245,7 +1245,7 @@ XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot)
 
 	/*
 	 * Make a quick range check to eliminate most XIDs without looking at the
-	 * xip arrays. Note that this is OK even if we convert a subxact XID to
+	 * xip arrays.  Note that this is OK even if we convert a subxact XID to
 	 * its parent below, because a subxact with XID < xmin has surely also got
 	 * a parent with XID < xmin, while one with XID >= xmax must belong to a
	 * parent that was not yet committed at the time of this snapshot.
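The XidInMVCCSnapshot hunk closing the section describes the fast path: an
XID below the snapshot's xmin was already finished when the snapshot was
taken, and one at or above xmax had not yet started, so only the narrow
band in between requires searching the xip array.  A sketch under stated
simplifications (plain uint32 XIDs, no wraparound arithmetic, no
subxact-to-parent conversion; the type and function names are invented):

#include <stdbool.h>
#include <stdint.h>

typedef struct
{
	uint32_t	xmin;			/* all XIDs < xmin are finished */
	uint32_t	xmax;			/* all XIDs >= xmax are yet to start */
	uint32_t   *xip;			/* in-progress XIDs within [xmin, xmax) */
	int			xcnt;			/* number of entries in xip[] */
} ToySnapshot;

/* Returns true if xid counts as still running under this snapshot. */
static bool
xid_in_snapshot(uint32_t xid, const ToySnapshot *snap)
{
	int			i;

	/* Quick range check: most XIDs never touch the xip array. */
	if (xid < snap->xmin)
		return false;			/* finished before the snapshot was taken */
	if (xid >= snap->xmax)
		return true;			/* started after the snapshot was taken */

	/* Only the in-between band needs the per-snapshot XID list. */
	for (i = 0; i < snap->xcnt; i++)
		if (snap->xip[i] == xid)
			return true;
	return false;
}

As the comment argues, the range check stays valid after mapping a subxact
XID to its parent: a subxact below xmin implies a parent below xmin, and
one at or above xmax implies a parent uncommitted as of the snapshot.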