Diffstat (limited to 'src/backend/utils')
-rw-r--r--  src/backend/utils/adt/arrayfuncs.c                                    109
-rw-r--r--  src/backend/utils/adt/arrayutils.c                                      7
-rw-r--r--  src/backend/utils/adt/datetime.c                                       18
-rw-r--r--  src/backend/utils/adt/formatting.c                                     15
-rw-r--r--  src/backend/utils/adt/geo_selfuncs.c                                    4
-rw-r--r--  src/backend/utils/adt/numeric.c                                        40
-rw-r--r--  src/backend/utils/adt/oid.c                                            10
-rw-r--r--  src/backend/utils/adt/pg_lzcompress.c                                   8
-rw-r--r--  src/backend/utils/adt/regexp.c                                          8
-rw-r--r--  src/backend/utils/adt/ri_triggers.c                                    26
-rw-r--r--  src/backend/utils/adt/ruleutils.c                                      10
-rw-r--r--  src/backend/utils/adt/selfuncs.c                                       54
-rw-r--r--  src/backend/utils/adt/timestamp.c                                      43
-rw-r--r--  src/backend/utils/adt/varlena.c                                        28
-rw-r--r--  src/backend/utils/cache/catcache.c                                      6
-rw-r--r--  src/backend/utils/cache/inval.c                                         6
-rw-r--r--  src/backend/utils/cache/relcache.c                                     52
-rw-r--r--  src/backend/utils/cache/typcache.c                                     12
-rw-r--r--  src/backend/utils/error/elog.c                                          8
-rw-r--r--  src/backend/utils/fmgr/fmgr.c                                           6
-rw-r--r--  src/backend/utils/hash/dynahash.c                                       6
-rw-r--r--  src/backend/utils/init/flatfiles.c                                     10
-rw-r--r--  src/backend/utils/init/miscinit.c                                      43
-rw-r--r--  src/backend/utils/init/postinit.c                                       6
-rw-r--r--  src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c           6
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c  4
-rw-r--r--  src/backend/utils/misc/guc.c                                           19
-rw-r--r--  src/backend/utils/misc/ps_status.c                                      4
-rw-r--r--  src/backend/utils/mmgr/portalmem.c                                     28
-rw-r--r--  src/backend/utils/resowner/resowner.c                                  16
-rw-r--r--  src/backend/utils/sort/tuplesort.c                                     11
-rw-r--r--  src/backend/utils/sort/tuplestore.c                                    30
-rw-r--r--  src/backend/utils/time/tqual.c                                         18
33 files changed, 336 insertions, 335 deletions
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index ec7009816b8..1b8274ba982 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.125 2005/11/19 19:44:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.126 2005/11/22 18:17:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -34,7 +34,7 @@
/*
* GUC parameter
*/
-bool Array_nulls = true;
+bool Array_nulls = true;
/*
* Local definitions
@@ -68,9 +68,9 @@ static void ReadArrayBinary(StringInfo buf, int nitems,
Datum *values, bool *nulls,
bool *hasnulls, int32 *nbytes);
static void CopyArrayEls(ArrayType *array,
- Datum *values, bool *nulls, int nitems,
- int typlen, bool typbyval, char typalign,
- bool freedata);
+ Datum *values, bool *nulls, int nitems,
+ int typlen, bool typbyval, char typalign,
+ bool freedata);
static bool array_get_isnull(const bits8 *nullbitmap, int offset);
static void array_set_isnull(bits8 *nullbitmap, int offset, bool isNull);
static Datum ArrayCast(char *value, bool byval, int len);
@@ -78,26 +78,26 @@ static int ArrayCastAndSet(Datum src,
int typlen, bool typbyval, char typalign,
char *dest);
static char *array_seek(char *ptr, int offset, bits8 *nullbitmap, int nitems,
- int typlen, bool typbyval, char typalign);
-static int array_nelems_size(char *ptr, int offset, bits8 *nullbitmap,
- int nitems, int typlen, bool typbyval, char typalign);
-static int array_copy(char *destptr, int nitems,
- char *srcptr, int offset, bits8 *nullbitmap,
- int typlen, bool typbyval, char typalign);
+ int typlen, bool typbyval, char typalign);
+static int array_nelems_size(char *ptr, int offset, bits8 *nullbitmap,
+ int nitems, int typlen, bool typbyval, char typalign);
+static int array_copy(char *destptr, int nitems,
+ char *srcptr, int offset, bits8 *nullbitmap,
+ int typlen, bool typbyval, char typalign);
static int array_slice_size(char *arraydataptr, bits8 *arraynullsptr,
- int ndim, int *dim, int *lb,
- int *st, int *endp,
- int typlen, bool typbyval, char typalign);
+ int ndim, int *dim, int *lb,
+ int *st, int *endp,
+ int typlen, bool typbyval, char typalign);
static void array_extract_slice(ArrayType *newarray,
- int ndim, int *dim, int *lb,
- char *arraydataptr, bits8 *arraynullsptr,
- int *st, int *endp,
- int typlen, bool typbyval, char typalign);
+ int ndim, int *dim, int *lb,
+ char *arraydataptr, bits8 *arraynullsptr,
+ int *st, int *endp,
+ int typlen, bool typbyval, char typalign);
static void array_insert_slice(ArrayType *destArray, ArrayType *origArray,
- ArrayType *srcArray,
- int ndim, int *dim, int *lb,
- int *st, int *endp,
- int typlen, bool typbyval, char typalign);
+ ArrayType *srcArray,
+ int ndim, int *dim, int *lb,
+ int *st, int *endp,
+ int typlen, bool typbyval, char typalign);
static int array_cmp(FunctionCallInfo fcinfo);
static Datum array_type_length_coerce_internal(ArrayType *src,
int32 desttypmod,
@@ -181,8 +181,8 @@ array_in(PG_FUNCTION_ARGS)
* Otherwise, we require the input to be in curly-brace style, and we
* prescan the input to determine dimensions.
*
- * Dimension info takes the form of one or more [n] or [m:n] items.
- * The outer loop iterates once per dimension item.
+ * Dimension info takes the form of one or more [n] or [m:n] items. The
+ * outer loop iterates once per dimension item.
*/
p = string_save;
ndim = 0;
@@ -644,9 +644,9 @@ ReadArrayStr(char *arrayStr,
* in-place within arrayStr to do this. srcptr is the current scan point,
* and dstptr is where we are copying to.
*
- * We also want to suppress leading and trailing unquoted whitespace.
- * We use the leadingspace flag to suppress leading space. Trailing space
- * is tracked by using dstendptr to point to the last significant output
+ * We also want to suppress leading and trailing unquoted whitespace. We
+ * use the leadingspace flag to suppress leading space. Trailing space is
+ * tracked by using dstendptr to point to the last significant output
* character.
*
* The error checking in this routine is mostly pro-forma, since we expect
@@ -688,7 +688,7 @@ ReadArrayStr(char *arrayStr,
/* Treat the escaped character as non-whitespace */
leadingspace = false;
dstendptr = dstptr;
- hasquoting = true; /* can't be a NULL marker */
+ hasquoting = true; /* can't be a NULL marker */
break;
case '\"':
in_quotes = !in_quotes;
@@ -703,7 +703,7 @@ ReadArrayStr(char *arrayStr,
*/
dstendptr = dstptr;
}
- hasquoting = true; /* can't be a NULL marker */
+ hasquoting = true; /* can't be a NULL marker */
srcptr++;
break;
case '{':
@@ -783,7 +783,7 @@ ReadArrayStr(char *arrayStr,
errmsg("malformed array literal: \"%s\"",
origStr)));
- if (Array_nulls && !hasquoting &&
+ if (Array_nulls && !hasquoting &&
pg_strcasecmp(itemstart, "NULL") == 0)
{
/* it's a NULL item */
@@ -866,7 +866,7 @@ CopyArrayEls(ArrayType *array,
{
if (nulls && nulls[i])
{
- if (!bitmap) /* shouldn't happen */
+ if (!bitmap) /* shouldn't happen */
elog(ERROR, "null array element where not supported");
/* bitmap bit stays 0 */
}
@@ -912,6 +912,7 @@ array_out(PG_FUNCTION_ARGS)
*retval,
**values,
dims_str[(MAXDIM * 33) + 2];
+
/*
* 33 per dim since we assume 15 digits per number + ':' +'[]'
*
@@ -1024,9 +1025,9 @@ array_out(PG_FUNCTION_ARGS)
/* count data plus backslashes; detect chars needing quotes */
if (values[i][0] == '\0')
- needquote = true; /* force quotes for empty string */
+ needquote = true; /* force quotes for empty string */
else if (pg_strcasecmp(values[i], "NULL") == 0)
- needquote = true; /* force quotes for literal NULL */
+ needquote = true; /* force quotes for literal NULL */
else
needquote = false;
@@ -2158,12 +2159,12 @@ array_set(ArrayType *array,
/*
* Fill in nulls bitmap if needed
*
- * Note: it's possible we just replaced the last NULL with a non-NULL,
- * and could get rid of the bitmap. Seems not worth testing for though.
+ * Note: it's possible we just replaced the last NULL with a non-NULL, and
+ * could get rid of the bitmap. Seems not worth testing for though.
*/
if (newhasnulls)
{
- bits8 *newnullbitmap = ARR_NULLBITMAP(newarray);
+ bits8 *newnullbitmap = ARR_NULLBITMAP(newarray);
array_set_isnull(newnullbitmap, offset, isNull);
if (extendbefore)
@@ -2176,8 +2177,8 @@ array_set(ArrayType *array,
oldnullbitmap, 0,
offset);
if (!extendafter)
- array_bitmap_copy(newnullbitmap, offset+1,
- oldnullbitmap, offset+1,
+ array_bitmap_copy(newnullbitmap, offset + 1,
+ oldnullbitmap, offset + 1,
oldnitems - offset - 1);
}
}
@@ -2471,8 +2472,8 @@ array_set_slice(ArrayType *array,
/* fill in nulls bitmap if needed */
if (newhasnulls)
{
- bits8 *newnullbitmap = ARR_NULLBITMAP(newarray);
- bits8 *oldnullbitmap = ARR_NULLBITMAP(array);
+ bits8 *newnullbitmap = ARR_NULLBITMAP(newarray);
+ bits8 *oldnullbitmap = ARR_NULLBITMAP(array);
array_bitmap_copy(newnullbitmap, 0,
oldnullbitmap, 0,
@@ -2480,8 +2481,8 @@ array_set_slice(ArrayType *array,
array_bitmap_copy(newnullbitmap, itemsbefore,
ARR_NULLBITMAP(srcArray), 0,
nsrcitems);
- array_bitmap_copy(newnullbitmap, itemsbefore+nsrcitems,
- oldnullbitmap, itemsbefore+nolditems,
+ array_bitmap_copy(newnullbitmap, itemsbefore + nsrcitems,
+ oldnullbitmap, itemsbefore + nolditems,
itemsafter);
}
}
@@ -2632,7 +2633,7 @@ array_map(FunctionCallInfo fcinfo, Oid inpType, Oid retType,
*/
if (fcinfo->flinfo->fn_strict)
{
- int j;
+ int j;
for (j = 0; j < fcinfo->nargs; j++)
{
@@ -2922,7 +2923,7 @@ deconstruct_array(ArrayType *array,
else
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
- errmsg("NULL array element not allowed in this context")));
+ errmsg("NULL array element not allowed in this context")));
}
else
{
@@ -3319,10 +3320,10 @@ array_cmp(FunctionCallInfo fcinfo)
}
/*
- * If arrays contain same data (up to end of shorter one), apply additional
- * rules to sort by dimensionality. The relative significance of the
- * different bits of information is historical; mainly we just care that
- * we don't say "equal" for arrays of different dimensionality.
+ * If arrays contain same data (up to end of shorter one), apply
+ * additional rules to sort by dimensionality. The relative significance
+ * of the different bits of information is historical; mainly we just care
+ * that we don't say "equal" for arrays of different dimensionality.
*/
if (result == 0)
{
@@ -3545,7 +3546,7 @@ array_copy(char *destptr, int nitems,
*
* Note: this could certainly be optimized using standard bitblt methods.
* However, it's not clear that the typical Postgres array has enough elements
- * to make it worth worrying too much. For the moment, KISS.
+ * to make it worth worrying too much. For the moment, KISS.
*/
void
array_bitmap_copy(bits8 *destbitmap, int destoffset,
@@ -3706,7 +3707,7 @@ array_extract_slice(ArrayType *newarray,
src_offset = ArrayGetOffset(ndim, dim, lb, st);
srcdataptr = array_seek(arraydataptr, 0, arraynullsptr, src_offset,
- typlen, typbyval, typalign);
+ typlen, typbyval, typalign);
mda_get_prod(ndim, dim, prod);
mda_get_range(ndim, span, st, endp);
mda_get_offset_values(ndim, dist, prod, span);
@@ -3742,7 +3743,7 @@ array_extract_slice(ArrayType *newarray,
* Insert a slice into an array.
*
* ndim/dim[]/lb[] are dimensions of the original array. A new array with
- * those same dimensions is to be constructed. destArray must already
+ * those same dimensions is to be constructed. destArray must already
* have been allocated and its header initialized.
*
* st[]/endp[] identify the slice to be replaced. Elements within the slice
@@ -3969,8 +3970,8 @@ array_type_length_coerce_internal(ArrayType *src,
/*
* Use array_map to apply the function to each array element.
*
- * We pass on the desttypmod and isExplicit flags whether or not the function
- * wants them.
+ * We pass on the desttypmod and isExplicit flags whether or not the
+ * function wants them.
*/
InitFunctionCallInfoData(locfcinfo, &my_extra->coerce_finfo, 3,
NULL, NULL);
@@ -4112,7 +4113,7 @@ accumArrayResult(ArrayBuildState *astate,
(astate->nelems + ARRAY_ELEMS_CHUNKSIZE) * sizeof(Datum));
astate->dnulls = (bool *)
repalloc(astate->dnulls,
- (astate->nelems + ARRAY_ELEMS_CHUNKSIZE) * sizeof(bool));
+ (astate->nelems + ARRAY_ELEMS_CHUNKSIZE) * sizeof(bool));
}
}
diff --git a/src/backend/utils/adt/arrayutils.c b/src/backend/utils/adt/arrayutils.c
index c7355968d78..aab0639a6d4 100644
--- a/src/backend/utils/adt/arrayutils.c
+++ b/src/backend/utils/adt/arrayutils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/arrayutils.c,v 1.19 2005/11/17 22:14:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/arrayutils.c,v 1.20 2005/11/22 18:17:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -82,7 +82,7 @@ ArrayGetNItems(int ndim, const int *dims)
ret = 1;
for (i = 0; i < ndim; i++)
{
- int64 prod;
+ int64 prod;
/* A negative dimension implies that UB-LB overflowed ... */
if (dims[i] < 0)
@@ -91,7 +91,8 @@ ArrayGetNItems(int ndim, const int *dims)
errmsg("array size exceeds the maximum allowed (%d)",
(int) MaxArraySize)));
- prod = (int64) ret * (int64) dims[i];
+ prod = (int64) ret *(int64) dims[i];
+
ret = (int32) prod;
if ((int64) ret != prod)
ereport(ERROR,
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index 5b3fc46d9c2..5fc8b2be3b9 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.160 2005/10/15 02:49:28 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.161 2005/11/22 18:17:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -3550,8 +3550,8 @@ EncodeDateTime(struct pg_tm * tm, fsec_t fsec, int *tzp, char **tzn, int style,
* Print fractional seconds if any. The field widths here should
* be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD, since
- * it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD,
+ * since it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -3602,8 +3602,8 @@ EncodeDateTime(struct pg_tm * tm, fsec_t fsec, int *tzp, char **tzn, int style,
* Print fractional seconds if any. The field widths here should
* be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD, since
- * it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD,
+ * since it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -3650,8 +3650,8 @@ EncodeDateTime(struct pg_tm * tm, fsec_t fsec, int *tzp, char **tzn, int style,
* Print fractional seconds if any. The field widths here should
* be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD, since
- * it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD,
+ * since it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -3706,8 +3706,8 @@ EncodeDateTime(struct pg_tm * tm, fsec_t fsec, int *tzp, char **tzn, int style,
* Print fractional seconds if any. The field widths here should
* be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD, since
- * it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD,
+ * since it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index de84afe42c9..224f3e5b3db 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1,7 +1,7 @@
/* -----------------------------------------------------------------------
* formatting.c
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.101 2005/10/20 15:59:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.102 2005/11/22 18:17:22 momjian Exp $
*
*
* Portions Copyright (c) 1999-2005, PostgreSQL Global Development Group
@@ -1326,8 +1326,8 @@ DCH_processor(FormatNode *node, char *inout, bool is_to_char,
* The input string is shorter than format picture, so it's good
* time to break this loop...
*
- * Note: this isn't relevant for TO_CHAR mode, beacuse it use 'inout'
- * allocated by format picture length.
+ * Note: this isn't relevant for TO_CHAR mode, beacuse it use
+ * 'inout' allocated by format picture length.
*/
break;
@@ -3752,8 +3752,8 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
* We need sign detection because determine exact position of post-sign is
* difficult:
*
- * FM9999.9999999S -> 123.001- 9.9S -> .5- FM9.999999MI
- * -> 5.01-
+ * FM9999.9999999S -> 123.001- 9.9S -> .5- FM9.999999MI ->
+ * 5.01-
*/
if (*Np->number == ' ' && Np->read_pre + Np->read_post > 0)
{
@@ -3797,8 +3797,9 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
*
* FM9.999999MI -> 5.01-
*
- * if (.... && IS_LSIGN(Np->Num)==FALSE) prevents read wrong formats like
- * to_number('1 -', '9S') where sign is not anchored to last number.
+ * if (.... && IS_LSIGN(Np->Num)==FALSE) prevents read wrong formats
+ * like to_number('1 -', '9S') where sign is not anchored to last
+ * number.
*/
else if (isread == FALSE && IS_LSIGN(Np->Num) == FALSE &&
(IS_PLUS(Np->Num) || IS_MINUS(Np->Num)))
diff --git a/src/backend/utils/adt/geo_selfuncs.c b/src/backend/utils/adt/geo_selfuncs.c
index 9aa33831379..449d05a6a0c 100644
--- a/src/backend/utils/adt/geo_selfuncs.c
+++ b/src/backend/utils/adt/geo_selfuncs.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/geo_selfuncs.c,v 1.25 2005/11/07 17:36:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/geo_selfuncs.c,v 1.26 2005/11/22 18:17:23 momjian Exp $
*
* XXX These are totally bogus. Perhaps someone will make them do
* something reasonable, someday.
@@ -22,7 +22,7 @@
/*
- * Selectivity functions for geometric operators. These are bogus -- unless
+ * Selectivity functions for geometric operators. These are bogus -- unless
* we know the actual key distribution in the index, we can't make a good
* prediction of the selectivity of these operators.
*
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index 8a69a936dc1..fb2e16ee53f 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -14,7 +14,7 @@
* Copyright (c) 1998-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.87 2005/11/17 22:14:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.88 2005/11/22 18:17:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -348,8 +348,8 @@ numeric_out(PG_FUNCTION_ARGS)
/*
* Get the number in the variable format.
*
- * Even if we didn't need to change format, we'd still need to copy the value
- * to have a modifiable copy for rounding. set_var_from_num() also
+ * Even if we didn't need to change format, we'd still need to copy the
+ * value to have a modifiable copy for rounding. set_var_from_num() also
* guarantees there is extra digit space in case we produce a carry out
* from rounding.
*/
@@ -459,7 +459,7 @@ numeric_send(PG_FUNCTION_ARGS)
* scale of the attribute have to be applied on the value.
*/
Datum
-numeric(PG_FUNCTION_ARGS)
+numeric (PG_FUNCTION_ARGS)
{
Numeric num = PG_GETARG_NUMERIC(0);
int32 typmod = PG_GETARG_INT32(1);
@@ -2961,10 +2961,10 @@ get_str_from_var(NumericVar *var, int dscale)
/*
* Allocate space for the result.
*
- * i is set to to # of decimal digits before decimal point. dscale is the #
- * of decimal digits we will print after decimal point. We may generate as
- * many as DEC_DIGITS-1 excess digits at the end, and in addition we need
- * room for sign, decimal point, null terminator.
+ * i is set to to # of decimal digits before decimal point. dscale is the
+ * # of decimal digits we will print after decimal point. We may generate
+ * as many as DEC_DIGITS-1 excess digits at the end, and in addition we
+ * need room for sign, decimal point, null terminator.
*/
i = (var->weight + 1) * DEC_DIGITS;
if (i <= 0)
@@ -3901,12 +3901,12 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
* INT_MAX is noticeably larger than NBASE*NBASE, this gives us headroom
* to avoid normalizing carries immediately.
*
- * We start with div[] containing one zero digit followed by the dividend's
- * digits (plus appended zeroes to reach the desired precision including
- * guard digits). Each step of the main loop computes an (approximate)
- * quotient digit and stores it into div[], removing one position of
- * dividend space. A final pass of carry propagation takes care of any
- * mistaken quotient digits.
+ * We start with div[] containing one zero digit followed by the
+ * dividend's digits (plus appended zeroes to reach the desired precision
+ * including guard digits). Each step of the main loop computes an
+ * (approximate) quotient digit and stores it into div[], removing one
+ * position of dividend space. A final pass of carry propagation takes
+ * care of any mistaken quotient digits.
*/
div = (int *) palloc0((div_ndigits + 1) * sizeof(int));
for (i = 0; i < var1ndigits; i++)
@@ -4433,8 +4433,8 @@ exp_var_internal(NumericVar *arg, NumericVar *result, int rscale)
*
* exp(x) = 1 + x + x^2/2! + x^3/3! + ...
*
- * Given the limited range of x, this should converge reasonably quickly. We
- * run the series until the terms fall below the local_rscale limit.
+ * Given the limited range of x, this should converge reasonably quickly.
+ * We run the series until the terms fall below the local_rscale limit.
*/
add_var(&const_one, &x, result);
set_var_from_var(&x, &xpow);
@@ -4522,11 +4522,11 @@ ln_var(NumericVar *arg, NumericVar *result, int rscale)
*
* z + z^3/3 + z^5/5 + ...
*
- * where z = (x-1)/(x+1) is in the range (approximately) -0.053 .. 0.048 due
- * to the above range-reduction of x.
+ * where z = (x-1)/(x+1) is in the range (approximately) -0.053 .. 0.048
+ * due to the above range-reduction of x.
*
- * The convergence of this is not as fast as one would like, but is tolerable
- * given that z is small.
+ * The convergence of this is not as fast as one would like, but is
+ * tolerable given that z is small.
*/
sub_var(&x, &const_one, result);
add_var(&x, &const_one, &elem);
diff --git a/src/backend/utils/adt/oid.c b/src/backend/utils/adt/oid.c
index e400c9a1b4f..c31dbf4f666 100644
--- a/src/backend/utils/adt/oid.c
+++ b/src/backend/utils/adt/oid.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.65 2005/11/17 22:14:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.66 2005/11/22 18:17:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -92,12 +92,12 @@ oidin_subr(const char *funcname, const char *s, char **endloc)
* case strtoul will not raise an error for some values that are out of
* the range of Oid.
*
- * For backwards compatibility, we want to accept inputs that are given with
- * a minus sign, so allow the input value if it matches after either
+ * For backwards compatibility, we want to accept inputs that are given
+ * with a minus sign, so allow the input value if it matches after either
* signed or unsigned extension to long.
*
- * To ensure consistent results on 32-bit and 64-bit platforms, make sure the
- * error message is the same as if strtoul() had returned ERANGE.
+ * To ensure consistent results on 32-bit and 64-bit platforms, make sure
+ * the error message is the same as if strtoul() had returned ERANGE.
*/
#if OID_MAX != ULONG_MAX
if (cvt != (unsigned long) result &&
diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c
index 48d93d0602c..dc3708deb3d 100644
--- a/src/backend/utils/adt/pg_lzcompress.c
+++ b/src/backend/utils/adt/pg_lzcompress.c
@@ -1,7 +1,7 @@
/* ----------
* pg_lzcompress.c -
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/pg_lzcompress.c,v 1.20 2005/10/15 02:49:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pg_lzcompress.c,v 1.21 2005/11/22 18:17:23 momjian Exp $
*
* This is an implementation of LZ compression for PostgreSQL.
* It uses a simple history table and generates 2-3 byte tags
@@ -782,9 +782,9 @@ pglz_get_next_decomp_char_from_lzdata(PGLZ_DecompState *dstate)
* function and a difference occurs early). Otherwise, all the checks,
* needed here, cause too much overhead.
*
- * Thus we decompress the entire rest at once into the temporary buffer
- * and change the decomp state to return the prepared data from the
- * buffer by the more simple calls to
+ * Thus we decompress the entire rest at once into the temporary
+ * buffer and change the decomp state to return the prepared data from
+ * the buffer by the more simple calls to
* pglz_get_next_decomp_char_from_plain().
*/
if (dstate->cp_out - dstate->temp_buf >= 256)
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index ce04ce77e67..1820cedfa19 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/regexp.c,v 1.60 2005/10/18 20:38:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/regexp.c,v 1.61 2005/11/22 18:17:23 momjian Exp $
*
* Alistair Crooks added the code for the regex caching
* agc - cached the regular expressions used - there's a good chance
@@ -212,7 +212,7 @@ RE_compile_and_execute(text *text_re, char *dat, int dat_len,
pg_wchar *data;
size_t data_len;
int regexec_result;
- regex_t *re;
+ regex_t *re;
char errMsg[100];
/* Convert data string to wide characters */
@@ -452,7 +452,7 @@ textregexreplace_noopt(PG_FUNCTION_ARGS)
text *s = PG_GETARG_TEXT_P(0);
text *p = PG_GETARG_TEXT_P(1);
text *r = PG_GETARG_TEXT_P(2);
- regex_t *re;
+ regex_t *re;
re = RE_compile_and_cache(p, regex_flavor);
@@ -475,7 +475,7 @@ textregexreplace(PG_FUNCTION_ARGS)
int i;
bool glob = false;
bool ignorecase = false;
- regex_t *re;
+ regex_t *re;
/* parse options */
for (i = 0; i < opt_len; i++)
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 347f82d8c3d..354fed20c45 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -17,7 +17,7 @@
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.82 2005/10/29 18:39:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.83 2005/11/22 18:17:23 momjian Exp $
*
* ----------
*/
@@ -995,8 +995,8 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
/*
* Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
- * DELETE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our
+ * eventual DELETE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -1156,8 +1156,8 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
* Get the relation descriptors of the FK and PK tables and the new and
* old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
- * UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our
+ * eventual UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -1680,8 +1680,8 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
/*
* Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
- * UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our
+ * eventual UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -1849,8 +1849,8 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
/*
* Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
- * UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our
+ * eventual UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -2059,8 +2059,8 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
/*
* Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
- * UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our
+ * eventual UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -2238,8 +2238,8 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
/*
* Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
- * UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our
+ * eventual UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 5411e6ab8c5..c7b8066cbfb 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -3,7 +3,7 @@
* back to source text
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.208 2005/11/17 22:14:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.209 2005/11/22 18:17:23 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
@@ -3845,8 +3845,8 @@ get_const_expr(Const *constval, deparse_context *context)
* 'NaN'). Note that strtod() and friends might accept NaN,
* so we can't use that to test.
*
- * In reality we only need to defend against infinity and NaN, so
- * we need not get too crazy about pattern matching here.
+ * In reality we only need to defend against infinity and NaN,
+ * so we need not get too crazy about pattern matching here.
*/
if (strspn(extval, "0123456789+-eE.") == strlen(extval))
{
@@ -4579,8 +4579,8 @@ quote_identifier(const char *ident)
* parser doesn't provide any easy way to test for whether an
* identifier is safe or not... so be safe not sorry.
*
- * Note: ScanKeywordLookup() does case-insensitive comparison, but that's
- * fine, since we already know we have all-lower-case.
+ * Note: ScanKeywordLookup() does case-insensitive comparison, but
+ * that's fine, since we already know we have all-lower-case.
*/
if (ScanKeywordLookup(ident) != NULL)
safe = false;
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 85c22ca6c45..9ff98f05cbe 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.192 2005/11/07 17:36:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.193 2005/11/22 18:17:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1396,11 +1396,11 @@ eqjoinsel(PG_FUNCTION_ARGS)
* the righthand relation are unique (ie, act as if it's been
* DISTINCT'd).
*
- * NOTE: it might seem that we should unique-ify the lefthand input when
- * considering JOIN_REVERSE_IN. But this is not so, because the join
- * clause we've been handed has not been commuted from the way the
- * parser originally wrote it. We know that the unique side of the IN
- * clause is *always* on the right.
+ * NOTE: it might seem that we should unique-ify the lefthand input
+ * when considering JOIN_REVERSE_IN. But this is not so, because the
+ * join clause we've been handed has not been commuted from the way
+ * the parser originally wrote it. We know that the unique side of
+ * the IN clause is *always* on the right.
*
* NOTE: it would be dangerous to try to be smart about JOIN_LEFT or
* JOIN_RIGHT here, because we do not have enough information to
@@ -2190,8 +2190,8 @@ estimate_hash_bucketsize(PlannerInfo *root, Node *hashkey, double nbuckets)
* assuming that the data distribution is affected uniformly by the
* restriction clauses!
*
- * XXX Possibly better way, but much more expensive: multiply by selectivity
- * of rel's restriction clauses that mention the target Var.
+ * XXX Possibly better way, but much more expensive: multiply by
+ * selectivity of rel's restriction clauses that mention the target Var.
*/
if (vardata.rel)
ndistinct *= vardata.rel->rows / vardata.rel->tuples;
@@ -2296,10 +2296,10 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
* declared input type(s) of the operator we are invoked for, so we just
* error out if either is not recognized.
*
- * XXX The histogram we are interpolating between points of could belong to a
- * column that's only binary-compatible with the declared type. In essence
- * we are assuming that the semantics of binary-compatible types are
- * enough alike that we can use a histogram generated with one type's
+ * XXX The histogram we are interpolating between points of could belong
+ * to a column that's only binary-compatible with the declared type. In
+ * essence we are assuming that the semantics of binary-compatible types
+ * are enough alike that we can use a histogram generated with one type's
* operators to estimate selectivity for the other's. This is outright
* wrong in some cases --- in particular signed versus unsigned
* interpretation could trip us up. But it's useful enough in the
@@ -2636,10 +2636,10 @@ convert_string_datum(Datum value, Oid typid)
* that can write past the specified buffer length in that scenario.
* So, do it the dumb way for portability.
*
- * Yet other systems (e.g., glibc) sometimes return a smaller value from
- * the second call than the first; thus the Assert must be <= not ==
- * as you'd expect. Can't any of these people program their way out
- * of a paper bag?
+ * Yet other systems (e.g., glibc) sometimes return a smaller value
+ * from the second call than the first; thus the Assert must be <= not
+ * == as you'd expect. Can't any of these people program their way
+ * out of a paper bag?
*/
xfrmlen = strxfrm(NULL, val, 0);
xfrmstr = (char *) palloc(xfrmlen + 1);
@@ -3150,7 +3150,8 @@ get_variable_numdistinct(VariableStatData *vardata)
/*
* Special-case boolean columns: presumably, two distinct values.
*
- * Are there any other datatypes we should wire in special estimates for?
+ * Are there any other datatypes we should wire in special estimates
+ * for?
*/
stadistinct = 2.0;
}
@@ -3265,8 +3266,9 @@ get_variable_maximum(PlannerInfo *root, VariableStatData *vardata,
/*
* If there is a histogram, grab the last or first value as appropriate.
*
- * If there is a histogram that is sorted with some other operator than the
- * one we want, fail --- this suggests that there is data we can't use.
+ * If there is a histogram that is sorted with some other operator than
+ * the one we want, fail --- this suggests that there is data we can't
+ * use.
*/
if (get_attstatsslot(vardata->statsTuple,
vardata->atttype, vardata->atttypmod,
@@ -4214,8 +4216,8 @@ genericcostestimate(PlannerInfo *root,
* system in favor of using partial indexes where possible, which is not
* necessarily a bad thing. But it'd be nice to do better someday.
*
- * Note that index->indpred and indexQuals are both in implicit-AND form, so
- * ANDing them together just takes merging the lists. However,
+ * Note that index->indpred and indexQuals are both in implicit-AND form,
+ * so ANDing them together just takes merging the lists. However,
* eliminating duplicates is a bit trickier because indexQuals contains
* RestrictInfo nodes and the indpred does not. It is okay to pass a
* mixed list to clauselist_selectivity, but we have to work a bit to
@@ -4261,8 +4263,8 @@ genericcostestimate(PlannerInfo *root,
/*
* Estimate the number of index pages that will be retrieved.
*
- * For all currently-supported index types, the first page of the index is a
- * metadata page, and we should figure on fetching that plus a pro-rated
+ * For all currently-supported index types, the first page of the index is
+ * a metadata page, and we should figure on fetching that plus a pro-rated
* fraction of the remaining pages.
*/
if (index->pages > 1 && index->tuples > 0)
@@ -4289,9 +4291,9 @@ genericcostestimate(PlannerInfo *root,
* CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
* indexqual operator.
*
- * Note: this neglects the possible costs of rechecking lossy operators and
- * OR-clause expressions. Detecting that that might be needed seems more
- * expensive than it's worth, though, considering all the other
+ * Note: this neglects the possible costs of rechecking lossy operators
+ * and OR-clause expressions. Detecting that that might be needed seems
+ * more expensive than it's worth, though, considering all the other
* inaccuracies here ...
*/
cost_qual_eval(&index_qual_cost, indexQuals);
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index ec2e80fc2a6..c04a735aae9 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.158 2005/11/17 22:14:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.159 2005/11/22 18:17:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1944,30 +1944,22 @@ timestamp_mi(PG_FUNCTION_ARGS)
result->day = 0;
/*
- * This is wrong, but removing it breaks a lot of regression tests.
- * For example:
+ * This is wrong, but removing it breaks a lot of regression tests. For
+ * example:
*
- * test=> SET timezone = 'EST5EDT';
- * test=> SELECT
- * test-> ('2005-10-30 13:22:00-05'::timestamptz -
- * test(> '2005-10-29 13:22:00-04'::timestamptz);
- * ?column?
- * ----------------
- * 1 day 01:00:00
- * (1 row)
+ * test=> SET timezone = 'EST5EDT'; test=> SELECT test-> ('2005-10-30
+ * 13:22:00-05'::timestamptz - test(> '2005-10-29
+ * 13:22:00-04'::timestamptz); ?column? ---------------- 1 day 01:00:00 (1
+ * row)
*
- * so adding that to the first timestamp gets:
+ * so adding that to the first timestamp gets:
*
- * test=> SELECT
- * test-> ('2005-10-29 13:22:00-04'::timestamptz +
- * test(> ('2005-10-30 13:22:00-05'::timestamptz -
- * test(> '2005-10-29 13:22:00-04'::timestamptz)) at time zone 'EST';
- * timezone
- * --------------------
- * 2005-10-30 14:22:00
- * (1 row)
+ * test=> SELECT test-> ('2005-10-29 13:22:00-04'::timestamptz + test(>
+ * ('2005-10-30 13:22:00-05'::timestamptz - test(> '2005-10-29
+ * 13:22:00-04'::timestamptz)) at time zone 'EST'; timezone
+ * -------------------- 2005-10-30 14:22:00 (1 row)
*/
- result = DatumGetIntervalP(DirectFunctionCall1(interval_justify_hours,
+ result = DatumGetIntervalP(DirectFunctionCall1(interval_justify_hours,
IntervalPGetDatum(result)));
PG_RETURN_INTERVAL_P(result);
@@ -1986,6 +1978,7 @@ interval_justify_hours(PG_FUNCTION_ARGS)
{
Interval *span = PG_GETARG_INTERVAL_P(0);
Interval *result;
+
#ifdef HAVE_INT64_TIMESTAMP
int64 wholeday;
#else
@@ -2334,12 +2327,12 @@ interval_mul(PG_FUNCTION_ARGS)
day_remainder -= result->day;
/*
- * The above correctly handles the whole-number part of the month and
- * day products, but we have to do something with any fractional part
+ * The above correctly handles the whole-number part of the month and day
+ * products, but we have to do something with any fractional part
* resulting when the factor is nonintegral. We cascade the fractions
* down to lower units using the conversion factors DAYS_PER_MONTH and
- * SECS_PER_DAY. Note we do NOT cascade up, since we are not forced to
- * do so by the representation. The user can choose to cascade up later,
+ * SECS_PER_DAY. Note we do NOT cascade up, since we are not forced to do
+ * so by the representation. The user can choose to cascade up later,
* using justify_hours and/or justify_days.
*/
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index dd877c7d3f0..69544ea90f6 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.140 2005/11/18 02:38:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.141 2005/11/22 18:17:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -888,8 +888,8 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
(LPWSTR) a1p, a1len / 2);
if (!r)
ereport(ERROR,
- (errmsg("could not convert string to UTF-16: error %lu",
- GetLastError())));
+ (errmsg("could not convert string to UTF-16: error %lu",
+ GetLastError())));
}
((LPWSTR) a1p)[r] = 0;
@@ -901,8 +901,8 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
(LPWSTR) a2p, a2len / 2);
if (!r)
ereport(ERROR,
- (errmsg("could not convert string to UTF-16: error %lu",
- GetLastError())));
+ (errmsg("could not convert string to UTF-16: error %lu",
+ GetLastError())));
}
((LPWSTR) a2p)[r] = 0;
@@ -2118,12 +2118,12 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
if (eml == 1)
{
for (; p < p_end && *p != '\\'; p++)
- /* nothing */ ;
+ /* nothing */ ;
}
else
{
for (; p < p_end && *p != '\\'; p += pg_mblen(p))
- /* nothing */ ;
+ /* nothing */ ;
}
/* Copy the text we just scanned over, if any. */
@@ -2168,9 +2168,9 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
else
{
/*
- * If escape char is not followed by any expected char,
- * just treat it as ordinary data to copy. (XXX would it be
- * better to throw an error?)
+ * If escape char is not followed by any expected char, just treat
+ * it as ordinary data to copy. (XXX would it be better to throw
+ * an error?)
*/
appendStringInfoChar(str, '\\');
continue;
@@ -2179,7 +2179,7 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
if (so != -1 && eo != -1)
{
/*
- * Copy the text that is back reference of regexp. Because so and
+ * Copy the text that is back reference of regexp. Because so and
* eo are counted in characters not bytes, it's easiest to use
* text_substring to pull out the correct chunk of text.
*/
@@ -2252,9 +2252,9 @@ replace_text_regexp(text *src_text, void *regexp,
break;
/*
- * Copy the text to the left of the match position. Because we
- * are working with character not byte indexes, it's easiest to
- * use text_substring to pull out the needed data.
+ * Copy the text to the left of the match position. Because we are
+ * working with character not byte indexes, it's easiest to use
+ * text_substring to pull out the needed data.
*/
if (pmatch[0].rm_so - data_pos > 0)
{
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 918ab7c081a..824dbaed02f 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.125 2005/10/15 02:49:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.126 2005/11/22 18:17:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1426,8 +1426,8 @@ SearchCatCacheList(CatCache *cache,
* relation. For each matching tuple found in the relation, use an
* existing cache entry if possible, else build a new one.
*
- * We have to bump the member refcounts temporarily to ensure they won't get
- * dropped from the cache while loading other members. We use a PG_TRY
+ * We have to bump the member refcounts temporarily to ensure they won't
+ * get dropped from the cache while loading other members. We use a PG_TRY
* block to ensure we can undo those refcounts if we get an error before
* we finish constructing the CatCList.
*/
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 59250feac1a..437bd4b0869 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -80,7 +80,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.73 2005/10/15 02:49:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.74 2005/11/22 18:17:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -564,8 +564,8 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
* is needed because other backends might possibly possess smgr cache
* but not relcache entries for the target relation.
*
- * Note: during a pg_class row update that assigns a new relfilenode or
- * reltablespace value, we will be called on both the old and new
+ * Note: during a pg_class row update that assigns a new relfilenode
+ * or reltablespace value, we will be called on both the old and new
* tuples, and thus will broadcast invalidation messages showing both
* the old and new RelFileNode values. This ensures that other
* backends will close smgr references to the old file.
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 2b58874ac79..7ad8f7d0241 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.231 2005/11/20 19:49:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.232 2005/11/22 18:17:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -303,11 +303,11 @@ AllocateRelationDesc(Relation relation, Form_pg_class relp)
/*
* Copy the relation tuple form
*
- * We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE. relacl
- * is NOT stored in the relcache --- there'd be little point in it, since
- * we don't copy the tuple's nullvalues bitmap and hence wouldn't know if
- * the value is valid ... bottom line is that relacl *cannot* be retrieved
- * from the relcache. Get it from the syscache if you need it.
+ * We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE.
+ * relacl is NOT stored in the relcache --- there'd be little point in it,
+ * since we don't copy the tuple's nullvalues bitmap and hence wouldn't
+ * know if the value is valid ... bottom line is that relacl *cannot* be
+ * retrieved from the relcache. Get it from the syscache if you need it.
*/
relationForm = (Form_pg_class) palloc(CLASS_TUPLE_SIZE);
@@ -549,8 +549,8 @@ RelationBuildRuleLock(Relation relation)
/*
* open pg_rewrite and begin a scan
*
- * Note: since we scan the rules using RewriteRelRulenameIndexId, we will be
- * reading the rules in name order, except possibly during
+ * Note: since we scan the rules using RewriteRelRulenameIndexId, we will
+ * be reading the rules in name order, except possibly during
* emergency-recovery operations (ie, IsIgnoringSystemIndexes). This in
* turn ensures that rules will be fired in name order.
*/
@@ -1199,9 +1199,9 @@ formrdesc(const char *relationName, Oid relationReltype,
/*
* initialize relation tuple form
*
- * The data we insert here is pretty incomplete/bogus, but it'll serve to get
- * us launched. RelationCacheInitializePhase2() will read the real data
- * from pg_class and replace what we've done here.
+ * The data we insert here is pretty incomplete/bogus, but it'll serve to
+ * get us launched. RelationCacheInitializePhase2() will read the real
+ * data from pg_class and replace what we've done here.
*/
relation->rd_rel = (Form_pg_class) palloc0(CLASS_TUPLE_SIZE);
@@ -1453,8 +1453,8 @@ RelationReloadClassinfo(Relation relation)
/*
* Read the pg_class row
*
- * Don't try to use an indexscan of pg_class_oid_index to reload the info for
- * pg_class_oid_index ...
+ * Don't try to use an indexscan of pg_class_oid_index to reload the info
+ * for pg_class_oid_index ...
*/
indexOK = (RelationGetRelid(relation) != ClassOidIndexId);
pg_class_tuple = ScanPgRelation(RelationGetRelid(relation), indexOK);
@@ -1501,9 +1501,9 @@ RelationClearRelation(Relation relation, bool rebuild)
* got called because of a relation cache flush that was triggered by
* VACUUM.
*
- * If it's a nailed index, then we need to re-read the pg_class row to see if
- * its relfilenode changed. We can't necessarily do that here, because we
- * might be in a failed transaction. We assume it's okay to do it if
+ * If it's a nailed index, then we need to re-read the pg_class row to see
+ * if its relfilenode changed. We can't necessarily do that here, because
+ * we might be in a failed transaction. We assume it's okay to do it if
* there are open references to the relcache entry (cf notes for
* AtEOXact_RelationCache). Otherwise just mark the entry as possibly
* invalid, and it'll be fixed when next opened.
@@ -1574,8 +1574,8 @@ RelationClearRelation(Relation relation, bool rebuild)
* rd_createSubid state. Also attempt to preserve the tupledesc and
* rewrite-rule substructures in place.
*
- * Note that this process does not touch CurrentResourceOwner; which is
- * good because whatever ref counts the entry may have do not
+ * Note that this process does not touch CurrentResourceOwner; which
+ * is good because whatever ref counts the entry may have do not
* necessarily belong to that resource owner.
*/
Oid save_relid = RelationGetRelid(relation);
@@ -1934,8 +1934,8 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
/*
* Is it a relation created in the current subtransaction?
*
- * During subcommit, mark it as belonging to the parent, instead. During
- * subabort, simply delete the relcache entry.
+ * During subcommit, mark it as belonging to the parent, instead.
+ * During subabort, simply delete the relcache entry.
*/
if (relation->rd_createSubid == mySubid)
{
@@ -3076,8 +3076,8 @@ load_relcache_init_file(void)
* Rules and triggers are not saved (mainly because the internal
* format is complex and subject to change). They must be rebuilt if
* needed by RelationCacheInitializePhase2. This is not expected to
- * be a big performance hit since few system catalogs have such.
- * Ditto for index expressions and predicates.
+ * be a big performance hit since few system catalogs have such. Ditto
+ * for index expressions and predicates.
*/
rel->rd_rules = NULL;
rel->rd_rulescxt = NULL;
@@ -3320,10 +3320,10 @@ write_relcache_init_file(void)
* OK, rename the temp file to its final name, deleting any
* previously-existing init file.
*
- * Note: a failure here is possible under Cygwin, if some other backend
- * is holding open an unlinked-but-not-yet-gone init file. So treat
- * this as a noncritical failure; just remove the useless temp file on
- * failure.
+ * Note: a failure here is possible under Cygwin, if some other
+ * backend is holding open an unlinked-but-not-yet-gone init file. So
+ * treat this as a noncritical failure; just remove the useless temp
+ * file on failure.
*/
if (rename(tempfilename, finalfilename) < 0)
unlink(tempfilename);
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index ff9cc975437..9726bd499bd 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.15 2005/10/15 02:49:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.16 2005/11/22 18:17:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -238,9 +238,9 @@ lookup_type_cache(Oid type_id, int flags)
/*
* Set up fmgr lookup info as requested
*
- * Note: we tell fmgr the finfo structures live in CacheMemoryContext, which
- * is not quite right (they're really in DynaHashContext) but this will do
- * for our purposes.
+ * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
+ * which is not quite right (they're really in DynaHashContext) but this
+ * will do for our purposes.
*/
if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
typentry->eq_opr_finfo.fn_oid == InvalidOid &&
@@ -319,8 +319,8 @@ lookup_default_opclass(Oid type_id, Oid am_id)
* require the user to specify which one he wants. If we find more than
* one exact match, then someone put bogus entries in pg_opclass.
*
- * This is the same logic as GetDefaultOpClass() in indexcmds.c, except that
- * we consider all opclasses, regardless of the current search path.
+ * This is the same logic as GetDefaultOpClass() in indexcmds.c, except
+ * that we consider all opclasses, regardless of the current search path.
*/
rel = heap_open(OperatorClassRelationId, AccessShareLock);
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 44ebac245c9..55622397e77 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -42,7 +42,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/error/elog.c,v 1.167 2005/11/05 03:04:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/error/elog.c,v 1.168 2005/11/22 18:17:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1158,8 +1158,8 @@ set_syslog_parameters(const char *ident, int facility)
* the connection until needed, since this routine will get called whether
* or not Log_destination actually mentions syslog.
*
- * Note that we make our own copy of the ident string rather than relying on
- * guc.c's. This may be overly paranoid, but it ensures that we cannot
+ * Note that we make our own copy of the ident string rather than relying
+ * on guc.c's. This may be overly paranoid, but it ensures that we cannot
* accidentally free a string that syslog is still using.
*/
if (syslog_ident == NULL || strcmp(syslog_ident, ident) != 0 ||
@@ -1487,7 +1487,7 @@ log_line_prefix(StringInfo buf)
if (MyProcPort)
{
const char *psdisp;
- int displen;
+ int displen;
psdisp = get_ps_display(&displen);
appendStringInfo(buf, "%.*s", displen, psdisp);
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index 4e5dcc3002b..a99289bef7b 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.97 2005/10/15 02:49:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.98 2005/11/22 18:17:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -608,8 +608,8 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
* backwards-compatibility wrapper). Note, however, that we'll never get
* here with NULL arguments if the function is marked strict.
*
- * We also need to detoast any TOAST-ed inputs, since it's unlikely that an
- * old-style function knows about TOASTing.
+ * We also need to detoast any TOAST-ed inputs, since it's unlikely that
+ * an old-style function knows about TOASTing.
*/
isnull = false;
for (i = 0; i < n_arguments; i++)
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 292673ac26a..622d6184592 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.65 2005/10/15 02:49:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.66 2005/11/22 18:17:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -118,8 +118,8 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
* For shared hash tables, we have a local hash header (HTAB struct) that
* we allocate in TopMemoryContext; all else is in shared memory.
*
- * For non-shared hash tables, everything including the hash header is in a
- * memory context created specially for the hash table --- this makes
+ * For non-shared hash tables, everything including the hash header is in
+ * a memory context created specially for the hash table --- this makes
* hash_destroy very simple. The memory context is made a child of either
* a context specified by the caller, or TopMemoryContext if nothing is
* specified.
diff --git a/src/backend/utils/init/flatfiles.c b/src/backend/utils/init/flatfiles.c
index 9906682c320..2898ccf943f 100644
--- a/src/backend/utils/init/flatfiles.c
+++ b/src/backend/utils/init/flatfiles.c
@@ -23,7 +23,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/utils/init/flatfiles.c,v 1.15 2005/10/15 02:49:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/flatfiles.c,v 1.16 2005/11/22 18:17:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -593,8 +593,8 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
* Convert list of role Oids to list of role names. We must do
* this before re-sorting auth_info.
*
- * We skip the first list element (curr_role itself) since there is
- * no point in writing that a role is a member of itself.
+ * We skip the first list element (curr_role itself) since there
+ * is no point in writing that a role is a member of itself.
*/
for_each_cell(mem, lnext(list_head(roles_list)))
{
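Editorial sketch of the list idiom in the hunk above: for_each_cell() starting at lnext(list_head(...)) walks a List from its second cell onward, which is how the first element (the role itself) gets skipped. Names here are illustrative, not the flatfiles.c code.

#include "postgres.h"
#include "nodes/pg_list.h"

static void
walk_other_members(List *roles_list)
{
    ListCell   *mem;

    /* roles_list always contains at least the role itself as its head */
    for_each_cell(mem, lnext(list_head(roles_list)))
    {
        Oid     memberid = lfirst_oid(mem);

        /* ... look up memberid's name and write it out here ... */
        (void) memberid;
    }
}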
@@ -775,8 +775,8 @@ AtEOXact_UpdateFlatFiles(bool isCommit)
* likely won't have gotten a strong enough lock), so get the locks we
* need before writing anything.
*
- * For writing the auth file, it's sufficient to ExclusiveLock pg_authid; we
- * take just regular AccessShareLock on pg_auth_members.
+ * For writing the auth file, it's sufficient to ExclusiveLock pg_authid;
+ * we take just regular AccessShareLock on pg_auth_members.
*/
if (database_file_update_subid != InvalidSubTransactionId)
drel = heap_open(DatabaseRelationId, ExclusiveLock);
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index 5c6f2f95d5f..eb9478fa2a3 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.150 2005/10/15 02:49:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.151 2005/11/22 18:17:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -695,8 +695,8 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Try to create the lock file --- O_EXCL makes this atomic.
*
- * Think not to make the file protection weaker than 0600. See comments
- * below.
+ * Think not to make the file protection weaker than 0600. See
+ * comments below.
*/
fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
if (fd >= 0)
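Standalone illustration (file name made up) of why O_EXCL matters in the hunk above: the existence test and the create are one atomic operation, so two processes cannot both believe they created the lock file, and mode 0600 keeps the later ownership reasoning valid.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    /* "demo.lock" is a made-up name standing in for the real lock file */
    int fd = open("demo.lock", O_RDWR | O_CREAT | O_EXCL, 0600);

    if (fd < 0)
    {
        if (errno == EEXIST)
            fprintf(stderr, "lock file exists; is another instance running?\n");
        else
            perror("open");
        return 1;
    }
    /* ... write our PID into fd and keep the file for our lifetime ... */
    close(fd);
    return 0;
}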
@@ -757,26 +757,27 @@ CreateLockFile(const char *filename, bool amPostmaster,
* carefully then all but the immediate parent shell will be
* root-owned processes and so the kill test will fail with EPERM.
*
- * We can treat the EPERM-error case as okay because that error implies
- * that the existing process has a different userid than we do, which
- * means it cannot be a competing postmaster. A postmaster cannot
- * successfully attach to a data directory owned by a userid other
- * than its own. (This is now checked directly in checkDataDir(), but
- * has been true for a long time because of the restriction that the
- * data directory isn't group- or world-accessible.) Also, since we
- * create the lockfiles mode 600, we'd have failed above if the
- * lockfile belonged to another userid --- which means that whatever
- * process kill() is reporting about isn't the one that made the
- * lockfile. (NOTE: this last consideration is the only one that
- * keeps us from blowing away a Unix socket file belonging to an
- * instance of Postgres being run by someone else, at least on
- * machines where /tmp hasn't got a stickybit.)
+ * We can treat the EPERM-error case as okay because that error
+ * implies that the existing process has a different userid than we
+ * do, which means it cannot be a competing postmaster. A postmaster
+ * cannot successfully attach to a data directory owned by a userid
+ * other than its own. (This is now checked directly in
+ * checkDataDir(), but has been true for a long time because of the
+ * restriction that the data directory isn't group- or
+ * world-accessible.) Also, since we create the lockfiles mode 600,
+ * we'd have failed above if the lockfile belonged to another userid
+ * --- which means that whatever process kill() is reporting about
+ * isn't the one that made the lockfile. (NOTE: this last
+ * consideration is the only one that keeps us from blowing away a
+ * Unix socket file belonging to an instance of Postgres being run by
+ * someone else, at least on machines where /tmp hasn't got a
+ * stickybit.)
*
- * Windows hasn't got getppid(), but doesn't need it since it's not using
- * real kill() either...
+ * Windows hasn't got getppid(), but doesn't need it since it's not
+ * using real kill() either...
*
- * Normally kill() will fail with ESRCH if the given PID doesn't exist.
- * BeOS returns EINVAL for some silly reason, however.
+ * Normally kill() will fail with ESRCH if the given PID doesn't
+ * exist. BeOS returns EINVAL for some silly reason, however.
*/
if (other_pid != my_pid
#ifndef WIN32
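Standalone sketch of the probe the reflowed comment is reasoning about: kill(pid, 0) delivers no signal but still performs the existence and permission checks, so ESRCH means the lock file is stale and EPERM means a live process under another userid, which, per the comment, cannot be a competing postmaster. The BeOS EINVAL quirk is ignored here.

#include <errno.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static bool
process_is_live_and_ours(pid_t other_pid)
{
    if (kill(other_pid, 0) == 0)
        return true;            /* exists and we could signal it */
    if (errno == ESRCH)
        return false;           /* no such process: the lock file is stale */
    if (errno == EPERM)
        return false;           /* exists but under another userid: not a
                                 * competing postmaster, per the comment */
    return true;                /* unexpected error: be conservative */
}

int
main(void)
{
    printf("self: %d\n", process_is_live_and_ours(getpid()));   /* prints 1 */
    return 0;
}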
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 3c763e39292..b9b1f56dd08 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.158 2005/10/15 02:49:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.159 2005/11/22 18:17:26 momjian Exp $
*
*
*-------------------------------------------------------------------------
@@ -325,8 +325,8 @@ InitPostgres(const char *dbname, const char *username)
/*
* Set up the global variables holding database id and path.
*
- * We take a shortcut in the bootstrap case, otherwise we have to look up the
- * db name in pg_database.
+ * We take a shortcut in the bootstrap case, otherwise we have to look up
+ * the db name in pg_database.
*/
if (bootstrap)
{
diff --git a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
index 0447c2a9e7d..a167d4e4c8c 100644
--- a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
+++ b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
@@ -7,7 +7,7 @@
*
* 1999/1/15 Tatsuo Ishii
*
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c,v 1.6 2005/10/15 02:49:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c,v 1.7 2005/11/22 18:17:26 momjian Exp $
*/
/* can be used in either frontend or backend */
@@ -19,7 +19,7 @@ typedef struct
{
unsigned short code,
peer;
-} codes_t;
+} codes_t;
/* map Big5 Level 1 to CNS 11643-1992 Plane 1 */
static codes_t big5Level1ToCnsPlane1[25] = { /* range */
@@ -205,7 +205,7 @@ static unsigned short b2c3[][2] = {
};
static unsigned short BinarySearchRange
- (codes_t * array, int high, unsigned short code)
+ (codes_t *array, int high, unsigned short code)
{
int low,
mid,
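Standalone sketch of the kind of lookup BinarySearchRange() performs (the table values below are made up, not real Big5/CNS data): binary-search a range table sorted by starting code for the last entry whose start is <= the code being converted.

#include <stdio.h>

typedef struct
{
    unsigned short code,    /* start of a source-code range */
                   peer;    /* corresponding start in the target set */
} codes_t;

/* Made-up demo table; the real Big5/CNS tables are much larger. */
static const codes_t demo_map[] = {
    {0xA140, 0x2121},
    {0xA1F6, 0x2258},
    {0xA3C0, 0x4221},
};

/* Return the index of the last range whose start is <= code, or -1. */
static int
range_lookup(const codes_t *array, int nranges, unsigned short code)
{
    int low = 0,
        high = nranges - 1,
        ans = -1;

    while (low <= high)
    {
        int mid = (low + high) / 2;

        if (array[mid].code <= code)
        {
            ans = mid;          /* candidate; a later range may still fit */
            low = mid + 1;
        }
        else
            high = mid - 1;
    }
    return ans;
}

int
main(void)
{
    printf("%d\n", range_lookup(demo_map, 3, 0xA200));  /* prints 1 */
    return 0;
}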
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
index 0038db58e62..e1600a1fee6 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c,v 1.15 2005/10/15 02:49:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c,v 1.16 2005/11/22 18:17:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,7 +65,7 @@ typedef struct
pg_utf_to_local *map2; /* from UTF8 map name */
int size1; /* size of map1 */
int size2; /* size of map2 */
-} pg_conv_map;
+} pg_conv_map;
static pg_conv_map maps[] = {
{PG_SQL_ASCII}, /* SQL/ASCII */
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 6b83f363217..01e66f69762 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -10,7 +10,7 @@
* Written by Peter Eisentraut <peter_e@gmx.net>.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.300 2005/11/17 22:14:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.301 2005/11/22 18:17:26 momjian Exp $
*
*--------------------------------------------------------------------
*/
@@ -466,7 +466,7 @@ static struct config_bool ConfigureNamesBool[] =
{"constraint_exclusion", PGC_USERSET, QUERY_TUNING_OTHER,
gettext_noop("Enables the planner to use constraints to optimize queries."),
gettext_noop("Child table scans will be skipped if their "
- "constraints guarantee that no rows match the query.")
+ "constraints guarantee that no rows match the query.")
},
&constraint_exclusion,
false, NULL, NULL
@@ -502,7 +502,7 @@ static struct config_bool ConfigureNamesBool[] =
{"fsync", PGC_SIGHUP, WAL_SETTINGS,
gettext_noop("Forces synchronization of updates to disk."),
gettext_noop("The server will use the fsync() system call in several places to make "
- "sure that updates are physically written to disk. This insures "
+ "sure that updates are physically written to disk. This insures "
"that a database cluster will recover to a consistent state after "
"an operating system or hardware crash.")
},
@@ -527,7 +527,7 @@ static struct config_bool ConfigureNamesBool[] =
gettext_noop("Writes full pages to WAL when first modified after a checkpoint."),
gettext_noop("A page write in process during an operating system crash might be "
"only partially written to disk. During recovery, the row changes "
- "stored in WAL are not enough to recover. This option writes "
+ "stored in WAL are not enough to recover. This option writes "
"pages when first modified after a checkpoint to WAL so full recovery "
"is possible.")
},
@@ -2781,8 +2781,8 @@ SelectConfigFiles(const char *userDoption, const char *progname)
* If the data_directory GUC variable has been set, use that as DataDir;
* otherwise use configdir if set; else punt.
*
- * Note: SetDataDir will copy and absolute-ize its argument, so we don't have
- * to.
+ * Note: SetDataDir will copy and absolute-ize its argument, so we don't
+ * have to.
*/
if (data_directory)
SetDataDir(data_directory);
@@ -3113,8 +3113,8 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
/*
* We have two cases:
*
- * If commit and HAVE_TENTATIVE, set actual value to tentative (this is
- * to override a SET LOCAL if one occurred later than SET). We keep
+ * If commit and HAVE_TENTATIVE, set actual value to tentative (this
+ * is to override a SET LOCAL if one occurred later than SET). We keep
* the tentative value and propagate HAVE_TENTATIVE to the parent
* status, allowing the SET's effect to percolate up. (But if we're
* exiting the outermost transaction, we'll drop the HAVE_TENTATIVE
@@ -3268,7 +3268,8 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
* If newval should now be freed, it'll be
* taken care of below.
*
- * See notes in set_config_option about casting
+ * See notes in set_config_option about
+ * casting
*/
newval = (char *) newstr;
}
diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c
index 878ff81e241..3c78b1b5d93 100644
--- a/src/backend/utils/misc/ps_status.c
+++ b/src/backend/utils/misc/ps_status.c
@@ -5,7 +5,7 @@
* to contain some useful information. Mechanism differs wildly across
* platforms.
*
- * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.26 2005/11/05 03:04:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.27 2005/11/22 18:17:26 momjian Exp $
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
* various details abducted from various places
@@ -380,7 +380,7 @@ get_ps_display(int *displen)
/* Remove any trailing spaces to offset the effect of PS_PADDING */
offset = ps_buffer_size;
- while (offset > ps_buffer_fixed_size && ps_buffer[offset-1] == PS_PADDING)
+ while (offset > ps_buffer_fixed_size && ps_buffer[offset - 1] == PS_PADDING)
offset--;
*displen = offset - ps_buffer_fixed_size;
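Standalone sketch of the trimming loop touched above (buffer contents are made up): the activity portion of the ps buffer is padded out with PS_PADDING, so the display length is reported with the trailing padding stripped.

#include <stdio.h>
#include <string.h>

#define PS_PADDING ' '

static char   ps_buffer[64] = "postgres: alice mydb [local] SELECT   ";
static size_t ps_buffer_fixed_size = sizeof("postgres: alice mydb [local] ") - 1;

static const char *
get_display(int *displen)
{
    size_t offset = strlen(ps_buffer);

    /* remove trailing padding from the variable (activity) part */
    while (offset > ps_buffer_fixed_size && ps_buffer[offset - 1] == PS_PADDING)
        offset--;

    *displen = (int) (offset - ps_buffer_fixed_size);
    return ps_buffer + ps_buffer_fixed_size;
}

int
main(void)
{
    int         len;
    const char *p = get_display(&len);

    printf("%.*s|\n", len, p);      /* prints "SELECT|" */
    return 0;
}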
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 9866e12d68c..0402005a372 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.82 2005/10/15 02:49:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.83 2005/11/22 18:17:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -320,21 +320,21 @@ PortalDrop(Portal portal, bool isTopCommit)
* Release any resources still attached to the portal. There are several
* cases being covered here:
*
- * Top transaction commit (indicated by isTopCommit): normally we should do
- * nothing here and let the regular end-of-transaction resource releasing
- * mechanism handle these resources too. However, if we have a FAILED
- * portal (eg, a cursor that got an error), we'd better clean up its
- * resources to avoid resource-leakage warning messages.
+ * Top transaction commit (indicated by isTopCommit): normally we should
+ * do nothing here and let the regular end-of-transaction resource
+ * releasing mechanism handle these resources too. However, if we have a
+ * FAILED portal (eg, a cursor that got an error), we'd better clean up
+ * its resources to avoid resource-leakage warning messages.
*
- * Sub transaction commit: never comes here at all, since we don't kill any
- * portals in AtSubCommit_Portals().
+ * Sub transaction commit: never comes here at all, since we don't kill
+ * any portals in AtSubCommit_Portals().
*
* Main or sub transaction abort: we will do nothing here because
* portal->resowner was already set NULL; the resources were already
* cleaned up in transaction abort.
*
- * Ordinary portal drop: must release resources. However, if the portal is
- * not FAILED then we do not release its locks. The locks become the
+ * Ordinary portal drop: must release resources. However, if the portal
+ * is not FAILED then we do not release its locks. The locks become the
* responsibility of the transaction's ResourceOwner (since it is the
* parent of the portal's owner) and will be released when the transaction
* eventually ends.
@@ -439,8 +439,8 @@ CommitHoldablePortals(void)
* Instead of dropping the portal, prepare it for access by later
* transactions.
*
- * Note that PersistHoldablePortal() must release all resources used
- * by the portal that are local to the creating transaction.
+ * Note that PersistHoldablePortal() must release all resources
+ * used by the portal that are local to the creating transaction.
*/
PortalCreateHoldStore(portal);
PersistHoldablePortal(portal);
@@ -698,8 +698,8 @@ AtSubAbort_Portals(SubTransactionId mySubid,
* If the portal is READY then allow it to survive into the parent
* transaction; otherwise shut it down.
*
- * Currently, we can't actually support that because the portal's query
- * might refer to objects created or changed in the failed
+ * Currently, we can't actually support that because the portal's
+ * query might refer to objects created or changed in the failed
* subtransaction, leading to crashes if execution is resumed. So,
* even READY portals are deleted. It would be nice to detect whether
* the query actually depends on any such object, instead.
diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c
index dfdb9958f91..19c7f8ea8b5 100644
--- a/src/backend/utils/resowner/resowner.c
+++ b/src/backend/utils/resowner/resowner.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/resowner/resowner.c,v 1.15 2005/11/07 17:36:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/resowner/resowner.c,v 1.16 2005/11/22 18:17:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -198,9 +198,9 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
* buffer entry from my list, so I just have to iterate till there are
* none.
*
- * During a commit, there shouldn't be any remaining pins --- that would
- * indicate failure to clean up the executor correctly --- so issue
- * warnings. In the abort case, just clean up quietly.
+ * During a commit, there shouldn't be any remaining pins --- that
+ * would indicate failure to clean up the executor correctly --- so
+ * issue warnings. In the abort case, just clean up quietly.
*
* We are careful to do the releasing back-to-front, so as to avoid
* O(N^2) behavior in ResourceOwnerForgetBuffer().
@@ -217,8 +217,8 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
* the relref entry from my list, so I just have to iterate till there
* are none.
*
- * As with buffer pins, warn if any are left at commit time, and release
- * back-to-front for speed.
+ * As with buffer pins, warn if any are left at commit time, and
+ * release back-to-front for speed.
*/
while (owner->nrelrefs > 0)
{
@@ -260,8 +260,8 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
* the catref entry from my list, so I just have to iterate till there
* are none. Ditto for catcache lists.
*
- * As with buffer pins, warn if any are left at commit time, and release
- * back-to-front for speed.
+ * As with buffer pins, warn if any are left at commit time, and
+ * release back-to-front for speed.
*/
while (owner->ncatrefs > 0)
{
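Standalone sketch of the back-to-front point made in these hunks (array contents are made up): each Forget operation searches the owner's array for the entry and compacts it out, so releasing newest-first finds every entry immediately and the whole release stays linear instead of O(N^2).

#include <stdio.h>

#define NREFS 5

static int  refs[NREFS] = {10, 11, 12, 13, 14};     /* made-up reference IDs */
static int  nrefs = NREFS;

/* Remove one entry from the array, as a Forget routine would. */
static void
forget_ref(int value)
{
    int i;

    /* search newest-to-oldest; releasing back-to-front matches at nrefs-1 */
    for (i = nrefs - 1; i >= 0; i--)
    {
        if (refs[i] == value)
        {
            refs[i] = refs[nrefs - 1];  /* compact by moving the last entry */
            nrefs--;
            return;
        }
    }
}

int
main(void)
{
    /* release back-to-front, as the resowner.c comments recommend */
    while (nrefs > 0)
        forget_ref(refs[nrefs - 1]);

    printf("%d refs left\n", nrefs);
    return 0;
}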
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 38120575f65..5b8e244277d 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -78,7 +78,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.55 2005/11/20 19:49:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.56 2005/11/22 18:17:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -584,6 +584,7 @@ void
tuplesort_end(Tuplesortstate *state)
{
int i;
+
#ifdef TRACE_SORT
long spaceUsed;
#endif
@@ -743,8 +744,8 @@ puttuple_common(Tuplesortstate *state, void *tuple)
* and it's simplest to let writetup free each tuple as soon as
* it's written.)
*
- * Note there will always be at least one tuple in the heap at this
- * point; see dumptuples.
+ * Note there will always be at least one tuple in the heap at
+ * this point; see dumptuples.
*/
Assert(state->memtupcount > 0);
if (COMPARETUP(state, tuple, state->memtuples[0]) >= 0)
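Standalone sketch of the test in the context line above (heap contents are made up): under replacement selection, a just-added tuple can join the current output run only if it compares >= the smallest tuple still in the heap; otherwise it has to wait for the next run.

#include <stdbool.h>
#include <stdio.h>

/* heap[0] is the smallest value still in the in-memory heap. */
static bool
fits_current_run(int newval, const int *heap, int heapcount)
{
    return heapcount > 0 && newval >= heap[0];
}

int
main(void)
{
    int heap[] = {42, 57, 50};      /* made-up heap contents, minimum at [0] */

    printf("%d %d\n",
           fits_current_run(45, heap, 3),   /* 1: can join the current run */
           fits_current_run(17, heap, 3));  /* 0: must go to the next run */
    return 0;
}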
@@ -890,8 +891,8 @@ tuplesort_gettuple(Tuplesortstate *state, bool forward,
/*
* Backward.
*
- * if all tuples are fetched already then we return last tuple, else
- * - tuple before last returned.
+ * if all tuples are fetched already then we return last tuple,
+ * else - tuple before last returned.
*/
if (state->eof_reached)
{
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index bdd04e21f4f..bb4195f8829 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.24 2005/11/20 19:49:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.25 2005/11/22 18:17:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -115,17 +115,17 @@ struct Tuplestorestate
/*
* These variables are used to keep track of the current position.
*
- * In state WRITEFILE, the current file seek position is the write point, and
- * the read position is remembered in readpos_xxx; in state READFILE, the
- * current file seek position is the read point, and the write position is
- * remembered in writepos_xxx. (The write position is the same as EOF,
- * but since BufFileSeek doesn't currently implement SEEK_END, we have to
- * remember it explicitly.)
+ * In state WRITEFILE, the current file seek position is the write point,
+ * and the read position is remembered in readpos_xxx; in state READFILE,
+ * the current file seek position is the read point, and the write
+ * position is remembered in writepos_xxx. (The write position is the
+ * same as EOF, but since BufFileSeek doesn't currently implement
+ * SEEK_END, we have to remember it explicitly.)
*
- * Special case: if we are in WRITEFILE state and eof_reached is true, then
- * the read position is implicitly equal to the write position (and hence
- * to the file seek position); this way we need not update the readpos_xxx
- * variables on each write.
+ * Special case: if we are in WRITEFILE state and eof_reached is true,
+ * then the read position is implicitly equal to the write position (and
+ * hence to the file seek position); this way we need not update the
+ * readpos_xxx variables on each write.
*/
bool eof_reached; /* read reached EOF (always valid) */
int current; /* next array index (valid if INMEM) */
@@ -454,11 +454,11 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
/*
* Backward.
*
- * if all tuples are fetched already then we return last tuple, else
- * - tuple before last returned.
+ * if all tuples are fetched already then we return last tuple,
+ * else - tuple before last returned.
*
- * Back up to fetch previously-returned tuple's ending length word.
- * If seek fails, assume we are at start of file.
+ * Back up to fetch previously-returned tuple's ending length
+ * word. If seek fails, assume we are at start of file.
*/
if (BufFileSeek(state->myfile, 0, -(long) sizeof(unsigned int),
SEEK_CUR) != 0)
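Standalone sketch of the backward-fetch idiom in this hunk, using stdio instead of BufFile and a simplified on-file layout (a body followed by one trailing length word, whereas tuplestore also writes a leading one): step back over the length word, read it, then step back over the record itself.

#include <stdio.h>

/* Step back over one record laid out as [body][trailing length word]. */
static int
step_back_one_record(FILE *f)
{
    unsigned int len;

    /* back up to the previous record's trailing length word */
    if (fseek(f, -(long) sizeof(unsigned int), SEEK_CUR) != 0)
        return -1;                      /* already at start of file */
    if (fread(&len, sizeof(len), 1, f) != 1)
        return -1;
    /* now back up over the length word and the record body */
    if (fseek(f, -(long) (sizeof(unsigned int) + len), SEEK_CUR) != 0)
        return -1;
    return (int) len;
}

int
main(void)
{
    FILE        *f = tmpfile();
    const char   rec[] = "hello";
    unsigned int len = sizeof(rec);

    if (f == NULL)
        return 1;
    fwrite(rec, 1, len, f);             /* record body */
    fwrite(&len, sizeof(len), 1, f);    /* trailing length word */

    printf("stepped back over %d bytes\n", step_back_one_record(f));
    fclose(f);
    return 0;
}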
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index fa6bd4a3c58..35208140090 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -32,7 +32,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.91 2005/10/15 02:49:37 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.92 2005/11/22 18:17:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -941,12 +941,12 @@ HeapTupleSatisfiesSnapshot(HeapTupleHeader tuple, Snapshot snapshot,
* By here, the inserting transaction has committed - have to check
* when...
*
- * Note that the provided snapshot contains only top-level XIDs, so we have
- * to convert a subxact XID to its parent for comparison. However, we can
- * make first-pass range checks with the given XID, because a subxact with
- * XID < xmin has surely also got a parent with XID < xmin, while one with
- * XID >= xmax must belong to a parent that was not yet committed at the
- * time of this snapshot.
+ * Note that the provided snapshot contains only top-level XIDs, so we
+ * have to convert a subxact XID to its parent for comparison. However, we
+ * can make first-pass range checks with the given XID, because a subxact
+ * with XID < xmin has surely also got a parent with XID < xmin, while one
+ * with XID >= xmax must belong to a parent that was not yet committed at
+ * the time of this snapshot.
*/
if (TransactionIdFollowsOrEquals(HeapTupleHeaderGetXmin(tuple),
snapshot->xmin))
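Editorial sketch of the first-pass range check the reflowed comment describes, with plain integers standing in for TransactionIds and wraparound ignored: XIDs below the snapshot's xmin were committed before the snapshot was taken (and so was any parent of such a subxact), XIDs at or above xmax cannot be visible, and only the band in between needs the subxact-to-parent conversion and the in-progress (xip[]) test.

#include <stdio.h>

typedef unsigned int DemoXid;

typedef struct
{
    DemoXid xmin;   /* all XIDs < xmin committed before the snapshot */
    DemoXid xmax;   /* all XIDs >= xmax started after the snapshot */
} DemoSnapshot;

/* 0 = surely visible, 1 = surely invisible, 2 = need subxact/xip[] check */
static int
first_pass_check(DemoXid xid, const DemoSnapshot *snap)
{
    if (xid < snap->xmin)
        return 0;
    if (xid >= snap->xmax)
        return 1;
    return 2;
}

int
main(void)
{
    DemoSnapshot snap = {100, 120};

    printf("%d %d %d\n",
           first_pass_check(90, &snap),     /* 0 */
           first_pass_check(130, &snap),    /* 1 */
           first_pass_check(110, &snap));   /* 2 */
    return 0;
}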
@@ -1070,8 +1070,8 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
/*
* Has inserting transaction committed?
*
- * If the inserting transaction aborted, then the tuple was never visible to
- * any other transaction, so we can delete it immediately.
+ * If the inserting transaction aborted, then the tuple was never visible
+ * to any other transaction, so we can delete it immediately.
*/
if (!(tuple->t_infomask & HEAP_XMIN_COMMITTED))
{