author     Bruce Momjian <bruce@momjian.us>   2005-11-22 18:23:31 +0000
committer  Bruce Momjian <bruce@momjian.us>   2005-11-22 18:23:31 +0000
commit     bef7764835725e5d8468da1c139e9020be689b95 (patch)
tree       71075b16ab6ed5152b31757e5dd65cd2b9383ba0
parent     c8de36352fe72ae2265eb53a6e1bf334e4f24888 (diff)
Re-run pgindent, fixing a problem where comment lines after a blank comment line were output as too long, and update typedefs for the /lib directory. Also fix a case where identifiers were used as variable names in the backend, but as typedefs in ecpg (favor the backend for indenting). Backpatch to 8.1.X.
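
A note on the typedef conflict mentioned in the commit message: pgindent drives BSD indent, and indent only treats an identifier as a type name if that identifier appears in the typedef list supplied to it, which in turn changes how the '*' in pointer declarations is spaced. The sketch below is a hypothetical illustration of that behavior, not code from this commit; "sample_t" and "make_sample" are invented names standing in for the identifiers that actually clashed.

/*
 * Hypothetical illustration only.  When "sample_t" is present in the typedef
 * list, indent knows the token names a type and keeps the '*' attached to the
 * declared name, as written below; when the name is absent from the list, the
 * '*' tends to drift toward the type name instead (e.g. "sample_t * p").
 */
typedef struct sample_t
{
	int			value;
} sample_t;

static sample_t *
make_sample(void)
{
	static sample_t s = {0};

	return &s;
}

Because a single typedef list serves the whole tree, a name that ecpg declares as a typedef but the backend uses as an ordinary variable cannot be classified correctly for both sides at once; as the commit message says, such clashes are resolved here in favor of the backend usage.
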
-rw-r--r--  contrib/dblink/dblink.c | 10
-rw-r--r--  contrib/pgbench/pgbench.c | 5
-rw-r--r--  contrib/pgcrypto/pgp-decrypt.c | 14
-rw-r--r--  contrib/pgcrypto/pgp-pgsql.c | 6
-rw-r--r--  contrib/tablefunc/tablefunc.c | 8
-rw-r--r--  contrib/tsearch2/query.c | 13
-rw-r--r--  contrib/tsearch2/rank.c | 45
-rw-r--r--  contrib/tsearch2/snowball/api.h | 2
-rw-r--r--  contrib/tsearch2/snowball/english_stem.c | 158
-rw-r--r--  contrib/tsearch2/snowball/english_stem.h | 6
-rw-r--r--  contrib/tsearch2/snowball/russian_stem.c | 116
-rw-r--r--  contrib/tsearch2/snowball/russian_stem.h | 6
-rw-r--r--  src/backend/access/common/heaptuple.c | 12
-rw-r--r--  src/backend/access/common/tupdesc.c | 6
-rw-r--r--  src/backend/access/gist/gistget.c | 6
-rw-r--r--  src/backend/access/hash/hashovfl.c | 8
-rw-r--r--  src/backend/access/hash/hashpage.c | 16
-rw-r--r--  src/backend/access/heap/heapam.c | 32
-rw-r--r--  src/backend/access/heap/hio.c | 12
-rw-r--r--  src/backend/access/heap/tuptoaster.c | 12
-rw-r--r--  src/backend/access/index/genam.c | 6
-rw-r--r--  src/backend/access/nbtree/nbtinsert.c | 38
-rw-r--r--  src/backend/access/nbtree/nbtpage.c | 36
-rw-r--r--  src/backend/access/nbtree/nbtree.c | 10
-rw-r--r--  src/backend/access/nbtree/nbtsearch.c | 39
-rw-r--r--  src/backend/access/nbtree/nbtsort.c | 8
-rw-r--r--  src/backend/access/nbtree/nbtutils.c | 6
-rw-r--r--  src/backend/access/rtree/rtree.c | 32
-rw-r--r--  src/backend/access/transam/multixact.c | 83
-rw-r--r--  src/backend/access/transam/slru.c | 16
-rw-r--r--  src/backend/access/transam/subtrans.c | 12
-rw-r--r--  src/backend/access/transam/transam.c | 8
-rw-r--r--  src/backend/access/transam/twophase.c | 24
-rw-r--r--  src/backend/access/transam/varsup.c | 10
-rw-r--r--  src/backend/access/transam/xact.c | 57
-rw-r--r--  src/backend/access/transam/xlog.c | 114
-rw-r--r--  src/backend/bootstrap/bootstrap.c | 6
-rw-r--r--  src/backend/catalog/aclchk.c | 10
-rw-r--r--  src/backend/catalog/dependency.c | 10
-rw-r--r--  src/backend/catalog/heap.c | 21
-rw-r--r--  src/backend/catalog/index.c | 33
-rw-r--r--  src/backend/catalog/namespace.c | 11
-rw-r--r--  src/backend/catalog/pg_aggregate.c | 10
-rw-r--r--  src/backend/catalog/pg_constraint.c | 6
-rw-r--r--  src/backend/catalog/pg_proc.c | 14
-rw-r--r--  src/backend/catalog/pg_shdepend.c | 10
-rw-r--r--  src/backend/commands/aggregatecmds.c | 6
-rw-r--r--  src/backend/commands/analyze.c | 14
-rw-r--r--  src/backend/commands/async.c | 22
-rw-r--r--  src/backend/commands/cluster.c | 35
-rw-r--r--  src/backend/commands/comment.c | 6
-rw-r--r--  src/backend/commands/copy.c | 30
-rw-r--r--  src/backend/commands/dbcommands.c | 24
-rw-r--r--  src/backend/commands/explain.c | 8
-rw-r--r--  src/backend/commands/indexcmds.c | 14
-rw-r--r--  src/backend/commands/sequence.c | 14
-rw-r--r--  src/backend/commands/tablecmds.c | 82
-rw-r--r--  src/backend/commands/trigger.c | 21
-rw-r--r--  src/backend/commands/typecmds.c | 10
-rw-r--r--  src/backend/commands/vacuum.c | 75
-rw-r--r--  src/backend/commands/vacuumlazy.c | 30
-rw-r--r--  src/backend/commands/variable.c | 10
-rw-r--r--  src/backend/commands/view.c | 5
-rw-r--r--  src/backend/executor/execAmi.c | 8
-rw-r--r--  src/backend/executor/execGrouping.c | 8
-rw-r--r--  src/backend/executor/execJunk.c | 22
-rw-r--r--  src/backend/executor/execMain.c | 42
-rw-r--r--  src/backend/executor/execQual.c | 27
-rw-r--r--  src/backend/executor/execTuples.c | 7
-rw-r--r--  src/backend/executor/execUtils.c | 18
-rw-r--r--  src/backend/executor/functions.c | 22
-rw-r--r--  src/backend/executor/nodeAgg.c | 14
-rw-r--r--  src/backend/executor/nodeBitmapIndexscan.c | 6
-rw-r--r--  src/backend/executor/nodeHash.c | 6
-rw-r--r--  src/backend/executor/nodeHashjoin.c | 12
-rw-r--r--  src/backend/executor/nodeIndexscan.c | 6
-rw-r--r--  src/backend/executor/nodeMergejoin.c | 20
-rw-r--r--  src/backend/executor/nodeNestloop.c | 6
-rw-r--r--  src/backend/executor/nodeSubplan.c | 28
-rw-r--r--  src/backend/executor/nodeUnique.c | 10
-rw-r--r--  src/backend/executor/spi.c | 6
-rw-r--r--  src/backend/libpq/auth.c | 4
-rw-r--r--  src/backend/libpq/be-fsstubs.c | 8
-rw-r--r--  src/backend/libpq/be-secure.c | 10
-rw-r--r--  src/backend/libpq/ip.c | 60
-rw-r--r--  src/backend/main/main.c | 16
-rw-r--r--  src/backend/optimizer/geqo/geqo_eval.c | 16
-rw-r--r--  src/backend/optimizer/geqo/geqo_pool.c | 8
-rw-r--r--  src/backend/optimizer/path/allpaths.c | 6
-rw-r--r--  src/backend/optimizer/path/costsize.c | 87
-rw-r--r--  src/backend/optimizer/path/indxpath.c | 30
-rw-r--r--  src/backend/optimizer/path/joinpath.c | 16
-rw-r--r--  src/backend/optimizer/path/joinrels.c | 7
-rw-r--r--  src/backend/optimizer/path/orindxpath.c | 11
-rw-r--r--  src/backend/optimizer/path/pathkeys.c | 32
-rw-r--r--  src/backend/optimizer/plan/createplan.c | 46
-rw-r--r--  src/backend/optimizer/plan/initsplan.c | 10
-rw-r--r--  src/backend/optimizer/plan/planagg.c | 26
-rw-r--r--  src/backend/optimizer/plan/planmain.c | 18
-rw-r--r--  src/backend/optimizer/plan/planner.c | 65
-rw-r--r--  src/backend/optimizer/plan/setrefs.c | 10
-rw-r--r--  src/backend/optimizer/plan/subselect.c | 28
-rw-r--r--  src/backend/optimizer/prep/prepjointree.c | 18
-rw-r--r--  src/backend/optimizer/prep/prepqual.c | 8
-rw-r--r--  src/backend/optimizer/prep/preptlist.c | 33
-rw-r--r--  src/backend/optimizer/prep/prepunion.c | 12
-rw-r--r--  src/backend/optimizer/util/clauses.c | 20
-rw-r--r--  src/backend/optimizer/util/plancat.c | 20
-rw-r--r--  src/backend/optimizer/util/relnode.c | 12
-rw-r--r--  src/backend/optimizer/util/restrictinfo.c | 4
-rw-r--r--  src/backend/parser/analyze.c | 38
-rw-r--r--  src/backend/parser/parse_clause.c | 22
-rw-r--r--  src/backend/parser/parse_coerce.c | 20
-rw-r--r--  src/backend/parser/parse_expr.c | 14
-rw-r--r--  src/backend/parser/parse_func.c | 50
-rw-r--r--  src/backend/parser/parse_oper.c | 8
-rw-r--r--  src/backend/parser/parse_relation.c | 16
-rw-r--r--  src/backend/parser/parse_target.c | 8
-rw-r--r--  src/backend/port/beos/support.c | 2
-rw-r--r--  src/backend/port/posix_sema.c | 19
-rw-r--r--  src/backend/port/sysv_sema.c | 18
-rw-r--r--  src/backend/port/win32/signal.c | 8
-rw-r--r--  src/backend/port/win32/timer.c | 21
-rw-r--r--  src/backend/postmaster/autovacuum.c | 25
-rw-r--r--  src/backend/postmaster/pgstat.c | 14
-rw-r--r--  src/backend/postmaster/postmaster.c | 59
-rw-r--r--  src/backend/postmaster/syslogger.c | 12
-rw-r--r--  src/backend/regex/regc_locale.c | 5
-rw-r--r--  src/backend/rewrite/rewriteDefine.c | 7
-rw-r--r--  src/backend/rewrite/rewriteHandler.c | 55
-rw-r--r--  src/backend/rewrite/rewriteManip.c | 12
-rw-r--r--  src/backend/storage/buffer/bufmgr.c | 24
-rw-r--r--  src/backend/storage/buffer/localbuf.c | 4
-rw-r--r--  src/backend/storage/file/fd.c | 8
-rw-r--r--  src/backend/storage/ipc/ipc.c | 15
-rw-r--r--  src/backend/storage/ipc/procarray.c | 10
-rw-r--r--  src/backend/storage/ipc/shmem.c | 6
-rw-r--r--  src/backend/storage/ipc/sinval.c | 32
-rw-r--r--  src/backend/storage/lmgr/lock.c | 26
-rw-r--r--  src/backend/storage/lmgr/proc.c | 46
-rw-r--r--  src/backend/storage/lmgr/s_lock.c | 35
-rw-r--r--  src/backend/storage/lmgr/spin.c | 6
-rw-r--r--  src/backend/storage/page/bufpage.c | 10
-rw-r--r--  src/backend/storage/smgr/smgr.c | 10
-rw-r--r--  src/backend/tcop/fastpath.c | 8
-rw-r--r--  src/backend/tcop/postgres.c | 80
-rw-r--r--  src/backend/tcop/pquery.c | 26
-rw-r--r--  src/backend/utils/adt/arrayfuncs.c | 18
-rw-r--r--  src/backend/utils/adt/datetime.c | 18
-rw-r--r--  src/backend/utils/adt/formatting.c | 15
-rw-r--r--  src/backend/utils/adt/numeric.c | 40
-rw-r--r--  src/backend/utils/adt/oid.c | 10
-rw-r--r--  src/backend/utils/adt/pg_lzcompress.c | 8
-rw-r--r--  src/backend/utils/adt/regexp.c | 8
-rw-r--r--  src/backend/utils/adt/ri_triggers.c | 26
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 10
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 54
-rw-r--r--  src/backend/utils/adt/timestamp.c | 43
-rw-r--r--  src/backend/utils/adt/varlena.c | 28
-rw-r--r--  src/backend/utils/cache/catcache.c | 6
-rw-r--r--  src/backend/utils/cache/inval.c | 6
-rw-r--r--  src/backend/utils/cache/relcache.c | 52
-rw-r--r--  src/backend/utils/cache/typcache.c | 12
-rw-r--r--  src/backend/utils/error/elog.c | 8
-rw-r--r--  src/backend/utils/fmgr/fmgr.c | 6
-rw-r--r--  src/backend/utils/hash/dynahash.c | 6
-rw-r--r--  src/backend/utils/init/flatfiles.c | 10
-rw-r--r--  src/backend/utils/init/miscinit.c | 43
-rw-r--r--  src/backend/utils/init/postinit.c | 6
-rw-r--r--  src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c | 6
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c | 4
-rw-r--r--  src/backend/utils/misc/guc.c | 19
-rw-r--r--  src/backend/utils/misc/ps_status.c | 4
-rw-r--r--  src/backend/utils/mmgr/portalmem.c | 28
-rw-r--r--  src/backend/utils/resowner/resowner.c | 16
-rw-r--r--  src/backend/utils/sort/tuplesort.c | 11
-rw-r--r--  src/backend/utils/sort/tuplestore.c | 30
-rw-r--r--  src/backend/utils/time/tqual.c | 18
-rw-r--r--  src/bin/initdb/initdb.c | 21
-rw-r--r--  src/bin/pg_ctl/pg_ctl.c | 8
-rw-r--r--  src/bin/pg_dump/dumputils.c | 10
-rw-r--r--  src/bin/pg_dump/pg_backup_archiver.c | 15
-rw-r--r--  src/bin/pg_dump/pg_dump.c | 64
-rw-r--r--  src/bin/pg_dump/pg_dump_sort.c | 14
-rw-r--r--  src/bin/psql/common.c | 14
-rw-r--r--  src/bin/psql/describe.c | 6
-rw-r--r--  src/bin/psql/startup.c | 4
-rw-r--r--  src/include/access/tuptoaster.h | 4
-rw-r--r--  src/include/catalog/pg_constraint.h | 6
-rw-r--r--  src/include/catalog/pg_control.h | 8
-rw-r--r--  src/include/catalog/pg_shdepend.h | 6
-rw-r--r--  src/include/catalog/pg_type.h | 6
-rw-r--r--  src/include/funcapi.h | 22
-rw-r--r--  src/include/libpq/crypt.h | 4
-rw-r--r--  src/include/libpq/ip.h | 30
-rw-r--r--  src/include/libpq/libpq-be.h | 8
-rw-r--r--  src/include/miscadmin.h | 4
-rw-r--r--  src/include/nodes/execnodes.h | 14
-rw-r--r--  src/include/nodes/nodes.h | 6
-rw-r--r--  src/include/nodes/parsenodes.h | 8
-rw-r--r--  src/include/nodes/plannodes.h | 14
-rw-r--r--  src/include/storage/buf_internals.h | 4
-rw-r--r--  src/include/tcop/dest.h | 14
-rw-r--r--  src/include/utils/builtins.h | 6
-rw-r--r--  src/include/utils/catcache.h | 20
-rw-r--r--  src/include/utils/typcache.h | 6
-rw-r--r--  src/interfaces/ecpg/compatlib/informix.c | 32
-rw-r--r--  src/interfaces/ecpg/include/pgtypes_numeric.h | 6
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/datetime.c | 10
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/dt_common.c | 31
-rw-r--r--  src/interfaces/ecpg/pgtypeslib/numeric.c | 52
-rw-r--r--  src/interfaces/libpq/fe-auth.c | 18
-rw-r--r--  src/interfaces/libpq/fe-auth.h | 4
-rw-r--r--  src/interfaces/libpq/fe-connect.c | 27
-rw-r--r--  src/interfaces/libpq/fe-exec.c | 22
-rw-r--r--  src/interfaces/libpq/fe-misc.c | 20
-rw-r--r--  src/interfaces/libpq/fe-protocol2.c | 10
-rw-r--r--  src/interfaces/libpq/fe-protocol3.c | 14
-rw-r--r--  src/interfaces/libpq/libpq-fe.h | 4
-rw-r--r--  src/interfaces/libpq/libpq-int.h | 30
-rw-r--r--  src/interfaces/libpq/pthread-win32.h | 2
-rw-r--r--  src/pl/plperl/plperl.c | 22
-rw-r--r--  src/pl/plperl/ppport.h | 8
-rw-r--r--  src/pl/plpgsql/src/pl_comp.c | 58
-rw-r--r--  src/pl/plpgsql/src/pl_exec.c | 292
-rw-r--r--  src/pl/plpgsql/src/pl_funcs.c | 106
-rw-r--r--  src/pl/plpgsql/src/plpgsql.h | 106
-rw-r--r--  src/pl/tcl/pltcl.c | 60
-rw-r--r--  src/port/exec.c | 12
-rw-r--r--  src/port/path.c | 6
-rw-r--r--  src/port/strtol.c | 3
-rw-r--r--  src/port/unsetenv.c | 6
-rw-r--r--  src/test/examples/testlibpq3.c | 18
-rw-r--r--  src/timezone/pgtz.c | 4
234 files changed, 2571 insertions, 2544 deletions
diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c
index 54e787bb206..35641c2e21e 100644
--- a/contrib/dblink/dblink.c
+++ b/contrib/dblink/dblink.c
@@ -60,9 +60,9 @@
typedef struct remoteConn
{
- PGconn *conn; /* Hold the remote connection */
+ PGconn *conn; /* Hold the remote connection */
int openCursorCount; /* The number of open cursors */
- bool newXactForCursor; /* Opened a transaction for a cursor */
+ bool newXactForCursor; /* Opened a transaction for a cursor */
} remoteConn;
/*
@@ -84,8 +84,8 @@ static Oid get_relid_from_relname(text *relname_text);
static char *generate_relation_name(Oid relid);
/* Global */
-static remoteConn *pconn = NULL;
-static HTAB *remoteConnHash = NULL;
+static remoteConn *pconn = NULL;
+static HTAB *remoteConnHash = NULL;
/*
* Following is list that holds multiple remote connections.
@@ -346,7 +346,7 @@ dblink_open(PG_FUNCTION_ARGS)
else
conn = rconn->conn;
- /* If we are not in a transaction, start one */
+ /* If we are not in a transaction, start one */
if (PQtransactionStatus(conn) == PQTRANS_IDLE)
{
res = PQexec(conn, "BEGIN");
diff --git a/contrib/pgbench/pgbench.c b/contrib/pgbench/pgbench.c
index c6d95dc0ccb..b764bdf9a76 100644
--- a/contrib/pgbench/pgbench.c
+++ b/contrib/pgbench/pgbench.c
@@ -1,5 +1,5 @@
/*
- * $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.45 2005/10/29 19:38:07 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.45.2.1 2005/11/22 18:23:01 momjian Exp $
*
* pgbench: a simple benchmark program for PostgreSQL
* written by Tatsuo Ishii
@@ -1110,7 +1110,8 @@ main(int argc, char **argv)
fprintf(stderr, "Use limit/ulimt to increase the limit before using pgbench.\n");
exit(1);
}
-#endif /* #if !(defined(__CYGWIN__) || defined(__MINGW32__)) */
+#endif /* #if !(defined(__CYGWIN__) ||
+ * defined(__MINGW32__)) */
break;
case 'C':
is_connect = 1;
diff --git a/contrib/pgcrypto/pgp-decrypt.c b/contrib/pgcrypto/pgp-decrypt.c
index 63f832e2ce1..54791f0aa80 100644
--- a/contrib/pgcrypto/pgp-decrypt.c
+++ b/contrib/pgcrypto/pgp-decrypt.c
@@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/contrib/pgcrypto/pgp-decrypt.c,v 1.6 2005/10/15 02:49:06 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/pgcrypto/pgp-decrypt.c,v 1.6.2.1 2005/11/22 18:23:01 momjian Exp $
*/
#include "postgres.h"
@@ -269,14 +269,14 @@ prefix_init(void **priv_p, void *arg, PullFilter * src)
* The original purpose of the 2-byte check was to show user a
* friendly "wrong key" message. This made following possible:
*
- * "An Attack on CFB Mode Encryption As Used By OpenPGP" by Serge Mister
- * and Robert Zuccherato
+ * "An Attack on CFB Mode Encryption As Used By OpenPGP" by Serge
+ * Mister and Robert Zuccherato
*
- * To avoid being 'oracle', we delay reporting, which basically means we
- * prefer to run into corrupt packet header.
+ * To avoid being 'oracle', we delay reporting, which basically means
+ * we prefer to run into corrupt packet header.
*
- * We _could_ throw PXE_PGP_CORRUPT_DATA here, but there is possibility
- * of attack via timing, so we don't.
+ * We _could_ throw PXE_PGP_CORRUPT_DATA here, but there is
+ * possibility of attack via timing, so we don't.
*/
ctx->corrupt_prefix = 1;
}
diff --git a/contrib/pgcrypto/pgp-pgsql.c b/contrib/pgcrypto/pgp-pgsql.c
index afb926c5282..8f8fae0c8c7 100644
--- a/contrib/pgcrypto/pgp-pgsql.c
+++ b/contrib/pgcrypto/pgp-pgsql.c
@@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/contrib/pgcrypto/pgp-pgsql.c,v 1.6 2005/10/15 02:49:06 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/pgcrypto/pgp-pgsql.c,v 1.6.2.1 2005/11/22 18:23:01 momjian Exp $
*/
#include "postgres.h"
@@ -125,8 +125,8 @@ add_entropy(text *data1, text *data2, text *data3)
/*
* Try to make the feeding unpredictable.
*
- * Prefer data over keys, as it's rather likely that key is same in several
- * calls.
+ * Prefer data over keys, as it's rather likely that key is same in
+ * several calls.
*/
/* chance: 7/8 */
diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c
index 7f67f37b00c..0d8a17ea619 100644
--- a/contrib/tablefunc/tablefunc.c
+++ b/contrib/tablefunc/tablefunc.c
@@ -547,8 +547,8 @@ crosstab(PG_FUNCTION_ARGS)
* Get the next category item value, which is alway
* attribute number three.
*
- * Be careful to sssign the value to the array index based on
- * which category we are presently processing.
+ * Be careful to sssign the value to the array index based
+ * on which category we are presently processing.
*/
values[1 + i] = SPI_getvalue(spi_tuple, spi_tupdesc, 3);
@@ -870,8 +870,8 @@ get_crosstab_tuplestore(char *sql,
/*
* The provided SQL query must always return at least three columns:
*
- * 1. rowname the label for each row - column 1 in the final result 2.
- * category the label for each value-column in the final result 3.
+ * 1. rowname the label for each row - column 1 in the final result
+ * 2. category the label for each value-column in the final result 3.
* value the values used to populate the value-columns
*
* If there are more than three columns, the last two are taken as
diff --git a/contrib/tsearch2/query.c b/contrib/tsearch2/query.c
index 013f0031965..0a1805e0e75 100644
--- a/contrib/tsearch2/query.c
+++ b/contrib/tsearch2/query.c
@@ -169,7 +169,7 @@ gettoken_query(QPRS_STATE * state, int4 *val, int4 *lenval, char **strval, int2
state->state = WAITOPERATOR;
return VAL;
}
- else if ( state->state == WAITFIRSTOPERAND )
+ else if (state->state == WAITFIRSTOPERAND)
return END;
else
ereport(ERROR,
@@ -620,12 +620,13 @@ static QUERYTYPE *
/* parse query & make polish notation (postfix, but in reverse order) */
makepol(&state, pushval);
pfree(state.valstate.word);
- if (!state.num) {
+ if (!state.num)
+ {
elog(NOTICE, "Query doesn't contain lexem(s)");
- query = (QUERYTYPE*)palloc( HDRSIZEQT );
+ query = (QUERYTYPE *) palloc(HDRSIZEQT);
query->len = HDRSIZEQT;
query->size = 0;
- return query;
+ return query;
}
/* make finish struct */
@@ -912,8 +913,8 @@ to_tsquery(PG_FUNCTION_ARGS)
PG_FREE_IF_COPY(in, 1);
query = queryin(str, pushval_morph, PG_GETARG_INT32(0));
-
- if ( query->size == 0 )
+
+ if (query->size == 0)
PG_RETURN_POINTER(query);
res = clean_fakeval_v2(GETQUERY(query), &len);
diff --git a/contrib/tsearch2/rank.c b/contrib/tsearch2/rank.c
index 445898eff69..760fe11e444 100644
--- a/contrib/tsearch2/rank.c
+++ b/contrib/tsearch2/rank.c
@@ -266,8 +266,10 @@ calc_rank_or(float *w, tsvector * t, QUERYTYPE * q)
for (i = 0; i < size; i++)
{
- float resj,wjm;
- int4 jm;
+ float resj,
+ wjm;
+ int4 jm;
+
entry = find_wordentry(t, q, item[i]);
if (!entry)
continue;
@@ -283,28 +285,29 @@ calc_rank_or(float *w, tsvector * t, QUERYTYPE * q)
post = POSNULL + 1;
}
- resj = 0.0;
- wjm = -1.0;
- jm = 0;
- for (j = 0; j < dimt; j++)
- {
- resj = resj + wpos(post[j])/((j+1)*(j+1));
- if ( wpos(post[j]) > wjm ) {
- wjm = wpos(post[j]);
- jm = j;
- }
- }
-/*
- limit (sum(i/i^2),i->inf) = pi^2/6
- resj = sum(wi/i^2),i=1,noccurence,
- wi - should be sorted desc,
- don't sort for now, just choose maximum weight. This should be corrected
+ resj = 0.0;
+ wjm = -1.0;
+ jm = 0;
+ for (j = 0; j < dimt; j++)
+ {
+ resj = resj + wpos(post[j]) / ((j + 1) * (j + 1));
+ if (wpos(post[j]) > wjm)
+ {
+ wjm = wpos(post[j]);
+ jm = j;
+ }
+ }
+/*
+ limit (sum(i/i^2),i->inf) = pi^2/6
+ resj = sum(wi/i^2),i=1,noccurence,
+ wi - should be sorted desc,
+ don't sort for now, just choose maximum weight. This should be corrected
Oleg Bartunov
*/
- res = res + ( wjm + resj - wjm/((jm+1)*(jm+1)))/1.64493406685;
+ res = res + (wjm + resj - wjm / ((jm + 1) * (jm + 1))) / 1.64493406685;
}
- if ( size > 0 )
- res = res /size;
+ if (size > 0)
+ res = res / size;
pfree(item);
return res;
}
diff --git a/contrib/tsearch2/snowball/api.h b/contrib/tsearch2/snowball/api.h
index fb0a69469c6..a66935efcc9 100644
--- a/contrib/tsearch2/snowball/api.h
+++ b/contrib/tsearch2/snowball/api.h
@@ -23,7 +23,7 @@ struct SN_env
int S_size;
int I_size;
int B_size;
- symbol **S;
+ symbol **S;
int *I;
symbol *B;
};
diff --git a/contrib/tsearch2/snowball/english_stem.c b/contrib/tsearch2/snowball/english_stem.c
index de4f99114fe..ccb3d129884 100644
--- a/contrib/tsearch2/snowball/english_stem.c
+++ b/contrib/tsearch2/snowball/english_stem.c
@@ -28,8 +28,8 @@ static symbol s_0_1[5] = {'g', 'e', 'n', 'e', 'r'};
static struct among a_0[2] =
{
- /* 0 */ {6, s_0_0, -1, -1, 0},
- /* 1 */ {5, s_0_1, -1, -1, 0}
+ /* 0 */ {6, s_0_0, -1, -1, 0},
+ /* 1 */ {5, s_0_1, -1, -1, 0}
};
static symbol s_1_0[1] = {'\''};
@@ -38,9 +38,9 @@ static symbol s_1_2[2] = {'\'', 's'};
static struct among a_1[3] =
{
- /* 0 */ {1, s_1_0, -1, 1, 0},
- /* 1 */ {3, s_1_1, 0, 1, 0},
- /* 2 */ {2, s_1_2, -1, 1, 0}
+ /* 0 */ {1, s_1_0, -1, 1, 0},
+ /* 1 */ {3, s_1_1, 0, 1, 0},
+ /* 2 */ {2, s_1_2, -1, 1, 0}
};
static symbol s_2_0[3] = {'i', 'e', 'd'};
@@ -52,12 +52,12 @@ static symbol s_2_5[2] = {'u', 's'};
static struct among a_2[6] =
{
- /* 0 */ {3, s_2_0, -1, 2, 0},
- /* 1 */ {1, s_2_1, -1, 3, 0},
- /* 2 */ {3, s_2_2, 1, 2, 0},
- /* 3 */ {4, s_2_3, 1, 1, 0},
- /* 4 */ {2, s_2_4, 1, -1, 0},
- /* 5 */ {2, s_2_5, 1, -1, 0}
+ /* 0 */ {3, s_2_0, -1, 2, 0},
+ /* 1 */ {1, s_2_1, -1, 3, 0},
+ /* 2 */ {3, s_2_2, 1, 2, 0},
+ /* 3 */ {4, s_2_3, 1, 1, 0},
+ /* 4 */ {2, s_2_4, 1, -1, 0},
+ /* 5 */ {2, s_2_5, 1, -1, 0}
};
static symbol s_3_1[2] = {'b', 'b'};
@@ -75,16 +75,16 @@ static symbol s_3_12[2] = {'i', 'z'};
static struct among a_3[13] =
{
- /* 0 */ {0, 0, -1, 3, 0},
- /* 1 */ {2, s_3_1, 0, 2, 0},
- /* 2 */ {2, s_3_2, 0, 2, 0},
- /* 3 */ {2, s_3_3, 0, 2, 0},
- /* 4 */ {2, s_3_4, 0, 2, 0},
- /* 5 */ {2, s_3_5, 0, 1, 0},
- /* 6 */ {2, s_3_6, 0, 2, 0},
- /* 7 */ {2, s_3_7, 0, 2, 0},
- /* 8 */ {2, s_3_8, 0, 2, 0},
- /* 9 */ {2, s_3_9, 0, 2, 0},
+ /* 0 */ {0, 0, -1, 3, 0},
+ /* 1 */ {2, s_3_1, 0, 2, 0},
+ /* 2 */ {2, s_3_2, 0, 2, 0},
+ /* 3 */ {2, s_3_3, 0, 2, 0},
+ /* 4 */ {2, s_3_4, 0, 2, 0},
+ /* 5 */ {2, s_3_5, 0, 1, 0},
+ /* 6 */ {2, s_3_6, 0, 2, 0},
+ /* 7 */ {2, s_3_7, 0, 2, 0},
+ /* 8 */ {2, s_3_8, 0, 2, 0},
+ /* 9 */ {2, s_3_9, 0, 2, 0},
/* 10 */ {2, s_3_10, 0, 1, 0},
/* 11 */ {2, s_3_11, 0, 2, 0},
/* 12 */ {2, s_3_12, 0, 1, 0}
@@ -99,12 +99,12 @@ static symbol s_4_5[5] = {'i', 'n', 'g', 'l', 'y'};
static struct among a_4[6] =
{
- /* 0 */ {2, s_4_0, -1, 2, 0},
- /* 1 */ {3, s_4_1, 0, 1, 0},
- /* 2 */ {3, s_4_2, -1, 2, 0},
- /* 3 */ {4, s_4_3, -1, 2, 0},
- /* 4 */ {5, s_4_4, 3, 1, 0},
- /* 5 */ {5, s_4_5, -1, 2, 0}
+ /* 0 */ {2, s_4_0, -1, 2, 0},
+ /* 1 */ {3, s_4_1, 0, 1, 0},
+ /* 2 */ {3, s_4_2, -1, 2, 0},
+ /* 3 */ {4, s_4_3, -1, 2, 0},
+ /* 4 */ {5, s_4_4, 3, 1, 0},
+ /* 5 */ {5, s_4_5, -1, 2, 0}
};
static symbol s_5_0[4] = {'a', 'n', 'c', 'i'};
@@ -134,16 +134,16 @@ static symbol s_5_23[7] = {'o', 'u', 's', 'n', 'e', 's', 's'};
static struct among a_5[24] =
{
- /* 0 */ {4, s_5_0, -1, 3, 0},
- /* 1 */ {4, s_5_1, -1, 2, 0},
- /* 2 */ {3, s_5_2, -1, 13, 0},
- /* 3 */ {2, s_5_3, -1, 16, 0},
- /* 4 */ {3, s_5_4, 3, 12, 0},
- /* 5 */ {4, s_5_5, 4, 4, 0},
- /* 6 */ {4, s_5_6, 3, 8, 0},
- /* 7 */ {5, s_5_7, 3, 14, 0},
- /* 8 */ {6, s_5_8, 3, 15, 0},
- /* 9 */ {5, s_5_9, 3, 10, 0},
+ /* 0 */ {4, s_5_0, -1, 3, 0},
+ /* 1 */ {4, s_5_1, -1, 2, 0},
+ /* 2 */ {3, s_5_2, -1, 13, 0},
+ /* 3 */ {2, s_5_3, -1, 16, 0},
+ /* 4 */ {3, s_5_4, 3, 12, 0},
+ /* 5 */ {4, s_5_5, 4, 4, 0},
+ /* 6 */ {4, s_5_6, 3, 8, 0},
+ /* 7 */ {5, s_5_7, 3, 14, 0},
+ /* 8 */ {6, s_5_8, 3, 15, 0},
+ /* 9 */ {5, s_5_9, 3, 10, 0},
/* 10 */ {5, s_5_10, 3, 5, 0},
/* 11 */ {5, s_5_11, -1, 8, 0},
/* 12 */ {6, s_5_12, -1, 12, 0},
@@ -172,15 +172,15 @@ static symbol s_6_8[4] = {'n', 'e', 's', 's'};
static struct among a_6[9] =
{
- /* 0 */ {5, s_6_0, -1, 4, 0},
- /* 1 */ {5, s_6_1, -1, 6, 0},
- /* 2 */ {5, s_6_2, -1, 3, 0},
- /* 3 */ {5, s_6_3, -1, 4, 0},
- /* 4 */ {4, s_6_4, -1, 4, 0},
- /* 5 */ {6, s_6_5, -1, 1, 0},
- /* 6 */ {7, s_6_6, 5, 2, 0},
- /* 7 */ {3, s_6_7, -1, 5, 0},
- /* 8 */ {4, s_6_8, -1, 5, 0}
+ /* 0 */ {5, s_6_0, -1, 4, 0},
+ /* 1 */ {5, s_6_1, -1, 6, 0},
+ /* 2 */ {5, s_6_2, -1, 3, 0},
+ /* 3 */ {5, s_6_3, -1, 4, 0},
+ /* 4 */ {4, s_6_4, -1, 4, 0},
+ /* 5 */ {6, s_6_5, -1, 1, 0},
+ /* 6 */ {7, s_6_6, 5, 2, 0},
+ /* 7 */ {3, s_6_7, -1, 5, 0},
+ /* 8 */ {4, s_6_8, -1, 5, 0}
};
static symbol s_7_0[2] = {'i', 'c'};
@@ -204,16 +204,16 @@ static symbol s_7_17[5] = {'e', 'm', 'e', 'n', 't'};
static struct among a_7[18] =
{
- /* 0 */ {2, s_7_0, -1, 1, 0},
- /* 1 */ {4, s_7_1, -1, 1, 0},
- /* 2 */ {4, s_7_2, -1, 1, 0},
- /* 3 */ {4, s_7_3, -1, 1, 0},
- /* 4 */ {4, s_7_4, -1, 1, 0},
- /* 5 */ {3, s_7_5, -1, 1, 0},
- /* 6 */ {3, s_7_6, -1, 1, 0},
- /* 7 */ {3, s_7_7, -1, 1, 0},
- /* 8 */ {3, s_7_8, -1, 1, 0},
- /* 9 */ {2, s_7_9, -1, 1, 0},
+ /* 0 */ {2, s_7_0, -1, 1, 0},
+ /* 1 */ {4, s_7_1, -1, 1, 0},
+ /* 2 */ {4, s_7_2, -1, 1, 0},
+ /* 3 */ {4, s_7_3, -1, 1, 0},
+ /* 4 */ {4, s_7_4, -1, 1, 0},
+ /* 5 */ {3, s_7_5, -1, 1, 0},
+ /* 6 */ {3, s_7_6, -1, 1, 0},
+ /* 7 */ {3, s_7_7, -1, 1, 0},
+ /* 8 */ {3, s_7_8, -1, 1, 0},
+ /* 9 */ {2, s_7_9, -1, 1, 0},
/* 10 */ {3, s_7_10, -1, 1, 0},
/* 11 */ {3, s_7_11, -1, 2, 0},
/* 12 */ {2, s_7_12, -1, 1, 0},
@@ -229,8 +229,8 @@ static symbol s_8_1[1] = {'l'};
static struct among a_8[2] =
{
- /* 0 */ {1, s_8_0, -1, 1, 0},
- /* 1 */ {1, s_8_1, -1, 2, 0}
+ /* 0 */ {1, s_8_0, -1, 1, 0},
+ /* 1 */ {1, s_8_1, -1, 2, 0}
};
static symbol s_9_0[7] = {'s', 'u', 'c', 'c', 'e', 'e', 'd'};
@@ -244,14 +244,14 @@ static symbol s_9_7[6] = {'o', 'u', 't', 'i', 'n', 'g'};
static struct among a_9[8] =
{
- /* 0 */ {7, s_9_0, -1, -1, 0},
- /* 1 */ {7, s_9_1, -1, -1, 0},
- /* 2 */ {6, s_9_2, -1, -1, 0},
- /* 3 */ {7, s_9_3, -1, -1, 0},
- /* 4 */ {6, s_9_4, -1, -1, 0},
- /* 5 */ {7, s_9_5, -1, -1, 0},
- /* 6 */ {7, s_9_6, -1, -1, 0},
- /* 7 */ {6, s_9_7, -1, -1, 0}
+ /* 0 */ {7, s_9_0, -1, -1, 0},
+ /* 1 */ {7, s_9_1, -1, -1, 0},
+ /* 2 */ {6, s_9_2, -1, -1, 0},
+ /* 3 */ {7, s_9_3, -1, -1, 0},
+ /* 4 */ {6, s_9_4, -1, -1, 0},
+ /* 5 */ {7, s_9_5, -1, -1, 0},
+ /* 6 */ {7, s_9_6, -1, -1, 0},
+ /* 7 */ {6, s_9_7, -1, -1, 0}
};
static symbol s_10_0[5] = {'a', 'n', 'd', 'e', 's'};
@@ -275,16 +275,16 @@ static symbol s_10_17[4] = {'u', 'g', 'l', 'y'};
static struct among a_10[18] =
{
- /* 0 */ {5, s_10_0, -1, -1, 0},
- /* 1 */ {5, s_10_1, -1, -1, 0},
- /* 2 */ {4, s_10_2, -1, -1, 0},
- /* 3 */ {6, s_10_3, -1, -1, 0},
- /* 4 */ {5, s_10_4, -1, 3, 0},
- /* 5 */ {5, s_10_5, -1, 9, 0},
- /* 6 */ {6, s_10_6, -1, 7, 0},
- /* 7 */ {4, s_10_7, -1, -1, 0},
- /* 8 */ {4, s_10_8, -1, 6, 0},
- /* 9 */ {5, s_10_9, -1, 4, 0},
+ /* 0 */ {5, s_10_0, -1, -1, 0},
+ /* 1 */ {5, s_10_1, -1, -1, 0},
+ /* 2 */ {4, s_10_2, -1, -1, 0},
+ /* 3 */ {6, s_10_3, -1, -1, 0},
+ /* 4 */ {5, s_10_4, -1, 3, 0},
+ /* 5 */ {5, s_10_5, -1, 9, 0},
+ /* 6 */ {6, s_10_6, -1, 7, 0},
+ /* 7 */ {4, s_10_7, -1, -1, 0},
+ /* 8 */ {4, s_10_8, -1, 6, 0},
+ /* 9 */ {5, s_10_9, -1, 4, 0},
/* 10 */ {4, s_10_10, -1, -1, 0},
/* 11 */ {4, s_10_11, -1, 10, 0},
/* 12 */ {6, s_10_12, -1, 11, 0},
@@ -1609,12 +1609,14 @@ lab0:
return 1;
}
-extern struct SN_env *english_ISO_8859_1_create_env(void)
+extern struct SN_env *
+english_ISO_8859_1_create_env(void)
{
return SN_create_env(0, 2, 1);
}
-extern void english_ISO_8859_1_close_env(struct SN_env * z)
+extern void
+english_ISO_8859_1_close_env(struct SN_env * z)
{
SN_close_env(z);
}
diff --git a/contrib/tsearch2/snowball/english_stem.h b/contrib/tsearch2/snowball/english_stem.h
index 8a3c3816d67..c889c7bfa96 100644
--- a/contrib/tsearch2/snowball/english_stem.h
+++ b/contrib/tsearch2/snowball/english_stem.h
@@ -6,10 +6,10 @@ extern "C"
{
#endif
- extern struct SN_env *english_ISO_8859_1_create_env(void);
- extern void english_ISO_8859_1_close_env(struct SN_env * z);
+extern struct SN_env *english_ISO_8859_1_create_env(void);
+extern void english_ISO_8859_1_close_env(struct SN_env * z);
- extern int english_ISO_8859_1_stem(struct SN_env * z);
+extern int english_ISO_8859_1_stem(struct SN_env * z);
#ifdef __cplusplus
}
diff --git a/contrib/tsearch2/snowball/russian_stem.c b/contrib/tsearch2/snowball/russian_stem.c
index 213e88f7b91..a9558b3ab5f 100644
--- a/contrib/tsearch2/snowball/russian_stem.c
+++ b/contrib/tsearch2/snowball/russian_stem.c
@@ -30,15 +30,15 @@ static symbol s_0_8[6] = {0xD9, 0xD7, 0xDB, 0xC9, 0xD3, 0xD8};
static struct among a_0[9] =
{
- /* 0 */ {3, s_0_0, -1, 1, 0},
- /* 1 */ {4, s_0_1, 0, 2, 0},
- /* 2 */ {4, s_0_2, 0, 2, 0},
- /* 3 */ {1, s_0_3, -1, 1, 0},
- /* 4 */ {2, s_0_4, 3, 2, 0},
- /* 5 */ {2, s_0_5, 3, 2, 0},
- /* 6 */ {5, s_0_6, -1, 1, 0},
- /* 7 */ {6, s_0_7, 6, 2, 0},
- /* 8 */ {6, s_0_8, 6, 2, 0}
+ /* 0 */ {3, s_0_0, -1, 1, 0},
+ /* 1 */ {4, s_0_1, 0, 2, 0},
+ /* 2 */ {4, s_0_2, 0, 2, 0},
+ /* 3 */ {1, s_0_3, -1, 1, 0},
+ /* 4 */ {2, s_0_4, 3, 2, 0},
+ /* 5 */ {2, s_0_5, 3, 2, 0},
+ /* 6 */ {5, s_0_6, -1, 1, 0},
+ /* 7 */ {6, s_0_7, 6, 2, 0},
+ /* 8 */ {6, s_0_8, 6, 2, 0}
};
static symbol s_1_0[2] = {0xC0, 0xC0};
@@ -70,16 +70,16 @@ static symbol s_1_25[3] = {0xCF, 0xCD, 0xD5};
static struct among a_1[26] =
{
- /* 0 */ {2, s_1_0, -1, 1, 0},
- /* 1 */ {2, s_1_1, -1, 1, 0},
- /* 2 */ {2, s_1_2, -1, 1, 0},
- /* 3 */ {2, s_1_3, -1, 1, 0},
- /* 4 */ {2, s_1_4, -1, 1, 0},
- /* 5 */ {2, s_1_5, -1, 1, 0},
- /* 6 */ {2, s_1_6, -1, 1, 0},
- /* 7 */ {2, s_1_7, -1, 1, 0},
- /* 8 */ {2, s_1_8, -1, 1, 0},
- /* 9 */ {2, s_1_9, -1, 1, 0},
+ /* 0 */ {2, s_1_0, -1, 1, 0},
+ /* 1 */ {2, s_1_1, -1, 1, 0},
+ /* 2 */ {2, s_1_2, -1, 1, 0},
+ /* 3 */ {2, s_1_3, -1, 1, 0},
+ /* 4 */ {2, s_1_4, -1, 1, 0},
+ /* 5 */ {2, s_1_5, -1, 1, 0},
+ /* 6 */ {2, s_1_6, -1, 1, 0},
+ /* 7 */ {2, s_1_7, -1, 1, 0},
+ /* 8 */ {2, s_1_8, -1, 1, 0},
+ /* 9 */ {2, s_1_9, -1, 1, 0},
/* 10 */ {3, s_1_10, -1, 1, 0},
/* 11 */ {3, s_1_11, -1, 1, 0},
/* 12 */ {2, s_1_12, -1, 1, 0},
@@ -109,14 +109,14 @@ static symbol s_2_7[3] = {0xD5, 0xC0, 0xDD};
static struct among a_2[8] =
{
- /* 0 */ {2, s_2_0, -1, 1, 0},
- /* 1 */ {2, s_2_1, -1, 1, 0},
- /* 2 */ {2, s_2_2, -1, 1, 0},
- /* 3 */ {3, s_2_3, 2, 2, 0},
- /* 4 */ {3, s_2_4, 2, 2, 0},
- /* 5 */ {1, s_2_5, -1, 1, 0},
- /* 6 */ {2, s_2_6, 5, 1, 0},
- /* 7 */ {3, s_2_7, 6, 2, 0}
+ /* 0 */ {2, s_2_0, -1, 1, 0},
+ /* 1 */ {2, s_2_1, -1, 1, 0},
+ /* 2 */ {2, s_2_2, -1, 1, 0},
+ /* 3 */ {3, s_2_3, 2, 2, 0},
+ /* 4 */ {3, s_2_4, 2, 2, 0},
+ /* 5 */ {1, s_2_5, -1, 1, 0},
+ /* 6 */ {2, s_2_6, 5, 1, 0},
+ /* 7 */ {3, s_2_7, 6, 2, 0}
};
static symbol s_3_0[2] = {0xD3, 0xD1};
@@ -124,8 +124,8 @@ static symbol s_3_1[2] = {0xD3, 0xD8};
static struct among a_3[2] =
{
- /* 0 */ {2, s_3_0, -1, 1, 0},
- /* 1 */ {2, s_3_1, -1, 1, 0}
+ /* 0 */ {2, s_3_0, -1, 1, 0},
+ /* 1 */ {2, s_3_1, -1, 1, 0}
};
static symbol s_4_0[1] = {0xC0};
@@ -177,16 +177,16 @@ static symbol s_4_45[3] = {0xC5, 0xCE, 0xD9};
static struct among a_4[46] =
{
- /* 0 */ {1, s_4_0, -1, 2, 0},
- /* 1 */ {2, s_4_1, 0, 2, 0},
- /* 2 */ {2, s_4_2, -1, 1, 0},
- /* 3 */ {3, s_4_3, 2, 2, 0},
- /* 4 */ {3, s_4_4, 2, 2, 0},
- /* 5 */ {2, s_4_5, -1, 1, 0},
- /* 6 */ {3, s_4_6, 5, 2, 0},
- /* 7 */ {3, s_4_7, -1, 1, 0},
- /* 8 */ {3, s_4_8, -1, 2, 0},
- /* 9 */ {3, s_4_9, -1, 1, 0},
+ /* 0 */ {1, s_4_0, -1, 2, 0},
+ /* 1 */ {2, s_4_1, 0, 2, 0},
+ /* 2 */ {2, s_4_2, -1, 1, 0},
+ /* 3 */ {3, s_4_3, 2, 2, 0},
+ /* 4 */ {3, s_4_4, 2, 2, 0},
+ /* 5 */ {2, s_4_5, -1, 1, 0},
+ /* 6 */ {3, s_4_6, 5, 2, 0},
+ /* 7 */ {3, s_4_7, -1, 1, 0},
+ /* 8 */ {3, s_4_8, -1, 2, 0},
+ /* 9 */ {3, s_4_9, -1, 1, 0},
/* 10 */ {4, s_4_10, 9, 2, 0},
/* 11 */ {4, s_4_11, 9, 2, 0},
/* 12 */ {2, s_4_12, -1, 1, 0},
@@ -264,16 +264,16 @@ static symbol s_5_35[1] = {0xD9};
static struct among a_5[36] =
{
- /* 0 */ {1, s_5_0, -1, 1, 0},
- /* 1 */ {2, s_5_1, 0, 1, 0},
- /* 2 */ {2, s_5_2, 0, 1, 0},
- /* 3 */ {1, s_5_3, -1, 1, 0},
- /* 4 */ {1, s_5_4, -1, 1, 0},
- /* 5 */ {2, s_5_5, 4, 1, 0},
- /* 6 */ {2, s_5_6, 4, 1, 0},
- /* 7 */ {2, s_5_7, -1, 1, 0},
- /* 8 */ {2, s_5_8, -1, 1, 0},
- /* 9 */ {3, s_5_9, 8, 1, 0},
+ /* 0 */ {1, s_5_0, -1, 1, 0},
+ /* 1 */ {2, s_5_1, 0, 1, 0},
+ /* 2 */ {2, s_5_2, 0, 1, 0},
+ /* 3 */ {1, s_5_3, -1, 1, 0},
+ /* 4 */ {1, s_5_4, -1, 1, 0},
+ /* 5 */ {2, s_5_5, 4, 1, 0},
+ /* 6 */ {2, s_5_6, 4, 1, 0},
+ /* 7 */ {2, s_5_7, -1, 1, 0},
+ /* 8 */ {2, s_5_8, -1, 1, 0},
+ /* 9 */ {3, s_5_9, 8, 1, 0},
/* 10 */ {1, s_5_10, -1, 1, 0},
/* 11 */ {2, s_5_11, 10, 1, 0},
/* 12 */ {2, s_5_12, 10, 1, 0},
@@ -307,8 +307,8 @@ static symbol s_6_1[4] = {0xCF, 0xD3, 0xD4, 0xD8};
static struct among a_6[2] =
{
- /* 0 */ {3, s_6_0, -1, 1, 0},
- /* 1 */ {4, s_6_1, -1, 1, 0}
+ /* 0 */ {3, s_6_0, -1, 1, 0},
+ /* 1 */ {4, s_6_1, -1, 1, 0}
};
static symbol s_7_0[4] = {0xC5, 0xCA, 0xDB, 0xC5};
@@ -318,10 +318,10 @@ static symbol s_7_3[3] = {0xC5, 0xCA, 0xDB};
static struct among a_7[4] =
{
- /* 0 */ {4, s_7_0, -1, 1, 0},
- /* 1 */ {1, s_7_1, -1, 2, 0},
- /* 2 */ {1, s_7_2, -1, 3, 0},
- /* 3 */ {3, s_7_3, -1, 1, 0}
+ /* 0 */ {4, s_7_0, -1, 1, 0},
+ /* 1 */ {1, s_7_1, -1, 2, 0},
+ /* 2 */ {1, s_7_2, -1, 3, 0},
+ /* 3 */ {3, s_7_3, -1, 1, 0}
};
static unsigned char g_v[] = {35, 130, 34, 18};
@@ -915,12 +915,14 @@ lab0:
return 1;
}
-extern struct SN_env *russian_KOI8_R_create_env(void)
+extern struct SN_env *
+russian_KOI8_R_create_env(void)
{
return SN_create_env(0, 2, 0);
}
-extern void russian_KOI8_R_close_env(struct SN_env * z)
+extern void
+russian_KOI8_R_close_env(struct SN_env * z)
{
SN_close_env(z);
}
diff --git a/contrib/tsearch2/snowball/russian_stem.h b/contrib/tsearch2/snowball/russian_stem.h
index 84941b037f3..217c20abdd9 100644
--- a/contrib/tsearch2/snowball/russian_stem.h
+++ b/contrib/tsearch2/snowball/russian_stem.h
@@ -6,10 +6,10 @@ extern "C"
{
#endif
- extern struct SN_env *russian_KOI8_R_create_env(void);
- extern void russian_KOI8_R_close_env(struct SN_env * z);
+extern struct SN_env *russian_KOI8_R_create_env(void);
+extern void russian_KOI8_R_close_env(struct SN_env * z);
- extern int russian_KOI8_R_stem(struct SN_env * z);
+extern int russian_KOI8_R_stem(struct SN_env * z);
#ifdef __cplusplus
}
diff --git a/src/backend/access/common/heaptuple.c b/src/backend/access/common/heaptuple.c
index 5551f744bb2..bda7a67172c 100644
--- a/src/backend/access/common/heaptuple.c
+++ b/src/backend/access/common/heaptuple.c
@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.102 2005/10/19 22:30:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.102.2.1 2005/11/22 18:23:03 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -512,11 +512,11 @@ nocachegetattr(HeapTuple tuple,
/*
* Now we know that we have to walk the tuple CAREFULLY.
*
- * Note - This loop is a little tricky. For each non-null attribute, we
- * have to first account for alignment padding before the attr, then
- * advance over the attr based on its length. Nulls have no storage
- * and no alignment padding either. We can use/set attcacheoff until
- * we pass either a null or a var-width attribute.
+ * Note - This loop is a little tricky. For each non-null attribute,
+ * we have to first account for alignment padding before the attr,
+ * then advance over the attr based on its length. Nulls have no
+ * storage and no alignment padding either. We can use/set
+ * attcacheoff until we pass either a null or a var-width attribute.
*/
for (i = 0; i < attnum; i++)
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index cfa455beec9..d6b63bd79bd 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.112 2005/10/15 02:49:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.112.2.1 2005/11/22 18:23:03 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
@@ -49,8 +49,8 @@ CreateTemplateTupleDesc(int natts, bool hasoid)
* Allocate enough memory for the tuple descriptor, including the
* attribute rows, and set up the attribute row pointers.
*
- * Note: we assume that sizeof(struct tupleDesc) is a multiple of the struct
- * pointer alignment requirement, and hence we don't need to insert
+ * Note: we assume that sizeof(struct tupleDesc) is a multiple of the
+ * struct pointer alignment requirement, and hence we don't need to insert
* alignment padding between the struct and the array of attribute row
* pointers.
*/
diff --git a/src/backend/access/gist/gistget.c b/src/backend/access/gist/gistget.c
index 5ae48bd66e3..1e02ec082f4 100644
--- a/src/backend/access/gist/gistget.c
+++ b/src/backend/access/gist/gistget.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.52 2005/10/06 02:29:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.52.2.1 2005/11/22 18:23:03 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -392,8 +392,8 @@ gistindex_keytest(IndexTuple tuple,
* are the index datum (as a GISTENTRY*), the comparison datum, and
* the comparison operator's strategy number and subtype from pg_amop.
*
- * (Presently there's no need to pass the subtype since it'll always be
- * zero, but might as well pass it for possible future use.)
+ * (Presently there's no need to pass the subtype since it'll always
+ * be zero, but might as well pass it for possible future use.)
*/
test = FunctionCall4(&key->sk_func,
PointerGetDatum(&de),
diff --git a/src/backend/access/hash/hashovfl.c b/src/backend/access/hash/hashovfl.c
index 7289d9a0b35..6fadfb20c0a 100644
--- a/src/backend/access/hash/hashovfl.c
+++ b/src/backend/access/hash/hashovfl.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.47 2005/10/15 02:49:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.47.2.1 2005/11/22 18:23:03 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
@@ -488,9 +488,9 @@ _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno)
* It is okay to write-lock the new bitmap page while holding metapage
* write lock, because no one else could be contending for the new page.
*
- * There is some loss of concurrency in possibly doing I/O for the new page
- * while holding the metapage lock, but this path is taken so seldom that
- * it's not worth worrying about.
+ * There is some loss of concurrency in possibly doing I/O for the new
+ * page while holding the metapage lock, but this path is taken so seldom
+ * that it's not worth worrying about.
*/
buf = _hash_getbuf(rel, blkno, HASH_WRITE);
pg = BufferGetPage(buf);
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index b40c20b480b..a7da7609d79 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.52 2005/10/15 02:49:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.52.2.1 2005/11/22 18:23:03 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
@@ -400,8 +400,8 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
*
- * Ideally we would lock the new bucket too before proceeding, but if we are
- * about to cross a splitpoint then the BUCKET_TO_BLKNO mapping isn't
+ * Ideally we would lock the new bucket too before proceeding, but if we
+ * are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping isn't
* correct yet. For simplicity we update the metapage first and then
* lock. This should be okay because no one else should be trying to lock
* the new bucket yet...
@@ -420,11 +420,11 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Okay to proceed with split. Update the metapage bucket mapping info.
*
- * Since we are scribbling on the metapage data right in the shared buffer,
- * any failure in this next little bit leaves us with a big problem: the
- * metapage is effectively corrupt but could get written back to disk. We
- * don't really expect any failure, but just to be sure, establish a
- * critical section.
+ * Since we are scribbling on the metapage data right in the shared
+ * buffer, any failure in this next little bit leaves us with a big
+ * problem: the metapage is effectively corrupt but could get written back
+ * to disk. We don't really expect any failure, but just to be sure,
+ * establish a critical section.
*/
START_CRIT_SECTION();
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index c6d300fe482..4e9c0ff8bba 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.200.2.1 2005/11/20 18:38:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.200.2.2 2005/11/22 18:23:03 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -1089,7 +1089,7 @@ heap_get_latest_tid(Relation relation,
* The return value is the OID assigned to the tuple (either here or by the
* caller), or InvalidOid if no OID. The header fields of *tup are updated
* to match the stored tuple; in particular tup->t_self receives the actual
- * TID where the tuple was stored. But note that any toasting of fields
+ * TID where the tuple was stored. But note that any toasting of fields
* within the tuple data is NOT reflected into *tup.
*/
Oid
@@ -1136,8 +1136,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
* If the new tuple is too big for storage or contains already toasted
* out-of-line attributes from some other relation, invoke the toaster.
*
- * Note: below this point, heaptup is the data we actually intend to
- * store into the relation; tup is the caller's original untoasted data.
+ * Note: below this point, heaptup is the data we actually intend to store
+ * into the relation; tup is the caller's original untoasted data.
*/
if (HeapTupleHasExternal(tup) ||
(MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD))
@@ -1224,8 +1224,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
/*
* If tuple is cachable, mark it for invalidation from the caches in case
* we abort. Note it is OK to do this after WriteBuffer releases the
- * buffer, because the heaptup data structure is all in local memory,
- * not in the shared buffer.
+ * buffer, because the heaptup data structure is all in local memory, not
+ * in the shared buffer.
*/
CacheInvalidateHeapTuple(relation, heaptup);
@@ -1333,8 +1333,8 @@ l1:
* heap_lock_tuple). LockTuple will release us when we are
* next-in-line for the tuple.
*
- * If we are forced to "start over" below, we keep the tuple lock; this
- * arranges that we stay at the head of the line while rechecking
+ * If we are forced to "start over" below, we keep the tuple lock;
+ * this arranges that we stay at the head of the line while rechecking
* tuple state.
*/
if (!have_tuple_lock)
@@ -1577,7 +1577,7 @@ simple_heap_delete(Relation relation, ItemPointer tid)
*
* On success, the header fields of *newtup are updated to match the new
* stored tuple; in particular, newtup->t_self is set to the TID where the
- * new tuple was inserted. However, any TOAST changes in the new tuple's
+ * new tuple was inserted. However, any TOAST changes in the new tuple's
* data are not reflected into *newtup.
*
* In the failure cases, the routine returns the tuple's t_ctid and t_xmax.
@@ -1649,8 +1649,8 @@ l2:
* heap_lock_tuple). LockTuple will release us when we are
* next-in-line for the tuple.
*
- * If we are forced to "start over" below, we keep the tuple lock; this
- * arranges that we stay at the head of the line while rechecking
+ * If we are forced to "start over" below, we keep the tuple lock;
+ * this arranges that we stay at the head of the line while rechecking
* tuple state.
*/
if (!have_tuple_lock)
@@ -1782,8 +1782,8 @@ l2:
* show that it's already being updated, else other processes may try to
* update it themselves.
*
- * We need to invoke the toaster if there are already any out-of-line toasted
- * values present, or if the new tuple is over-threshold.
+ * We need to invoke the toaster if there are already any out-of-line
+ * toasted values present, or if the new tuple is over-threshold.
*/
newtupsize = MAXALIGN(newtup->t_len);
@@ -1886,7 +1886,7 @@ l2:
/* NO EREPORT(ERROR) from here till changes are logged */
START_CRIT_SECTION();
- RelationPutHeapTuple(relation, newbuf, heaptup); /* insert new tuple */
+ RelationPutHeapTuple(relation, newbuf, heaptup); /* insert new tuple */
if (!already_marked)
{
@@ -2123,8 +2123,8 @@ l3:
* LockTuple will release us when we are next-in-line for the tuple.
* We must do this even if we are share-locking.
*
- * If we are forced to "start over" below, we keep the tuple lock; this
- * arranges that we stay at the head of the line while rechecking
+ * If we are forced to "start over" below, we keep the tuple lock;
+ * this arranges that we stay at the head of the line while rechecking
* tuple state.
*/
if (!have_tuple_lock)
diff --git a/src/backend/access/heap/hio.c b/src/backend/access/heap/hio.c
index 800ee4a805b..440c94bf56b 100644
--- a/src/backend/access/heap/hio.c
+++ b/src/backend/access/heap/hio.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.58 2005/10/15 02:49:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.58.2.1 2005/11/22 18:23:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -296,11 +296,11 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Remember the new page as our target for future insertions.
*
- * XXX should we enter the new page into the free space map immediately, or
- * just keep it for this backend's exclusive use in the short run (until
- * VACUUM sees it)? Seems to depend on whether you expect the current
- * backend to make more insertions or not, which is probably a good bet
- * most of the time. So for now, don't add it to FSM yet.
+ * XXX should we enter the new page into the free space map immediately,
+ * or just keep it for this backend's exclusive use in the short run
+ * (until VACUUM sees it)? Seems to depend on whether you expect the
+ * current backend to make more insertions or not, which is probably a
+ * good bet most of the time. So for now, don't add it to FSM yet.
*/
relation->rd_targblock = BufferGetBlockNumber(buffer);
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index de5396a5150..2292f696512 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.53.2.1 2005/11/20 18:38:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.53.2.2 2005/11/22 18:23:04 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -1075,8 +1075,8 @@ toast_save_datum(Relation rel, Datum value)
* FormIndexDatum: this relies on the knowledge that the index columns
* are the same as the initial columns of the table.
*
- * Note also that there had better not be any user-created index on the
- * TOAST table, since we don't bother to update anything else.
+ * Note also that there had better not be any user-created index on
+ * the TOAST table, since we don't bother to update anything else.
*/
index_insert(toastidx, t_values, t_isnull,
&(toasttup->t_self),
@@ -1214,9 +1214,9 @@ toast_fetch_datum(varattrib *attr)
/*
* Read the chunks by index
*
- * Note that because the index is actually on (valueid, chunkidx) we will see
- * the chunks in chunkidx order, even though we didn't explicitly ask for
- * it.
+ * Note that because the index is actually on (valueid, chunkidx) we will
+ * see the chunks in chunkidx order, even though we didn't explicitly ask
+ * for it.
*/
nextidx = 0;
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index ed604f9c5dc..7303bd2e604 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.49 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.49.2.1 2005/11/22 18:23:04 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
@@ -203,8 +203,8 @@ systable_beginscan(Relation heapRelation,
/*
* Change attribute numbers to be index column numbers.
*
- * This code could be generalized to search for the index key numbers to
- * substitute, but for now there's no need.
+ * This code could be generalized to search for the index key numbers
+ * to substitute, but for now there's no need.
*/
for (i = 0; i < nkeys; i++)
{
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 33c7612aac5..669459bac76 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.127 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.127.2.1 2005/11/22 18:23:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -104,8 +104,8 @@ top:
* If we're not allowing duplicates, make sure the key isn't already in
* the index.
*
- * NOTE: obviously, _bt_check_unique can only detect keys that are already in
- * the index; so it cannot defend against concurrent insertions of the
+ * NOTE: obviously, _bt_check_unique can only detect keys that are already
+ * in the index; so it cannot defend against concurrent insertions of the
* same key. We protect against that by means of holding a write lock on
* the target page. Any other would-be inserter of the same key must
* acquire a write lock on the same target page, so only one would-be
@@ -114,8 +114,8 @@ top:
* our insertion, so no later inserter can fail to see our insertion.
* (This requires some care in _bt_insertonpg.)
*
- * If we must wait for another xact, we release the lock while waiting, and
- * then must start over completely.
+ * If we must wait for another xact, we release the lock while waiting,
+ * and then must start over completely.
*/
if (index_is_unique)
{
@@ -193,8 +193,8 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
/*
* We can skip items that are marked killed.
*
- * Formerly, we applied _bt_isequal() before checking the kill flag,
- * so as to fall out of the item loop as soon as possible.
+ * Formerly, we applied _bt_isequal() before checking the kill
+ * flag, so as to fall out of the item loop as soon as possible.
* However, in the presence of heavy update activity an index may
* contain many killed items with the same key; running
* _bt_isequal() on each killed item gets expensive. Furthermore
@@ -431,11 +431,11 @@ _bt_insertonpg(Relation rel,
/*
* step right to next non-dead page
*
- * must write-lock that page before releasing write lock on current
- * page; else someone else's _bt_check_unique scan could fail to
- * see our insertion. write locks on intermediate dead pages
- * won't do because we don't know when they will get de-linked
- * from the tree.
+ * must write-lock that page before releasing write lock on
+ * current page; else someone else's _bt_check_unique scan could
+ * fail to see our insertion. write locks on intermediate dead
+ * pages won't do because we don't know when they will get
+ * de-linked from the tree.
*/
Buffer rbuf = InvalidBuffer;
@@ -471,9 +471,9 @@ _bt_insertonpg(Relation rel,
/*
* Do we need to split the page to fit the item on it?
*
- * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result, so
- * this comparison is correct even though we appear to be accounting only
- * for the item and not for its line pointer.
+ * Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result,
+ * so this comparison is correct even though we appear to be accounting
+ * only for the item and not for its line pointer.
*/
if (PageGetFreeSpace(page) < itemsz)
{
@@ -1158,10 +1158,10 @@ _bt_insert_parent(Relation rel,
* the next higher level that someone constructed meanwhile, and find the
* right place to insert as for the normal case.
*
- * If we have to search for the parent level, we do so by re-descending from
- * the root. This is not super-efficient, but it's rare enough not to
- * matter. (This path is also taken when called from WAL recovery --- we
- * have no stack in that case.)
+ * If we have to search for the parent level, we do so by re-descending
+ * from the root. This is not super-efficient, but it's rare enough not
+ * to matter. (This path is also taken when called from WAL recovery ---
+ * we have no stack in that case.)
*/
if (is_root)
{
diff --git a/src/backend/access/nbtree/nbtpage.c b/src/backend/access/nbtree/nbtpage.c
index 927860030c8..8464d5478f6 100644
--- a/src/backend/access/nbtree/nbtpage.c
+++ b/src/backend/access/nbtree/nbtpage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.88 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.88.2.1 2005/11/22 18:23:04 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
@@ -440,21 +440,21 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
* have been re-used between the time the last VACUUM scanned it and
* the time the VACUUM made its FSM updates.)
*
- * In fact, it's worse than that: we can't even assume that it's safe to
- * take a lock on the reported page. If somebody else has a lock on
- * it, or even worse our own caller does, we could deadlock. (The
+ * In fact, it's worse than that: we can't even assume that it's safe
+ * to take a lock on the reported page. If somebody else has a lock
+ * on it, or even worse our own caller does, we could deadlock. (The
* own-caller scenario is actually not improbable. Consider an index
* on a serial or timestamp column. Nearly all splits will be at the
* rightmost page, so it's entirely likely that _bt_split will call us
- * while holding a lock on the page most recently acquired from FSM.
- * A VACUUM running concurrently with the previous split could well
- * have placed that page back in FSM.)
+ * while holding a lock on the page most recently acquired from FSM. A
+ * VACUUM running concurrently with the previous split could well have
+ * placed that page back in FSM.)
*
- * To get around that, we ask for only a conditional lock on the reported
- * page. If we fail, then someone else is using the page, and we may
- * reasonably assume it's not free. (If we happen to be wrong, the
- * worst consequence is the page will be lost to use till the next
- * VACUUM, which is no big problem.)
+ * To get around that, we ask for only a conditional lock on the
+ * reported page. If we fail, then someone else is using the page,
+ * and we may reasonably assume it's not free. (If we happen to be
+ * wrong, the worst consequence is the page will be lost to use till
+ * the next VACUUM, which is no big problem.)
*/
for (;;)
{
@@ -803,12 +803,12 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
* We have to lock the pages we need to modify in the standard order:
* moving right, then up. Else we will deadlock against other writers.
*
- * So, we need to find and write-lock the current left sibling of the target
- * page. The sibling that was current a moment ago could have split, so
- * we may have to move right. This search could fail if either the
- * sibling or the target page was deleted by someone else meanwhile; if
- * so, give up. (Right now, that should never happen, since page deletion
- * is only done in VACUUM and there shouldn't be multiple VACUUMs
+ * So, we need to find and write-lock the current left sibling of the
+ * target page. The sibling that was current a moment ago could have
+ * split, so we may have to move right. This search could fail if either
+ * the sibling or the target page was deleted by someone else meanwhile;
+ * if so, give up. (Right now, that should never happen, since page
+ * deletion is only done in VACUUM and there shouldn't be multiple VACUUMs
* concurrently on the same table.)
*/
if (leftsib != P_NONE)
diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 10e2fe6190d..8612554ca57 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.132 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.132.2.1 2005/11/22 18:23:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -307,8 +307,8 @@ btgettuple(PG_FUNCTION_ARGS)
* Save heap TID to use it in _bt_restscan. Then release the read lock on
* the buffer so that we aren't blocking other backends.
*
- * NOTE: we do keep the pin on the buffer! This is essential to ensure that
- * someone else doesn't delete the index entry we are stopped on.
+ * NOTE: we do keep the pin on the buffer! This is essential to ensure
+ * that someone else doesn't delete the index entry we are stopped on.
*/
if (res)
{
@@ -734,8 +734,8 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
* buffer and it will be fully initialized before we can examine it. (See
* also vacuumlazy.c, which has the same issue.)
*
- * We can skip locking for new or temp relations, however, since no one else
- * could be accessing them.
+ * We can skip locking for new or temp relations, however, since no one
+ * else could be accessing them.
*/
needLock = !RELATION_IS_LOCAL(rel);
diff --git a/src/backend/access/nbtree/nbtsearch.c b/src/backend/access/nbtree/nbtsearch.c
index e487b498820..5f795073f0b 100644
--- a/src/backend/access/nbtree/nbtsearch.c
+++ b/src/backend/access/nbtree/nbtsearch.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.96 2005/10/18 01:06:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.96.2.1 2005/11/22 18:23:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -164,10 +164,11 @@ _bt_moveright(Relation rel,
*
* When nextkey = true: move right if the scan key is >= page's high key.
*
- * The page could even have split more than once, so scan as far as needed.
+ * The page could even have split more than once, so scan as far as
+ * needed.
*
- * We also have to move right if we followed a link that brought us to a dead
- * page.
+ * We also have to move right if we followed a link that brought us to a
+ * dead page.
*/
cmpval = nextkey ? 0 : 1;
@@ -255,8 +256,8 @@ _bt_binsrch(Relation rel,
* For nextkey=false (cmpval=1), the loop invariant is: all slots before
* 'low' are < scan key, all slots at or after 'high' are >= scan key.
*
- * For nextkey=true (cmpval=0), the loop invariant is: all slots before 'low'
- * are <= scan key, all slots at or after 'high' are > scan key.
+ * For nextkey=true (cmpval=0), the loop invariant is: all slots before
+ * 'low' are <= scan key, all slots at or after 'high' are > scan key.
*
* We can fall out when high == low.
*/
@@ -282,8 +283,8 @@ _bt_binsrch(Relation rel,
* At this point we have high == low, but be careful: they could point
* past the last slot on the page.
*
- * On a leaf page, we always return the first key >= scan key (resp. > scan
- * key), which could be the last slot + 1.
+ * On a leaf page, we always return the first key >= scan key (resp. >
+ * scan key), which could be the last slot + 1.
*/
if (P_ISLEAF(opaque))
return low;
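
Editor's note: the invariants in the hunk above ("slots before low are < / <= the scan key") can be shown compactly on a plain sorted int array. The sketch below is not _bt_binsrch; binsrch_first is a hypothetical helper that mirrors the cmpval trick and the "result may be one past the last slot" behaviour.

    /* Illustrative only: first slot >= key (nextkey=false) or > key (nextkey=true). */
    #include <stddef.h>

    static size_t
    binsrch_first(const int *arr, size_t n, int key, int nextkey)
    {
        size_t  low = 0;
        size_t  high = n;
        int     cmpval = nextkey ? 0 : 1;   /* same trick the comment describes */

        while (high > low)
        {
            size_t  mid = low + (high - low) / 2;
            int     cmp = (key > arr[mid]) - (key < arr[mid]);  /* -1, 0, +1 */

            if (cmp >= cmpval)
                low = mid + 1;  /* invariant: arr[0..low-1] < key (or <= key) */
            else
                high = mid;     /* invariant: arr[high..n-1] >= key (or > key) */
        }
        return low;             /* high == low; may equal n, one past the end */
    }

For example, on {1, 3, 3, 5} with key 3 this returns index 1 when nextkey is false and index 3 when nextkey is true, matching the two loop invariants stated above.
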
@@ -350,8 +351,8 @@ _bt_compare(Relation rel,
* you think about how multi-key ordering works, you'll understand why
* this is.
*
- * We don't test for violation of this condition here, however. The initial
- * setup for the index scan had better have gotten it right (see
+ * We don't test for violation of this condition here, however. The
+ * initial setup for the index scan had better have gotten it right (see
* _bt_first).
*/
@@ -692,9 +693,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* where we need to start the scan, and set flag variables to control the
* code below.
*
- * If nextkey = false, _bt_search and _bt_binsrch will locate the first item
- * >= scan key. If nextkey = true, they will locate the first item > scan
- * key.
+ * If nextkey = false, _bt_search and _bt_binsrch will locate the first
+ * item >= scan key. If nextkey = true, they will locate the first item >
+ * scan key.
*
* If goback = true, we will then step back one item, while if goback =
* false, we will start the scan on the located item.
@@ -819,9 +820,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* than or equal to the scan key and we know that everything on later
* pages is greater than scan key.
*
- * The actually desired starting point is either this item or the prior one,
- * or in the end-of-page case it's the first item on the next page or the
- * last item on this page. We apply _bt_step if needed to get to the
+ * The actually desired starting point is either this item or the prior
+ * one, or in the end-of-page case it's the first item on the next page or
+ * the last item on this page. We apply _bt_step if needed to get to the
* right place.
*
* If _bt_step fails (meaning we fell off the end of the index in one
@@ -1044,9 +1045,9 @@ _bt_walk_left(Relation rel, Buffer buf)
* the original page got deleted and isn't in the sibling chain at all
* anymore, not that its left sibling got split more than four times.
*
- * Note that it is correct to test P_ISDELETED not P_IGNORE here, because
- * half-dead pages are still in the sibling chain. Caller must reject
- * half-dead pages if wanted.
+ * Note that it is correct to test P_ISDELETED not P_IGNORE here,
+ * because half-dead pages are still in the sibling chain. Caller
+ * must reject half-dead pages if wanted.
*/
tries = 0;
for (;;)
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 6ee5d42b63a..8bfa8130a23 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -56,7 +56,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.95 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.95.2.1 2005/11/22 18:23:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -487,9 +487,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
* the per-page available space. Note that at this point, btisz doesn't
* include the ItemId.
*
- * NOTE: similar code appears in _bt_insertonpg() to defend against oversize
- * items being inserted into an already-existing index. But during
- * creation of an index, we don't go through there.
+ * NOTE: similar code appears in _bt_insertonpg() to defend against
+ * oversize items being inserted into an already-existing index. But
+ * during creation of an index, we don't go through there.
*/
if (btisz > BTMaxItemSize(npage))
ereport(ERROR,
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 7d60c98f38d..27ec83a0f0c 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.65 2005/10/18 01:06:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.65.2.1 2005/11/22 18:23:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -549,8 +549,8 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
* able to conclude no further tuples will pass, either. We have
* to look at the scan direction and the qual type.
*
- * Note: the only case in which we would keep going after failing a
- * required qual is if there are partially-redundant quals that
+ * Note: the only case in which we would keep going after failing
+ * a required qual is if there are partially-redundant quals that
* _bt_preprocess_keys() was unable to eliminate. For example,
* given "x > 4 AND x > 10" where both are cross-type comparisons
* and so not removable, we might start the scan at the x = 4
diff --git a/src/backend/access/rtree/rtree.c b/src/backend/access/rtree/rtree.c
index d684101d261..652daa38203 100644
--- a/src/backend/access/rtree/rtree.c
+++ b/src/backend/access/rtree/rtree.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/rtree/rtree.c,v 1.92 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/rtree/rtree.c,v 1.92.2.1 2005/11/22 18:23:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -527,9 +527,9 @@ rtdosplit(Relation r,
* the left page, we expect it to get smaller. This happens in the
* internal insertion routine.
*
- * 3) Insert a pointer to the right page in the parent. This may cause the
- * parent to split. If it does, we need to repeat steps one and two for
- * each split node in the tree.
+ * 3) Insert a pointer to the right page in the parent. This may cause
+ * the parent to split. If it does, we need to repeat steps one and two
+ * for each split node in the tree.
*/
/* adjust active scans */
@@ -834,8 +834,8 @@ rtpicksplit(Relation r,
/*
* Now split up the regions between the two seeds.
*
- * The cost_vector array will contain hints for determining where each tuple
- * should go. Each record in the array will contain a boolean,
+ * The cost_vector array will contain hints for determining where each
+ * tuple should go. Each record in the array will contain a boolean,
* choose_left, that indicates which node the tuple prefers to be on, and
* the absolute difference in cost between putting the tuple in its
* favored node and in the other node.
@@ -848,9 +848,9 @@ rtpicksplit(Relation r,
* First, build the cost_vector array. The new index tuple will also be
* handled in this loop, and represented in the array, with i==newitemoff.
*
- * In the case of variable size tuples it is possible that we only have the
- * two seeds and no other tuples, in which case we don't do any of this
- * cost_vector stuff.
+ * In the case of variable size tuples it is possible that we only have
+ * the two seeds and no other tuples, in which case we don't do any of
+ * this cost_vector stuff.
*/
/* to keep compiler quiet */
@@ -966,11 +966,11 @@ rtpicksplit(Relation r,
* need not worry about any other problem than failing to fit the new
* item.)
*
- * Guttman's algorithm actually has two factors to consider (in order):
- * 1. if one node has so many tuples already assigned to it that the
- * other needs all the rest in order to satisfy the condition that
- * neither node has fewer than m tuples, then that is decisive; 2.
- * otherwise, choose the page that shows the smaller enlargement of
+ * Guttman's algorithm actually has two factors to consider (in
+ * order): 1. if one node has so many tuples already assigned to it
+ * that the other needs all the rest in order to satisfy the condition
+ * that neither node has fewer than m tuples, then that is decisive;
+ * 2. otherwise, choose the page that shows the smaller enlargement of
* its union area.
*
* I have chosen m = M/2, where M is the maximum number of tuples on a
@@ -979,8 +979,8 @@ rtpicksplit(Relation r,
* tuple on a page, if it is really big. But even with variable size
* tuples we still try to get m as close as possible to M/2.)
*
- * The question of which page shows the smaller enlargement of its union
- * area has already been answered, and the answer stored in the
+ * The question of which page shows the smaller enlargement of its
+ * union area has already been answered, and the answer stored in the
* choose_left field of the SPLITCOST record.
*/
left_feasible = (left_avail_space >= item_1_sz &&
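
Editor's note: the two decision factors of Guttman's split described above (the minimum-fill constraint m is decisive; otherwise prefer the side with the smaller enlargement) can be expressed as a small pure function. This is an illustrative sketch only; assign_to_left and its parameters are invented names standing in for rtpicksplit's bookkeeping.

    /* Illustrative only: Guttman's two split factors, in priority order. */
    #include <stdbool.h>

    /*
     * Factor 1: if one side already has so many tuples that the other side
     * needs every remaining tuple just to reach the minimum m, that is
     * decisive.  Factor 2: otherwise prefer the side whose bounding region
     * grows less (passed in here as precomputed enlargement values).
     */
    static bool
    assign_to_left(int left_count, int right_count, int remaining,
                   int m, double left_growth, double right_growth)
    {
        if (right_count + remaining <= m)
            return false;           /* right side needs all the rest */
        if (left_count + remaining <= m)
            return true;            /* left side needs all the rest */
        return left_growth <= right_growth; /* smaller enlargement wins */
    }
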
diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c
index af254da173d..f55f2c2c2fa 100644
--- a/src/backend/access/transam/multixact.c
+++ b/src/backend/access/transam/multixact.c
@@ -42,7 +42,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.11 2005/10/28 19:00:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.11.2.1 2005/11/22 18:23:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -129,22 +129,23 @@ typedef struct MultiXactStateData
* member of a MultiXact, and that MultiXact would have to be created
* during or after the lock acquisition.)
*
- * OldestVisibleMXactId[k] is the oldest MultiXactId each backend's current
- * transaction(s) think is potentially live, or InvalidMultiXactId when
- * not in a transaction or not in a transaction that's paid any attention
- * to MultiXacts yet. This is computed when first needed in a given
- * transaction, and cleared at transaction end. We can compute it as the
- * minimum of the valid OldestMemberMXactId[] entries at the time we
- * compute it (using nextMXact if none are valid). Each backend is
+ * OldestVisibleMXactId[k] is the oldest MultiXactId each backend's
+ * current transaction(s) think is potentially live, or InvalidMultiXactId
+ * when not in a transaction or not in a transaction that's paid any
+ * attention to MultiXacts yet. This is computed when first needed in a
+ * given transaction, and cleared at transaction end. We can compute it
+ * as the minimum of the valid OldestMemberMXactId[] entries at the time
+ * we compute it (using nextMXact if none are valid). Each backend is
* required not to attempt to access any SLRU data for MultiXactIds older
* than its own OldestVisibleMXactId[] setting; this is necessary because
* the checkpointer could truncate away such data at any instant.
*
- * The checkpointer can compute the safe truncation point as the oldest valid
- * value among all the OldestMemberMXactId[] and OldestVisibleMXactId[]
- * entries, or nextMXact if none are valid. Clearly, it is not possible
- * for any later-computed OldestVisibleMXactId value to be older than
- * this, and so there is no risk of truncating data that is still needed.
+ * The checkpointer can compute the safe truncation point as the oldest
+ * valid value among all the OldestMemberMXactId[] and
+ * OldestVisibleMXactId[] entries, or nextMXact if none are valid.
+ * Clearly, it is not possible for any later-computed OldestVisibleMXactId
+ * value to be older than this, and so there is no risk of truncating data
+ * that is still needed.
*/
MultiXactId perBackendXactIds[1]; /* VARIABLE LENGTH ARRAY */
} MultiXactStateData;
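
Editor's note: the reflowed comment above defines the checkpointer's safe truncation point as the oldest valid OldestMemberMXactId[] or OldestVisibleMXactId[] entry, falling back to nextMXact. A standalone sketch of that computation follows; the array parameters and safe_truncation_point are assumptions of the sketch, and plain '<' stands in for wraparound-aware MultiXactId comparison.

    /* Illustrative only: oldest valid published value, else nextMXact. */
    #include <stdint.h>

    typedef uint32_t MultiXactId;
    #define InvalidMultiXactId ((MultiXactId) 0)

    static MultiXactId
    safe_truncation_point(const MultiXactId *oldest_member,
                          const MultiXactId *oldest_visible,
                          int nbackends, MultiXactId next_mxact)
    {
        MultiXactId result = next_mxact;    /* fallback when nothing is valid */
        int         i;

        for (i = 0; i < nbackends; i++)
        {
            if (oldest_member[i] != InvalidMultiXactId && oldest_member[i] < result)
                result = oldest_member[i];
            if (oldest_visible[i] != InvalidMultiXactId && oldest_visible[i] < result)
                result = oldest_visible[i];
        }
        return result;
    }
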
@@ -631,8 +632,8 @@ CreateMultiXactId(int nxids, TransactionId *xids)
}
/*
- * Assign the MXID and offsets range to use, and make sure there is
- * space in the OFFSETs and MEMBERs files. NB: this routine does
+ * Assign the MXID and offsets range to use, and make sure there is space
+ * in the OFFSETs and MEMBERs files. NB: this routine does
* START_CRIT_SECTION().
*/
multi = GetNewMultiXactId(nxids, &offset);
@@ -788,9 +789,9 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
ExtendMultiXactOffset(result);
/*
- * Reserve the members space, similarly to above. Also, be
- * careful not to return zero as the starting offset for any multixact.
- * See GetMultiXactIdMembers() for motivation.
+ * Reserve the members space, similarly to above. Also, be careful not to
+ * return zero as the starting offset for any multixact. See
+ * GetMultiXactIdMembers() for motivation.
*/
nextOffset = MultiXactState->nextOffset;
if (nextOffset == 0)
@@ -804,8 +805,8 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
ExtendMultiXactMember(nextOffset, nxids);
/*
- * Critical section from here until caller has written the data into
- * the just-reserved SLRU space; we don't want to error out with a partly
+ * Critical section from here until caller has written the data into the
+ * just-reserved SLRU space; we don't want to error out with a partly
* written MultiXact structure. (In particular, failing to write our
* start offset after advancing nextMXact would effectively corrupt the
* previous MultiXact.)
@@ -819,8 +820,8 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
* We don't care about MultiXactId wraparound here; it will be handled by
* the next iteration. But note that nextMXact may be InvalidMultiXactId
* after this routine exits, so anyone else looking at the variable must
- * be prepared to deal with that. Similarly, nextOffset may be zero,
- * but we won't use that as the actual start offset of the next multixact.
+ * be prepared to deal with that. Similarly, nextOffset may be zero, but
+ * we won't use that as the actual start offset of the next multixact.
*/
(MultiXactState->nextMXact)++;
@@ -881,7 +882,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
* SLRU data if we did try to examine it.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
- * seen, it implies undetected ID wraparound has occurred. We just
+ * seen, it implies undetected ID wraparound has occurred. We just
* silently assume that such an ID is no longer running.
*
* Shared lock is enough here since we aren't modifying any global state.
@@ -897,7 +898,7 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* Acquire the shared lock just long enough to grab the current counter
- * values. We may need both nextMXact and nextOffset; see below.
+ * values. We may need both nextMXact and nextOffset; see below.
*/
LWLockAcquire(MultiXactGenLock, LW_SHARED);
@@ -915,27 +916,27 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* Find out the offset at which we need to start reading MultiXactMembers
- * and the number of members in the multixact. We determine the latter
- * as the difference between this multixact's starting offset and the
- * next one's. However, there are some corner cases to worry about:
+ * and the number of members in the multixact. We determine the latter as
+ * the difference between this multixact's starting offset and the next
+ * one's. However, there are some corner cases to worry about:
*
- * 1. This multixact may be the latest one created, in which case there
- * is no next one to look at. In this case the nextOffset value we just
+ * 1. This multixact may be the latest one created, in which case there is
+ * no next one to look at. In this case the nextOffset value we just
* saved is the correct endpoint.
*
- * 2. The next multixact may still be in process of being filled in:
- * that is, another process may have done GetNewMultiXactId but not yet
- * written the offset entry for that ID. In that scenario, it is
- * guaranteed that the offset entry for that multixact exists (because
- * GetNewMultiXactId won't release MultiXactGenLock until it does)
- * but contains zero (because we are careful to pre-zero offset pages).
- * Because GetNewMultiXactId will never return zero as the starting offset
- * for a multixact, when we read zero as the next multixact's offset, we
- * know we have this case. We sleep for a bit and try again.
+ * 2. The next multixact may still be in process of being filled in: that
+ * is, another process may have done GetNewMultiXactId but not yet written
+ * the offset entry for that ID. In that scenario, it is guaranteed that
+ * the offset entry for that multixact exists (because GetNewMultiXactId
+ * won't release MultiXactGenLock until it does) but contains zero
+ * (because we are careful to pre-zero offset pages). Because
+ * GetNewMultiXactId will never return zero as the starting offset for a
+ * multixact, when we read zero as the next multixact's offset, we know we
+ * have this case. We sleep for a bit and try again.
*
- * 3. Because GetNewMultiXactId increments offset zero to offset one
- * to handle case #2, there is an ambiguity near the point of offset
- * wraparound. If we see next multixact's offset is one, is that our
+ * 3. Because GetNewMultiXactId increments offset zero to offset one to
+ * handle case #2, there is an ambiguity near the point of offset
+ * wraparound. If we see next multixact's offset is one, is that our
* multixact's actual endpoint, or did it end at zero with a subsequent
* increment? We handle this using the knowledge that if the zero'th
* member slot wasn't filled, it'll contain zero, and zero isn't a valid
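
Editor's note: cases 1 and 2 above reduce to "member count = next multixact's start offset minus ours, where a next offset of zero means the entry is still being written, so retry". The sketch below illustrates just that; multixact_member_count is a made-up helper, and the case-3 wraparound ambiguity is deliberately left out.

    /* Illustrative only: member count from adjacent start offsets. */
    #include <stdint.h>

    typedef uint32_t MultiXactOffset;

    /*
     * A next offset of zero means the next multixact's entry has been
     * reserved but not yet written (offset pages are pre-zeroed and zero is
     * never a real start offset), so the caller should sleep briefly and try
     * again.  Otherwise the difference gives the number of members.
     */
    static int
    multixact_member_count(MultiXactOffset our_offset,
                           MultiXactOffset next_offset,
                           int *retry)
    {
        if (next_offset == 0)
        {
            *retry = 1;         /* case 2: not filled in yet */
            return 0;
        }
        *retry = 0;
        return (int) (next_offset - our_offset);    /* unsigned wraparound is OK */
    }
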
diff --git a/src/backend/access/transam/slru.c b/src/backend/access/transam/slru.c
index b8273d84f59..041c6cf84e0 100644
--- a/src/backend/access/transam/slru.c
+++ b/src/backend/access/transam/slru.c
@@ -42,7 +42,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.29 2005/11/03 00:23:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.29.2.1 2005/11/22 18:23:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -278,7 +278,7 @@ SimpleLruReadPage(SlruCtl ctl, int pageno, TransactionId xid)
SlruRecentlyUsed(shared, slotno);
/*
- * We must grab the per-buffer lock to do I/O. To avoid deadlock,
+ * We must grab the per-buffer lock to do I/O. To avoid deadlock,
* must release ControlLock while waiting for per-buffer lock.
* Fortunately, most of the time the per-buffer lock shouldn't be
* already held, so we can do this:
@@ -352,10 +352,10 @@ SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
pageno = shared->page_number[slotno];
/*
- * We must grab the per-buffer lock to do I/O. To avoid deadlock,
- * must release ControlLock while waiting for per-buffer lock.
- * Fortunately, most of the time the per-buffer lock shouldn't be
- * already held, so we can do this:
+ * We must grab the per-buffer lock to do I/O. To avoid deadlock, must
+ * release ControlLock while waiting for per-buffer lock. Fortunately,
+ * most of the time the per-buffer lock shouldn't be already held, so we
+ * can do this:
*/
if (!LWLockConditionalAcquire(shared->buffer_locks[slotno],
LW_EXCLUSIVE))
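
Editor's note: the SLRU comments above describe a protocol of never waiting for a per-buffer lock while holding the control lock. The following standalone pthread sketch (not SimpleLru code; lock_buffer_safely and must_recheck are invented) shows the fast try-lock path and the slow path that drops the outer lock first.

    /* Illustrative only: drop the outer lock before waiting on the inner one. */
    #include <pthread.h>

    static void
    lock_buffer_safely(pthread_mutex_t *control_lock,
                       pthread_mutex_t *buffer_lock,
                       int *must_recheck)
    {
        if (pthread_mutex_trylock(buffer_lock) == 0)
        {
            *must_recheck = 0;  /* got it without ever releasing the control lock */
            return;
        }

        pthread_mutex_unlock(control_lock);
        pthread_mutex_lock(buffer_lock);    /* may block, but we hold nothing else */
        pthread_mutex_lock(control_lock);
        *must_recheck = 1;      /* buffer state may have changed meanwhile */
    }

When the slow path is taken, the caller has to revalidate whatever it knew about the buffer, exactly as the surrounding SLRU code does after reacquiring ControlLock.
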
@@ -754,8 +754,8 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
/*
* We need to do I/O. Normal case is that we have to write it out,
* but it's possible in the worst case to have selected a read-busy
- * page. In that case we just wait for someone else to complete
- * the I/O, which we can do by waiting for the per-buffer lock.
+ * page. In that case we just wait for someone else to complete the
+ * I/O, which we can do by waiting for the per-buffer lock.
*/
if (shared->page_status[bestslot] == SLRU_PAGE_READ_IN_PROGRESS)
{
diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c
index 7671eb6a45e..3c111e62c66 100644
--- a/src/backend/access/transam/subtrans.c
+++ b/src/backend/access/transam/subtrans.c
@@ -22,7 +22,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.11 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.11.2.1 2005/11/22 18:23:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -261,8 +261,8 @@ ShutdownSUBTRANS(void)
/*
* Flush dirty SUBTRANS pages to disk
*
- * This is not actually necessary from a correctness point of view. We do it
- * merely as a debugging aid.
+ * This is not actually necessary from a correctness point of view. We do
+ * it merely as a debugging aid.
*/
SimpleLruFlush(SubTransCtl, false);
}
@@ -276,9 +276,9 @@ CheckPointSUBTRANS(void)
/*
* Flush dirty SUBTRANS pages to disk
*
- * This is not actually necessary from a correctness point of view. We do it
- * merely to improve the odds that writing of dirty pages is done by the
- * checkpoint process and not by backends.
+ * This is not actually necessary from a correctness point of view. We do
+ * it merely to improve the odds that writing of dirty pages is done by
+ * the checkpoint process and not by backends.
*/
SimpleLruFlush(SubTransCtl, true);
}
diff --git a/src/backend/access/transam/transam.c b/src/backend/access/transam/transam.c
index 59852520521..ed6c4bb608c 100644
--- a/src/backend/access/transam/transam.c
+++ b/src/backend/access/transam/transam.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.66 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.66.2.1 2005/11/22 18:23:05 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
@@ -174,9 +174,9 @@ TransactionIdDidCommit(TransactionId transactionId)
* pg_subtrans; instead assume that the parent crashed without cleaning up
* its children.
*
- * Originally we Assert'ed that the result of SubTransGetParent was not zero.
- * However with the introduction of prepared transactions, there can be a
- * window just after database startup where we do not have complete
+ * Originally we Assert'ed that the result of SubTransGetParent was not
+ * zero. However with the introduction of prepared transactions, there can
+ * be a window just after database startup where we do not have complete
* knowledge in pg_subtrans of the transactions after TransactionXmin.
* StartupSUBTRANS() has ensured that any missing information will be
* zeroed. Since this case should not happen under normal conditions, it
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 5423060653d..ca44e19a8ef 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.16 2005/10/29 00:31:50 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.16.2.1 2005/11/22 18:23:05 momjian Exp $
*
* NOTES
* Each global transaction is associated with a global transaction
@@ -851,10 +851,10 @@ EndPrepare(GlobalTransaction gxact)
/*
* Create the 2PC state file.
*
- * Note: because we use BasicOpenFile(), we are responsible for ensuring the
- * FD gets closed in any error exit path. Once we get into the critical
- * section, though, it doesn't matter since any failure causes PANIC
- * anyway.
+ * Note: because we use BasicOpenFile(), we are responsible for ensuring
+ * the FD gets closed in any error exit path. Once we get into the
+ * critical section, though, it doesn't matter since any failure causes
+ * PANIC anyway.
*/
TwoPhaseFilePath(path, xid);
@@ -911,8 +911,8 @@ EndPrepare(GlobalTransaction gxact)
* The state file isn't valid yet, because we haven't written the correct
* CRC yet. Before we do that, insert entry in WAL and flush it to disk.
*
- * Between the time we have written the WAL entry and the time we write out
- * the correct state file CRC, we have an inconsistency: the xact is
+ * Between the time we have written the WAL entry and the time we write
+ * out the correct state file CRC, we have an inconsistency: the xact is
* prepared according to WAL but not according to our on-disk state. We
* use a critical section to force a PANIC if we are unable to complete
* the write --- then, WAL replay should repair the inconsistency. The
@@ -1344,11 +1344,11 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
* it just long enough to make a list of the XIDs that require fsyncing,
* and then do the I/O afterwards.
*
- * This approach creates a race condition: someone else could delete a GXACT
- * between the time we release TwoPhaseStateLock and the time we try to
- * open its state file. We handle this by special-casing ENOENT failures:
- * if we see that, we verify that the GXACT is no longer valid, and if so
- * ignore the failure.
+ * This approach creates a race condition: someone else could delete a
+ * GXACT between the time we release TwoPhaseStateLock and the time we try
+ * to open its state file. We handle this by special-casing ENOENT
+ * failures: if we see that, we verify that the GXACT is no longer valid,
+ * and if so ignore the failure.
*/
if (max_prepared_xacts <= 0)
return; /* nothing to do */
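
Editor's note: the race described above (a state file listed under the lock may be gone by the time we open it) is handled by special-casing ENOENT. A hedged, self-contained sketch of that pattern follows; fsync_state_file and the still_valid callback are assumptions, not the twophase.c functions.

    /* Illustrative only: tolerate a file legitimately removed by someone else. */
    #include <errno.h>
    #include <fcntl.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static bool
    fsync_state_file(const char *path, bool (*still_valid)(const char *))
    {
        int fd = open(path, O_RDWR);

        if (fd < 0)
        {
            if (errno == ENOENT && !still_valid(path))
                return true;    /* raced with a concurrent removal; skip it */
            perror(path);
            return false;       /* any other failure is a real error */
        }

        if (fsync(fd) != 0)
        {
            perror(path);
            close(fd);
            return false;
        }
        close(fd);
        return true;
    }
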
diff --git a/src/backend/access/transam/varsup.c b/src/backend/access/transam/varsup.c
index 874a9736c70..21c0e069219 100644
--- a/src/backend/access/transam/varsup.c
+++ b/src/backend/access/transam/varsup.c
@@ -6,7 +6,7 @@
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.68 2005/10/29 00:31:50 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.68.2.1 2005/11/22 18:23:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -56,8 +56,8 @@ GetNewTransactionId(bool isSubXact)
* (which gives an escape hatch to the DBA who ignored all those
* warnings).
*
- * Test is coded to fall out as fast as possible during normal operation, ie,
- * when the warn limit is set and we haven't violated it.
+ * Test is coded to fall out as fast as possible during normal operation,
+ * ie, when the warn limit is set and we haven't violated it.
*/
if (TransactionIdFollowsOrEquals(xid, ShmemVariableCache->xidWarnLimit) &&
TransactionIdIsValid(ShmemVariableCache->xidWarnLimit))
@@ -268,8 +268,8 @@ GetNewObjectId(void)
* right after a wrap occurs, so as to avoid a possibly large number of
* iterations in GetNewOid.) Note we are relying on unsigned comparison.
*
- * During initdb, we start the OID generator at FirstBootstrapObjectId, so we
- * only enforce wrapping to that point when in bootstrap or standalone
+ * During initdb, we start the OID generator at FirstBootstrapObjectId, so
+ * we only enforce wrapping to that point when in bootstrap or standalone
* mode. The first time through this routine after normal postmaster
* start, the counter will be forced up to FirstNormalObjectId. This
* mechanism leaves the OIDs between FirstBootstrapObjectId and
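
Editor's note: the comment above leans on unsigned comparison to push the OID counter back above the reserved range after it wraps. The sketch below illustrates only that point; get_next_oid is hypothetical and the constant is the usual FirstNormalObjectId value, used here purely for illustration.

    /* Illustrative only: skip reserved low values after an unsigned wrap. */
    #include <stdint.h>

    typedef uint32_t Oid;

    #define FIRST_NORMAL_OID 16384      /* values below this are reserved */

    /*
     * Because the counter is unsigned, wrapping past 2^32-1 lands it on a
     * small value again, and the simple '<' test pushes it back above the
     * reserved range before handing it out.
     */
    static Oid
    get_next_oid(Oid *counter)
    {
        if (*counter < (Oid) FIRST_NORMAL_OID)
            *counter = (Oid) FIRST_NORMAL_OID;

        return (*counter)++;
    }
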
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index ea19e075640..a1bac34e168 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.215 2005/10/15 02:49:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.215.2.1 2005/11/22 18:23:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -750,8 +750,8 @@ RecordTransactionCommit(void)
* XLOG record generated by nextval will hit the disk before we report
* the transaction committed.
*
- * Note: if we generated a commit record above, MyXactMadeXLogEntry will
- * certainly be set now.
+ * Note: if we generated a commit record above, MyXactMadeXLogEntry
+ * will certainly be set now.
*/
if (MyXactMadeXLogEntry)
{
@@ -762,8 +762,8 @@ RecordTransactionCommit(void)
* because on most Unixen, the minimum select() delay is 10msec or
* more, which is way too long.)
*
- * We do not sleep if enableFsync is not turned on, nor if there are
- * fewer than CommitSiblings other backends with active
+ * We do not sleep if enableFsync is not turned on, nor if there
+ * are fewer than CommitSiblings other backends with active
* transactions.
*/
if (CommitDelay > 0 && enableFsync &&
@@ -993,10 +993,10 @@ RecordTransactionAbort(void)
* nowhere in permanent storage, so no one else will ever care if it
* committed.)
*
- * We do not flush XLOG to disk unless deleting files, since the default
- * assumption after a crash would be that we aborted, anyway. For the
- * same reason, we don't need to worry about interlocking against
- * checkpoint start.
+ * We do not flush XLOG to disk unless deleting files, since the
+ * default assumption after a crash would be that we aborted, anyway.
+ * For the same reason, we don't need to worry about interlocking
+ * against checkpoint start.
*/
if (MyLastRecPtr.xrecoff != 0 || nrels > 0)
{
@@ -1042,8 +1042,8 @@ RecordTransactionAbort(void)
* Mark the transaction aborted in clog. This is not absolutely
* necessary but we may as well do it while we are here.
*
- * The ordering here isn't critical but it seems best to mark the parent
- * first. This assures an atomic transition of all the
+ * The ordering here isn't critical but it seems best to mark the
+ * parent first. This assures an atomic transition of all the
* subtransactions to aborted state from the point of view of
* concurrent TransactionIdDidAbort calls.
*/
@@ -1520,11 +1520,11 @@ CommitTransaction(void)
* it's too late to abort the transaction. This should be just
* noncritical resource releasing.
*
- * The ordering of operations is not entirely random. The idea is: release
- * resources visible to other backends (eg, files, buffer pins); then
- * release locks; then release backend-local resources. We want to release
- * locks at the point where any backend waiting for us will see our
- * transaction as being fully cleaned up.
+ * The ordering of operations is not entirely random. The idea is:
+ * release resources visible to other backends (eg, files, buffer pins);
+ * then release locks; then release backend-local resources. We want to
+ * release locks at the point where any backend waiting for us will see
+ * our transaction as being fully cleaned up.
*
* Resources that can be associated with individual queries are handled by
* the ResourceOwner mechanism. The other calls here are for backend-wide
@@ -1630,9 +1630,9 @@ PrepareTransaction(void)
* Do pre-commit processing (most of this stuff requires database access,
* and in fact could still cause an error...)
*
- * It is possible for PrepareHoldablePortals to invoke functions that queue
- * deferred triggers, and it's also possible that triggers create holdable
- * cursors. So we have to loop until there's nothing left to do.
+ * It is possible for PrepareHoldablePortals to invoke functions that
+ * queue deferred triggers, and it's also possible that triggers create
+ * holdable cursors. So we have to loop until there's nothing left to do.
*/
for (;;)
{
@@ -1715,9 +1715,9 @@ PrepareTransaction(void)
/*
* Here is where we really truly prepare.
*
- * We have to record transaction prepares even if we didn't make any updates,
- * because the transaction manager might get confused if we lose a global
- * transaction.
+ * We have to record transaction prepares even if we didn't make any
+ * updates, because the transaction manager might get confused if we lose
+ * a global transaction.
*/
EndPrepare(gxact);
@@ -1868,10 +1868,11 @@ AbortTransaction(void)
* s->currentUser, since it may not be set yet; instead rely on internal
* state of miscinit.c.
*
- * (Note: it is not necessary to restore session authorization here because
- * that can only be changed via GUC, and GUC will take care of rolling it
- * back if need be. However, an error within a SECURITY DEFINER function
- * could send control here with the wrong current userid.)
+ * (Note: it is not necessary to restore session authorization here
+ * because that can only be changed via GUC, and GUC will take care of
+ * rolling it back if need be. However, an error within a SECURITY
+ * DEFINER function could send control here with the wrong current
+ * userid.)
*/
AtAbort_UserId();
@@ -2353,8 +2354,8 @@ AbortCurrentTransaction(void)
/*
* Here, we are already in an aborted transaction state and are
- * waiting for a ROLLBACK, but for some reason we failed again!
- * So we just remain in the abort state.
+ * waiting for a ROLLBACK, but for some reason we failed again! So
+ * we just remain in the abort state.
*/
case TBLOCK_ABORT:
case TBLOCK_SUBABORT:
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 66db5d9dd26..5722540b0c2 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.222 2005/10/29 00:31:50 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.222.2.1 2005/11/22 18:23:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -571,11 +571,11 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
* the whole record in the order "rdata, then backup blocks, then record
* header".
*
- * We may have to loop back to here if a race condition is detected below. We
- * could prevent the race by doing all this work while holding the insert
- * lock, but it seems better to avoid doing CRC calculations while holding
- * the lock. This means we have to be careful about modifying the rdata
- * chain until we know we aren't going to loop back again. The only
+ * We may have to loop back to here if a race condition is detected below.
+ * We could prevent the race by doing all this work while holding the
+ * insert lock, but it seems better to avoid doing CRC calculations while
+ * holding the lock. This means we have to be careful about modifying the
+ * rdata chain until we know we aren't going to loop back again. The only
* change we allow ourselves to make earlier is to set rdt->data = NULL in
* chain items we have decided we will have to back up the whole buffer
* for. This is OK because we will certainly decide the same thing again
@@ -763,9 +763,9 @@ begin:;
* now irrevocably changed the input rdata chain. At the exit of this
* loop, write_len includes the backup block data.
*
- * Also set the appropriate info bits to show which buffers were backed up.
- * The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th distinct buffer
- * value (ignoring InvalidBuffer) appearing in the rdata chain.
+ * Also set the appropriate info bits to show which buffers were backed
+ * up. The i'th XLR_SET_BKP_BLOCK bit corresponds to the i'th distinct
+ * buffer value (ignoring InvalidBuffer) appearing in the rdata chain.
*/
write_len = len;
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
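
Editor's note: the rule stated above, that the i'th backup-block info bit corresponds to the i'th distinct buffer backed up, is simple bit bookkeeping. The sketch below shows it with a plain low-order bit layout; the real WAL record header uses its own bit positions, and backup_block_bits is an invented name.

    /* Illustrative only: one info bit per backed-up buffer slot. */
    #include <stdint.h>

    #define MAX_BKP_BLOCKS 3

    static uint8_t
    backup_block_bits(const int backed_up[MAX_BKP_BLOCKS])
    {
        uint8_t info = 0;
        int     i;

        for (i = 0; i < MAX_BKP_BLOCKS; i++)
        {
            if (backed_up[i])
                info |= (uint8_t) (1 << i);     /* bit i <=> i'th buffer */
        }
        return info;
    }
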
@@ -1666,20 +1666,20 @@ XLogFlush(XLogRecPtr record)
* problem; most likely, the requested flush point is past end of XLOG.
* This has been seen to occur when a disk page has a corrupted LSN.
*
- * Formerly we treated this as a PANIC condition, but that hurts the system's
- * robustness rather than helping it: we do not want to take down the
- * whole system due to corruption on one data page. In particular, if the
- * bad page is encountered again during recovery then we would be unable
- * to restart the database at all! (This scenario has actually happened
- * in the field several times with 7.1 releases. Note that we cannot get
- * here while InRedo is true, but if the bad page is brought in and marked
- * dirty during recovery then CreateCheckPoint will try to flush it at the
- * end of recovery.)
+ * Formerly we treated this as a PANIC condition, but that hurts the
+ * system's robustness rather than helping it: we do not want to take down
+ * the whole system due to corruption on one data page. In particular, if
+ * the bad page is encountered again during recovery then we would be
+ * unable to restart the database at all! (This scenario has actually
+ * happened in the field several times with 7.1 releases. Note that we
+ * cannot get here while InRedo is true, but if the bad page is brought in
+ * and marked dirty during recovery then CreateCheckPoint will try to
+ * flush it at the end of recovery.)
*
- * The current approach is to ERROR under normal conditions, but only WARNING
- * during recovery, so that the system can be brought up even if there's a
- * corrupt LSN. Note that for calls from xact.c, the ERROR will be
- * promoted to PANIC since xact.c calls this routine inside a critical
+ * The current approach is to ERROR under normal conditions, but only
+ * WARNING during recovery, so that the system can be brought up even if
+ * there's a corrupt LSN. Note that for calls from xact.c, the ERROR will
+ * be promoted to PANIC since xact.c calls this routine inside a critical
* section. However, calls from bufmgr.c are not within critical sections
* and so we will not force a restart for a bad LSN on a data page.
*/
@@ -2152,14 +2152,14 @@ RestoreArchivedFile(char *path, const char *xlogfname,
* preserved correctly when we copied to archive. Our aim is robustness,
* so we elect not to do this.
*
- * If we cannot obtain the log file from the archive, however, we will try to
- * use the XLOGDIR file if it exists. This is so that we can make use of
- * log segments that weren't yet transferred to the archive.
+ * If we cannot obtain the log file from the archive, however, we will try
+ * to use the XLOGDIR file if it exists. This is so that we can make use
+ * of log segments that weren't yet transferred to the archive.
*
- * Notice that we don't actually overwrite any files when we copy back from
- * archive because the recoveryRestoreCommand may inadvertently restore
- * inappropriate xlogs, or they may be corrupt, so we may wish to fallback
- * to the segments remaining in current XLOGDIR later. The
+ * Notice that we don't actually overwrite any files when we copy back
+ * from archive because the recoveryRestoreCommand may inadvertently
+ * restore inappropriate xlogs, or they may be corrupt, so we may wish to
+ * fallback to the segments remaining in current XLOGDIR later. The
* copy-from-archive filename is always the same, ensuring that we don't
* run out of disk space on long recoveries.
*/
@@ -2246,11 +2246,11 @@ RestoreArchivedFile(char *path, const char *xlogfname,
* command apparently succeeded, but let's make sure the file is
* really there now and has the correct size.
*
- * XXX I made wrong-size a fatal error to ensure the DBA would notice it,
- * but is that too strong? We could try to plow ahead with a local
- * copy of the file ... but the problem is that there probably isn't
- * one, and we'd incorrectly conclude we've reached the end of WAL and
- * we're done recovering ...
+ * XXX I made wrong-size a fatal error to ensure the DBA would notice
+ * it, but is that too strong? We could try to plow ahead with a
+ * local copy of the file ... but the problem is that there probably
+ * isn't one, and we'd incorrectly conclude we've reached the end of
+ * WAL and we're done recovering ...
*/
if (stat(xlogpath, &stat_buf) == 0)
{
@@ -3533,8 +3533,8 @@ ReadControlFile(void)
/*
* Do compatibility checking immediately. We do this here for 2 reasons:
*
- * (1) if the database isn't compatible with the backend executable, we want
- * to abort before we can possibly do any damage;
+ * (1) if the database isn't compatible with the backend executable, we
+ * want to abort before we can possibly do any damage;
*
* (2) this code is executed in the postmaster, so the setlocale() will
* propagate to forked backends, which aren't going to read this file for
@@ -4148,9 +4148,9 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
* descriptive of what our current database state is, because that is what
* we replayed from.
*
- * Note that if we are establishing a new timeline, ThisTimeLineID is already
- * set to the new value, and so we will create a new file instead of
- * overwriting any existing file.
+ * Note that if we are establishing a new timeline, ThisTimeLineID is
+ * already set to the new value, and so we will create a new file instead
+ * of overwriting any existing file.
*/
snprintf(recoveryPath, MAXPGPATH, XLOGDIR "/RECOVERYXLOG");
XLogFilePath(xlogpath, ThisTimeLineID, endLogId, endLogSeg);
@@ -4341,8 +4341,8 @@ StartupXLOG(void)
/*
* Read control file and check XLOG status looks valid.
*
- * Note: in most control paths, *ControlFile is already valid and we need not
- * do ReadControlFile() here, but might as well do it to be sure.
+ * Note: in most control paths, *ControlFile is already valid and we need
+ * not do ReadControlFile() here, but might as well do it to be sure.
*/
ReadControlFile();
@@ -4766,14 +4766,14 @@ StartupXLOG(void)
/*
* Perform a new checkpoint to update our recovery activity to disk.
*
- * Note that we write a shutdown checkpoint rather than an on-line one.
- * This is not particularly critical, but since we may be assigning a
- * new TLI, using a shutdown checkpoint allows us to have the rule
- * that TLI only changes in shutdown checkpoints, which allows some
- * extra error checking in xlog_redo.
+ * Note that we write a shutdown checkpoint rather than an on-line
+ * one. This is not particularly critical, but since we may be
+ * assigning a new TLI, using a shutdown checkpoint allows us to have
+ * the rule that TLI only changes in shutdown checkpoints, which
+ * allows some extra error checking in xlog_redo.
*
- * In case we had to use the secondary checkpoint, make sure that it will
- * still be shown as the secondary checkpoint after this
+ * In case we had to use the secondary checkpoint, make sure that it
+ * will still be shown as the secondary checkpoint after this
* CreateCheckPoint operation; we don't want the broken primary
* checkpoint to become prevCheckPoint...
*/
@@ -5106,10 +5106,10 @@ CreateCheckPoint(bool shutdown, bool force)
* (Perhaps it'd make even more sense to checkpoint only when the previous
* checkpoint record is in a different xlog page?)
*
- * We have to make two tests to determine that nothing has happened since the
- * start of the last checkpoint: current insertion point must match the
- * end of the last checkpoint record, and its redo pointer must point to
- * itself.
+ * We have to make two tests to determine that nothing has happened since
+ * the start of the last checkpoint: current insertion point must match
+ * the end of the last checkpoint record, and its redo pointer must point
+ * to itself.
*/
if (!shutdown && !force)
{
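
Editor's note: the two-part test described above (current insertion point equals the end of the previous checkpoint record, and that checkpoint's redo pointer points at itself) can be written as a small predicate. This is a sketch under assumed names; RecPtr stands in for XLogRecPtr and can_skip_checkpoint is not a backend function.

    /* Illustrative only: "nothing has happened since the last checkpoint". */
    #include <stdbool.h>
    #include <stdint.h>

    typedef struct
    {
        uint32_t xlogid;        /* log file number */
        uint32_t xrecoff;       /* byte offset within the file */
    } RecPtr;

    static bool
    recptr_eq(RecPtr a, RecPtr b)
    {
        return a.xlogid == b.xlogid && a.xrecoff == b.xrecoff;
    }

    static bool
    can_skip_checkpoint(RecPtr insert_pos,
                        RecPtr prev_ckpt_start, RecPtr prev_ckpt_end,
                        RecPtr prev_ckpt_redo)
    {
        return recptr_eq(insert_pos, prev_ckpt_end) &&
               recptr_eq(prev_ckpt_redo, prev_ckpt_start);
    }
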
@@ -5198,11 +5198,11 @@ CreateCheckPoint(bool shutdown, bool force)
* Having constructed the checkpoint record, ensure all shmem disk buffers
* and commit-log buffers are flushed to disk.
*
- * This I/O could fail for various reasons. If so, we will fail to complete
- * the checkpoint, but there is no reason to force a system panic.
- * Accordingly, exit critical section while doing it. (If we are doing a
- * shutdown checkpoint, we probably *should* panic --- but that will
- * happen anyway because we'll still be inside the critical section
+ * This I/O could fail for various reasons. If so, we will fail to
+ * complete the checkpoint, but there is no reason to force a system
+ * panic. Accordingly, exit critical section while doing it. (If we are
+ * doing a shutdown checkpoint, we probably *should* panic --- but that
+ * will happen anyway because we'll still be inside the critical section
* established by ShutdownXLOG.)
*/
END_CRIT_SECTION();
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index 898b6f72a29..40928c8b607 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.208 2005/10/20 20:05:44 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.208.2.1 2005/11/22 18:23:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -466,8 +466,8 @@ BootstrapMain(int argc, char *argv[])
/*
* Process bootstrap input.
*
- * the sed script boot.sed renamed yyparse to Int_yyparse for the bootstrap
- * parser to avoid conflicts with the normal SQL parser
+ * the sed script boot.sed renamed yyparse to Int_yyparse for the
+ * bootstrap parser to avoid conflicts with the normal SQL parser
*/
Int_yyparse();
diff --git a/src/backend/catalog/aclchk.c b/src/backend/catalog/aclchk.c
index 15a197af81b..86bead6b5ee 100644
--- a/src/backend/catalog/aclchk.c
+++ b/src/backend/catalog/aclchk.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.120 2005/10/15 02:49:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.120.2.1 2005/11/22 18:23:06 momjian Exp $
*
* NOTES
* See acl.h.
@@ -794,8 +794,8 @@ ExecuteGrantStmt_Language(GrantStmt *stmt)
* Get owner ID and working copy of existing ACL. If there's no ACL,
* substitute the proper default.
*
- * Note: for now, languages are treated as owned by the bootstrap user.
- * We should add an owner column to pg_language instead.
+ * Note: for now, languages are treated as owned by the bootstrap
+ * user. We should add an owner column to pg_language instead.
*/
ownerId = BOOTSTRAP_SUPERUSERID;
aclDatum = SysCacheGetAttr(LANGNAME, tuple, Anum_pg_language_lanacl,
@@ -1732,8 +1732,8 @@ pg_namespace_aclmask(Oid nsp_oid, Oid roleid,
* the namespace. If we don't have CREATE TEMP, act as though we have
* only USAGE (and not CREATE) rights.
*
- * This may seem redundant given the check in InitTempTableNamespace, but it
- * really isn't since current user ID may have changed since then. The
+ * This may seem redundant given the check in InitTempTableNamespace, but
+ * it really isn't since current user ID may have changed since then. The
* upshot of this behavior is that a SECURITY DEFINER function can create
* temp tables that can then be accessed (if permission is granted) by
* code in the same session that doesn't have permissions to create temp
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index 92d72af0f9c..4839a5acb97 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.47 2005/10/15 02:49:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.47.2.1 2005/11/22 18:23:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -276,8 +276,8 @@ findAutoDeletableObjects(const ObjectAddress *object,
* that depend on it. For each one that is AUTO or INTERNAL, visit the
* referencing object.
*
- * When dropping a whole object (subId = 0), find pg_depend records for its
- * sub-objects too.
+ * When dropping a whole object (subId = 0), find pg_depend records for
+ * its sub-objects too.
*/
ScanKeyInit(&key[0],
Anum_pg_depend_refclassid,
@@ -411,8 +411,8 @@ recursiveDeletion(const ObjectAddress *object,
* avoid infinite recursion in the case of cycles. Also, some dependency
* types require extra processing here.
*
- * When dropping a whole object (subId = 0), remove all pg_depend records for
- * its sub-objects too.
+ * When dropping a whole object (subId = 0), remove all pg_depend records
+ * for its sub-objects too.
*/
ScanKeyInit(&key[0],
Anum_pg_depend_classid,
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index f8a4ff769af..6213fe9faa2 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.292 2005/10/18 01:06:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.292.2.1 2005/11/22 18:23:06 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -697,8 +697,8 @@ heap_create_with_catalog(const char *relname,
/*
* Allocate an OID for the relation, unless we were told what to use.
*
- * The OID will be the relfilenode as well, so make sure it doesn't collide
- * with either pg_class OIDs or existing physical files.
+ * The OID will be the relfilenode as well, so make sure it doesn't
+ * collide with either pg_class OIDs or existing physical files.
*/
if (!OidIsValid(relid))
relid = GetNewRelFileNode(reltablespace, shared_relation,
@@ -724,8 +724,8 @@ heap_create_with_catalog(const char *relname,
* since defining a relation also defines a complex type, we add a new
* system type corresponding to the new relation.
*
- * NOTE: we could get a unique-index failure here, in case the same name has
- * already been used for a type.
+ * NOTE: we could get a unique-index failure here, in case the same name
+ * has already been used for a type.
*/
new_type_oid = AddNewRelationType(relname,
relnamespace,
@@ -778,9 +778,9 @@ heap_create_with_catalog(const char *relname,
/*
* store constraints and defaults passed in the tupdesc, if any.
*
- * NB: this may do a CommandCounterIncrement and rebuild the relcache entry,
- * so the relation must be valid and self-consistent at this point. In
- * particular, there are not yet constraints and defaults anywhere.
+ * NB: this may do a CommandCounterIncrement and rebuild the relcache
+ * entry, so the relation must be valid and self-consistent at this point.
+ * In particular, there are not yet constraints and defaults anywhere.
*/
StoreConstraints(new_rel_desc, tupdesc);
@@ -1329,8 +1329,9 @@ StoreRelCheck(Relation rel, char *ccname, char *ccbin)
/*
* Find columns of rel that are used in ccbin
*
- * NB: pull_var_clause is okay here only because we don't allow subselects in
- * check constraints; it would fail to examine the contents of subselects.
+ * NB: pull_var_clause is okay here only because we don't allow subselects
+ * in check constraints; it would fail to examine the contents of
+ * subselects.
*/
varList = pull_var_clause(expr, false);
keycount = list_length(varList);
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index a25f34b85e0..ca082ecfac6 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.261 2005/10/15 02:49:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.261.2.1 2005/11/22 18:23:06 momjian Exp $
*
*
* INTERFACE ROUTINES
@@ -524,8 +524,8 @@ index_create(Oid heapRelationId,
/*
* Allocate an OID for the index, unless we were told what to use.
*
- * The OID will be the relfilenode as well, so make sure it doesn't collide
- * with either pg_class OIDs or existing physical files.
+ * The OID will be the relfilenode as well, so make sure it doesn't
+ * collide with either pg_class OIDs or existing physical files.
*/
if (!OidIsValid(indexRelationId))
indexRelationId = GetNewRelFileNode(tableSpaceId, shared_relation,
@@ -600,16 +600,16 @@ index_create(Oid heapRelationId,
/*
* Register constraint and dependencies for the index.
*
- * If the index is from a CONSTRAINT clause, construct a pg_constraint entry.
- * The index is then linked to the constraint, which in turn is linked to
- * the table. If it's not a CONSTRAINT, make the dependency directly on
- * the table.
+ * If the index is from a CONSTRAINT clause, construct a pg_constraint
+ * entry. The index is then linked to the constraint, which in turn is
+ * linked to the table. If it's not a CONSTRAINT, make the dependency
+ * directly on the table.
*
* We don't need a dependency on the namespace, because there'll be an
* indirect dependency via our parent table.
*
- * During bootstrap we can't register any dependencies, and we don't try to
- * make a constraint either.
+ * During bootstrap we can't register any dependencies, and we don't try
+ * to make a constraint either.
*/
if (!IsBootstrapProcessingMode())
{
@@ -737,8 +737,8 @@ index_create(Oid heapRelationId,
* delayed till later (ALTER TABLE can save work in some cases with this).
* Otherwise, we call the AM routine that constructs the index.
*
- * In normal processing mode, the heap and index relations are closed, but we
- * continue to hold the ShareLock on the heap and the exclusive lock on
+ * In normal processing mode, the heap and index relations are closed, but
+ * we continue to hold the ShareLock on the heap and the exclusive lock on
* the index that we acquired above, until end of transaction.
*/
if (IsBootstrapProcessingMode())
@@ -1243,8 +1243,8 @@ UpdateStats(Oid relid, double reltuples)
* tuple in-place. (Note: as of PG 8.0 this isn't called during
* bootstrap, but leave the code here for possible future use.)
*
- * We also must cheat if reindexing pg_class itself, because the target index
- * may presently not be part of the set of indexes that
+ * We also must cheat if reindexing pg_class itself, because the target
+ * index may presently not be part of the set of indexes that
* CatalogUpdateIndexes would update (see reindex_relation). In this case
* the stats updates will not be WAL-logged and so could be lost in a
* crash. This seems OK considering VACUUM does the same thing.
@@ -1745,9 +1745,10 @@ reindex_relation(Oid relid, bool toast_too)
* entry for its own pg_class row because we do setNewRelfilenode() before
* we do index_build().
*
- * Note that we also clear pg_class's rd_oidindex until the loop is done, so
- * that that index can't be accessed either. This means we cannot safely
- * generate new relation OIDs while in the loop; shouldn't be a problem.
+ * Note that we also clear pg_class's rd_oidindex until the loop is done,
+ * so that that index can't be accessed either. This means we cannot
+ * safely generate new relation OIDs while in the loop; shouldn't be a
+ * problem.
*/
is_pg_class = (RelationGetRelid(rel) == RelationRelationId);
doneIndexes = NIL;
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 0cafa9f9faf..133177d6fe8 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -13,7 +13,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.79 2005/10/15 02:49:14 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.79.2.1 2005/11/22 18:23:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -958,10 +958,11 @@ OpclassGetCandidates(Oid amid)
* something we already accepted? If so, keep only the one that
* appears earlier in the search path.
*
- * If we have an ordered list from SearchSysCacheList (the normal case),
- * then any conflicting opclass must immediately adjoin this one in
- * the list, so we only need to look at the newest result item. If we
- * have an unordered list, we have to scan the whole result list.
+ * If we have an ordered list from SearchSysCacheList (the normal
+ * case), then any conflicting opclass must immediately adjoin this
+ * one in the list, so we only need to look at the newest result item.
+ * If we have an unordered list, we have to scan the whole result
+ * list.
*/
if (resultList)
{
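[Annotation] The rewrapped comment in the OpclassGetCandidates hunk above relies on a small ordering property: because the syscache list returns candidates in order, any conflicting opclass sits next to the one just accepted, so only the newest accepted entry needs to be checked. Below is a minimal standalone C sketch of that idea; the Candidate struct, its fields, and keep_best_per_key are invented for the illustration and are not backend code.

    #include <stdio.h>

    typedef struct
    {
        int key;        /* e.g. the opclass input type */
        int pathpos;    /* position in the search path; lower wins */
    } Candidate;

    /* Input sorted by key, so duplicates are adjacent: compare with out[nout-1] only. */
    static int
    keep_best_per_key(const Candidate *in, int nin, Candidate *out)
    {
        int nout = 0;

        for (int i = 0; i < nin; i++)
        {
            if (nout > 0 && out[nout - 1].key == in[i].key)
            {
                if (in[i].pathpos < out[nout - 1].pathpos)
                    out[nout - 1] = in[i];  /* earlier in search path wins */
                continue;
            }
            out[nout++] = in[i];
        }
        return nout;
    }

    int
    main(void)
    {
        Candidate in[] = {{23, 2}, {23, 1}, {25, 1}, {1042, 3}, {1042, 2}};
        Candidate out[5];
        int n = keep_best_per_key(in, 5, out);

        for (int i = 0; i < n; i++)
            printf("key=%d pathpos=%d\n", out[i].key, out[i].pathpos);
        return 0;
    }

With an unordered input, the inner check would instead have to scan every entry already kept, which is exactly the distinction the comment draws.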
diff --git a/src/backend/catalog/pg_aggregate.c b/src/backend/catalog/pg_aggregate.c
index fb7562e3062..2b80d507535 100644
--- a/src/backend/catalog/pg_aggregate.c
+++ b/src/backend/catalog/pg_aggregate.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.76 2005/10/15 02:49:14 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.76.2.1 2005/11/22 18:23:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -104,10 +104,10 @@ AggregateCreate(const char *aggName,
* enforce_generic_type_consistency, if transtype isn't polymorphic) must
* exactly match declared transtype.
*
- * In the non-polymorphic-transtype case, it might be okay to allow a rettype
- * that's binary-coercible to transtype, but I'm not quite convinced that
- * it's either safe or useful. When transtype is polymorphic we *must*
- * demand exact equality.
+ * In the non-polymorphic-transtype case, it might be okay to allow a
+ * rettype that's binary-coercible to transtype, but I'm not quite
+ * convinced that it's either safe or useful. When transtype is
+ * polymorphic we *must* demand exact equality.
*/
if (rettype != aggTransType)
ereport(ERROR,
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index cf18051f52d..83b24584f2d 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.27 2005/10/15 02:49:14 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.27.2.1 2005/11/22 18:23:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -497,8 +497,8 @@ RemoveConstraintById(Oid conId)
/*
* XXX for now, do nothing special when dropping a domain constraint
*
- * Probably there should be some form of locking on the domain type, but
- * we have no such concept at the moment.
+ * Probably there should be some form of locking on the domain type,
+ * but we have no such concept at the moment.
*/
}
else
diff --git a/src/backend/catalog/pg_proc.c b/src/backend/catalog/pg_proc.c
index b2559a0e77c..de2a5bf554f 100644
--- a/src/backend/catalog/pg_proc.c
+++ b/src/backend/catalog/pg_proc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.135 2005/10/29 00:31:50 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.135.2.1 2005/11/22 18:23:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -581,8 +581,8 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
* expression results will be unresolvable. The check will be done at
* runtime instead.
*
- * We can run the text through the raw parser though; this will at least
- * catch silly syntactic errors.
+ * We can run the text through the raw parser though; this will at
+ * least catch silly syntactic errors.
*/
if (!haspolyarg)
{
@@ -651,8 +651,8 @@ function_parse_error_transpose(const char *prosrc)
* Nothing to do unless we are dealing with a syntax error that has a
* cursor position.
*
- * Some PLs may prefer to report the error position as an internal error to
- * begin with, so check that too.
+ * Some PLs may prefer to report the error position as an internal error
+ * to begin with, so check that too.
*/
origerrposition = geterrposition();
if (origerrposition <= 0)
@@ -767,8 +767,8 @@ match_prosrc_to_literal(const char *prosrc, const char *literal,
* string literal. It does not handle the SQL syntax for literals
* continued across line boundaries.
*
- * We do the comparison a character at a time, not a byte at a time, so that
- * we can do the correct cursorpos math.
+ * We do the comparison a character at a time, not a byte at a time, so
+ * that we can do the correct cursorpos math.
*/
while (*prosrc)
{
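[Annotation] The match_prosrc_to_literal comment above explains that the scan must advance one character at a time, not one byte at a time, so the reported cursor position is correct for multibyte text. The following self-contained sketch shows the same counting discipline using the C library's mblen(); it only stands in for the backend's own multibyte handling, and first_mismatch_charpos is an invented helper.

    #include <stdlib.h>
    #include <string.h>
    #include <locale.h>
    #include <stdio.h>

    /* Return the 1-based character position of the first mismatch, or 0 if none. */
    static int
    first_mismatch_charpos(const char *a, const char *b)
    {
        int charpos = 1;

        mblen(NULL, 0);             /* reset conversion state */
        while (*a)
        {
            int len = mblen(a, MB_CUR_MAX);

            if (len <= 0)
                len = 1;            /* treat bad bytes as single characters */
            if (strncmp(a, b, len) != 0)
                return charpos;     /* character count, not byte count */
            a += len;
            b += len;
            charpos++;
        }
        return 0;
    }

    int
    main(void)
    {
        setlocale(LC_CTYPE, "");
        printf("%d\n", first_mismatch_charpos("select xyz", "select xyy"));
        return 0;
    }

Counting bytes instead of characters would overstate the error position whenever a multibyte character precedes the mismatch.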
diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c
index 4cce7ba13cf..d379a231341 100644
--- a/src/backend/catalog/pg_shdepend.c
+++ b/src/backend/catalog/pg_shdepend.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.3 2005/10/15 02:49:14 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.3.2.1 2005/11/22 18:23:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -402,8 +402,8 @@ updateAclDependencies(Oid classId, Oid objectId, Oid ownerId, bool isGrant,
/*
* Skip the owner: he has an OWNER shdep entry instead. (This is
- * not just a space optimization; it makes ALTER OWNER easier.
- * See notes in changeDependencyOnOwner.)
+ * not just a space optimization; it makes ALTER OWNER easier. See
+ * notes in changeDependencyOnOwner.)
*/
if (roleid == ownerId)
continue;
@@ -572,8 +572,8 @@ checkSharedDependencies(Oid classId, Oid objectId)
/*
* Report seems unreasonably long, so reduce it to per-database info
*
- * Note: we don't ever suppress per-database totals, which should be OK
- * as long as there aren't too many databases ...
+ * Note: we don't ever suppress per-database totals, which should be
+ * OK as long as there aren't too many databases ...
*/
descs.len = 0; /* reset to empty */
descs.data[0] = '\0';
diff --git a/src/backend/commands/aggregatecmds.c b/src/backend/commands/aggregatecmds.c
index 160cd8e488a..7ef2b34ca3b 100644
--- a/src/backend/commands/aggregatecmds.c
+++ b/src/backend/commands/aggregatecmds.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.30 2005/10/15 02:49:14 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.30.2.1 2005/11/22 18:23:06 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -119,8 +119,8 @@ DefineAggregate(List *names, List *parameters)
/*
* look up the aggregate's base type (input datatype) and transtype.
*
- * We have historically allowed the command to look like basetype = 'ANY' so
- * we must do a case-insensitive comparison for the name ANY. Ugh.
+ * We have historically allowed the command to look like basetype = 'ANY'
+ * so we must do a case-insensitive comparison for the name ANY. Ugh.
*
* basetype can be a pseudo-type, but transtype can't, since we need to be
* able to store values of the transtype. However, we can allow
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 431e39f3b07..e5cf86621de 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.89 2005/10/15 02:49:15 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.89.2.1 2005/11/22 18:23:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -891,9 +891,9 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
* If we didn't find as many tuples as we wanted then we're done. No sort
* is needed, since they're already in order.
*
- * Otherwise we need to sort the collected tuples by position (itempointer).
- * It's not worth worrying about corner cases where the tuples are already
- * sorted.
+ * Otherwise we need to sort the collected tuples by position
+ * (itempointer). It's not worth worrying about corner cases where the
+ * tuples are already sorted.
*/
if (numrows == targrows)
qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
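[Annotation] The hunk above rewraps the comment explaining that, when the sample was filled by replacement, the collected rows must be sorted back into physical order before use. A self-contained sketch of the same qsort pattern follows; the SamplePos struct and compare_sample_pos stand in for the heap tuples and the backend's compare_rows callback and are invented for the example.

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct
    {
        unsigned int block;     /* heap block number */
        unsigned int offset;    /* line pointer within the block */
    } SamplePos;

    static int
    compare_sample_pos(const void *a, const void *b)
    {
        const SamplePos *pa = (const SamplePos *) a;
        const SamplePos *pb = (const SamplePos *) b;

        if (pa->block != pb->block)
            return (pa->block < pb->block) ? -1 : 1;
        if (pa->offset != pb->offset)
            return (pa->offset < pb->offset) ? -1 : 1;
        return 0;
    }

    int
    main(void)
    {
        SamplePos rows[] = {{7, 3}, {2, 5}, {7, 1}, {2, 2}};

        qsort(rows, 4, sizeof(SamplePos), compare_sample_pos);
        for (int i = 0; i < 4; i++)
            printf("(%u,%u)\n", rows[i].block, rows[i].offset);
        return 0;
    }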
@@ -1849,9 +1849,9 @@ compute_scalar_stats(VacAttrStatsP stats,
* Now scan the values in order, find the most common ones, and also
* accumulate ordering-correlation statistics.
*
- * To determine which are most common, we first have to count the number
- * of duplicates of each value. The duplicates are adjacent in the
- * sorted list, so a brute-force approach is to compare successive
+ * To determine which are most common, we first have to count the
+ * number of duplicates of each value. The duplicates are adjacent in
+ * the sorted list, so a brute-force approach is to compare successive
* datum values until we find two that are not equal. However, that
* requires N-1 invocations of the datum comparison routine, which are
* completely redundant with work that was done during the sort. (The
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
index 2186aa8d288..cb54c1d3ebc 100644
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.127 2005/11/03 17:11:34 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.127.2.1 2005/11/22 18:23:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -820,18 +820,18 @@ EnableNotifyInterrupt(void)
* steps. (A very small time window, perhaps, but Murphy's Law says you
* can hit it...) Instead, we first set the enable flag, then test the
* occurred flag. If we see an unserviced interrupt has occurred, we
- * re-clear the enable flag before going off to do the service work.
- * (That prevents re-entrant invocation of ProcessIncomingNotify() if
- * another interrupt occurs.) If an interrupt comes in between the setting
- * and clearing of notifyInterruptEnabled, then it will have done the
- * service work and left notifyInterruptOccurred zero, so we have to check
- * again after clearing enable. The whole thing has to be in a loop in
- * case another interrupt occurs while we're servicing the first. Once we
- * get out of the loop, enable is set and we know there is no unserviced
+ * re-clear the enable flag before going off to do the service work. (That
+ * prevents re-entrant invocation of ProcessIncomingNotify() if another
+ * interrupt occurs.) If an interrupt comes in between the setting and
+ * clearing of notifyInterruptEnabled, then it will have done the service
+ * work and left notifyInterruptOccurred zero, so we have to check again
+ * after clearing enable. The whole thing has to be in a loop in case
+ * another interrupt occurs while we're servicing the first. Once we get
+ * out of the loop, enable is set and we know there is no unserviced
* interrupt.
*
- * NB: an overenthusiastic optimizing compiler could easily break this code.
- * Hopefully, they all understand what "volatile" means these days.
+ * NB: an overenthusiastic optimizing compiler could easily break this
+ * code. Hopefully, they all understand what "volatile" means these days.
*/
for (;;)
{
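[Annotation] The reflowed comment above describes the enable/occurred flag protocol used to avoid losing a NOTIFY interrupt that arrives while interrupts are being re-enabled, and why the flags must be volatile. Here is a minimal standalone model of that protocol; the flag names, the stub service routine, and the simulated "signal" are invented, and a real handler would set interrupt_occurred itself.

    #include <signal.h>
    #include <stdio.h>

    static volatile sig_atomic_t interrupt_enabled = 0;
    static volatile sig_atomic_t interrupt_occurred = 0;

    static void
    service_interrupt(void)
    {
        interrupt_occurred = 0;
        printf("serviced pending interrupt\n");
    }

    static void
    enable_interrupts(void)
    {
        for (;;)
        {
            interrupt_enabled = 1;      /* from here on the handler may do the work */
            if (!interrupt_occurred)
                break;                  /* enabled, and nothing left unserviced */

            /*
             * An interrupt slipped in while we were disabled: clear the enable
             * flag again so the handler cannot re-enter the service routine,
             * do the work ourselves, then loop to re-check.
             */
            interrupt_enabled = 0;
            service_interrupt();
        }
    }

    int
    main(void)
    {
        interrupt_occurred = 1;         /* pretend a signal arrived while disabled */
        enable_interrupts();
        return 0;
    }

Without volatile, a compiler could hoist the flag reads out of the loop, which is the failure mode the "overenthusiastic optimizing compiler" sentence warns about.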
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 740250835db..208bae99bea 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.141 2005/10/29 00:31:51 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.141.2.1 2005/11/22 18:23:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -177,8 +177,8 @@ cluster(ClusterStmt *stmt)
/*
* Create special memory context for cross-transaction storage.
*
- * Since it is a child of PortalContext, it will go away even in case of
- * error.
+ * Since it is a child of PortalContext, it will go away even in case
+ * of error.
*/
cluster_context = AllocSetContextCreate(PortalContext,
"Cluster",
@@ -242,9 +242,9 @@ cluster_rel(RelToCluster *rvtc, bool recheck)
* Since we may open a new transaction for each relation, we have to check
* that the relation still is what we think it is.
*
- * If this is a single-transaction CLUSTER, we can skip these tests. We *must*
- * skip the one on indisclustered since it would reject an attempt to
- * cluster a not-previously-clustered index.
+ * If this is a single-transaction CLUSTER, we can skip these tests. We
+ * *must* skip the one on indisclustered since it would reject an attempt
+ * to cluster a not-previously-clustered index.
*/
if (recheck)
{
@@ -360,9 +360,9 @@ check_index_is_clusterable(Relation OldHeap, Oid indexOid, bool recheck)
RelationGetRelationName(OldIndex)),
recheck
? errhint("You may be able to work around this by marking column \"%s\" NOT NULL, or use ALTER TABLE ... SET WITHOUT CLUSTER to remove the cluster specification from the table.",
- NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))
+ NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))
: errhint("You may be able to work around this by marking column \"%s\" NOT NULL.",
- NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))));
+ NameStr(OldHeap->rd_att->attrs[colno - 1]->attname))));
}
else if (colno < 0)
{
@@ -651,12 +651,13 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex)
* We cannot simply pass the tuple to heap_insert(), for several
* reasons:
*
- * 1. heap_insert() will overwrite the commit-status fields of the tuple
- * it's handed. This would trash the source relation, which is bad
- * news if we abort later on. (This was a bug in releases thru 7.0)
+ * 1. heap_insert() will overwrite the commit-status fields of the
+ * tuple it's handed. This would trash the source relation, which is
+ * bad news if we abort later on. (This was a bug in releases thru
+ * 7.0)
*
- * 2. We'd like to squeeze out the values of any dropped columns, both to
- * save space and to ensure we have no corner-case failures. (It's
+ * 2. We'd like to squeeze out the values of any dropped columns, both
+ * to save space and to ensure we have no corner-case failures. (It's
* possible for example that the new table hasn't got a TOAST table
* and so is unable to store any large values of dropped cols.)
*
@@ -788,10 +789,10 @@ swap_relation_files(Oid r1, Oid r2)
* happen in CLUSTER if there were dropped columns in the old table, and
* in ALTER TABLE when adding or changing type of columns.
*
- * NOTE: at present, a TOAST table's only dependency is the one on its owning
- * table. If more are ever created, we'd need to use something more
- * selective than deleteDependencyRecordsFor() to get rid of only the link
- * we want.
+ * NOTE: at present, a TOAST table's only dependency is the one on its
+ * owning table. If more are ever created, we'd need to use something
+ * more selective than deleteDependencyRecordsFor() to get rid of only the
+ * link we want.
*/
if (relform1->reltoastrelid || relform2->reltoastrelid)
{
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
index cf7dc06fa7f..b2832618f1f 100644
--- a/src/backend/commands/comment.c
+++ b/src/backend/commands/comment.c
@@ -7,7 +7,7 @@
* Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.84 2005/10/15 02:49:15 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.84.2.1 2005/11/22 18:23:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -445,8 +445,8 @@ CommentDatabase(List *qualname, char *comment)
* comment on a database other than the current one. Someday this might be
* improved, but it would take a redesigned infrastructure.
*
- * When loading a dump, we may see a COMMENT ON DATABASE for the old name of
- * the database. Erroring out would prevent pg_restore from completing
+ * When loading a dump, we may see a COMMENT ON DATABASE for the old name
+ * of the database. Erroring out would prevent pg_restore from completing
* (which is really pg_restore's fault, but for now we will work around
* the problem here). Consensus is that the best fix is to treat wrong
* database name as a WARNING not an ERROR.
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index 63d88c5df07..030ad6f329c 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.254 2005/11/03 17:11:34 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.254.2.1 2005/11/22 18:23:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -127,8 +127,8 @@ typedef struct CopyStateData
/*
* These variables are used to reduce overhead in textual COPY FROM.
*
- * attribute_buf holds the separated, de-escaped text for each field of the
- * current line. The CopyReadAttributes functions return arrays of
+ * attribute_buf holds the separated, de-escaped text for each field of
+ * the current line. The CopyReadAttributes functions return arrays of
* pointers into this buffer. We avoid palloc/pfree overhead by re-using
* the buffer on each cycle.
*/
@@ -2085,8 +2085,8 @@ CopyReadLineText(CopyState cstate)
* examine; any characters from raw_buf_index to raw_buf_ptr have been
* determined to be part of the line, but not yet transferred to line_buf.
*
- * For a little extra speed within the loop, we copy raw_buf and raw_buf_len
- * into local variables.
+ * For a little extra speed within the loop, we copy raw_buf and
+ * raw_buf_len into local variables.
*/
copy_raw_buf = cstate->raw_buf;
raw_buf_ptr = cstate->raw_buf_index;
@@ -2148,8 +2148,8 @@ CopyReadLineText(CopyState cstate)
/*
* If need more data, go back to loop top to load it.
*
- * Note that if we are at EOF, c will wind up as '\0' because of
- * the guaranteed pad of raw_buf.
+ * Note that if we are at EOF, c will wind up as '\0' because
+ * of the guaranteed pad of raw_buf.
*/
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
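[Annotation] Both COPY line-reading hunks above lean on the "guaranteed pad of raw_buf": the buffer always keeps a terminating '\0' after the valid bytes, so peeking at the next character at EOF reads '\0' instead of stale memory. A small standalone sketch of that convention, with invented buffer names and sizes:

    #include <stdio.h>
    #include <string.h>

    #define RAW_BUF_SIZE 16

    static char raw_buf[RAW_BUF_SIZE + 1];  /* +1 for the pad byte */
    static int  raw_buf_len = 0;

    static void
    load_buffer(const char *data)
    {
        raw_buf_len = (int) strlen(data);
        if (raw_buf_len > RAW_BUF_SIZE)
            raw_buf_len = RAW_BUF_SIZE;
        memcpy(raw_buf, data, raw_buf_len);
        raw_buf[raw_buf_len] = '\0';        /* the guaranteed pad */
    }

    int
    main(void)
    {
        int i = 0;

        load_buffer("a,b");
        for (;;)
        {
            char c = raw_buf[i++];          /* safe even one past the data */

            if (c == '\0')                  /* at EOF we read the pad */
            {
                printf("hit end of data\n");
                break;
            }
            printf("read '%c'\n", c);
        }
        return 0;
    }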
@@ -2283,8 +2283,8 @@ CopyReadLineText(CopyState cstate)
* Do we need to be careful about trailing bytes of multibyte
* characters? (See note above about client_only_encoding)
*
- * We assume here that pg_encoding_mblen only looks at the first byte of
- * the character!
+ * We assume here that pg_encoding_mblen only looks at the first byte
+ * of the character!
*/
if (cstate->client_only_encoding)
{
@@ -2369,8 +2369,8 @@ CopyReadLineCSV(CopyState cstate)
* examine; any characters from raw_buf_index to raw_buf_ptr have been
* determined to be part of the line, but not yet transferred to line_buf.
*
- * For a little extra speed within the loop, we copy raw_buf and raw_buf_len
- * into local variables.
+ * For a little extra speed within the loop, we copy raw_buf and
+ * raw_buf_len into local variables.
*/
copy_raw_buf = cstate->raw_buf;
raw_buf_ptr = cstate->raw_buf_index;
@@ -2475,8 +2475,8 @@ CopyReadLineCSV(CopyState cstate)
/*
* If need more data, go back to loop top to load it.
*
- * Note that if we are at EOF, c will wind up as '\0' because of
- * the guaranteed pad of raw_buf.
+ * Note that if we are at EOF, c will wind up as '\0' because
+ * of the guaranteed pad of raw_buf.
*/
if (raw_buf_ptr >= copy_buf_len && !hit_eof)
{
@@ -2621,8 +2621,8 @@ CopyReadLineCSV(CopyState cstate)
* Do we need to be careful about trailing bytes of multibyte
* characters? (See note above about client_only_encoding)
*
- * We assume here that pg_encoding_mblen only looks at the first byte of
- * the character!
+ * We assume here that pg_encoding_mblen only looks at the first byte
+ * of the character!
*/
if (cstate->client_only_encoding)
{
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index accbafc8486..5fa0f6155df 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.173 2005/10/15 02:49:15 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.173.2.1 2005/11/22 18:23:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -346,8 +346,8 @@ createdb(const CreatedbStmt *stmt)
src_vacuumxid = src_frozenxid = GetCurrentTransactionId();
/*
- * Preassign OID for pg_database tuple, so that we can compute db path.
- * We have to open pg_database to do this, but we don't want to take
+ * Preassign OID for pg_database tuple, so that we can compute db path. We
+ * have to open pg_database to do this, but we don't want to take
* ExclusiveLock yet, so just do it and close again.
*/
pg_database_rel = heap_open(DatabaseRelationId, AccessShareLock);
@@ -512,14 +512,14 @@ createdb(const CreatedbStmt *stmt)
*
* (Both of these were real bugs in releases 8.0 through 8.0.3.)
*
- * In PITR replay, the first of these isn't an issue, and the second is
- * only a risk if the CREATE DATABASE and subsequent template database
- * change both occur while a base backup is being taken. There doesn't
- * seem to be much we can do about that except document it as a
- * limitation.
+ * In PITR replay, the first of these isn't an issue, and the second
+ * is only a risk if the CREATE DATABASE and subsequent template
+ * database change both occur while a base backup is being taken.
+ * There doesn't seem to be much we can do about that except document
+ * it as a limitation.
*
- * Perhaps if we ever implement CREATE DATABASE in a less cheesy way, we
- * can avoid this.
+ * Perhaps if we ever implement CREATE DATABASE in a less cheesy way,
+ * we can avoid this.
*/
RequestCheckpoint(true, false);
@@ -642,8 +642,8 @@ dropdb(const char *dbname)
/*
* Delete any comments associated with the database
*
- * NOTE: this is probably dead code since any such comments should have been
- * in that database, not mine.
+ * NOTE: this is probably dead code since any such comments should have
+ * been in that database, not mine.
*/
DeleteComments(db_id, DatabaseRelationId, 0);
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 08480631fec..9d8ee2e781f 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.139 2005/10/21 16:43:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.139.2.1 2005/11/22 18:23:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -199,9 +199,9 @@ ExplainOneQuery(Query *query, ExplainStmt *stmt, TupOutputState *tstate)
/*
* Update snapshot command ID to ensure this query sees results of any
* previously executed queries. (It's a bit cheesy to modify
- * ActiveSnapshot without making a copy, but for the limited ways in
- * which EXPLAIN can be invoked, I think it's OK, because the active
- * snapshot shouldn't be shared with anything else anyway.)
+ * ActiveSnapshot without making a copy, but for the limited ways in which
+ * EXPLAIN can be invoked, I think it's OK, because the active snapshot
+ * shouldn't be shared with anything else anyway.)
*/
ActiveSnapshot->curcid = GetCurrentCommandId();
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 07654e455ab..2c55a857f81 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.134 2005/10/15 02:49:15 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.134.2.1 2005/11/22 18:23:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -341,10 +341,10 @@ DefineIndex(RangeVar *heapRelation,
* we don't cascade the notnull constraint(s) either; but this is
* pretty debatable.
*
- * XXX: possible future improvement: when being called from ALTER TABLE,
- * it would be more efficient to merge this with the outer ALTER
- * TABLE, so as to avoid two scans. But that seems to complicate
- * DefineIndex's API unduly.
+ * XXX: possible future improvement: when being called from ALTER
+ * TABLE, it would be more efficient to merge this with the outer
+ * ALTER TABLE, so as to avoid two scans. But that seems to
+ * complicate DefineIndex's API unduly.
*/
if (cmds)
AlterTableInternal(relationId, cmds, false);
@@ -551,8 +551,8 @@ GetIndexOpClass(List *opclass, Oid attrType,
* Release 7.1 removes lztext_ops, so suppress that too for a while. tgl
* 2000/07/30
*
- * Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that too
- * for awhile. I'm starting to think we need a better approach. tgl
+ * Release 7.2 renames timestamp_ops to timestamptz_ops, so suppress that
+ * too for awhile. I'm starting to think we need a better approach. tgl
* 2000/10/01
*
* Release 8.0 removes bigbox_ops (which was dead code for a long while
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 201fcbf0c6b..03a60b13904 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.125 2005/10/15 02:49:15 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.125.2.1 2005/11/22 18:23:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -219,8 +219,8 @@ DefineSequence(CreateSeqStmt *seq)
/*
* Two special hacks here:
*
- * 1. Since VACUUM does not process sequences, we have to force the tuple to
- * have xmin = FrozenTransactionId now. Otherwise it would become
+ * 1. Since VACUUM does not process sequences, we have to force the tuple
+ * to have xmin = FrozenTransactionId now. Otherwise it would become
* invisible to SELECTs after 2G transactions. It is okay to do this
* because if the current transaction aborts, no other xact will ever
* examine the sequence tuple anyway.
@@ -459,10 +459,10 @@ nextval_internal(Oid relid)
* fetch count to grab SEQ_LOG_VALS more values than we actually need to
* cache. (These will then be usable without logging.)
*
- * If this is the first nextval after a checkpoint, we must force a new WAL
- * record to be written anyway, else replay starting from the checkpoint
- * would fail to advance the sequence past the logged values. In this
- * case we may as well fetch extra values.
+ * If this is the first nextval after a checkpoint, we must force a new
+ * WAL record to be written anyway, else replay starting from the
+ * checkpoint would fail to advance the sequence past the logged values.
+ * In this case we may as well fetch extra values.
*/
if (log < fetch)
{
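[Annotation] The nextval_internal comment above describes logging the fetch SEQ_LOG_VALS values ahead of what is actually handed out, so crash replay can never resurrect an already-issued value. The sketch below is a deliberately simplified, standalone model of that log-ahead idea only; it is not the backend's sequence code, and the constant value and names are chosen just for the illustration.

    #include <stdio.h>

    #define SEQ_LOG_VALS 32

    static long last_value = 0;     /* last value handed out */
    static long logged_upto = 0;    /* highest value covered by the "WAL" */

    static long
    next_value(void)
    {
        if (last_value + 1 > logged_upto)
        {
            /* Record a value beyond anything we are about to hand out. */
            logged_upto = last_value + 1 + SEQ_LOG_VALS;
            printf("WAL record: sequence is at least %ld\n", logged_upto);
        }
        return ++last_value;
    }

    int
    main(void)
    {
        for (int i = 0; i < 70; i++)
            next_value();
        printf("last value handed out: %ld\n", last_value);
        return 0;
    }

The invariant is that logged_upto is always at least as large as any value returned, which is what makes replay from the last record safe; the checkpoint wrinkle in the comment forces a fresh record for the same reason.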
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index abec1a835d1..1f2e8b2367c 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.174 2005/10/15 02:49:15 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.174.2.1 2005/11/22 18:23:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -908,9 +908,9 @@ MergeAttributes(List *schema, List *supers, bool istemp,
* If default expr could contain any vars, we'd need to fix
* 'em, but it can't; so default is ready to apply to child.
*
- * If we already had a default from some prior parent, check to
- * see if they are the same. If so, no problem; if not, mark
- * the column as having a bogus default. Below, we will
+ * If we already had a default from some prior parent, check
+ * to see if they are the same. If so, no problem; if not,
+ * mark the column as having a bogus default. Below, we will
* complain if the bogus default isn't overridden by the child
* schema.
*/
@@ -1125,9 +1125,9 @@ StoreCatalogInheritance(Oid relationId, List *supers)
* Also enter dependencies on the direct ancestors, and make sure they are
* marked with relhassubclass = true.
*
- * (Once upon a time, both direct and indirect ancestors were found here and
- * then entered into pg_ipl. Since that catalog doesn't exist anymore,
- * there's no need to look for indirect ancestors.)
+ * (Once upon a time, both direct and indirect ancestors were found here
+ * and then entered into pg_ipl. Since that catalog doesn't exist
+ * anymore, there's no need to look for indirect ancestors.)
*/
relation = heap_open(InheritsRelationId, RowExclusiveLock);
desc = RelationGetDescr(relation);
@@ -1217,8 +1217,8 @@ setRelhassubclassInRelation(Oid relationId, bool relhassubclass)
/*
* Fetch a modifiable copy of the tuple, modify it, update pg_class.
*
- * If the tuple already has the right relhassubclass setting, we don't need
- * to update it, but we still need to issue an SI inval message.
+ * If the tuple already has the right relhassubclass setting, we don't
+ * need to update it, but we still need to issue an SI inval message.
*/
relationRelation = heap_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
@@ -1302,8 +1302,8 @@ renameatt(Oid myrelid,
* attribute in all classes that inherit from 'relname' (as well as in
* 'relname').
*
- * any permissions or problems with duplicate attributes will cause the whole
- * transaction to abort, which is what we want -- all or nothing.
+ * any permissions or problems with duplicate attributes will cause the
+ * whole transaction to abort, which is what we want -- all or nothing.
*/
if (recurse)
{
@@ -1633,8 +1633,8 @@ update_ri_trigger_args(Oid relid,
/*
* It is an RI trigger, so parse the tgargs bytea.
*
- * NB: we assume the field will never be compressed or moved out of line;
- * so does trigger.c ...
+ * NB: we assume the field will never be compressed or moved out of
+ * line; so does trigger.c ...
*/
tgnargs = pg_trigger->tgnargs;
val = (bytea *)
@@ -2393,9 +2393,9 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap)
* If we need to rewrite the table, the operation has to be propagated to
* tables that use this table's rowtype as a column type.
*
- * (Eventually this will probably become true for scans as well, but at the
- * moment a composite type does not enforce any constraints, so it's not
- * necessary/appropriate to enforce them just during ALTER.)
+ * (Eventually this will probably become true for scans as well, but at
+ * the moment a composite type does not enforce any constraints, so it's
+ * not necessary/appropriate to enforce them just during ALTER.)
*/
if (newrel)
find_composite_type_dependencies(oldrel->rd_rel->reltype,
@@ -2837,9 +2837,9 @@ ATPrepAddColumn(List **wqueue, Relation rel, bool recurse,
/*
* Recurse to add the column to child classes, if requested.
*
- * We must recurse one level at a time, so that multiply-inheriting children
- * are visited the right number of times and end up with the right
- * attinhcount.
+ * We must recurse one level at a time, so that multiply-inheriting
+ * children are visited the right number of times and end up with the
+ * right attinhcount.
*/
if (recurse)
{
@@ -3039,8 +3039,8 @@ ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
/*
* Tell Phase 3 to fill in the default expression, if there is one.
*
- * If there is no default, Phase 3 doesn't have to do anything, because that
- * effectively means that the default is NULL. The heap tuple access
+ * If there is no default, Phase 3 doesn't have to do anything, because
+ * that effectively means that the default is NULL. The heap tuple access
* routines always check for attnum > # of attributes in tuple, and return
* NULL if so, so without any modification of the tuple data we will get
* the effect of NULL values in the new column.
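[Annotation] The ATExecAddColumn hunk just above depends on the tuple access rule that an attribute number beyond what a stored tuple physically contains reads as NULL, which is why a no-default ADD COLUMN needs no table rewrite. A toy standalone version of that rule, with invented struct and function names:

    #include <stdio.h>
    #include <stdbool.h>

    typedef struct
    {
        int natts;          /* attributes physically present in this row */
        int values[4];      /* toy attribute values */
    } ToyTuple;

    static int
    toy_getattr(const ToyTuple *tup, int attnum, bool *isnull)
    {
        if (attnum > tup->natts)    /* column added after this row was stored */
        {
            *isnull = true;
            return 0;
        }
        *isnull = false;
        return tup->values[attnum - 1];
    }

    int
    main(void)
    {
        ToyTuple old_row = {2, {10, 20, 0, 0}};     /* written when the table had 2 columns */
        bool isnull;
        int v = toy_getattr(&old_row, 3, &isnull);  /* table now has 3 columns */

        printf("attribute 3: %s (%d)\n", isnull ? "NULL" : "not null", v);
        return 0;
    }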
@@ -3833,8 +3833,8 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
/*
* Validity and permissions checks
*
- * Note: REFERENCES permissions checks are redundant with CREATE TRIGGER, but
- * we may as well error out sooner instead of later.
+ * Note: REFERENCES permissions checks are redundant with CREATE TRIGGER,
+ * but we may as well error out sooner instead of later.
*/
if (pkrel->rd_rel->relkind != RELKIND_RELATION)
ereport(ERROR,
@@ -3932,9 +3932,9 @@ ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
* pktypoid[i] is the primary key table's i'th key's type fktypoid[i]
* is the foreign key table's i'th key's type
*
- * Note that we look for an operator with the PK type on the left; when
- * the types are different this is critical because the PK index will
- * need operators with the indexkey on the left. (Ordinarily both
+ * Note that we look for an operator with the PK type on the left;
+ * when the types are different this is critical because the PK index
+ * will need operators with the indexkey on the left. (Ordinarily both
* commutator operators will exist if either does, but we won't get
* the right answer from the test below on opclass membership unless
* we select the proper operator.)
@@ -4862,10 +4862,10 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
* the column type, because build_column_default itself will try to
* coerce, and will not issue the error message we want if it fails.)
*
- * We remove any implicit coercion steps at the top level of the old default
- * expression; this has been agreed to satisfy the principle of least
- * surprise. (The conversion to the new column type should act like it
- * started from what the user sees as the stored expression, and the
+ * We remove any implicit coercion steps at the top level of the old
+ * default expression; this has been agreed to satisfy the principle of
+ * least surprise. (The conversion to the new column type should act like
+ * it started from what the user sees as the stored expression, and the
* implicit coercions aren't going to be shown.)
*/
if (attTup->atthasdef)
@@ -4896,8 +4896,8 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
* the info before executing ALTER TYPE, though, else the deparser will
* get confused.
*
- * There could be multiple entries for the same object, so we must check to
- * ensure we process each one only once. Note: we assume that an index
+ * There could be multiple entries for the same object, so we must check
+ * to ensure we process each one only once. Note: we assume that an index
* that implements a constraint will not show a direct dependency on the
* column.
*/
@@ -5782,9 +5782,9 @@ copy_relation_data(Relation rel, SMgrRelation dst)
* to commit the transaction. (For a temp rel we don't care since the rel
* will be uninteresting after a crash anyway.)
*
- * It's obvious that we must do this when not WAL-logging the copy. It's less
- * obvious that we have to do it even if we did WAL-log the copied pages.
- * The reason is that since we're copying outside shared buffers, a
+ * It's obvious that we must do this when not WAL-logging the copy. It's
+ * less obvious that we have to do it even if we did WAL-log the copied
+ * pages. The reason is that since we're copying outside shared buffers, a
* CHECKPOINT occurring during the copy has no way to flush the previously
* written data to disk (indeed it won't know the new rel even exists). A
* crash later on would replay WAL from the checkpoint, therefore it
@@ -5842,12 +5842,12 @@ AlterTableCreateToastTable(Oid relOid, bool silent)
/*
* Toast table is shared if and only if its parent is.
*
- * We cannot allow toasting a shared relation after initdb (because there's
- * no way to mark it toasted in other databases' pg_class). Unfortunately
- * we can't distinguish initdb from a manually started standalone backend
- * (toasting happens after the bootstrap phase, so checking
- * IsBootstrapProcessingMode() won't work). However, we can at least
- * prevent this mistake under normal multi-user operation.
+ * We cannot allow toasting a shared relation after initdb (because
+ * there's no way to mark it toasted in other databases' pg_class).
+ * Unfortunately we can't distinguish initdb from a manually started
+ * standalone backend (toasting happens after the bootstrap phase, so
+ * checking IsBootstrapProcessingMode() won't work). However, we can at
+ * least prevent this mistake under normal multi-user operation.
*/
shared_relation = rel->rd_rel->relisshared;
if (shared_relation && IsUnderPostmaster)
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index a3f7c37dc28..2bcbd89fddf 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.195 2005/10/15 02:49:15 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.195.2.1 2005/11/22 18:23:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -566,8 +566,8 @@ RemoveTriggerById(Oid trigOid)
* (and this one too!) are sent SI message to make them rebuild relcache
* entries.
*
- * Note this is OK only because we have AccessExclusiveLock on the rel, so no
- * one else is creating/deleting triggers on this rel at the same time.
+ * Note this is OK only because we have AccessExclusiveLock on the rel, so
+ * no one else is creating/deleting triggers on this rel at the same time.
*/
pgrel = heap_open(RelationRelationId, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
@@ -1182,8 +1182,8 @@ equalTriggerDescs(TriggerDesc *trigdesc1, TriggerDesc *trigdesc2)
* we have the same triggers with the same types, the derived index data
* should match.
*
- * As of 7.3 we assume trigger set ordering is significant in the comparison;
- * so we just compare corresponding slots of the two sets.
+ * As of 7.3 we assume trigger set ordering is significant in the
+ * comparison; so we just compare corresponding slots of the two sets.
*/
if (trigdesc1 != NULL)
{
@@ -2534,13 +2534,14 @@ AfterTriggerEndQuery(EState *estate)
* Process all immediate-mode triggers queued by the query, and move the
* deferred ones to the main list of deferred events.
*
- * Notice that we decide which ones will be fired, and put the deferred ones
- * on the main list, before anything is actually fired. This ensures
+ * Notice that we decide which ones will be fired, and put the deferred
+ * ones on the main list, before anything is actually fired. This ensures
* reasonably sane behavior if a trigger function does SET CONSTRAINTS ...
* IMMEDIATE: all events we have decided to defer will be available for it
* to fire.
*
- * If we find no firable events, we don't have to increment firing_counter.
+ * If we find no firable events, we don't have to increment
+ * firing_counter.
*/
events = &afterTriggers->query_stack[afterTriggers->query_depth];
if (afterTriggerMarkEvents(events, &afterTriggers->events, true))
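[Annotation] The AfterTriggerEndQuery comment above spells out a two-phase rule: first decide which queued events fire now and move the deferred ones to the main list, and only then start firing, so a trigger that runs SET CONSTRAINTS ... IMMEDIATE sees every deferred event; and skip bumping firing_counter when nothing is firable. The sketch below models only that ordering with invented structs and a toy firing rule; it is not the backend's trigger queue.

    #include <stdio.h>
    #include <stdbool.h>

    typedef struct
    {
        int  id;
        bool deferred;      /* constraint is deferred? */
        int  firing_id;     /* 0 = not chosen to fire yet */
    } ToyEvent;

    static int firing_counter = 1;

    static bool
    mark_events(ToyEvent *queued, int nqueued, ToyEvent *deferred_list, int *ndeferred)
    {
        bool found = false;

        for (int i = 0; i < nqueued; i++)
        {
            if (queued[i].deferred)
                deferred_list[(*ndeferred)++] = queued[i];  /* fire later */
            else
            {
                queued[i].firing_id = firing_counter;
                found = true;
            }
        }
        return found;
    }

    int
    main(void)
    {
        ToyEvent queued[] = {{1, false, 0}, {2, true, 0}, {3, false, 0}};
        ToyEvent deferred_list[8];
        int ndeferred = 0;

        /* Phase 1: decide what fires; nothing has been fired yet. */
        if (mark_events(queued, 3, deferred_list, &ndeferred))
        {
            /* Phase 2: fire only what was tagged with the current firing id. */
            for (int i = 0; i < 3; i++)
                if (queued[i].firing_id == firing_counter)
                    printf("firing event %d\n", queued[i].id);
            firing_counter++;   /* skipped entirely when nothing was firable */
        }
        printf("%d event(s) moved to the deferred list\n", ndeferred);
        return 0;
    }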
@@ -3027,8 +3028,8 @@ AfterTriggerSetState(ConstraintsSetStmt *stmt)
* list of previously deferred events to fire any that have now become
* immediate.
*
- * Obviously, if this was SET ... DEFERRED then it can't have converted any
- * unfired events to immediate, so we need do nothing in that case.
+ * Obviously, if this was SET ... DEFERRED then it can't have converted
+ * any unfired events to immediate, so we need do nothing in that case.
*/
if (!stmt->deferred)
{
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index 5cf51658eeb..8cc58a9f588 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.82 2005/10/18 01:06:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.82.2.1 2005/11/22 18:23:08 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
@@ -853,8 +853,8 @@ findTypeInputFunction(List *procname, Oid typeOid)
* Input functions can take a single argument of type CSTRING, or three
* arguments (string, typioparam OID, typmod).
*
- * For backwards compatibility we allow OPAQUE in place of CSTRING; if we see
- * this, we issue a warning and fix up the pg_proc entry.
+ * For backwards compatibility we allow OPAQUE in place of CSTRING; if we
+ * see this, we issue a warning and fix up the pg_proc entry.
*/
argList[0] = CSTRINGOID;
@@ -1838,8 +1838,8 @@ domainAddConstraint(Oid domainOid, Oid domainNamespace, Oid baseTypeOid,
/*
* Deparse it to produce text for consrc.
*
- * Since VARNOs aren't allowed in domain constraints, relation context isn't
- * required as anything other than a shell.
+ * Since VARNOs aren't allowed in domain constraints, relation context
+ * isn't required as anything other than a shell.
*/
ccsrc = deparse_expression(expr,
deparse_context_for(domainName,
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 506eb23e707..e83a8333d2d 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.317 2005/10/15 02:49:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.317.2.1 2005/11/22 18:23:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -313,8 +313,8 @@ vacuum(VacuumStmt *vacstmt, List *relids)
* compared to telling people to use two operations. See pgsql-hackers
* discussion of 27-Nov-2004, and comments below for update_hint_bits().
*
- * Note: this is enforced here, and not in the grammar, since (a) we can give
- * a better error message, and (b) we might want to allow it again
+ * Note: this is enforced here, and not in the grammar, since (a) we can
+ * give a better error message, and (b) we might want to allow it again
* someday.
*/
if (vacstmt->vacuum && vacstmt->full && vacstmt->freeze)
@@ -332,8 +332,8 @@ vacuum(VacuumStmt *vacstmt, List *relids)
/*
* Create special memory context for cross-transaction storage.
*
- * Since it is a child of PortalContext, it will go away eventually even if
- * we suffer an error; there's no need for special abort cleanup logic.
+ * Since it is a child of PortalContext, it will go away eventually even
+ * if we suffer an error; there's no need for special abort cleanup logic.
*/
vac_context = AllocSetContextCreate(PortalContext,
"Vacuum",
@@ -355,14 +355,14 @@ vacuum(VacuumStmt *vacstmt, List *relids)
/*
* It's a database-wide VACUUM.
*
- * Compute the initially applicable OldestXmin and FreezeLimit XIDs, so
- * that we can record these values at the end of the VACUUM. Note that
- * individual tables may well be processed with newer values, but we
- * can guarantee that no (non-shared) relations are processed with
+ * Compute the initially applicable OldestXmin and FreezeLimit XIDs,
+ * so that we can record these values at the end of the VACUUM. Note
+ * that individual tables may well be processed with newer values, but
+ * we can guarantee that no (non-shared) relations are processed with
* older ones.
*
- * It is okay to record non-shared values in pg_database, even though we
- * may vacuum shared relations with older cutoffs, because only the
+ * It is okay to record non-shared values in pg_database, even though
+ * we may vacuum shared relations with older cutoffs, because only the
* minimum of the values present in pg_database matters. We can be
* sure that shared relations have at some time been vacuumed with
* cutoffs no worse than the global minimum; for, if there is a
@@ -379,8 +379,8 @@ vacuum(VacuumStmt *vacstmt, List *relids)
/*
* Decide whether we need to start/commit our own transactions.
*
- * For VACUUM (with or without ANALYZE): always do so, so that we can release
- * locks as soon as possible. (We could possibly use the outer
+ * For VACUUM (with or without ANALYZE): always do so, so that we can
+ * release locks as soon as possible. (We could possibly use the outer
* transaction for a one-table VACUUM, but handling TOAST tables would be
* problematic.)
*
@@ -981,21 +981,20 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
/*
* Determine the type of lock we want --- hard exclusive lock for a FULL
- * vacuum, but just ShareUpdateExclusiveLock for concurrent vacuum.
- * Either way, we can be sure that no other backend is vacuuming the same
- * table.
+ * vacuum, but just ShareUpdateExclusiveLock for concurrent vacuum. Either
+ * way, we can be sure that no other backend is vacuuming the same table.
*/
lmode = vacstmt->full ? AccessExclusiveLock : ShareUpdateExclusiveLock;
/*
* Open the class, get an appropriate lock on it, and check permissions.
*
- * We allow the user to vacuum a table if he is superuser, the table owner,
- * or the database owner (but in the latter case, only if it's not a
- * shared relation). pg_class_ownercheck includes the superuser case.
+ * We allow the user to vacuum a table if he is superuser, the table
+ * owner, or the database owner (but in the latter case, only if it's not
+ * a shared relation). pg_class_ownercheck includes the superuser case.
*
- * Note we choose to treat permissions failure as a WARNING and keep trying
- * to vacuum the rest of the DB --- is this appropriate?
+ * Note we choose to treat permissions failure as a WARNING and keep
+ * trying to vacuum the rest of the DB --- is this appropriate?
*/
onerel = relation_open(relid, lmode);
@@ -1661,8 +1660,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* find a page we cannot completely empty (this last condition is handled
* by "break" statements within the loop).
*
- * NB: this code depends on the vacuum_pages and fraged_pages lists being in
- * order by blkno.
+ * NB: this code depends on the vacuum_pages and fraged_pages lists being
+ * in order by blkno.
*/
nblocks = vacrelstats->rel_pages;
for (blkno = nblocks - vacuum_pages->empty_end_pages - 1;
@@ -1685,9 +1684,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* since we stop the outer loop at last_move_dest_block, pages removed
* here cannot have had anything moved onto them already.
*
- * Also note that we don't change the stored fraged_pages list, only our
- * local variable num_fraged_pages; so the forgotten pages are still
- * available to be loaded into the free space map later.
+ * Also note that we don't change the stored fraged_pages list, only
+ * our local variable num_fraged_pages; so the forgotten pages are
+ * still available to be loaded into the free space map later.
*/
while (num_fraged_pages > 0 &&
fraged_pages->pagedesc[num_fraged_pages - 1]->blkno >= blkno)
@@ -1841,17 +1840,17 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* --- it must be recently obsoleted, else scan_heap would have
* deemed it removable.)
*
- * NOTE: this test is not 100% accurate: it is possible for a tuple
- * to be an updated one with recent xmin, and yet not match any
- * new_tid entry in the vtlinks list. Presumably there was once a
- * parent tuple with xmax matching the xmin, but it's possible
- * that that tuple has been removed --- for example, if it had
- * xmin = xmax and wasn't itself an updated version, then
+ * NOTE: this test is not 100% accurate: it is possible for a
+ * tuple to be an updated one with recent xmin, and yet not match
+ * any new_tid entry in the vtlinks list. Presumably there was
+ * once a parent tuple with xmax matching the xmin, but it's
+ * possible that that tuple has been removed --- for example, if
+ * it had xmin = xmax and wasn't itself an updated version, then
* HeapTupleSatisfiesVacuum would deem it removable as soon as the
* xmin xact completes.
*
- * To be on the safe side, we abandon the repair_frag process if we
- * cannot find the parent tuple in vtlinks. This may be overly
+ * To be on the safe side, we abandon the repair_frag process if
+ * we cannot find the parent tuple in vtlinks. This may be overly
* conservative; AFAICS it would be safe to move the chain.
*/
if (((tuple.t_data->t_infomask & HEAP_UPDATED) &&
@@ -2393,8 +2392,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* Clean moved-off tuples from last page in Nvacpagelist list.
*
- * We need only do this in this one page, because higher-numbered pages
- * are going to be truncated from the relation entirely. But see
+ * We need only do this in this one page, because higher-numbered
+ * pages are going to be truncated from the relation entirely. But see
* comments for update_hint_bits().
*/
if (vacpage->blkno == (blkno - 1) &&
@@ -2549,8 +2548,8 @@ move_chain_tuple(Relation rel,
* Therefore we must do everything that uses old_tup->t_data BEFORE this
* step!!
*
- * This path is different from the other callers of vacuum_page, because we
- * have already incremented the vacpage's offsets_used field to account
+ * This path is different from the other callers of vacuum_page, because
+ * we have already incremented the vacpage's offsets_used field to account
* for the tuple(s) we expect to move onto the page. Therefore
* vacuum_page's check for offsets_used == 0 is wrong. But since that's a
* good debugging check for all other callers, we work around it here
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index 7f276199015..5237a8c3cdd 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -31,7 +31,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.61 2005/10/15 02:49:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.61.2.1 2005/11/22 18:23:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -286,21 +286,21 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* relation but crashes before initializing the page. Reclaim such
* pages for use.
*
- * We have to be careful here because we could be looking at a page
- * that someone has just added to the relation and not yet been
- * able to initialize (see RelationGetBufferForTuple). To
+ * We have to be careful here because we could be looking at a
+ * page that someone has just added to the relation and not yet
+ * been able to initialize (see RelationGetBufferForTuple). To
* interlock against that, release the buffer read lock (which we
* must do anyway) and grab the relation extension lock before
* re-locking in exclusive mode. If the page is still
* uninitialized by then, it must be left over from a crashed
* backend, and we can initialize it.
*
- * We don't really need the relation lock when this is a new or temp
- * relation, but it's probably not worth the code space to check
- * that, since this surely isn't a critical path.
+ * We don't really need the relation lock when this is a new or
+ * temp relation, but it's probably not worth the code space to
+ * check that, since this surely isn't a critical path.
*
- * Note: the comparable code in vacuum.c need not worry because it's
- * got exclusive lock on the whole relation.
+ * Note: the comparable code in vacuum.c need not worry because
+ * it's got exclusive lock on the whole relation.
*/
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockRelationForExtension(onerel, ExclusiveLock);
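[Annotation] The lazy_scan_heap hunk above describes the interlock for an apparently uninitialized page: release the buffer lock, take the lock a concurrent extender would hold, re-lock the page, and only then re-check whether it is still uninitialized. The pthread sketch below models that shape only; its mutexes are invented stand-ins, not the backend's buffer or relation-extension locks.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t extension_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool page_initialized = false;

    static void
    reclaim_if_still_uninitialized(void)
    {
        pthread_mutex_lock(&page_lock);
        if (!page_initialized)
        {
            /* Don't decide yet: a concurrent extender may just not have finished. */
            pthread_mutex_unlock(&page_lock);
            pthread_mutex_lock(&extension_lock);    /* wait out any extender */
            pthread_mutex_lock(&page_lock);

            if (!page_initialized)                  /* re-check under both locks */
            {
                page_initialized = true;            /* leftover from a crash: reclaim */
                printf("reclaimed uninitialized page\n");
            }
            pthread_mutex_unlock(&extension_lock);
        }
        pthread_mutex_unlock(&page_lock);
    }

    int
    main(void)
    {
        reclaim_if_still_uninitialized();
        return 0;
    }

The point the comment makes is that the first check alone cannot distinguish "crashed before initializing" from "still being initialized"; only the re-check after acquiring the extension lock can.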
@@ -367,12 +367,12 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* Tuple is good. Consider whether to replace its xmin
* value with FrozenTransactionId.
*
- * NB: Since we hold only a shared buffer lock here, we are
- * assuming that TransactionId read/write is atomic. This
- * is not the only place that makes such an assumption.
- * It'd be possible to avoid the assumption by momentarily
- * acquiring exclusive lock, but for the moment I see no
- * need to.
+ * NB: Since we hold only a shared buffer lock here, we
+ * are assuming that TransactionId read/write is atomic.
+ * This is not the only place that makes such an
+ * assumption. It'd be possible to avoid the assumption by
+ * momentarily acquiring exclusive lock, but for the
+ * moment I see no need to.
*/
if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple.t_data)) &&
TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index 31113fffe2d..ec56cb573ce 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.114 2005/10/15 02:49:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.114.2.1 2005/11/22 18:23:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -134,8 +134,8 @@ assign_datestyle(const char *value, bool doit, GucSource source)
* Easiest way to get the current DEFAULT state is to fetch the
* DEFAULT string from guc.c and recursively parse it.
*
- * We can't simply "return assign_datestyle(...)" because we need to
- * handle constructs like "DEFAULT, ISO".
+ * We can't simply "return assign_datestyle(...)" because we need
+ * to handle constructs like "DEFAULT, ISO".
*/
int saveDateStyle = DateStyle;
int saveDateOrder = DateOrder;
@@ -339,8 +339,8 @@ assign_timezone(const char *value, bool doit, GucSource source)
* timezone setting, we will return that name rather than UNKNOWN
* as the canonical spelling.
*
- * During GUC initialization, since the timezone library isn't set up
- * yet, pg_get_timezone_name will return NULL and we will leave
+ * During GUC initialization, since the timezone library isn't set
+ * up yet, pg_get_timezone_name will return NULL and we will leave
* the setting as UNKNOWN. If this isn't overridden from the
* config file then pg_timezone_initialize() will eventually
* select a default value from the environment.
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index 54030452f8a..03cd25b2a47 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.91 2005/10/15 02:49:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.91.2.1 2005/11/22 18:23:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -410,7 +410,8 @@ DefineView(RangeVar *view, Query *viewParse, bool replace)
/*
* Create the view relation
*
- * NOTE: if it already exists and replace is false, the xact will be aborted.
+ * NOTE: if it already exists and replace is false, the xact will be
+ * aborted.
*/
viewOid = DefineVirtualRelation(view, viewParse->targetList, replace);
diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c
index 06e4ab7b232..2cd86cca8c8 100644
--- a/src/backend/executor/execAmi.c
+++ b/src/backend/executor/execAmi.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.85 2005/10/15 02:49:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.85.2.1 2005/11/22 18:23:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -403,9 +403,9 @@ ExecMayReturnRawTuples(PlanState *node)
* but just pass up input tuples, we have to recursively examine the input
* plan node.
*
- * Note: Hash and Material are listed here because they sometimes return an
- * original input tuple, not a copy. But Sort and SetOp never return an
- * original tuple, so they can be treated like projecting nodes.
+ * Note: Hash and Material are listed here because they sometimes return
+ * an original input tuple, not a copy. But Sort and SetOp never return
+ * an original tuple, so they can be treated like projecting nodes.
*/
switch (nodeTag(node))
{
diff --git a/src/backend/executor/execGrouping.c b/src/backend/executor/execGrouping.c
index 688e2157e8b..58addc41a92 100644
--- a/src/backend/executor/execGrouping.c
+++ b/src/backend/executor/execGrouping.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.16 2005/10/15 02:49:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.16.2.1 2005/11/22 18:23:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -381,9 +381,9 @@ LookupTupleHashEntry(TupleHashTable hashtable, TupleTableSlot *slot,
/*
* created new entry
*
- * Zero any caller-requested space in the entry. (This zaps the "key
- * data" dynahash.c copied into the new entry, but we don't care
- * since we're about to overwrite it anyway.)
+ * Zero any caller-requested space in the entry. (This zaps the
+ * "key data" dynahash.c copied into the new entry, but we don't
+ * care since we're about to overwrite it anyway.)
*/
MemSet(entry, 0, hashtable->entrysize);
diff --git a/src/backend/executor/execJunk.c b/src/backend/executor/execJunk.c
index 2245c61e7fe..9a4102f3b27 100644
--- a/src/backend/executor/execJunk.c
+++ b/src/backend/executor/execJunk.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.50 2005/10/15 02:49:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.50.2.1 2005/11/22 18:23:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -87,11 +87,11 @@ ExecInitJunkFilter(List *targetList, bool hasoid, TupleTableSlot *slot)
* Now calculate the mapping between the original tuple's attributes and
* the "clean" tuple's attributes.
*
- * The "map" is an array of "cleanLength" attribute numbers, i.e. one entry
- * for every attribute of the "clean" tuple. The value of this entry is
- * the attribute number of the corresponding attribute of the "original"
- * tuple. (Zero indicates a NULL output attribute, but we do not use that
- * feature in this routine.)
+ * The "map" is an array of "cleanLength" attribute numbers, i.e. one
+ * entry for every attribute of the "clean" tuple. The value of this entry
+ * is the attribute number of the corresponding attribute of the
+ * "original" tuple. (Zero indicates a NULL output attribute, but we do
+ * not use that feature in this routine.)
*/
cleanLength = cleanTupType->natts;
if (cleanLength > 0)
@@ -158,11 +158,11 @@ ExecInitJunkFilterConversion(List *targetList,
* Calculate the mapping between the original tuple's attributes and the
* "clean" tuple's attributes.
*
- * The "map" is an array of "cleanLength" attribute numbers, i.e. one entry
- * for every attribute of the "clean" tuple. The value of this entry is
- * the attribute number of the corresponding attribute of the "original"
- * tuple. We store zero for any deleted attributes, marking that a NULL
- * is needed in the output tuple.
+ * The "map" is an array of "cleanLength" attribute numbers, i.e. one
+ * entry for every attribute of the "clean" tuple. The value of this entry
+ * is the attribute number of the corresponding attribute of the
+ * "original" tuple. We store zero for any deleted attributes, marking
+ * that a NULL is needed in the output tuple.
*/
cleanLength = cleanTupType->natts;
if (cleanLength > 0)
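
Aside on the reflowed execJunk.c comments above: they describe the junk-filter "map" as an array with one entry per attribute of the "clean" tuple, each entry holding the attribute number of the corresponding "original" attribute, with zero meaning "emit a NULL" for a deleted column. A minimal standalone C sketch of applying such a map follows; the function name, value types, and calling convention here are hypothetical illustrations, not the executor's real API.

    #include <stdbool.h>

    /* Hypothetical sketch only: apply a junk-filter-style map.
     * map[i] is the 1-based attribute number in the original tuple that
     * feeds clean attribute i+1; 0 means the output column is NULL. */
    static void
    apply_cleanmap(const int *map, int cleanLength,
                   const double *origValues, const bool *origNulls,
                   double *cleanValues, bool *cleanNulls)
    {
        for (int i = 0; i < cleanLength; i++)
        {
            if (map[i] == 0)
            {
                cleanValues[i] = 0;
                cleanNulls[i] = true;   /* deleted attribute -> NULL output */
            }
            else
            {
                cleanValues[i] = origValues[map[i] - 1];
                cleanNulls[i] = origNulls[map[i] - 1];
            }
        }
    }
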
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 32c7711664d..04e36b87b08 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -26,7 +26,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.256.2.3 2005/11/20 18:38:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.256.2.4 2005/11/22 18:23:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -376,10 +376,10 @@ ExecCheckRTEPerms(RangeTblEntry *rte)
/*
* userid to check as: current user unless we have a setuid indication.
*
- * Note: GetUserId() is presently fast enough that there's no harm in calling
- * it separately for each RTE. If that stops being true, we could call it
- * once in ExecCheckRTPerms and pass the userid down from there. But for
- * now, no need for the extra clutter.
+ * Note: GetUserId() is presently fast enough that there's no harm in
+ * calling it separately for each RTE. If that stops being true, we could
+ * call it once in ExecCheckRTPerms and pass the userid down from there.
+ * But for now, no need for the extra clutter.
*/
userid = rte->checkAsUser ? rte->checkAsUser : GetUserId();
@@ -582,8 +582,8 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
* initialize the executor "tuple" table. We need slots for all the plan
* nodes, plus possibly output slots for the junkfilter(s). At this point
* we aren't sure if we need junkfilters, so just add slots for them
- * unconditionally. Also, if it's not a SELECT, set up a slot for use
- * for trigger output tuples.
+ * unconditionally. Also, if it's not a SELECT, set up a slot for use for
+ * trigger output tuples.
*/
{
int nSlots = ExecCountSlotsNode(plan);
@@ -797,11 +797,11 @@ InitPlan(QueryDesc *queryDesc, bool explainOnly)
/*
* We can skip WAL-logging the insertions, unless PITR is in use.
*
- * Note that for a non-temp INTO table, this is safe only because we know
- * that the catalog changes above will have been WAL-logged, and so
- * RecordTransactionCommit will think it needs to WAL-log the eventual
- * transaction commit. Else the commit might be lost, even though all
- * the data is safely fsync'd ...
+ * Note that for a non-temp INTO table, this is safe only because we
+ * know that the catalog changes above will have been WAL-logged, and
+ * so RecordTransactionCommit will think it needs to WAL-log the
+ * eventual transaction commit. Else the commit might be lost, even
+ * though all the data is safely fsync'd ...
*/
estate->es_into_relation_use_wal = XLogArchivingActive();
}
@@ -1495,8 +1495,8 @@ ExecDelete(TupleTableSlot *slot,
/*
* delete the tuple
*
- * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that the
- * row to be deleted is visible to that snapshot, and throw a can't-
+ * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
+ * the row to be deleted is visible to that snapshot, and throw a can't-
* serialize error if not. This is a special-case behavior needed for
* referential integrity updates in serializable transactions.
*/
@@ -1549,9 +1549,9 @@ ldelete:;
* Note: Normally one would think that we have to delete index tuples
* associated with the heap tuple now..
*
- * ... but in POSTGRES, we have no need to do this because the vacuum daemon
- * automatically opens an index scan and deletes index tuples when it
- * finds deleted heap tuples. -cim 9/27/89
+ * ... but in POSTGRES, we have no need to do this because the vacuum
+ * daemon automatically opens an index scan and deletes index tuples when
+ * it finds deleted heap tuples. -cim 9/27/89
*/
/* AFTER ROW DELETE Triggers */
@@ -1635,8 +1635,8 @@ ExecUpdate(TupleTableSlot *slot,
/*
* Check the constraints of the tuple
*
- * If we generate a new candidate tuple after EvalPlanQual testing, we must
- * loop back here and recheck constraints. (We don't need to redo
+ * If we generate a new candidate tuple after EvalPlanQual testing, we
+ * must loop back here and recheck constraints. (We don't need to redo
* triggers, however. If there are any BEFORE triggers then trigger.c
* will have done heap_lock_tuple to lock the correct tuple, so there's no
* need to do them again.)
@@ -1648,8 +1648,8 @@ lreplace:;
/*
* replace the heap tuple
*
- * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that the
- * row to be updated is visible to that snapshot, and throw a can't-
+ * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that
+ * the row to be updated is visible to that snapshot, and throw a can't-
* serialize error if not. This is a special-case behavior needed for
* referential integrity updates in serializable transactions.
*/
diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c
index 4ee9a4ca622..37e4d704ce4 100644
--- a/src/backend/executor/execQual.c
+++ b/src/backend/executor/execQual.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.183 2005/10/19 22:30:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.183.2.1 2005/11/22 18:23:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,7 +65,7 @@ static Datum ExecEvalAggref(AggrefExprState *aggref,
static Datum ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalWholeRowVar(ExprState *exprstate, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalConst(ExprState *exprstate, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalParam(ExprState *exprstate, ExprContext *econtext,
@@ -327,8 +327,8 @@ ExecEvalArrayRef(ArrayRefExprState *astate,
/*
* Evaluate the value to be assigned into the array.
*
- * XXX At some point we'll need to look into making the old value of the
- * array element available via CaseTestExpr, as is done by
+ * XXX At some point we'll need to look into making the old value of
+ * the array element available via CaseTestExpr, as is done by
* ExecEvalFieldStore. This is not needed now but will be needed to
* support arrays of composite types; in an assignment to a field of
* an array member, the parser would generate a FieldStore that
@@ -534,8 +534,8 @@ ExecEvalWholeRowVar(ExprState *exprstate, ExprContext *econtext,
Assert(variable->varattno == InvalidAttrNumber);
/*
- * Whole-row Vars can only appear at the level of a relation scan,
- * never in a join.
+ * Whole-row Vars can only appear at the level of a relation scan, never
+ * in a join.
*/
Assert(variable->varno != INNER);
Assert(variable->varno != OUTER);
@@ -545,8 +545,8 @@ ExecEvalWholeRowVar(ExprState *exprstate, ExprContext *econtext,
tupleDesc = slot->tts_tupleDescriptor;
/*
- * We have to make a copy of the tuple so we can safely insert the
- * Datum overhead fields, which are not set in on-disk tuples.
+ * We have to make a copy of the tuple so we can safely insert the Datum
+ * overhead fields, which are not set in on-disk tuples.
*/
dtuple = (HeapTupleHeader) palloc(tuple->t_len);
memcpy((char *) dtuple, (char *) tuple->t_data, tuple->t_len);
@@ -554,12 +554,11 @@ ExecEvalWholeRowVar(ExprState *exprstate, ExprContext *econtext,
HeapTupleHeaderSetDatumLength(dtuple, tuple->t_len);
/*
- * If the Var identifies a named composite type, label the tuple
- * with that type; otherwise use what is in the tupleDesc.
+ * If the Var identifies a named composite type, label the tuple with that
+ * type; otherwise use what is in the tupleDesc.
*
- * It's likely that the slot's tupleDesc is a record type; if so,
- * make sure it's been "blessed", so that the Datum can be interpreted
- * later.
+ * It's likely that the slot's tupleDesc is a record type; if so, make
+ * sure it's been "blessed", so that the Datum can be interpreted later.
*/
if (variable->vartype != RECORDOID)
{
@@ -2915,7 +2914,7 @@ ExecInitExpr(Expr *node, PlanState *parent)
{
case T_Var:
{
- Var *var = (Var *) node;
+ Var *var = (Var *) node;
state = (ExprState *) makeNode(ExprState);
if (var->varattno != InvalidAttrNumber)
diff --git a/src/backend/executor/execTuples.c b/src/backend/executor/execTuples.c
index b38bcc44cb4..0d3700c8e94 100644
--- a/src/backend/executor/execTuples.c
+++ b/src/backend/executor/execTuples.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.88 2005/10/15 02:49:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.88.2.1 2005/11/22 18:23:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -631,8 +631,9 @@ ExecMaterializeSlot(TupleTableSlot *slot)
* in which this could be optimized but it's probably not worth worrying
* about.)
*
- * We may be called in a context that is shorter-lived than the tuple slot,
- * but we have to ensure that the materialized tuple will survive anyway.
+ * We may be called in a context that is shorter-lived than the tuple
+ * slot, but we have to ensure that the materialized tuple will survive
+ * anyway.
*/
oldContext = MemoryContextSwitchTo(slot->tts_mcxt);
newTuple = ExecCopySlotTuple(slot);
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index cab0e6179fa..c7da61eeef2 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.126.2.1 2005/11/14 17:43:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.126.2.2 2005/11/22 18:23:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -769,19 +769,19 @@ ExecOpenIndices(ResultRelInfo *resultRelInfo)
/*
* Open and lock the index relation
*
- * If the index AM supports concurrent updates, obtain RowExclusiveLock
- * to signify that we are updating the index. This locks out only
- * operations that need exclusive access, such as relocating the index
- * to a new tablespace.
+ * If the index AM supports concurrent updates, obtain
+ * RowExclusiveLock to signify that we are updating the index. This
+ * locks out only operations that need exclusive access, such as
+ * relocating the index to a new tablespace.
*
* If the index AM is not safe for concurrent updates, obtain an
* exclusive lock on the index to lock out other updaters as well as
* readers (index_beginscan places AccessShareLock).
*
- * If there are multiple not-concurrent-safe indexes, all backends must
- * lock the indexes in the same order or we will get deadlocks here.
- * This is guaranteed by RelationGetIndexList(), which promises to
- * return the index list in OID order.
+ * If there are multiple not-concurrent-safe indexes, all backends
+ * must lock the indexes in the same order or we will get deadlocks
+ * here. This is guaranteed by RelationGetIndexList(), which promises
+ * to return the index list in OID order.
*
* The locks will be released in ExecCloseIndices.
*/
diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c
index 24a8b9a493a..b969fdf6de7 100644
--- a/src/backend/executor/functions.c
+++ b/src/backend/executor/functions.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.98 2005/10/15 02:49:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.98.2.1 2005/11/22 18:23:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -268,11 +268,11 @@ init_sql_fcache(FmgrInfo *finfo)
* If the function has any arguments declared as polymorphic types, then
* it wasn't type-checked at definition time; must do so now.
*
- * Also, force a type-check if the declared return type is a rowtype; we need
- * to find out whether we are actually returning the whole tuple result,
- * or just regurgitating a rowtype expression result. In the latter case
- * we clear returnsTuple because we need not act different from the scalar
- * result case.
+ * Also, force a type-check if the declared return type is a rowtype; we
+ * need to find out whether we are actually returning the whole tuple
+ * result, or just regurgitating a rowtype expression result. In the
+ * latter case we clear returnsTuple because we need not act different
+ * from the scalar result case.
*
* In the returnsTuple case, check_sql_fn_retval will also construct a
* JunkFilter we can use to coerce the returned rowtype to the desired
@@ -498,12 +498,12 @@ postquel_execute(execution_state *es,
* labeling to make it a valid Datum. There are several reasons why
* we do this:
*
- * 1. To copy the tuple out of the child execution context and into the
- * desired result context.
+ * 1. To copy the tuple out of the child execution context and into
+ * the desired result context.
*
- * 2. To remove any junk attributes present in the raw subselect result.
- * (This is probably not absolutely necessary, but it seems like good
- * policy.)
+ * 2. To remove any junk attributes present in the raw subselect
+ * result. (This is probably not absolutely necessary, but it seems
+ * like good policy.)
*
* 3. To insert dummy null columns if the declared result type has any
* attisdropped columns.
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 0403c9aca1b..014219a1051 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -61,7 +61,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.135 2005/10/15 02:49:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.135.2.1 2005/11/22 18:23:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -283,8 +283,8 @@ initialize_aggregates(AggState *aggstate,
/*
* (Re)set transValue to the initial value.
*
- * Note that when the initial value is pass-by-ref, we must copy it (into
- * the aggcontext) since we will pfree the transValue later.
+ * Note that when the initial value is pass-by-ref, we must copy it
+ * (into the aggcontext) since we will pfree the transValue later.
*/
if (peraggstate->initValueIsNull)
pergroupstate->transValue = peraggstate->initValue;
@@ -341,8 +341,8 @@ advance_transition_function(AggState *aggstate,
* already checked that the agg's input type is binary-compatible
* with its transtype, so straight copy here is OK.)
*
- * We must copy the datum into aggcontext if it is pass-by-ref. We do
- * not need to pfree the old transValue, since it's NULL.
+ * We must copy the datum into aggcontext if it is pass-by-ref. We
+ * do not need to pfree the old transValue, since it's NULL.
*/
oldContext = MemoryContextSwitchTo(aggstate->aggcontext);
pergroupstate->transValue = datumCopy(newVal,
@@ -842,8 +842,8 @@ agg_retrieve_direct(AggState *aggstate)
* aggregate will have a targetlist reference to ctid. We need to
* return a null for ctid in that situation, not coredump.
*
- * The values returned for the aggregates will be the initial values of
- * the transition functions.
+ * The values returned for the aggregates will be the initial values
+ * of the transition functions.
*/
if (TupIsNull(firstSlot))
{
diff --git a/src/backend/executor/nodeBitmapIndexscan.c b/src/backend/executor/nodeBitmapIndexscan.c
index 49b63170d49..3e66f74e28a 100644
--- a/src/backend/executor/nodeBitmapIndexscan.c
+++ b/src/backend/executor/nodeBitmapIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.10 2005/10/15 02:49:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.10.2.1 2005/11/22 18:23:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -211,8 +211,8 @@ ExecInitBitmapIndexScan(BitmapIndexScan *node, EState *estate)
/*
* Miscellaneous initialization
*
- * We do not need a standard exprcontext for this node, though we may decide
- * below to create a runtime-key exprcontext
+ * We do not need a standard exprcontext for this node, though we may
+ * decide below to create a runtime-key exprcontext
*/
/*
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 8c51e785b28..320a7896c33 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.96 2005/10/15 02:49:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeHash.c,v 1.96.2.1 2005/11/22 18:23:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -237,8 +237,8 @@ ExecHashTableCreate(Hash *node, List *hashOperators)
/*
* Initialize the hash table control block.
*
- * The hashtable control block is just palloc'd from the executor's per-query
- * memory context.
+ * The hashtable control block is just palloc'd from the executor's
+ * per-query memory context.
*/
hashtable = (HashJoinTable) palloc(sizeof(HashJoinTableData));
hashtable->nbuckets = nbuckets;
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index 817f3822ef6..40eebb44027 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.75 2005/10/18 01:06:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.75.2.1 2005/11/22 18:23:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -622,13 +622,13 @@ start_over:
* 1. In a LEFT JOIN, we have to process outer batches even if the inner
* batch is empty.
*
- * 2. If we have increased nbatch since the initial estimate, we have to scan
- * inner batches since they might contain tuples that need to be
+ * 2. If we have increased nbatch since the initial estimate, we have to
+ * scan inner batches since they might contain tuples that need to be
* reassigned to later inner batches.
*
- * 3. Similarly, if we have increased nbatch since starting the outer scan,
- * we have to rescan outer batches in case they contain tuples that need
- * to be reassigned.
+ * 3. Similarly, if we have increased nbatch since starting the outer
+ * scan, we have to rescan outer batches in case they contain tuples that
+ * need to be reassigned.
*/
curbatch++;
while (curbatch < nbatch &&
diff --git a/src/backend/executor/nodeIndexscan.c b/src/backend/executor/nodeIndexscan.c
index 94ab2223c75..009aba997f6 100644
--- a/src/backend/executor/nodeIndexscan.c
+++ b/src/backend/executor/nodeIndexscan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.104 2005/10/15 02:49:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.104.2.1 2005/11/22 18:23:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -578,8 +578,8 @@ ExecIndexBuildScanKeys(PlanState *planstate, List *quals,
* listed in the var node and use the value of the const as comparison
* data.
*
- * If we don't have a const node, it means our scan key is a function of
- * information obtained during the execution of the plan, in which
+ * If we don't have a const node, it means our scan key is a function
+ * of information obtained during the execution of the plan, in which
* case we need to recalculate the index scan key at run time. Hence,
* we set have_runtime_keys to true and place the appropriate
* subexpression in run_keys. The corresponding scan key values are
diff --git a/src/backend/executor/nodeMergejoin.c b/src/backend/executor/nodeMergejoin.c
index 0d4eed4c9ba..a1d209db349 100644
--- a/src/backend/executor/nodeMergejoin.c
+++ b/src/backend/executor/nodeMergejoin.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.75 2005/10/15 02:49:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.75.2.1 2005/11/22 18:23:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -239,8 +239,8 @@ MJExamineQuals(List *qualList, PlanState *parent)
* much like SelectSortFunction except we insist on matching all the
* operators provided, and it can be a cross-type opclass.
*
- * XXX for now, insist on forward sort so that NULLs can be counted on to
- * be high.
+ * XXX for now, insist on forward sort so that NULLs can be counted on
+ * to be high.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(qual->opno),
@@ -1121,13 +1121,13 @@ ExecMergeJoin(MergeJoinState *node)
* scan position to the first mark, and go join that tuple
* (and any following ones) to the new outer.
*
- * NOTE: we do not need to worry about the MatchedInner state
- * for the rescanned inner tuples. We know all of them
- * will match this new outer tuple and therefore won't be
- * emitted as fill tuples. This works *only* because we
- * require the extra joinquals to be nil when doing a
- * right or full join --- otherwise some of the rescanned
- * tuples might fail the extra joinquals.
+ * NOTE: we do not need to worry about the MatchedInner
+ * state for the rescanned inner tuples. We know all of
+ * them will match this new outer tuple and therefore
+ * won't be emitted as fill tuples. This works *only*
+ * because we require the extra joinquals to be nil when
+ * doing a right or full join --- otherwise some of the
+ * rescanned tuples might fail the extra joinquals.
*/
ExecRestrPos(innerPlan);
diff --git a/src/backend/executor/nodeNestloop.c b/src/backend/executor/nodeNestloop.c
index a497e9ac337..24f3621bb99 100644
--- a/src/backend/executor/nodeNestloop.c
+++ b/src/backend/executor/nodeNestloop.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.39 2005/10/15 02:49:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.39.2.1 2005/11/22 18:23:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -223,8 +223,8 @@ ExecNestLoop(NestLoopState *node)
* test the inner and outer tuples to see if they satisfy the node's
* qualification.
*
- * Only the joinquals determine MatchedOuter status, but all quals must
- * pass to actually return the tuple.
+ * Only the joinquals determine MatchedOuter status, but all quals
+ * must pass to actually return the tuple.
*/
ENL1_printf("testing qualification");
diff --git a/src/backend/executor/nodeSubplan.c b/src/backend/executor/nodeSubplan.c
index 0e7b6df7225..061ca12eda4 100644
--- a/src/backend/executor/nodeSubplan.c
+++ b/src/backend/executor/nodeSubplan.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.70 2005/10/15 02:49:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.70.2.1 2005/11/22 18:23:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -128,8 +128,8 @@ ExecHashSubPlan(SubPlanState *node,
* unequal to the LHS; if so, the result is UNKNOWN. (We skip that part
* if we don't care about UNKNOWN.) Otherwise, the result is FALSE.
*
- * Note: the reason we can avoid a full scan of the main hash table is that
- * the combining operators are assumed never to yield NULL when both
+ * Note: the reason we can avoid a full scan of the main hash table is
+ * that the combining operators are assumed never to yield NULL when both
* inputs are non-null. If they were to do so, we might need to produce
* UNKNOWN instead of FALSE because of an UNKNOWN result in comparing the
* LHS to some main-table entry --- which is a comparison we will not even
@@ -255,9 +255,9 @@ ExecScanSubPlan(SubPlanState *node,
* FALSE for ANY_SUBLINK, TRUE for ALL_SUBLINK, NULL for
* MULTIEXPR_SUBLINK.
*
- * For EXPR_SUBLINK we require the subplan to produce no more than one tuple,
- * else an error is raised. For ARRAY_SUBLINK we allow the subplan to
- * produce more than one tuple. In either case, if zero tuples are
+ * For EXPR_SUBLINK we require the subplan to produce no more than one
+ * tuple, else an error is raised. For ARRAY_SUBLINK we allow the subplan
+ * to produce more than one tuple. In either case, if zero tuples are
* produced, we return NULL. Assuming we get a tuple, we just use its
* first column (there can be only one non-junk column in this case).
*/
@@ -480,13 +480,13 @@ buildSubPlanHash(SubPlanState *node)
* If we need to distinguish accurately between FALSE and UNKNOWN (i.e.,
* NULL) results of the IN operation, then we have to store subplan output
* rows that are partly or wholly NULL. We store such rows in a separate
- * hash table that we expect will be much smaller than the main table.
- * (We can use hashing to eliminate partly-null rows that are not
- * distinct. We keep them separate to minimize the cost of the inevitable
- * full-table searches; see findPartialMatch.)
+ * hash table that we expect will be much smaller than the main table. (We
+ * can use hashing to eliminate partly-null rows that are not distinct.
+ * We keep them separate to minimize the cost of the inevitable full-table
+ * searches; see findPartialMatch.)
*
- * If it's not necessary to distinguish FALSE and UNKNOWN, then we don't need
- * to store subplan output rows that contain NULL.
+ * If it's not necessary to distinguish FALSE and UNKNOWN, then we don't
+ * need to store subplan output rows that contain NULL.
*/
MemoryContextReset(node->tablecxt);
node->hashtable = NULL;
@@ -796,8 +796,8 @@ ExecInitSubPlan(SubPlanState *node, EState *estate)
* righthand sides. We need both the ExprState list (for ExecProject)
* and the underlying parse Exprs (for ExecTypeFromTL).
*
- * We also extract the combining operators themselves to initialize the
- * equality and hashing functions for the hash tables.
+ * We also extract the combining operators themselves to initialize
+ * the equality and hashing functions for the hash tables.
*/
lefttlist = righttlist = NIL;
leftptlist = rightptlist = NIL;
diff --git a/src/backend/executor/nodeUnique.c b/src/backend/executor/nodeUnique.c
index ab3879d7cc6..0b0ee93e4b1 100644
--- a/src/backend/executor/nodeUnique.c
+++ b/src/backend/executor/nodeUnique.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.48 2005/10/15 02:49:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.48.2.1 2005/11/22 18:23:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -56,10 +56,10 @@ ExecUnique(UniqueState *node)
* now loop, returning only non-duplicate tuples. We assume that the
* tuples arrive in sorted order so we can detect duplicates easily.
*
- * We return the first tuple from each group of duplicates (or the last tuple
- * of each group, when moving backwards). At either end of the subplan,
- * clear the result slot so that we correctly return the first/last tuple
- * when reversing direction.
+ * We return the first tuple from each group of duplicates (or the last
+ * tuple of each group, when moving backwards). At either end of the
+ * subplan, clear the result slot so that we correctly return the
+ * first/last tuple when reversing direction.
*/
for (;;)
{
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 0b45fe49df2..e8ba1f5f292 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.144 2005/11/03 17:11:36 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.144.2.1 2005/11/22 18:23:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -113,8 +113,8 @@ SPI_connect(void)
/*
* Create memory contexts for this procedure
*
- * XXX it would be better to use PortalContext as the parent context, but we
- * may not be inside a portal (consider deferred-trigger execution).
+ * XXX it would be better to use PortalContext as the parent context, but
+ * we may not be inside a portal (consider deferred-trigger execution).
* Perhaps CurTransactionContext would do? For now it doesn't matter
* because we clean up explicitly in AtEOSubXact_SPI().
*/
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index b409b0a55c3..6bf3b9e88e6 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.132 2005/10/17 16:24:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.132.2.1 2005/11/22 18:23:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -163,7 +163,7 @@ pg_krb5_init(void)
{
ereport(LOG,
(errmsg("Kerberos sname_to_principal(\"%s\", \"%s\") returned error %d",
- khostname ? khostname : "server hostname", pg_krb_srvnam, retval)));
+ khostname ? khostname : "server hostname", pg_krb_srvnam, retval)));
com_err("postgres", retval,
"while getting server principal for server \"%s\" for service \"%s\"",
khostname ? khostname : "server hostname", pg_krb_srvnam);
diff --git a/src/backend/libpq/be-fsstubs.c b/src/backend/libpq/be-fsstubs.c
index 139f8946dd8..a63e38e02ed 100644
--- a/src/backend/libpq/be-fsstubs.c
+++ b/src/backend/libpq/be-fsstubs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/be-fsstubs.c,v 1.79 2005/10/15 02:49:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/be-fsstubs.c,v 1.79.2.1 2005/11/22 18:23:09 momjian Exp $
*
* NOTES
* This should be moved to a more appropriate place. It is here
@@ -479,9 +479,9 @@ lo_export(PG_FUNCTION_ARGS)
/*
* open the file to be written to
*
- * Note: we reduce backend's normal 077 umask to the slightly friendlier 022.
- * This code used to drop it all the way to 0, but creating world-writable
- * export files doesn't seem wise.
+ * Note: we reduce backend's normal 077 umask to the slightly friendlier
+ * 022. This code used to drop it all the way to 0, but creating
+ * world-writable export files doesn't seem wise.
*/
nbytes = VARSIZE(filename) - VARHDRSZ;
if (nbytes >= MAXPGPATH)
diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c
index a2404ebd38a..fa1de916a19 100644
--- a/src/backend/libpq/be-secure.c
+++ b/src/backend/libpq/be-secure.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.59 2005/10/15 02:49:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.59.2.1 2005/11/22 18:23:09 momjian Exp $
*
* Since the server static private key ($DataDir/server.key)
* will normally be stored unencrypted so that the database
@@ -747,10 +747,10 @@ initialize_SSL(void)
/*
* Require no public access to key file.
*
- * XXX temporarily suppress check when on Windows, because there may not
- * be proper support for Unix-y file permissions. Need to think of a
- * reasonable check to apply on Windows. (See also the data directory
- * permission check in postmaster.c)
+ * XXX temporarily suppress check when on Windows, because there may
+ * not be proper support for Unix-y file permissions. Need to think
+ * of a reasonable check to apply on Windows. (See also the data
+ * directory permission check in postmaster.c)
*/
#if !defined(WIN32) && !defined(__CYGWIN__)
if (!S_ISREG(buf.st_mode) || (buf.st_mode & (S_IRWXG | S_IRWXO)) ||
diff --git a/src/backend/libpq/ip.c b/src/backend/libpq/ip.c
index e1b26e5da8e..efba0753ce8 100644
--- a/src/backend/libpq/ip.c
+++ b/src/backend/libpq/ip.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/ip.c,v 1.32 2005/10/17 16:24:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/ip.c,v 1.32.2.1 2005/11/22 18:23:09 momjian Exp $
*
* This file and the IPV6 implementation were initially provided by
* Nigel Kukard <nkukard@lbsd.net>, Linux Based Systems Design
@@ -38,22 +38,22 @@
#include "libpq/ip.h"
-static int range_sockaddr_AF_INET(const struct sockaddr_in *addr,
- const struct sockaddr_in *netaddr,
- const struct sockaddr_in *netmask);
+static int range_sockaddr_AF_INET(const struct sockaddr_in * addr,
+ const struct sockaddr_in * netaddr,
+ const struct sockaddr_in * netmask);
#ifdef HAVE_IPV6
-static int range_sockaddr_AF_INET6(const struct sockaddr_in6 *addr,
- const struct sockaddr_in6 *netaddr,
- const struct sockaddr_in6 *netmask);
+static int range_sockaddr_AF_INET6(const struct sockaddr_in6 * addr,
+ const struct sockaddr_in6 * netaddr,
+ const struct sockaddr_in6 * netmask);
#endif
#ifdef HAVE_UNIX_SOCKETS
static int getaddrinfo_unix(const char *path,
- const struct addrinfo *hintsp,
- struct addrinfo **result);
+ const struct addrinfo * hintsp,
+ struct addrinfo ** result);
-static int getnameinfo_unix(const struct sockaddr_un *sa, int salen,
+static int getnameinfo_unix(const struct sockaddr_un * sa, int salen,
char *node, int nodelen,
char *service, int servicelen,
int flags);
@@ -65,7 +65,7 @@ static int getnameinfo_unix(const struct sockaddr_un *sa, int salen,
*/
int
pg_getaddrinfo_all(const char *hostname, const char *servname,
- const struct addrinfo *hintp, struct addrinfo **result)
+ const struct addrinfo * hintp, struct addrinfo ** result)
{
/* not all versions of getaddrinfo() zero *result on failure */
*result = NULL;
@@ -91,7 +91,7 @@ pg_getaddrinfo_all(const char *hostname, const char *servname,
* not safe to look at ai_family in the addrinfo itself.
*/
void
-pg_freeaddrinfo_all(int hint_ai_family, struct addrinfo *ai)
+pg_freeaddrinfo_all(int hint_ai_family, struct addrinfo * ai)
{
#ifdef HAVE_UNIX_SOCKETS
if (hint_ai_family == AF_UNIX)
@@ -125,7 +125,7 @@ pg_freeaddrinfo_all(int hint_ai_family, struct addrinfo *ai)
* guaranteed to be filled with something even on failure return.
*/
int
-pg_getnameinfo_all(const struct sockaddr_storage *addr, int salen,
+pg_getnameinfo_all(const struct sockaddr_storage * addr, int salen,
char *node, int nodelen,
char *service, int servicelen,
int flags)
@@ -168,8 +168,8 @@ pg_getnameinfo_all(const struct sockaddr_storage *addr, int salen,
* -------
*/
static int
-getaddrinfo_unix(const char *path, const struct addrinfo *hintsp,
- struct addrinfo **result)
+getaddrinfo_unix(const char *path, const struct addrinfo * hintsp,
+ struct addrinfo ** result)
{
struct addrinfo hints;
struct addrinfo *aip;
@@ -234,7 +234,7 @@ getaddrinfo_unix(const char *path, const struct addrinfo *hintsp,
* Convert an address to a hostname.
*/
static int
-getnameinfo_unix(const struct sockaddr_un *sa, int salen,
+getnameinfo_unix(const struct sockaddr_un * sa, int salen,
char *node, int nodelen,
char *service, int servicelen,
int flags)
@@ -267,7 +267,6 @@ getnameinfo_unix(const struct sockaddr_un *sa, int salen,
return 0;
}
-
#endif /* HAVE_UNIX_SOCKETS */
@@ -278,9 +277,9 @@ getnameinfo_unix(const struct sockaddr_un *sa, int salen,
* in the same address family; and AF_UNIX addresses are not supported.
*/
int
-pg_range_sockaddr(const struct sockaddr_storage *addr,
- const struct sockaddr_storage *netaddr,
- const struct sockaddr_storage *netmask)
+pg_range_sockaddr(const struct sockaddr_storage * addr,
+ const struct sockaddr_storage * netaddr,
+ const struct sockaddr_storage * netmask)
{
if (addr->ss_family == AF_INET)
return range_sockaddr_AF_INET((struct sockaddr_in *) addr,
@@ -297,9 +296,9 @@ pg_range_sockaddr(const struct sockaddr_storage *addr,
}
static int
-range_sockaddr_AF_INET(const struct sockaddr_in *addr,
- const struct sockaddr_in *netaddr,
- const struct sockaddr_in *netmask)
+range_sockaddr_AF_INET(const struct sockaddr_in * addr,
+ const struct sockaddr_in * netaddr,
+ const struct sockaddr_in * netmask)
{
if (((addr->sin_addr.s_addr ^ netaddr->sin_addr.s_addr) &
netmask->sin_addr.s_addr) == 0)
@@ -312,9 +311,9 @@ range_sockaddr_AF_INET(const struct sockaddr_in *addr,
#ifdef HAVE_IPV6
static int
-range_sockaddr_AF_INET6(const struct sockaddr_in6 *addr,
- const struct sockaddr_in6 *netaddr,
- const struct sockaddr_in6 *netmask)
+range_sockaddr_AF_INET6(const struct sockaddr_in6 * addr,
+ const struct sockaddr_in6 * netaddr,
+ const struct sockaddr_in6 * netmask)
{
int i;
@@ -327,8 +326,7 @@ range_sockaddr_AF_INET6(const struct sockaddr_in6 *addr,
return 1;
}
-
-#endif /* HAVE_IPV6 */
+#endif /* HAVE_IPV6 */
/*
* pg_sockaddr_cidr_mask - make a network mask of the appropriate family
@@ -339,7 +337,7 @@ range_sockaddr_AF_INET6(const struct sockaddr_in6 *addr,
* Return value is 0 if okay, -1 if not.
*/
int
-pg_sockaddr_cidr_mask(struct sockaddr_storage *mask, char *numbits, int family)
+pg_sockaddr_cidr_mask(struct sockaddr_storage * mask, char *numbits, int family)
{
long bits;
char *endptr;
@@ -414,7 +412,7 @@ pg_sockaddr_cidr_mask(struct sockaddr_storage *mask, char *numbits, int family)
* that pg_range_sockaddr will look at.
*/
void
-pg_promote_v4_to_v6_addr(struct sockaddr_storage *addr)
+pg_promote_v4_to_v6_addr(struct sockaddr_storage * addr)
{
struct sockaddr_in addr4;
struct sockaddr_in6 addr6;
@@ -449,7 +447,7 @@ pg_promote_v4_to_v6_addr(struct sockaddr_storage *addr)
* that pg_range_sockaddr will look at.
*/
void
-pg_promote_v4_to_v6_mask(struct sockaddr_storage *addr)
+pg_promote_v4_to_v6_mask(struct sockaddr_storage * addr)
{
struct sockaddr_in addr4;
struct sockaddr_in6 addr6;
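
The ip.c hunks above only change pointer-declaration spacing, but the body of range_sockaddr_AF_INET visible in the diff shows the underlying membership test: an address lies inside a network iff (addr XOR netaddr) AND netmask is zero. A minimal sketch of that same check on plain IPv4 integers, under the assumption of host-order uint32_t values (the helper name is hypothetical):

    #include <stdint.h>
    #include <stdbool.h>

    /* Standalone restatement of the test in range_sockaddr_AF_INET:
     * addr is in the network iff the bits selected by the mask are
     * identical in addr and netaddr. */
    static bool
    in_network_v4(uint32_t addr, uint32_t netaddr, uint32_t netmask)
    {
        return ((addr ^ netaddr) & netmask) == 0;
    }

    /* Example: 192.168.1.7 is inside 192.168.1.0/24:
     * in_network_v4(0xC0A80107, 0xC0A80100, 0xFFFFFF00) returns true. */
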
diff --git a/src/backend/main/main.c b/src/backend/main/main.c
index ea1a3bef254..bc89c9dc57d 100644
--- a/src/backend/main/main.c
+++ b/src/backend/main/main.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/main/main.c,v 1.96 2005/10/15 02:49:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/main/main.c,v 1.96.2.1 2005/11/22 18:23:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -62,9 +62,9 @@ main(int argc, char *argv[])
* code will NOT be executed when a backend or sub-bootstrap run is forked
* by the postmaster.
*
- * XXX The need for code here is proof that the platform in question is too
- * brain-dead to provide a standard C execution environment without help.
- * Avoid adding more here, if you can.
+ * XXX The need for code here is proof that the platform in question is
+ * too brain-dead to provide a standard C execution environment without
+ * help. Avoid adding more here, if you can.
*/
#if defined(__alpha) /* no __alpha__ ? */
@@ -134,8 +134,8 @@ main(int argc, char *argv[])
* be overwritten in order to set the process title for ps. In such cases
* save_ps_display_args makes and returns a new copy of the argv[] array.
*
- * save_ps_display_args may also move the environment strings to make extra
- * room. Therefore this should be done as early as possible during
+ * save_ps_display_args may also move the environment strings to make
+ * extra room. Therefore this should be done as early as possible during
* startup, to avoid entanglements with code that might save a getenv()
* result pointer.
*/
@@ -271,8 +271,8 @@ main(int argc, char *argv[])
/*
* Start our win32 signal implementation
*
- * SubPostmasterMain() will do this for itself, but the remaining modes need
- * it here
+ * SubPostmasterMain() will do this for itself, but the remaining modes
+ * need it here
*/
pgwin32_signal_initialize();
#endif
diff --git a/src/backend/optimizer/geqo/geqo_eval.c b/src/backend/optimizer/geqo/geqo_eval.c
index 0a2dee08dc8..555ae296092 100644
--- a/src/backend/optimizer/geqo/geqo_eval.c
+++ b/src/backend/optimizer/geqo/geqo_eval.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.77 2005/10/15 02:49:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.77.2.1 2005/11/22 18:23:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -89,10 +89,10 @@ geqo_eval(Gene *tour, int num_gene, GeqoEvalData *evaldata)
* truncating the list to its original length. NOTE this assumes that any
* added entries are appended at the end!
*
- * We also must take care not to mess up the outer join_rel_hash, if there is
- * one. We can do this by just temporarily setting the link to NULL. (If
- * we are dealing with enough join rels, which we very likely are, a new
- * hash table will get built and used locally.)
+ * We also must take care not to mess up the outer join_rel_hash, if there
+ * is one. We can do this by just temporarily setting the link to NULL.
+ * (If we are dealing with enough join rels, which we very likely are, a
+ * new hash table will get built and used locally.)
*/
savelength = list_length(evaldata->root->join_rel_list);
savehash = evaldata->root->join_rel_hash;
@@ -182,9 +182,9 @@ gimme_tree(Gene *tour, int num_gene, GeqoEvalData *evaldata)
* tour other than the one given. To the extent that the heuristics are
* helpful, however, this will be a better plan than the raw tour.
*
- * Also, when a join attempt fails (because of IN-clause constraints), we may
- * be able to recover and produce a workable plan, where the old code just
- * had to give up. This case acts the same as a false result from
+ * Also, when a join attempt fails (because of IN-clause constraints), we
+ * may be able to recover and produce a workable plan, where the old code
+ * just had to give up. This case acts the same as a false result from
* desirable_join().
*/
for (rel_count = 0; rel_count < num_gene; rel_count++)
diff --git a/src/backend/optimizer/geqo/geqo_pool.c b/src/backend/optimizer/geqo/geqo_pool.c
index 83927facae5..f71cc9fe6b2 100644
--- a/src/backend/optimizer/geqo/geqo_pool.c
+++ b/src/backend/optimizer/geqo/geqo_pool.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_pool.c,v 1.27 2005/10/15 02:49:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_pool.c,v 1.27.2.1 2005/11/22 18:23:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -99,9 +99,9 @@ random_init_pool(Pool *pool, GeqoEvalData *evaldata)
* We immediately discard any invalid individuals (those that geqo_eval
* returns DBL_MAX for), thereby not wasting pool space on them.
*
- * If we fail to make any valid individuals after 10000 tries, give up; this
- * probably means something is broken, and we shouldn't just let ourselves
- * get stuck in an infinite loop.
+ * If we fail to make any valid individuals after 10000 tries, give up;
+ * this probably means something is broken, and we shouldn't just let
+ * ourselves get stuck in an infinite loop.
*/
i = 0;
while (i < pool->size)
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index d8a42b82548..55e928936cc 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.137 2005/10/15 02:49:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.137.2.1 2005/11/22 18:23:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -440,8 +440,8 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
* Restrictions on individual clauses are checked by
* qual_is_pushdown_safe().
*
- * Non-pushed-down clauses will get evaluated as qpquals of the SubqueryScan
- * node.
+ * Non-pushed-down clauses will get evaluated as qpquals of the
+ * SubqueryScan node.
*
* XXX Are there any cases where we want to make a policy decision not to
* push down a pushable qual, because it'd result in a worse plan?
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 8a1df9e0a2d..6fe4c77ad0d 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -49,7 +49,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.149 2005/10/15 02:49:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.149.2.1 2005/11/22 18:23:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -155,8 +155,8 @@ cost_seqscan(Path *path, PlannerInfo *root,
/*
* disk costs
*
- * The cost of reading a page sequentially is 1.0, by definition. Note that
- * the Unix kernel will typically do some amount of read-ahead
+ * The cost of reading a page sequentially is 1.0, by definition. Note
+ * that the Unix kernel will typically do some amount of read-ahead
* optimization, so that this cost is less than the true cost of reading a
* page from disk. We ignore that issue here, but must take it into
* account when estimating the cost of non-sequential accesses!
@@ -480,8 +480,8 @@ cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel,
/*
* Estimate CPU costs per tuple.
*
- * Often the indexquals don't need to be rechecked at each tuple ... but not
- * always, especially not if there are enough tuples involved that the
+ * Often the indexquals don't need to be rechecked at each tuple ... but
+ * not always, especially not if there are enough tuples involved that the
* bitmaps become lossy. For the moment, just assume they will be
* rechecked always.
*/
@@ -869,13 +869,14 @@ cost_agg(Path *path, PlannerInfo *root,
* We will produce a single output tuple if not grouping, and a tuple per
* group otherwise. We charge cpu_tuple_cost for each output tuple.
*
- * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the same
- * total CPU cost, but AGG_SORTED has lower startup cost. If the input
- * path is already sorted appropriately, AGG_SORTED should be preferred
- * (since it has no risk of memory overflow). This will happen as long as
- * the computed total costs are indeed exactly equal --- but if there's
- * roundoff error we might do the wrong thing. So be sure that the
- * computations below form the same intermediate values in the same order.
+ * Note: in this cost model, AGG_SORTED and AGG_HASHED have exactly the
+ * same total CPU cost, but AGG_SORTED has lower startup cost. If the
+ * input path is already sorted appropriately, AGG_SORTED should be
+ * preferred (since it has no risk of memory overflow). This will happen
+ * as long as the computed total costs are indeed exactly equal --- but if
+ * there's roundoff error we might do the wrong thing. So be sure that
+ * the computations below form the same intermediate values in the same
+ * order.
*/
if (aggstrategy == AGG_PLAIN)
{
@@ -1074,8 +1075,8 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
* restriction clauses) separately. We use approx_selectivity here for
* speed --- in most cases, any errors won't affect the result much.
*
- * Note: it's probably bogus to use the normal selectivity calculation here
- * when either the outer or inner path is a UniquePath.
+ * Note: it's probably bogus to use the normal selectivity calculation
+ * here when either the outer or inner path is a UniquePath.
*/
merge_selec = approx_selectivity(root, mergeclauses,
path->jpath.jointype);
@@ -1095,22 +1096,22 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
* but on the other hand we ignore the bookkeeping costs of mark/restore.
* Not clear if it's worth developing a more refined model.
*
- * The number of re-fetches can be estimated approximately as size of merge
- * join output minus size of inner relation. Assume that the distinct key
- * values are 1, 2, ..., and denote the number of values of each key in
- * the outer relation as m1, m2, ...; in the inner relation, n1, n2, ...
- * Then we have
+ * The number of re-fetches can be estimated approximately as size of
+ * merge join output minus size of inner relation. Assume that the
+ * distinct key values are 1, 2, ..., and denote the number of values of
+ * each key in the outer relation as m1, m2, ...; in the inner relation,
+ * n1, n2, ... Then we have
*
* size of join = m1 * n1 + m2 * n2 + ...
*
- * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 * n1
- * + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
+ * number of rescanned tuples = (m1 - 1) * n1 + (m2 - 1) * n2 + ... = m1 *
+ * n1 + m2 * n2 + ... - (n1 + n2 + ...) = size of join - size of inner
* relation
*
- * This equation works correctly for outer tuples having no inner match (nk =
- * 0), but not for inner tuples having no outer match (mk = 0); we are
- * effectively subtracting those from the number of rescanned tuples, when
- * we should not. Can we do better without expensive selectivity
+ * This equation works correctly for outer tuples having no inner match
+ * (nk = 0), but not for inner tuples having no outer match (mk = 0); we
+ * are effectively subtracting those from the number of rescanned tuples,
+ * when we should not. Can we do better without expensive selectivity
* computations?
*/
if (IsA(outer_path, UniquePath))
@@ -1132,9 +1133,9 @@ cost_mergejoin(MergePath *path, PlannerInfo *root)
* inputs that will actually need to be scanned. We use only the first
* (most significant) merge clause for this purpose.
*
- * Since this calculation is somewhat expensive, and will be the same for all
- * mergejoin paths associated with the merge clause, we cache the results
- * in the RestrictInfo node.
+ * Since this calculation is somewhat expensive, and will be the same for
+ * all mergejoin paths associated with the merge clause, we cache the
+ * results in the RestrictInfo node.
*/
if (mergeclauses && path->jpath.jointype != JOIN_FULL)
{
@@ -1300,8 +1301,8 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
* restriction clauses) separately. We use approx_selectivity here for
* speed --- in most cases, any errors won't affect the result much.
*
- * Note: it's probably bogus to use the normal selectivity calculation here
- * when either the outer or inner path is a UniquePath.
+ * Note: it's probably bogus to use the normal selectivity calculation
+ * here when either the outer or inner path is a UniquePath.
*/
hash_selec = approx_selectivity(root, hashclauses,
path->jpath.jointype);
@@ -1341,8 +1342,8 @@ cost_hashjoin(HashPath *path, PlannerInfo *root)
* bucketsize estimated for any individual hashclause; this is undoubtedly
* conservative.
*
- * BUT: if inner relation has been unique-ified, we can assume it's good for
- * hashing. This is important both because it's the right answer, and
+ * BUT: if inner relation has been unique-ified, we can assume it's good
+ * for hashing. This is important both because it's the right answer, and
* because we avoid contaminating the cache with a value that's wrong for
* non-unique-ified paths.
*/
@@ -1538,8 +1539,8 @@ cost_qual_eval_walker(Node *node, QualCost *total)
* and so are boolean operators (AND, OR, NOT). Simplistic, but a lot
* better than no model at all.
*
- * Should we try to account for the possibility of short-circuit evaluation
- * of AND/OR?
+ * Should we try to account for the possibility of short-circuit
+ * evaluation of AND/OR?
*/
if (IsA(node, FuncExpr) ||
IsA(node, OpExpr) ||
@@ -1564,8 +1565,8 @@ cost_qual_eval_walker(Node *node, QualCost *total)
* (Sub-selects that can be executed as InitPlans have already been
* removed from the expression.)
*
- * An exception occurs when we have decided we can implement the subplan
- * by hashing.
+ * An exception occurs when we have decided we can implement the
+ * subplan by hashing.
*
*/
SubPlan *subplan = (SubPlan *) node;
@@ -1760,12 +1761,12 @@ set_joinrel_size_estimates(PlannerInfo *root, RelOptInfo *rel,
/*
* Basically, we multiply size of Cartesian product by selectivity.
*
- * If we are doing an outer join, take that into account: the output must be
- * at least as large as the non-nullable input. (Is there any chance of
- * being even smarter?)
+ * If we are doing an outer join, take that into account: the output must
+ * be at least as large as the non-nullable input. (Is there any chance
+ * of being even smarter?)
*
- * For JOIN_IN and variants, the Cartesian product is figured with respect to
- * a unique-ified input, and then we can clamp to the size of the other
+ * For JOIN_IN and variants, the Cartesian product is figured with respect
+ * to a unique-ified input, and then we can clamp to the size of the other
* input.
*/
switch (jointype)
@@ -1893,8 +1894,8 @@ set_function_size_estimates(PlannerInfo *root, RelOptInfo *rel)
/*
* Estimate number of rows the function itself will return.
*
- * XXX no idea how to do this yet; but we can at least check whether function
- * returns set or not...
+ * XXX no idea how to do this yet; but we can at least check whether
+ * function returns set or not...
*/
if (expression_returns_set(rte->funcexpr))
rel->tuples = 1000;
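
The reflowed cost_mergejoin comment above carries a small derivation worth spelling out; the following is just that comment's own algebra restated in LaTeX, with m_k and n_k the number of outer and inner tuples having the k-th distinct key value:

\[
\text{size of join} = \sum_k m_k n_k, \qquad
\text{rescanned tuples} = \sum_k (m_k - 1)\, n_k
  = \sum_k m_k n_k - \sum_k n_k
  = \text{size of join} - \text{size of inner relation}.
\]

As the comment notes, this is exact for outer tuples with no inner match (n_k = 0) but over-subtracts for inner tuples with no outer match (m_k = 0).
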
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index 466c369cde5..2dbbd0bfbef 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.191.2.1 2005/11/14 23:54:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.191.2.2 2005/11/22 18:23:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -262,8 +262,8 @@ find_usable_indexes(PlannerInfo *root, RelOptInfo *rel,
* to imply the predicate. If so, we could use the index in the
* current context.
*
- * We set useful_predicate to true iff the predicate was proven using the
- * current set of clauses. This is needed to prevent matching a
+ * We set useful_predicate to true iff the predicate was proven using
+ * the current set of clauses. This is needed to prevent matching a
* predOK index to an arm of an OR, which would be a legal but
* pointlessly inefficient plan. (A better plan will be generated by
* just scanning the predOK index alone, no OR.)
@@ -524,19 +524,19 @@ choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel, List *paths)
* always take the first, and sequentially add on paths that result in a
* lower estimated cost.
*
- * We also make some effort to detect directly redundant input paths, as can
- * happen if there are multiple possibly usable indexes. For this we look
- * only at plain IndexPath inputs, not at sub-OR clauses. And we consider
- * an index redundant if all its index conditions were already used by
- * earlier indexes. (We could use predicate_implied_by to have a more
- * intelligent, but much more expensive, check --- but in most cases
+ * We also make some effort to detect directly redundant input paths, as
+ * can happen if there are multiple possibly usable indexes. For this we
+ * look only at plain IndexPath inputs, not at sub-OR clauses. And we
+ * consider an index redundant if all its index conditions were already
+ * used by earlier indexes. (We could use predicate_implied_by to have a
+ * more intelligent, but much more expensive, check --- but in most cases
* simple pointer equality should suffice, since after all the index
* conditions are all coming from the same RestrictInfo lists.)
*
- * XXX is there any risk of throwing away a useful partial index here because
- * we don't explicitly look at indpred? At least in simple cases, the
- * partial index will sort before competing non-partial indexes and so it
- * makes the right choice, but perhaps we need to work harder.
+ * XXX is there any risk of throwing away a useful partial index here
+ * because we don't explicitly look at indpred? At least in simple cases,
+ * the partial index will sort before competing non-partial indexes and so
+ * it makes the right choice, but perhaps we need to work harder.
*
* Note: outputting the selected sub-paths in selectivity order is a good
* thing even if we weren't using that as part of the selection method,
@@ -920,8 +920,8 @@ check_partial_indexes(PlannerInfo *root, RelOptInfo *rel)
* index. For now, the test only uses restriction clauses (those in
* baserestrictinfo). --Nels, Dec '92
*
- * XXX as of 7.1, equivalence class info *is* available. Consider improving
- * this code as foreseen by Nels.
+ * XXX as of 7.1, equivalence class info *is* available. Consider
+ * improving this code as foreseen by Nels.
*/
foreach(ilist, rel->indexlist)
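
The choose_bitmap_and comment above treats an index path as redundant when every one of its index conditions was already used by earlier inputs, relying on simple pointer equality of the clause nodes. A minimal sketch of that test over plain arrays (invented sketch_* names; the real code walks List structures):

#include <stdbool.h>
#include <stddef.h>

/* Illustrative only: redundancy test by pointer equality of clause nodes. */
static bool
sketch_path_is_redundant(void *const *path_clauses, int npath,
                         void *const *used_clauses, int nused)
{
	for (int i = 0; i < npath; i++)
	{
		bool		found = false;

		for (int j = 0; j < nused; j++)
		{
			if (path_clauses[i] == used_clauses[j])
			{
				found = true;	/* this condition was already used */
				break;
			}
		}
		if (!found)
			return false;		/* path contributes something new */
	}
	return true;				/* nothing new: treat the path as redundant */
}
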
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index 3d6b333e31e..215d03a9719 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.97 2005/10/25 20:30:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.97.2.1 2005/11/22 18:23:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -176,8 +176,8 @@ sort_inner_and_outer(PlannerInfo *root,
* cheapest-startup-cost input paths later, and only if they don't need a
* sort.
*
- * If unique-ification is requested, do it and then handle as a plain inner
- * join.
+ * If unique-ification is requested, do it and then handle as a plain
+ * inner join.
*/
outer_path = outerrel->cheapest_total_path;
inner_path = innerrel->cheapest_total_path;
@@ -512,8 +512,8 @@ match_unsorted_outer(PlannerInfo *root,
/*
* Generate a mergejoin on the basis of sorting the cheapest inner.
- * Since a sort will be needed, only cheapest total cost matters.
- * (But create_mergejoin_path will do the right thing if
+ * Since a sort will be needed, only cheapest total cost matters. (But
+ * create_mergejoin_path will do the right thing if
* inner_cheapest_total is already correctly sorted.)
*/
add_path(joinrel, (Path *)
@@ -804,9 +804,9 @@ select_mergejoin_clauses(RelOptInfo *joinrel,
/*
* If processing an outer join, only use its own join clauses in the
- * merge. For inner joins we can use pushed-down clauses too.
- * (Note: we don't set have_nonmergeable_joinclause here because
- * pushed-down clauses will become otherquals not joinquals.)
+ * merge. For inner joins we can use pushed-down clauses too. (Note:
+ * we don't set have_nonmergeable_joinclause here because pushed-down
+ * clauses will become otherquals not joinquals.)
*/
if (isouterjoin && restrictinfo->is_pushed_down)
continue;
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index ecb63156860..04ad96f802e 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.76 2005/10/15 02:49:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.76.2.1 2005/11/22 18:23:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -496,8 +496,9 @@ make_join_rel(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2,
* innerrel is exactly RHS; conversely JOIN_REVERSE_IN handles
* RHS/LHS.
*
- * JOIN_UNIQUE_OUTER will work if outerrel is exactly RHS; conversely
- * JOIN_UNIQUE_INNER will work if innerrel is exactly RHS.
+ * JOIN_UNIQUE_OUTER will work if outerrel is exactly RHS;
+ * conversely JOIN_UNIQUE_INNER will work if innerrel is exactly
+ * RHS.
*
* But none of these will work if we already found another IN that
* needs to trigger here.
diff --git a/src/backend/optimizer/path/orindxpath.c b/src/backend/optimizer/path/orindxpath.c
index 5580b9a2772..8b6f2e4a674 100644
--- a/src/backend/optimizer/path/orindxpath.c
+++ b/src/backend/optimizer/path/orindxpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.75.2.1 2005/11/14 23:54:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.75.2.2 2005/11/22 18:23:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -91,11 +91,10 @@ create_or_index_quals(PlannerInfo *root, RelOptInfo *rel)
/*
* Find potentially interesting OR joinclauses. Note we must ignore any
- * joinclauses that are marked outerjoin_delayed, because they cannot
- * be pushed down to the per-relation level due to outer-join rules.
- * (XXX in some cases it might be possible to allow this, but it would
- * require substantially more bookkeeping about where the clause came
- * from.)
+ * joinclauses that are marked outerjoin_delayed, because they cannot be
+ * pushed down to the per-relation level due to outer-join rules. (XXX in
+ * some cases it might be possible to allow this, but it would require
+ * substantially more bookkeeping about where the clause came from.)
*/
foreach(i, rel->joininfo)
{
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index a2626929826..04d0914f036 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.73 2005/10/15 02:49:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.73.2.1 2005/11/22 18:23:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -125,8 +125,8 @@ add_equijoined_keys(PlannerInfo *root, RestrictInfo *restrictinfo)
* structure. If we find both of them in the same equivalence set to
* start with, we can quit immediately.
*
- * This is a standard UNION-FIND problem, for which there exist better data
- * structures than simple lists. If this code ever proves to be a
+ * This is a standard UNION-FIND problem, for which there exist better
+ * data structures than simple lists. If this code ever proves to be a
* bottleneck then it could be sped up --- but for now, simple is
* beautiful.
*/
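
The comment just above calls merging equivalence sets a standard UNION-FIND problem for which better data structures than simple lists exist. For illustration, a minimal array-based union-find with path halving and union by size (hypothetical standalone code, not the planner's list scheme; callers are assumed to initialize parent[i] = i and size[i] = 1):

/* Illustrative only: array-based union-find. */
typedef struct UnionFindSketch
{
	int		   *parent;			/* parent[i] == i marks a set representative */
	int		   *size;			/* member count, valid for representatives */
} UnionFindSketch;

static int
uf_find(UnionFindSketch *uf, int x)
{
	while (uf->parent[x] != x)
	{
		uf->parent[x] = uf->parent[uf->parent[x]];	/* path halving */
		x = uf->parent[x];
	}
	return x;
}

static void
uf_union(UnionFindSketch *uf, int a, int b)
{
	int			ra = uf_find(uf, a);
	int			rb = uf_find(uf, b);

	if (ra == rb)
		return;					/* already in the same equivalence set */
	if (uf->size[ra] < uf->size[rb])
	{
		int			tmp = ra;

		ra = rb;
		rb = tmp;
	}
	uf->parent[rb] = ra;		/* attach the smaller set under the larger */
	uf->size[ra] += uf->size[rb];
}
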
@@ -255,9 +255,9 @@ generate_implied_equalities(PlannerInfo *root)
* Match each item in the set with all that appear after it (it's
* sufficient to generate A=B, need not process B=A too).
*
- * A set containing only two items cannot imply any equalities beyond the
- * one that created the set, so we can skip this processing in that
- * case.
+ * A set containing only two items cannot imply any equalities beyond
+ * the one that created the set, so we can skip this processing in
+ * that case.
*/
if (nitems >= 3)
{
@@ -516,12 +516,12 @@ sub_generate_join_implications(PlannerInfo *root,
* the join clause, since both were automatically generated in the
* cases we care about.
*
- * XXX currently this may fail to match in cross-type cases because
- * the COALESCE will contain typecast operations while the join
- * clause may not (if there is a cross-type mergejoin operator
- * available for the two column types). Is it OK to strip implicit
- * coercions from the COALESCE arguments? What of the sortops in
- * such cases?
+ * XXX currently this may fail to match in cross-type cases
+ * because the COALESCE will contain typecast operations while the
+ * join clause may not (if there is a cross-type mergejoin
+ * operator available for the two column types). Is it OK to strip
+ * implicit coercions from the COALESCE arguments? What of the
+ * sortops in such cases?
*/
if (equal(leftop, cfirst) &&
equal(rightop, csecond) &&
@@ -1151,8 +1151,8 @@ build_join_pathkeys(PlannerInfo *root,
* here! The inner-rel vars we used to need to add are *already* part of
* the outer pathkey!
*
- * We do, however, need to truncate the pathkeys list, since it may contain
- * pathkeys that were useful for forming this joinrel but are
+ * We do, however, need to truncate the pathkeys list, since it may
+ * contain pathkeys that were useful for forming this joinrel but are
* uninteresting to higher levels.
*/
return truncate_useless_pathkeys(root, joinrel, outer_pathkeys);
@@ -1299,8 +1299,8 @@ find_mergeclauses_for_pathkeys(PlannerInfo *root,
* any redundant mergeclauses from the input list. However, in
* outer-join scenarios there might be multiple matches. An example is
*
- * select * from a full join b on a.v1 = b.v1 and a.v2 = b.v2 and a.v1 =
- * b.v2;
+ * select * from a full join b on a.v1 = b.v1 and a.v2 = b.v2 and a.v1
+ * = b.v2;
*
* Given the pathkeys ((a.v1), (a.v2)) it is okay to return all three
* clauses (in the order a.v1=b.v1, a.v1=b.v2, a.v2=b.v2) and indeed
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index fc093fdf180..0a474fa8ce4 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.202 2005/10/19 17:31:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.202.2.1 2005/11/22 18:23:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -325,7 +325,7 @@ use_physical_tlist(RelOptInfo *rel)
/*
* Can't do it if any system columns or whole-row Vars are requested,
- * either. (This could possibly be fixed but would take some fragile
+ * either. (This could possibly be fixed but would take some fragile
* assumptions in setrefs.c, I think.)
*/
for (i = rel->min_attr; i <= 0; i++)
@@ -1251,8 +1251,8 @@ create_nestloop_plan(PlannerInfo *root,
* caught this case because the join clauses would never have been put
* in the same joininfo list.
*
- * We can skip this if the index path is an ordinary indexpath and not a
- * special innerjoin path.
+ * We can skip this if the index path is an ordinary indexpath and not
+ * a special innerjoin path.
*/
IndexPath *innerpath = (IndexPath *) best_path->innerjoinpath;
@@ -1270,13 +1270,13 @@ create_nestloop_plan(PlannerInfo *root,
/*
* Same deal for bitmapped index scans.
*
- * Note: both here and above, we ignore any implicit index restrictions
- * associated with the use of partial indexes. This is OK because
- * we're only trying to prove we can dispense with some join quals;
- * failing to prove that doesn't result in an incorrect plan. It is
- * the right way to proceed because adding more quals to the stuff we
- * got from the original query would just make it harder to detect
- * duplication.
+ * Note: both here and above, we ignore any implicit index
+ * restrictions associated with the use of partial indexes. This is
+ * OK because we're only trying to prove we can dispense with some
+ * join quals; failing to prove that doesn't result in an incorrect
+ * plan. It is the right way to proceed because adding more quals to
+ * the stuff we got from the original query would just make it harder
+ * to detect duplication.
*/
BitmapHeapPath *innerpath = (BitmapHeapPath *) best_path->innerjoinpath;
@@ -1547,8 +1547,9 @@ fix_indexqual_references(List *indexquals, IndexPath *index_path,
/*
* Make a copy that will become the fixed clause.
*
- * We used to try to do a shallow copy here, but that fails if there is a
- * subplan in the arguments of the opclause. So just do a full copy.
+ * We used to try to do a shallow copy here, but that fails if there
+ * is a subplan in the arguments of the opclause. So just do a full
+ * copy.
*/
newclause = (OpExpr *) copyObject((Node *) clause);
@@ -2232,8 +2233,8 @@ make_sort_from_pathkeys(PlannerInfo *root, Plan *lefttree, List *pathkeys)
* available Var in the tlist. If there isn't any, use the first one
* that is an expression in the input's vars.
*
- * XXX if we have a choice, is there any way of figuring out which might
- * be cheapest to execute? (For example, int4lt is likely much
+ * XXX if we have a choice, is there any way of figuring out which
+ * might be cheapest to execute? (For example, int4lt is likely much
* cheaper to execute than numericlt, but both might appear in the
* same pathkey sublist...) Not clear that we ever will have a choice
* in practice, so it may not matter.
@@ -2553,12 +2554,13 @@ make_group(PlannerInfo *root,
* We also need to account for the cost of evaluation of the qual (ie, the
* HAVING clause) and the tlist.
*
- * XXX this double-counts the cost of evaluation of any expressions used for
- * grouping, since in reality those will have been evaluated at a lower
- * plan level and will only be copied by the Group node. Worth fixing?
+ * XXX this double-counts the cost of evaluation of any expressions used
+ * for grouping, since in reality those will have been evaluated at a
+ * lower plan level and will only be copied by the Group node. Worth
+ * fixing?
*
- * See notes in grouping_planner about why this routine and make_agg are the
- * only ones in this file that worry about tlist eval cost.
+ * See notes in grouping_planner about why this routine and make_agg are
+ * the only ones in this file that worry about tlist eval cost.
*/
if (qual)
{
@@ -2715,8 +2717,8 @@ make_limit(Plan *lefttree, Node *limitOffset, Node *limitCount,
* building a subquery then it's important to report correct info to the
* outer planner.
*
- * When the offset or count couldn't be estimated, use 10% of the estimated
- * number of rows emitted from the subplan.
+ * When the offset or count couldn't be estimated, use 10% of the
+ * estimated number of rows emitted from the subplan.
*/
if (offset_est != 0)
{
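
The make_limit comment above notes that when the OFFSET or count value could not be estimated, 10% of the subplan's row estimate is used instead. A standalone sketch of that fallback, under the invented convention that a negative estimate means "unknown" (this is not the patched function):

/* Illustrative only: row-count estimate for a LIMIT/OFFSET node. */
static double
sketch_limit_rows(double subplan_rows, double offset_est, double count_est)
{
	double		rows = subplan_rows;

	if (offset_est < 0)			/* unknown OFFSET: assume 10% of input */
		offset_est = 0.10 * subplan_rows;
	if (count_est < 0)			/* unknown LIMIT: assume 10% of input */
		count_est = 0.10 * subplan_rows;

	rows -= offset_est;			/* rows skipped by OFFSET */
	if (rows < 0)
		rows = 0;
	if (count_est < rows)
		rows = count_est;		/* cap at the LIMIT count */

	return rows;
}
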
diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c
index 0901c66518c..66f5dc9d3d6 100644
--- a/src/backend/optimizer/plan/initsplan.c
+++ b/src/backend/optimizer/plan/initsplan.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.110.2.1 2005/11/14 23:54:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.110.2.2 2005/11/22 18:23:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -466,8 +466,8 @@ distribute_qual_to_rels(PlannerInfo *root, Node *clause,
* we'd produce no output rows, rather than the intended single
* null-extended row, for any nonnullable-side rows failing the qual.
*
- * Note: an outer-join qual that mentions only nullable-side rels can be
- * pushed down into the nullable side without changing the join
+ * Note: an outer-join qual that mentions only nullable-side rels can
+ * be pushed down into the nullable side without changing the join
* result, so we treat it the same as an ordinary inner-join qual,
* except for not setting maybe_equijoin (see below).
*/
@@ -860,8 +860,8 @@ process_implied_equality(PlannerInfo *root,
/*
* Push the new clause into all the appropriate restrictinfo lists.
*
- * Note: we mark the qual "pushed down" to ensure that it can never be taken
- * for an original JOIN/ON clause.
+ * Note: we mark the qual "pushed down" to ensure that it can never be
+ * taken for an original JOIN/ON clause.
*/
distribute_qual_to_rels(root, (Node *) clause,
true, true, false, NULL, relids);
diff --git a/src/backend/optimizer/plan/planagg.c b/src/backend/optimizer/plan/planagg.c
index 7c2f0211f10..a63f85ba511 100644
--- a/src/backend/optimizer/plan/planagg.c
+++ b/src/backend/optimizer/plan/planagg.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.10 2005/10/15 02:49:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.10.2.1 2005/11/22 18:23:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -93,9 +93,9 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path)
/*
* Reject unoptimizable cases.
*
- * We don't handle GROUP BY, because our current implementations of grouping
- * require looking at all the rows anyway, and so there's not much point
- * in optimizing MIN/MAX.
+ * We don't handle GROUP BY, because our current implementations of
+ * grouping require looking at all the rows anyway, and so there's not
+ * much point in optimizing MIN/MAX.
*/
if (parse->groupClause)
return NULL;
@@ -160,9 +160,9 @@ optimize_minmax_aggregates(PlannerInfo *root, List *tlist, Path *best_path)
/*
* Make the cost comparison.
*
- * Note that we don't include evaluation cost of the tlist here; this is OK
- * since it isn't included in best_path's cost either, and should be the
- * same in either case.
+ * Note that we don't include evaluation cost of the tlist here; this is
+ * OK since it isn't included in best_path's cost either, and should be
+ * the same in either case.
*/
cost_agg(&agg_p, root, AGG_PLAIN, list_length(aggs_list),
0, 0,
@@ -493,12 +493,12 @@ make_agg_subplan(PlannerInfo *root, MinMaxAggInfo *info, List *constant_quals)
* node above it. We might need a gating Result, too, to handle any
* non-variable qual clauses.
*
- * Also we must add a "WHERE foo IS NOT NULL" restriction to the indexscan,
- * to be sure we don't return a NULL, which'd be contrary to the standard
- * behavior of MIN/MAX. XXX ideally this should be done earlier, so that
- * the selectivity of the restriction could be included in our cost
- * estimates. But that looks painful, and in most cases the fraction of
- * NULLs isn't high enough to change the decision.
+ * Also we must add a "WHERE foo IS NOT NULL" restriction to the
+ * indexscan, to be sure we don't return a NULL, which'd be contrary to
+ * the standard behavior of MIN/MAX. XXX ideally this should be done
+ * earlier, so that the selectivity of the restriction could be included
+ * in our cost estimates. But that looks painful, and in most cases the
+ * fraction of NULLs isn't high enough to change the decision.
*/
plan = create_plan(&subroot, (Path *) info->path);
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
index ecbf44400c9..fc46c7aaf44 100644
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.89 2005/10/15 02:49:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.89.2.1 2005/11/22 18:23:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -146,11 +146,11 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
* added to appropriate lists belonging to the mentioned relations. We
* also build lists of equijoined keys for pathkey construction.
*
- * Note: all subplan nodes will have "flat" (var-only) tlists. This implies
- * that all expression evaluations are done at the root of the plan tree.
- * Once upon a time there was code to try to push expensive function calls
- * down to lower plan nodes, but that's dead code and has been for a long
- * time...
+ * Note: all subplan nodes will have "flat" (var-only) tlists. This
+ * implies that all expression evaluations are done at the root of the
+ * plan tree. Once upon a time there was code to try to push expensive
+ * function calls down to lower plan nodes, but that's dead code and has
+ * been for a long time...
*/
build_base_rel_tlists(root, tlist);
@@ -273,9 +273,9 @@ query_planner(PlannerInfo *root, List *tlist, double tuple_fraction,
* "cheapest presorted" path will be the cheapest overall for the tuple
* fraction.)
*
- * The cheapest-total path is also the one to use if grouping_planner decides
- * to use hashed aggregation, so we return it separately even if this
- * routine thinks the presorted path is the winner.
+ * The cheapest-total path is also the one to use if grouping_planner
+ * decides to use hashed aggregation, so we return it separately even if
+ * this routine thinks the presorted path is the winner.
*/
cheapestpath = final_rel->cheapest_total_path;
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 762dfb4b641..b1beb2c834e 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.194 2005/10/15 02:49:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.194.2.1 2005/11/22 18:23:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -99,9 +99,9 @@ planner(Query *parse, bool isCursor, int cursorOptions,
* multiple sub-queries. Also, boundParams is explicitly info from outside
* the query, and so is likewise better handled as a global variable.
*
- * Note we do NOT save and restore PlannerPlanId: it exists to assign unique
- * IDs to SubPlan nodes, and we want those IDs to be unique for the life
- * of a backend. Also, PlannerInitPlan is saved/restored in
+ * Note we do NOT save and restore PlannerPlanId: it exists to assign
+ * unique IDs to SubPlan nodes, and we want those IDs to be unique for the
+ * life of a backend. Also, PlannerInitPlan is saved/restored in
* subquery_planner, not here.
*/
save_PlannerQueryLevel = PlannerQueryLevel;
@@ -302,14 +302,14 @@ subquery_planner(Query *parse, double tuple_fraction,
* HAVING clause into WHERE, in hopes of eliminating tuples before
* aggregation instead of after.
*
- * If the query has explicit grouping then we can simply move such a clause
- * into WHERE; any group that fails the clause will not be in the output
- * because none of its tuples will reach the grouping or aggregation
- * stage. Otherwise we must have a degenerate (variable-free) HAVING
- * clause, which we put in WHERE so that query_planner() can use it in a
- * gating Result node, but also keep in HAVING to ensure that we don't
- * emit a bogus aggregated row. (This could be done better, but it seems
- * not worth optimizing.)
+ * If the query has explicit grouping then we can simply move such a
+ * clause into WHERE; any group that fails the clause will not be in the
+ * output because none of its tuples will reach the grouping or
+ * aggregation stage. Otherwise we must have a degenerate (variable-free)
+ * HAVING clause, which we put in WHERE so that query_planner() can use it
+ * in a gating Result node, but also keep in HAVING to ensure that we
+ * don't emit a bogus aggregated row. (This could be done better, but it
+ * seems not worth optimizing.)
*
* Note that both havingQual and parse->jointree->quals are in
* implicitly-ANDed-list form at this point, even though they are declared
@@ -426,8 +426,8 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind)
* careful to maintain AND/OR flatness --- that is, do not generate a tree
* with AND directly under AND, nor OR directly under OR.
*
- * Because this is a relatively expensive process, we skip it when the query
- * is trivial, such as "SELECT 2+2;" or "INSERT ... VALUES()". The
+ * Because this is a relatively expensive process, we skip it when the
+ * query is trivial, such as "SELECT 2+2;" or "INSERT ... VALUES()". The
* expression will only be evaluated once anyway, so no point in
* pre-simplifying; we can't execute it any faster than the executor can,
* and we will waste cycles copying the tree. Notice however that we
@@ -577,13 +577,13 @@ inheritance_planner(PlannerInfo *root, List *inheritlist)
* XXX my goodness this next bit is ugly. Really need to think about
* ways to rein in planner's habit of scribbling on its input.
*
- * Planning of the subquery might have modified the rangetable, either by
- * addition of RTEs due to expansion of inherited source tables, or by
- * changes of the Query structures inside subquery RTEs. We have to
- * ensure that this gets propagated back to the master copy. However,
- * if we aren't done planning yet, we also need to ensure that
- * subsequent calls to grouping_planner have virgin sub-Queries to
- * work from. So, if we are at the last list entry, just copy the
+ * Planning of the subquery might have modified the rangetable, either
+ * by addition of RTEs due to expansion of inherited source tables, or
+ * by changes of the Query structures inside subquery RTEs. We have
+ * to ensure that this gets propagated back to the master copy.
+ * However, if we aren't done planning yet, we also need to ensure
+ * that subsequent calls to grouping_planner have virgin sub-Queries
+ * to work from. So, if we are at the last list entry, just copy the
* subquery rangetable back to the master copy; if we are not, then
* extend the master copy by adding whatever the subquery added. (We
* assume these added entries will go untouched by the future
@@ -759,8 +759,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* Note: we do not attempt to detect duplicate aggregates here; a
* somewhat-overestimated count is okay for our present purposes.
*
- * Note: think not that we can turn off hasAggs if we find no aggs. It is
- * possible for constant-expression simplification to remove all
+ * Note: think not that we can turn off hasAggs if we find no aggs. It
+ * is possible for constant-expression simplification to remove all
* explicit references to aggs, but we still have to follow the
* aggregate semantics (eg, producing only one output row).
*/
@@ -980,8 +980,8 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
* GROUP BY without aggregation, so insert a group node (plus
* the appropriate sort node, if necessary).
*
- * Add an explicit sort if we couldn't make the path come out the
- * way the GROUP node needs it.
+ * Add an explicit sort if we couldn't make the path come out
+ * the way the GROUP node needs it.
*/
if (!pathkeys_contained_in(group_pathkeys, current_pathkeys))
{
@@ -1329,14 +1329,15 @@ choose_hashed_grouping(PlannerInfo *root, double tuple_fraction,
* output won't be sorted may be a loss; so we need to do an actual cost
* comparison.
*
- * We need to consider cheapest_path + hashagg [+ final sort] versus either
- * cheapest_path [+ sort] + group or agg [+ final sort] or presorted_path
- * + group or agg [+ final sort] where brackets indicate a step that may
- * not be needed. We assume query_planner() will have returned a presorted
- * path only if it's a winner compared to cheapest_path for this purpose.
+ * We need to consider cheapest_path + hashagg [+ final sort] versus
+ * either cheapest_path [+ sort] + group or agg [+ final sort] or
+ * presorted_path + group or agg [+ final sort] where brackets indicate a
+ * step that may not be needed. We assume query_planner() will have
+ * returned a presorted path only if it's a winner compared to
+ * cheapest_path for this purpose.
*
- * These path variables are dummies that just hold cost fields; we don't make
- * actual Paths for these steps.
+ * These path variables are dummies that just hold cost fields; we don't
+ * make actual Paths for these steps.
*/
cost_agg(&hashed_p, root, AGG_HASHED, agg_counts->numAggs,
numGroupCols, dNumGroups,
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index e6f97fa0815..f206eefd75d 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.117 2005/11/03 17:45:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.117.2.1 2005/11/22 18:23:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -608,8 +608,8 @@ adjust_plan_varnos(Plan *plan, int rtoffset)
/*
* Now recurse into child plans.
*
- * We don't need to (and in fact mustn't) recurse into subqueries, so no need
- * to examine initPlan list.
+ * We don't need to (and in fact mustn't) recurse into subqueries, so no
+ * need to examine initPlan list.
*/
adjust_plan_varnos(plan->lefttree, rtoffset);
adjust_plan_varnos(plan->righttree, rtoffset);
@@ -853,8 +853,8 @@ set_inner_join_references(Plan *inner_plan,
* The inner side is a bitmap scan plan. Fix the top node, and
* recurse to get the lower nodes.
*
- * Note: create_bitmap_scan_plan removes clauses from bitmapqualorig if
- * they are duplicated in qpqual, so must test these independently.
+ * Note: create_bitmap_scan_plan removes clauses from bitmapqualorig
+ * if they are duplicated in qpqual, so must test these independently.
*/
BitmapHeapScan *innerscan = (BitmapHeapScan *) inner_plan;
Index innerrel = innerscan->scan.scanrelid;
diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c
index b0dc9c5bf7f..e621e710f5b 100644
--- a/src/backend/optimizer/plan/subselect.c
+++ b/src/backend/optimizer/plan/subselect.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.100 2005/10/15 02:49:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.100.2.1 2005/11/22 18:23:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -118,10 +118,10 @@ replace_outer_var(Var *var)
* this sort of aliasing will cause no trouble. The correct field should
* get stored into the Param slot at execution in each part of the tree.
*
- * We also need to demand a match on vartypmod. This does not matter for the
- * Param itself, since those are not typmod-dependent, but it does matter
- * when make_subplan() instantiates a modified copy of the Var for a
- * subplan's args list.
+ * We also need to demand a match on vartypmod. This does not matter for
+ * the Param itself, since those are not typmod-dependent, but it does
+ * matter when make_subplan() instantiates a modified copy of the Var for
+ * a subplan's args list.
*/
i = 0;
foreach(ppl, PlannerParamList)
@@ -267,8 +267,8 @@ make_subplan(SubLink *slink, List *lefthand, bool isTopQual)
* 50% retrieval. For EXPR and MULTIEXPR subplans, use default behavior
* (we're only expecting one row out, anyway).
*
- * NOTE: if you change these numbers, also change cost_qual_eval_walker() in
- * path/costsize.c.
+ * NOTE: if you change these numbers, also change cost_qual_eval_walker()
+ * in path/costsize.c.
*
* XXX If an ALL/ANY subplan is uncorrelated, we may decide to hash or
* materialize its result below. In that case it would've been better to
@@ -698,10 +698,10 @@ convert_IN_to_join(PlannerInfo *root, SubLink *sublink)
/*
* Okay, pull up the sub-select into top range table and jointree.
*
- * We rely here on the assumption that the outer query has no references to
- * the inner (necessarily true, other than the Vars that we build below).
- * Therefore this is a lot easier than what pull_up_subqueries has to go
- * through.
+ * We rely here on the assumption that the outer query has no references
+ * to the inner (necessarily true, other than the Vars that we build
+ * below). Therefore this is a lot easier than what pull_up_subqueries has
+ * to go through.
*/
rte = addRangeTableEntryForSubquery(NULL,
subselect,
@@ -938,9 +938,9 @@ SS_finalize_plan(Plan *plan, List *rtable)
* Finally, attach any initPlans to the topmost plan node, and add their
* extParams to the topmost node's, too.
*
- * We also add the total_cost of each initPlan to the startup cost of the top
- * node. This is a conservative overestimate, since in fact each initPlan
- * might be executed later than plan startup, or even not at all.
+ * We also add the total_cost of each initPlan to the startup cost of the
+ * top node. This is a conservative overestimate, since in fact each
+ * initPlan might be executed later than plan startup, or even not at all.
*/
plan->initPlan = PlannerInitPlan;
PlannerInitPlan = NIL; /* make sure they're not attached twice */
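
The SS_finalize_plan comment above says the total_cost of each initPlan is added to the topmost node's startup cost as a deliberately conservative overestimate. A small sketch of that accounting over invented cost fields (startup cost is assumed to be included in total cost, as in the planner's cost model):

/* Illustrative only: charge initPlan costs to the top node's startup cost. */
typedef struct SketchCosts
{
	double		startup_cost;	/* cost before the first row can be returned */
	double		total_cost;		/* cost to return all rows (includes startup) */
} SketchCosts;

static void
sketch_charge_initplans(SketchCosts *top, const SketchCosts *initplans, int n)
{
	for (int i = 0; i < n; i++)
	{
		/* Conservative: assume every initPlan runs before the top node starts. */
		top->startup_cost += initplans[i].total_cost;
		top->total_cost += initplans[i].total_cost;
	}
}
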
diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c
index ece6133c144..cc043d613d1 100644
--- a/src/backend/optimizer/prep/prepjointree.c
+++ b/src/backend/optimizer/prep/prepjointree.c
@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.31 2005/10/15 02:49:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.31.2.1 2005/11/22 18:23:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -198,9 +198,9 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
* routine's processing is complete for its jointree and
* rangetable.
*
- * Note: 'false' is correct here even if we are within an outer join
- * in the upper query; the lower query starts with a clean slate
- * for outer-join semantics.
+ * Note: 'false' is correct here even if we are within an outer
+ * join in the upper query; the lower query starts with a clean
+ * slate for outer-join semantics.
*/
subquery->jointree = (FromExpr *)
pull_up_subqueries(subroot, (Node *) subquery->jointree,
@@ -210,9 +210,9 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
* Now we must recheck whether the subquery is still simple enough
* to pull up. If not, abandon processing it.
*
- * We don't really need to recheck all the conditions involved, but
- * it's easier just to keep this "if" looking the same as the one
- * above.
+ * We don't really need to recheck all the conditions involved,
+ * but it's easier just to keep this "if" looking the same as the
+ * one above.
*/
if (is_simple_subquery(subquery) &&
(!below_outer_join || has_nullable_targetlist(subquery)))
@@ -294,8 +294,8 @@ pull_up_subqueries(PlannerInfo *root, Node *jtnode, bool below_outer_join)
* already adjusted the marker values, so just list_concat the
* list.)
*
- * Executor can't handle multiple FOR UPDATE/SHARE/NOWAIT flags, so
- * complain if they are valid but different
+ * Executor can't handle multiple FOR UPDATE/SHARE/NOWAIT flags,
+ * so complain if they are valid but different
*/
if (parse->rowMarks && subquery->rowMarks)
{
diff --git a/src/backend/optimizer/prep/prepqual.c b/src/backend/optimizer/prep/prepqual.c
index 9fad52acfe0..6613e763fe3 100644
--- a/src/backend/optimizer/prep/prepqual.c
+++ b/src/backend/optimizer/prep/prepqual.c
@@ -25,7 +25,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepqual.c,v 1.51 2005/10/15 02:49:21 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepqual.c,v 1.51.2.1 2005/11/22 18:23:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -431,9 +431,9 @@ process_duplicate_ors(List *orlist)
/*
* Generate new OR list consisting of the remaining sub-clauses.
*
- * If any clause degenerates to empty, then we have a situation like (A AND
- * B) OR (A), which can be reduced to just A --- that is, the additional
- * conditions in other arms of the OR are irrelevant.
+ * If any clause degenerates to empty, then we have a situation like (A
+ * AND B) OR (A), which can be reduced to just A --- that is, the
+ * additional conditions in other arms of the OR are irrelevant.
*
* Note that because we use list_difference, any multiple occurrences of a
* winning clause in an AND sub-clause will be removed automatically.
diff --git a/src/backend/optimizer/prep/preptlist.c b/src/backend/optimizer/prep/preptlist.c
index f23d0554e7c..9049a3be0bb 100644
--- a/src/backend/optimizer/prep/preptlist.c
+++ b/src/backend/optimizer/prep/preptlist.c
@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/preptlist.c,v 1.78 2005/10/15 02:49:21 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/preptlist.c,v 1.78.2.1 2005/11/22 18:23:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,8 +64,8 @@ preprocess_targetlist(PlannerInfo *root, List *tlist)
/*
* for heap_formtuple to work, the targetlist must match the exact order
- * of the attributes. We also need to fill in any missing attributes.
- * -ay 10/94
+ * of the attributes. We also need to fill in any missing attributes. -ay
+ * 10/94
*/
if (command_type == CMD_INSERT || command_type == CMD_UPDATE)
tlist = expand_targetlist(tlist, command_type,
@@ -185,10 +185,10 @@ expand_targetlist(List *tlist, int command_type,
* The rewriter should have already ensured that the TLEs are in correct
* order; but we have to insert TLEs for any missing attributes.
*
- * Scan the tuple description in the relation's relcache entry to make sure
- * we have all the user attributes in the right order. We assume that the
- * rewriter already acquired at least AccessShareLock on the relation, so
- * we need no lock here.
+ * Scan the tuple description in the relation's relcache entry to make
+ * sure we have all the user attributes in the right order. We assume
+ * that the rewriter already acquired at least AccessShareLock on the
+ * relation, so we need no lock here.
*/
rel = heap_open(getrelid(result_relation, range_table), NoLock);
@@ -220,18 +220,19 @@ expand_targetlist(List *tlist, int command_type,
* column isn't dropped, apply any domain constraints that might
* exist --- this is to catch domain NOT NULL.
*
- * For UPDATE, generate a Var reference to the existing value of the
- * attribute, so that it gets copied to the new tuple. But
+ * For UPDATE, generate a Var reference to the existing value of
+ * the attribute, so that it gets copied to the new tuple. But
* generate a NULL for dropped columns (we want to drop any old
* values).
*
- * When generating a NULL constant for a dropped column, we label it
- * INT4 (any other guaranteed-to-exist datatype would do as well).
- * We can't label it with the dropped column's datatype since that
- * might not exist anymore. It does not really matter what we
- * claim the type is, since NULL is NULL --- its representation is
- * datatype-independent. This could perhaps confuse code
- * comparing the finished plan to the target relation, however.
+ * When generating a NULL constant for a dropped column, we label
+ * it INT4 (any other guaranteed-to-exist datatype would do as
+ * well). We can't label it with the dropped column's datatype
+ * since that might not exist anymore. It does not really matter
+ * what we claim the type is, since NULL is NULL --- its
+ * representation is datatype-independent. This could perhaps
+ * confuse code comparing the finished plan to the target
+ * relation, however.
*/
Oid atttype = att_tup->atttypid;
int32 atttypmod = att_tup->atttypmod;
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index dc7d94e1c6c..11ce490ddc3 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.127 2005/10/15 02:49:21 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.127.2.1 2005/11/22 18:23:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -439,8 +439,8 @@ recurse_union_children(Node *setOp, PlannerInfo *root,
/*
* Not same, so plan this child separately.
*
- * Note we disallow any resjunk columns in child results. This is necessary
- * since the Append node that implements the union won't do any
+ * Note we disallow any resjunk columns in child results. This is
+ * necessary since the Append node that implements the union won't do any
* projection, and upper levels will get confused if some of our output
* tuples have junk and some don't. This case only arises when we have an
* EXCEPT or INTERSECT as child, else there won't be resjunk anyway.
@@ -495,9 +495,9 @@ generate_setop_tlist(List *colTypes, int flag,
* data types and column names. Insert datatype coercions where
* necessary.
*
- * HACK: constants in the input's targetlist are copied up as-is rather
- * than being referenced as subquery outputs. This is mainly to
- * ensure that when we try to coerce them to the output column's
+ * HACK: constants in the input's targetlist are copied up as-is
+ * rather than being referenced as subquery outputs. This is mainly
+ * to ensure that when we try to coerce them to the output column's
* datatype, the right things happen for UNKNOWN constants. But do
* this only at the first level of subquery-scan plans; we don't want
* phony constants appearing in the output tlists of upper-level
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index 5e2718dc635..fc8d530b1e8 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.201 2005/10/15 02:49:21 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.201.2.1 2005/11/22 18:23:12 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
@@ -1803,8 +1803,8 @@ eval_const_expressions_mutator(Node *node,
* simplifying functions.) Also, we can optimize field selection from
* a RowExpr construct.
*
- * We must however check that the declared type of the field is still the
- * same as when the FieldSelect was created --- this can change if
+ * We must however check that the declared type of the field is still
+ * the same as when the FieldSelect was created --- this can change if
* someone did ALTER COLUMN TYPE on the rowtype.
*/
FieldSelect *fselect = (FieldSelect *) node;
@@ -2638,10 +2638,10 @@ evaluate_expr(Expr *expr, Oid result_type)
/*
* And evaluate it.
*
- * It is OK to use a default econtext because none of the ExecEvalExpr() code
- * used in this situation will use econtext. That might seem fortuitous,
- * but it's not so unreasonable --- a constant expression does not depend
- * on context, by definition, n'est ce pas?
+ * It is OK to use a default econtext because none of the ExecEvalExpr()
+ * code used in this situation will use econtext. That might seem
+ * fortuitous, but it's not so unreasonable --- a constant expression does
+ * not depend on context, by definition, n'est ce pas?
*/
const_val = ExecEvalExprSwitchContext(exprstate,
GetPerTupleExprContext(estate),
@@ -2774,9 +2774,9 @@ expression_tree_walker(Node *node,
* The walker has already visited the current node, and so we need only
* recurse into any sub-nodes it has.
*
- * We assume that the walker is not interested in List nodes per se, so when
- * we expect a List we just recurse directly to self without bothering to
- * call the walker.
+ * We assume that the walker is not interested in List nodes per se, so
+ * when we expect a List we just recurse directly to self without
+ * bothering to call the walker.
*/
if (node == NULL)
return false;
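
The expression_tree_walker comment above explains that the walker callback has already seen the current node, so only its sub-nodes are visited, and that lists are recursed into directly rather than handed to the walker. A miniature of that idiom over a hypothetical expression type (the sketch_* names are invented; this is not the PostgreSQL Node system):

#include <stdbool.h>
#include <stddef.h>

/* Illustrative only: a tiny expression tree with list-valued children. */
typedef struct SketchExpr SketchExpr;

typedef struct SketchExprList
{
	int			nitems;
	SketchExpr **items;
} SketchExprList;

struct SketchExpr
{
	int			kind;			/* operator, constant, ... */
	SketchExprList *args;		/* sub-expressions, may be NULL */
};

typedef bool (*sketch_walker_fn) (SketchExpr *node, void *context);

static bool sketch_tree_walker(SketchExpr *node, sketch_walker_fn walker,
							   void *context);

/* Recurse into a list directly; the walker never sees the list itself. */
static bool
sketch_walk_list(SketchExprList *list, sketch_walker_fn walker, void *context)
{
	for (int i = 0; list != NULL && i < list->nitems; i++)
	{
		/*
		 * The walker sees each member and may call sketch_tree_walker itself
		 * to continue downward, mirroring the mutual recursion.
		 */
		if (walker(list->items[i], context))
			return true;
	}
	return false;
}

static bool
sketch_tree_walker(SketchExpr *node, sketch_walker_fn walker, void *context)
{
	if (node == NULL)
		return false;
	/* The walker has already visited 'node'; recurse into children only. */
	return sketch_walk_list(node->args, walker, context);
}
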
diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c
index 16868939405..f3d4e75ece6 100644
--- a/src/backend/optimizer/util/plancat.c
+++ b/src/backend/optimizer/util/plancat.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.114 2005/10/15 02:49:21 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.114.2.1 2005/11/22 18:23:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -132,10 +132,10 @@ get_relation_info(Oid relationObjectId, RelOptInfo *rel)
/*
* Extract info from the relation descriptor for the index.
*
- * Note that we take no lock on the index; we assume our lock on the
- * parent table will protect the index's schema information. When
- * and if the executor actually uses the index, it will take a
- * lock as needed to protect the access to the index contents.
+ * Note that we take no lock on the index; we assume our lock on
+ * the parent table will protect the index's schema information.
+ * When and if the executor actually uses the index, it will take
+ * a lock as needed to protect the access to the index contents.
*/
indexRelation = index_open(indexoid);
index = indexRelation->rd_index;
@@ -265,11 +265,11 @@ estimate_rel_size(Relation rel, int32 *attr_widths,
* infrastructure for redoing cached plans at all, so we have to
* kluge things here instead.
*
- * We approximate "never vacuumed" by "has relpages = 0", which means
- * this will also fire on genuinely empty relations. Not great,
- * but fortunately that's a seldom-seen case in the real world,
- * and it shouldn't degrade the quality of the plan too much
- * anyway to err in this direction.
+ * We approximate "never vacuumed" by "has relpages = 0", which
+ * means this will also fire on genuinely empty relations. Not
+ * great, but fortunately that's a seldom-seen case in the real
+ * world, and it shouldn't degrade the quality of the plan too
+ * much anyway to err in this direction.
*/
if (curpages < 10 && rel->rd_rel->relpages == 0)
curpages = 10;
diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c
index 3ca43759e96..20b656e0b30 100644
--- a/src/backend/optimizer/util/relnode.c
+++ b/src/backend/optimizer/util/relnode.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.72 2005/10/15 02:49:21 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.72.2.1 2005/11/22 18:23:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -263,9 +263,9 @@ find_join_rel(PlannerInfo *root, Relids relids)
/*
* Use either hashtable lookup or linear search, as appropriate.
*
- * Note: the seemingly redundant hashkey variable is used to avoid taking the
- * address of relids; unless the compiler is exceedingly smart, doing so
- * would force relids out of a register and thus probably slow down the
+ * Note: the seemingly redundant hashkey variable is used to avoid taking
+ * the address of relids; unless the compiler is exceedingly smart, doing
+ * so would force relids out of a register and thus probably slow down the
* list-search case.
*/
if (root->join_rel_hash)
@@ -533,8 +533,8 @@ build_joinrel_restrictlist(PlannerInfo *root,
/*
* Eliminate duplicate and redundant clauses.
*
- * We must eliminate duplicates, since we will see many of the same clauses
- * arriving from both input relations. Also, if a clause is a
+ * We must eliminate duplicates, since we will see many of the same
+ * clauses arriving from both input relations. Also, if a clause is a
* mergejoinable clause, it's possible that it is redundant with previous
* clauses (see optimizer/README for discussion). We detect that case and
* omit the redundant clause from the result list.
diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c
index 475e51bca6f..076bffd6f12 100644
--- a/src/backend/optimizer/util/restrictinfo.c
+++ b/src/backend/optimizer/util/restrictinfo.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.41.2.2 2005/11/16 17:08:12 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.41.2.3 2005/11/22 18:23:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -43,7 +43,7 @@ static RestrictInfo *join_clause_is_redundant(PlannerInfo *root,
* Build a RestrictInfo node containing the given subexpression.
*
* The is_pushed_down and outerjoin_delayed flags must be supplied by the
- * caller. required_relids can be NULL, in which case it defaults to the
+ * caller. required_relids can be NULL, in which case it defaults to the
* actual clause contents (i.e., clause_relids).
*
* We initialize fields that depend only on the given subexpression, leaving
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 46dbb3f1488..da8629bbb36 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.326 2005/10/15 02:49:21 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.326.2.1 2005/11/22 18:23:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1108,8 +1108,8 @@ transformInhRelation(ParseState *pstate, CreateStmtContext *cxt,
/*
* Create a new inherited column.
*
- * For constraints, ONLY the NOT NULL constraint is inherited by the new
- * column definition per SQL99.
+ * For constraints, ONLY the NOT NULL constraint is inherited by the
+ * new column definition per SQL99.
*/
def = makeNode(ColumnDef);
def->colname = pstrdup(attributeName);
@@ -1353,8 +1353,8 @@ transformIndexConstraints(ParseState *pstate, CreateStmtContext *cxt)
* strict reading of SQL92 would suggest raising an error instead, but
* that strikes me as too anal-retentive. - tgl 2001-02-14
*
- * XXX in ALTER TABLE case, it'd be nice to look for duplicate pre-existing
- * indexes, too.
+ * XXX in ALTER TABLE case, it'd be nice to look for duplicate
+ * pre-existing indexes, too.
*/
cxt->alist = NIL;
if (cxt->pkey != NULL)
@@ -1744,8 +1744,8 @@ transformRuleStmt(ParseState *pstate, RuleStmt *stmt,
* For efficiency's sake, add OLD to the rule action's jointree
* only if it was actually referenced in the statement or qual.
*
- * For INSERT, NEW is not really a relation (only a reference to the
- * to-be-inserted tuple) and should never be added to the
+ * For INSERT, NEW is not really a relation (only a reference to
+ * the to-be-inserted tuple) and should never be added to the
* jointree.
*
* For UPDATE, we treat NEW as being another kind of reference to
@@ -1967,10 +1967,10 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
* make lists of the dummy vars and their names for use in parsing ORDER
* BY.
*
- * Note: we use leftmostRTI as the varno of the dummy variables. It shouldn't
- * matter too much which RT index they have, as long as they have one that
- * corresponds to a real RT entry; else funny things may happen when the
- * tree is mashed by rule rewriting.
+ * Note: we use leftmostRTI as the varno of the dummy variables. It
+ * shouldn't matter too much which RT index they have, as long as they
+ * have one that corresponds to a real RT entry; else funny things may
+ * happen when the tree is mashed by rule rewriting.
*/
qry->targetList = NIL;
targetvars = NIL;
@@ -2005,9 +2005,9 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
/*
* Handle SELECT INTO/CREATE TABLE AS.
*
- * Any column names from CREATE TABLE AS need to be attached to both the top
- * level and the leftmost subquery. We do not do this earlier because we
- * do *not* want the targetnames list to be affected.
+ * Any column names from CREATE TABLE AS need to be attached to both the
+ * top level and the leftmost subquery. We do not do this earlier because
+ * we do *not* want the targetnames list to be affected.
*/
qry->into = into;
if (intoColNames)
@@ -2022,9 +2022,9 @@ transformSetOperationStmt(ParseState *pstate, SelectStmt *stmt)
* output columns visible. A Join RTE node is handy for this, since we
* can easily control the Vars generated upon matches.
*
- * Note: we don't yet do anything useful with such cases, but at least "ORDER
- * BY upper(foo)" will draw the right error message rather than "foo not
- * found".
+ * Note: we don't yet do anything useful with such cases, but at least
+ * "ORDER BY upper(foo)" will draw the right error message rather than
+ * "foo not found".
*/
jrte = addRangeTableEntryForJoin(NULL,
targetnames,
@@ -2140,8 +2140,8 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt)
/*
* Transform SelectStmt into a Query.
*
- * Note: previously transformed sub-queries don't affect the parsing of
- * this sub-query, because they are not in the toplevel pstate's
+ * Note: previously transformed sub-queries don't affect the parsing
+ * of this sub-query, because they are not in the toplevel pstate's
* namespace list.
*/
selectList = parse_sub_analyze((Node *) stmt, pstate);
diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c
index 95e1045ba2d..173d1723b3f 100644
--- a/src/backend/parser/parse_clause.c
+++ b/src/backend/parser/parse_clause.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_clause.c,v 1.143 2005/10/15 02:49:22 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_clause.c,v 1.143.2.1 2005/11/22 18:23:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -172,9 +172,9 @@ setTargetTable(ParseState *pstate, RangeVar *relation,
* instead mark target table as requiring exactly the specified
* permissions.
*
- * If we find an explicit reference to the rel later during parse analysis,
- * scanRTEForColumn will add the ACL_SELECT bit back again. That can't
- * happen for INSERT but it is possible for UPDATE and DELETE.
+ * If we find an explicit reference to the rel later during parse
+ * analysis, scanRTEForColumn will add the ACL_SELECT bit back again. That
+ * can't happen for INSERT but it is possible for UPDATE and DELETE.
*/
rte->requiredPerms = requiredPerms;
@@ -462,8 +462,8 @@ transformRangeSubselect(ParseState *pstate, RangeSubselect *r)
* XXX this will need further work to support SQL99's LATERAL() feature,
* wherein such references would indeed be legal.
*
- * We can skip groveling through the subquery if there's not anything visible
- * in the current query. Also note that outer references are OK.
+ * We can skip groveling through the subquery if there's not anything
+ * visible in the current query. Also note that outer references are OK.
*/
if (pstate->p_relnamespace || pstate->p_varnamespace)
{
@@ -1193,8 +1193,8 @@ findTargetlistEntry(ParseState *pstate, Node *node, List **tlist, int clause)
* than one column name exposed by FROM, colNameToVar will
* ereport(ERROR). That's just what we want here.
*
- * Small tweak for 7.4.3: ignore matches in upper query levels. This
- * effectively changes the search order for bare names to (1)
+ * Small tweak for 7.4.3: ignore matches in upper query levels.
+ * This effectively changes the search order for bare names to (1)
* local FROM variables, (2) local targetlist aliases, (3) outer
* FROM variables, whereas before it was (1) (3) (2). SQL92 and
* SQL99 do not allow GROUPing BY an outer reference, so this
@@ -1474,9 +1474,9 @@ transformDistinctClause(ParseState *pstate, List *distinctlist,
* DISTINCT values to the sort list, much as we did above for ordinary
* DISTINCT fields.
*
- * Actually, it'd be OK for the common prefixes of the two lists to match
- * in any order, but implementing that check seems like more trouble
- * than it's worth.
+ * Actually, it'd be OK for the common prefixes of the two lists to
+ * match in any order, but implementing that check seems like more
+ * trouble than it's worth.
*/
ListCell *nextsortlist = list_head(*sortClause);
diff --git a/src/backend/parser/parse_coerce.c b/src/backend/parser/parse_coerce.c
index 3bee3c31ad5..c3f60292c3e 100644
--- a/src/backend/parser/parse_coerce.c
+++ b/src/backend/parser/parse_coerce.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_coerce.c,v 2.132 2005/10/15 02:49:22 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_coerce.c,v 2.132.2.1 2005/11/22 18:23:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -180,9 +180,9 @@ coerce_type(ParseState *pstate, Node *node,
* length checks, which is not always what we want here. Any
* length constraint will be applied later by our caller.
*
- * Note that we call stringTypeDatum using the domain's pg_type row,
- * if it's a domain. This works because the domain row has the
- * same typinput and typelem as the base type --- ugly...
+ * Note that we call stringTypeDatum using the domain's pg_type
+ * row, if it's a domain. This works because the domain row has
+ * the same typinput and typelem as the base type --- ugly...
*/
newcon->constvalue = stringTypeDatum(targetType, val, -1);
}
@@ -1670,12 +1670,12 @@ find_coercion_pathway(Oid targetTypeId, Oid sourceTypeId,
* array types. If so, and if the element types have a suitable cast,
* use array_type_coerce() or array_type_length_coerce().
*
- * Hack: disallow coercions to oidvector and int2vector, which otherwise
- * tend to capture coercions that should go to "real" array types. We
- * want those types to be considered "real" arrays for many purposes,
- * but not this one. (Also, array_type_coerce isn't guaranteed to
- * produce an output that meets the restrictions of these datatypes,
- * such as being 1-dimensional.)
+ * Hack: disallow coercions to oidvector and int2vector, which
+ * otherwise tend to capture coercions that should go to "real" array
+ * types. We want those types to be considered "real" arrays for many
+ * purposes, but not this one. (Also, array_type_coerce isn't
+ * guaranteed to produce an output that meets the restrictions of
+ * these datatypes, such as being 1-dimensional.)
*/
Oid targetElemType;
Oid sourceElemType;
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index 9146887adc3..7045c3af5b5 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.185.2.1 2005/11/18 23:08:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.185.2.2 2005/11/22 18:23:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -365,10 +365,10 @@ transformColumnRef(ParseState *pstate, ColumnRef *cref)
/*
* Not known as a column of any range-table entry.
*
- * Consider the possibility that it's VALUE in a domain check
- * expression. (We handle VALUE as a name, not a keyword,
- * to avoid breaking a lot of applications that have used
- * VALUE as a column name in the past.)
+ * Consider the possibility that it's VALUE in a domain
+ * check expression. (We handle VALUE as a name, not a
+ * keyword, to avoid breaking a lot of applications that
+ * have used VALUE as a column name in the past.)
*/
if (pstate->p_value_substitute != NULL &&
strcmp(name, "value") == 0)
@@ -781,8 +781,8 @@ transformFuncCall(ParseState *pstate, FuncCall *fn)
* Transform the list of arguments. We use a shallow list copy and then
* transform-in-place to avoid O(N^2) behavior from repeated lappend's.
*
- * XXX: repeated lappend() would no longer result in O(n^2) behavior; worth
- * reconsidering this design?
+ * XXX: repeated lappend() would no longer result in O(n^2) behavior;
+ * worth reconsidering this design?
*/
targs = list_copy(fn->args);
foreach(args, targs)
diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c
index a3a42326487..8c15203433e 100644
--- a/src/backend/parser/parse_func.c
+++ b/src/backend/parser/parse_func.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_func.c,v 1.182 2005/10/15 02:49:22 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_func.c,v 1.182.2.1 2005/11/22 18:23:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -88,11 +88,11 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs,
/*
* Extract arg type info in preparation for function lookup.
*
- * If any arguments are Param markers of type VOID, we discard them from the
- * parameter list. This is a hack to allow the JDBC driver to not have to
- * distinguish "input" and "output" parameter symbols while parsing
- * function-call constructs. We can't use foreach() because we may modify
- * the list ...
+ * If any arguments are Param markers of type VOID, we discard them from
+ * the parameter list. This is a hack to allow the JDBC driver to not
+ * have to distinguish "input" and "output" parameter symbols while
+ * parsing function-call constructs. We can't use foreach() because we
+ * may modify the list ...
*/
nargs = 0;
for (l = list_head(fargs); l != NULL; l = nextl)
@@ -502,10 +502,10 @@ func_select_candidate(int nargs,
/*
* Still too many candidates? Try assigning types for the unknown columns.
*
- * NOTE: for a binary operator with one unknown and one non-unknown input, we
- * already tried the heuristic of looking for a candidate with the known
- * input type on both sides (see binary_oper_exact()). That's essentially
- * a special case of the general algorithm we try next.
+ * NOTE: for a binary operator with one unknown and one non-unknown input,
+ * we already tried the heuristic of looking for a candidate with the
+ * known input type on both sides (see binary_oper_exact()). That's
+ * essentially a special case of the general algorithm we try next.
*
* We do this by examining each unknown argument position to see if we can
* determine a "type category" for it. If any candidate has an input
@@ -518,10 +518,10 @@ func_select_candidate(int nargs,
* If we are able to determine a type category, also notice whether any of
* the candidates takes a preferred datatype within the category.
*
- * Having completed this examination, remove candidates that accept the wrong
- * category at any unknown position. Also, if at least one candidate
- * accepted a preferred type at a position, remove candidates that accept
- * non-preferred types.
+ * Having completed this examination, remove candidates that accept the
+ * wrong category at any unknown position. Also, if at least one
+ * candidate accepted a preferred type at a position, remove candidates
+ * that accept non-preferred types.
*
* If we are down to one candidate at the end, we win.
*/
@@ -708,20 +708,20 @@ func_get_detail(List *funcname,
* as "text(name(varchar))" which the code below this point is
* entirely capable of selecting.
*
- * "Trivial" coercions are ones that involve binary-compatible types and
- * ones that are coercing a previously-unknown-type literal constant
- * to a specific type.
+ * "Trivial" coercions are ones that involve binary-compatible types
+ * and ones that are coercing a previously-unknown-type literal
+ * constant to a specific type.
*
* The reason we can restrict our check to binary-compatible coercions
* here is that we expect non-binary-compatible coercions to have an
* implementation function named after the target type. That function
* will be found by normal lookup if appropriate.
*
- * NB: it's important that this code stays in sync with what coerce_type
- * can do, because the caller will try to apply coerce_type if we
- * return FUNCDETAIL_COERCION. If we return that result for something
- * coerce_type can't handle, we'll cause infinite recursion between
- * this module and coerce_type!
+ * NB: it's important that this code stays in sync with what
+ * coerce_type can do, because the caller will try to apply
+ * coerce_type if we return FUNCDETAIL_COERCION. If we return that
+ * result for something coerce_type can't handle, we'll cause infinite
+ * recursion between this module and coerce_type!
*/
if (nargs == 1 && fargs != NIL)
{
@@ -983,9 +983,9 @@ ParseComplexProjection(ParseState *pstate, char *funcname, Node *first_arg)
/*
* Else do it the hard way with get_expr_result_type().
*
- * If it's a Var of type RECORD, we have to work even harder: we have to find
- * what the Var refers to, and pass that to get_expr_result_type. That
- * task is handled by expandRecordVariable().
+ * If it's a Var of type RECORD, we have to work even harder: we have to
+ * find what the Var refers to, and pass that to get_expr_result_type.
+ * That task is handled by expandRecordVariable().
*/
if (IsA(first_arg, Var) &&
((Var *) first_arg)->vartype == RECORDOID)
diff --git a/src/backend/parser/parse_oper.c b/src/backend/parser/parse_oper.c
index 764f729529f..6329ae5f927 100644
--- a/src/backend/parser/parse_oper.c
+++ b/src/backend/parser/parse_oper.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_oper.c,v 1.82 2005/10/15 02:49:22 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_oper.c,v 1.82.2.1 2005/11/22 18:23:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -702,9 +702,9 @@ left_oper(List *op, Oid arg, bool noError)
* First, quickly check to see if there is an exactly matching
* operator (there can be only one such entry in the list).
*
- * The returned list has args in the form (0, oprright). Move the useful
- * data into args[0] to keep oper_select_candidate simple. XXX we are
- * assuming here that we may scribble on the list!
+ * The returned list has args in the form (0, oprright). Move the
+ * useful data into args[0] to keep oper_select_candidate simple. XXX
+ * we are assuming here that we may scribble on the list!
*/
FuncCandidateList clisti;
diff --git a/src/backend/parser/parse_relation.c b/src/backend/parser/parse_relation.c
index efa9b49931c..2ed0a523501 100644
--- a/src/backend/parser/parse_relation.c
+++ b/src/backend/parser/parse_relation.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_relation.c,v 1.116 2005/10/26 19:21:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_relation.c,v 1.116.2.1 2005/11/22 18:23:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -301,10 +301,10 @@ scanRTEForColumn(ParseState *pstate, RangeTblEntry *rte, char *colname)
* will be empty strings that cannot match any legal SQL identifier, so we
* don't bother to test for that case here.
*
- * Should this somehow go wrong and we try to access a dropped column, we'll
- * still catch it by virtue of the checks in get_rte_attribute_type(),
- * which is called by make_var(). That routine has to do a cache lookup
- * anyway, so the check there is cheap.
+ * Should this somehow go wrong and we try to access a dropped column,
+ * we'll still catch it by virtue of the checks in
+ * get_rte_attribute_type(), which is called by make_var(). That routine
+ * has to do a cache lookup anyway, so the check there is cheap.
*/
foreach(c, rte->eref->colnames)
{
@@ -1007,9 +1007,9 @@ addImplicitRTE(ParseState *pstate, RangeVar *relation)
/*
* Note that we set inFromCl true, so that the RTE will be listed
- * explicitly if the parsetree is ever decompiled by ruleutils.c.
- * This provides a migration path for views/rules that were originally
- * written with implicit-RTE syntax.
+ * explicitly if the parsetree is ever decompiled by ruleutils.c. This
+ * provides a migration path for views/rules that were originally written
+ * with implicit-RTE syntax.
*/
rte = addRangeTableEntry(pstate, relation, NULL, false, true);
/* Add to joinlist and relnamespace, but not varnamespace */
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index 88c29ebf1e4..4290045baab 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_target.c,v 1.138 2005/10/15 02:49:22 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_target.c,v 1.138.2.1 2005/11/22 18:23:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -808,9 +808,9 @@ ExpandIndirectionStar(ParseState *pstate, A_Indirection *ind)
* lookup_rowtype_tupdesc(), which will almost certainly fail as well, but
* it will give an appropriate error message.
*
- * If it's a Var of type RECORD, we have to work even harder: we have to find
- * what the Var refers to, and pass that to get_expr_result_type. That
- * task is handled by expandRecordVariable().
+ * If it's a Var of type RECORD, we have to work even harder: we have to
+ * find what the Var refers to, and pass that to get_expr_result_type.
+ * That task is handled by expandRecordVariable().
*/
if (IsA(expr, Var) &&
((Var *) expr)->vartype == RECORDOID)
diff --git a/src/backend/port/beos/support.c b/src/backend/port/beos/support.c
index 228889f68ea..820cb80f3a3 100644
--- a/src/backend/port/beos/support.c
+++ b/src/backend/port/beos/support.c
@@ -168,7 +168,7 @@ beos_startup(int argc, char **argv)
/* Main server loop */
for (;;)
{
- int32 opcode = 0;
+ int32 opcode = 0;
char datas[4000];
/*
diff --git a/src/backend/port/posix_sema.c b/src/backend/port/posix_sema.c
index 2024b3ebdea..8d951f21837 100644
--- a/src/backend/port/posix_sema.c
+++ b/src/backend/port/posix_sema.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/posix_sema.c,v 1.14 2005/10/15 02:49:22 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/port/posix_sema.c,v 1.14.2.1 2005/11/22 18:23:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -246,19 +246,20 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
* from the operation prematurely because we were sent a signal. So we
* try and lock the semaphore again.
*
- * Each time around the loop, we check for a cancel/die interrupt. We assume
- * that if such an interrupt comes in while we are waiting, it will cause
- * the sem_wait() call to exit with errno == EINTR, so that we will be
- * able to service the interrupt (if not in a critical section already).
+ * Each time around the loop, we check for a cancel/die interrupt. We
+ * assume that if such an interrupt comes in while we are waiting, it will
+ * cause the sem_wait() call to exit with errno == EINTR, so that we will
+ * be able to service the interrupt (if not in a critical section
+ * already).
*
* Once we acquire the lock, we do NOT check for an interrupt before
* returning. The caller needs to be able to record ownership of the lock
* before any interrupt can be accepted.
*
- * There is a window of a few instructions between CHECK_FOR_INTERRUPTS and
- * entering the sem_wait() call. If a cancel/die interrupt occurs in that
- * window, we would fail to notice it until after we acquire the lock (or
- * get another interrupt to escape the sem_wait()). We can avoid this
+ * There is a window of a few instructions between CHECK_FOR_INTERRUPTS
+ * and entering the sem_wait() call. If a cancel/die interrupt occurs in
+ * that window, we would fail to notice it until after we acquire the lock
+ * (or get another interrupt to escape the sem_wait()). We can avoid this
* problem by temporarily setting ImmediateInterruptOK to true before we
* do CHECK_FOR_INTERRUPTS; then, a die() interrupt in this interval will
* execute directly. However, there is a huge pitfall: there is another
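
(Not part of the commit.) The reflowed comment above describes the pattern PGSemaphoreLock is built around: a wait that gets kicked out by a signal returns with errno == EINTR and is simply retried, with interrupt servicing between attempts. Below is a minimal stand-alone sketch of just that retry loop, using plain POSIX sem_wait() and leaving out the PostgreSQL-specific CHECK_FOR_INTERRUPTS/ImmediateInterruptOK machinery.

#include <errno.h>
#include <semaphore.h>

/* Sketch only: retry sem_wait() when a signal interrupts it. */
static int
sem_lock_retry(sem_t *sem)
{
    for (;;)
    {
        if (sem_wait(sem) == 0)
            return 0;               /* semaphore acquired */
        if (errno != EINTR)
            return -1;              /* real failure: report it */
        /* interrupted by a signal: service pending interrupts here
         * (CHECK_FOR_INTERRUPTS() in the real code), then wait again */
    }
}
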
diff --git a/src/backend/port/sysv_sema.c b/src/backend/port/sysv_sema.c
index d42e8c87684..2575f65c73c 100644
--- a/src/backend/port/sysv_sema.c
+++ b/src/backend/port/sysv_sema.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/sysv_sema.c,v 1.17 2005/10/15 02:49:22 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/port/sysv_sema.c,v 1.17.2.1 2005/11/22 18:23:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -378,19 +378,19 @@ PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
* from the operation prematurely because we were sent a signal. So we
* try and lock the semaphore again.
*
- * Each time around the loop, we check for a cancel/die interrupt. We assume
- * that if such an interrupt comes in while we are waiting, it will cause
- * the semop() call to exit with errno == EINTR, so that we will be able
- * to service the interrupt (if not in a critical section already).
+ * Each time around the loop, we check for a cancel/die interrupt. We
+ * assume that if such an interrupt comes in while we are waiting, it will
+ * cause the semop() call to exit with errno == EINTR, so that we will be
+ * able to service the interrupt (if not in a critical section already).
*
* Once we acquire the lock, we do NOT check for an interrupt before
* returning. The caller needs to be able to record ownership of the lock
* before any interrupt can be accepted.
*
- * There is a window of a few instructions between CHECK_FOR_INTERRUPTS and
- * entering the semop() call. If a cancel/die interrupt occurs in that
- * window, we would fail to notice it until after we acquire the lock (or
- * get another interrupt to escape the semop()). We can avoid this
+ * There is a window of a few instructions between CHECK_FOR_INTERRUPTS
+ * and entering the semop() call. If a cancel/die interrupt occurs in
+ * that window, we would fail to notice it until after we acquire the lock
+ * (or get another interrupt to escape the semop()). We can avoid this
* problem by temporarily setting ImmediateInterruptOK to true before we
* do CHECK_FOR_INTERRUPTS; then, a die() interrupt in this interval will
* execute directly. However, there is a huge pitfall: there is another
diff --git a/src/backend/port/win32/signal.c b/src/backend/port/win32/signal.c
index 3204c9c20e1..978e0339f17 100644
--- a/src/backend/port/win32/signal.c
+++ b/src/backend/port/win32/signal.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/signal.c,v 1.14 2005/10/25 15:15:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/signal.c,v 1.14.2.1 2005/11/22 18:23:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,10 +22,10 @@
* pg_signal_mask is only changed by main thread so shouldn't need it.
*/
volatile int pg_signal_queue;
-int pg_signal_mask;
+int pg_signal_mask;
-HANDLE pgwin32_signal_event;
-HANDLE pgwin32_initial_signal_pipe = INVALID_HANDLE_VALUE;
+HANDLE pgwin32_signal_event;
+HANDLE pgwin32_initial_signal_pipe = INVALID_HANDLE_VALUE;
/*
* pg_signal_crit_sec is used to protect only pg_signal_queue. That is the only
diff --git a/src/backend/port/win32/timer.c b/src/backend/port/win32/timer.c
index b6c0b407e6e..de07a8d97a9 100644
--- a/src/backend/port/win32/timer.c
+++ b/src/backend/port/win32/timer.c
@@ -3,15 +3,15 @@
* timer.c
* Microsoft Windows Win32 Timer Implementation
*
- * Limitations of this implementation:
+ * Limitations of this implementation:
*
- * - Does not support interval timer (value->it_interval)
- * - Only supports ITIMER_REAL
+ * - Does not support interval timer (value->it_interval)
+ * - Only supports ITIMER_REAL
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/timer.c,v 1.6 2005/10/25 15:15:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/timer.c,v 1.6.2.1 2005/11/22 18:23:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -22,11 +22,12 @@
/* Communication area for inter-thread communication */
-typedef struct timerCA {
+typedef struct timerCA
+{
struct itimerval value;
- HANDLE event;
+ HANDLE event;
CRITICAL_SECTION crit_sec;
-} timerCA;
+} timerCA;
static timerCA timerCommArea;
static HANDLE timerThreadHandle = INVALID_HANDLE_VALUE;
@@ -36,7 +37,7 @@ static HANDLE timerThreadHandle = INVALID_HANDLE_VALUE;
static DWORD WINAPI
pg_timer_thread(LPVOID param)
{
- DWORD waittime;
+ DWORD waittime;
Assert(param == NULL);
@@ -44,7 +45,7 @@ pg_timer_thread(LPVOID param)
for (;;)
{
- int r;
+ int r;
r = WaitForSingleObjectEx(timerCommArea.event, waittime, FALSE);
if (r == WAIT_OBJECT_0)
@@ -53,7 +54,7 @@ pg_timer_thread(LPVOID param)
EnterCriticalSection(&timerCommArea.crit_sec);
if (timerCommArea.value.it_value.tv_sec == 0 &&
timerCommArea.value.it_value.tv_usec == 0)
- waittime = INFINITE; /* Cancel the interrupt */
+ waittime = INFINITE; /* Cancel the interrupt */
else
waittime = timerCommArea.value.it_value.tv_usec / 10 + timerCommArea.value.it_value.tv_sec * 1000;
ResetEvent(timerCommArea.event);
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index 1081cf83e9c..ba9b87ed609 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/autovacuum.c,v 1.5 2005/10/15 02:49:23 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/autovacuum.c,v 1.5.2.1 2005/11/22 18:23:15 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -244,8 +244,8 @@ AutoVacMain(int argc, char *argv[])
* backend, so we use the same signal handling. See equivalent code in
* tcop/postgres.c.
*
- * Currently, we don't pay attention to postgresql.conf changes that happen
- * during a single daemon iteration, so we can ignore SIGHUP.
+ * Currently, we don't pay attention to postgresql.conf changes that
+ * happen during a single daemon iteration, so we can ignore SIGHUP.
*/
pqsignal(SIGHUP, SIG_IGN);
@@ -308,9 +308,10 @@ AutoVacMain(int argc, char *argv[])
* recently auto-vacuumed, or one that needs database-wide vacuum (to
* prevent Xid wraparound-related data loss).
*
- * Note that a database with no stats entry is not considered, except for Xid
- * wraparound purposes. The theory is that if no one has ever connected
- * to it since the stats were last initialized, it doesn't need vacuuming.
+ * Note that a database with no stats entry is not considered, except for
+ * Xid wraparound purposes. The theory is that if no one has ever
+ * connected to it since the stats were last initialized, it doesn't need
+ * vacuuming.
*
* XXX This could be improved if we had more info about whether it needs
* vacuuming before connecting to it. Perhaps look through the pgstats
@@ -336,8 +337,8 @@ AutoVacMain(int argc, char *argv[])
* decide to start giving warnings. If any such db is found, we
* ignore all other dbs.
*
- * Unlike vacuum.c, we also look at vacuumxid. This is so that pg_clog
- * can be kept trimmed to a reasonable size.
+ * Unlike vacuum.c, we also look at vacuumxid. This is so that
+ * pg_clog can be kept trimmed to a reasonable size.
*/
freeze_age = (int32) (nextXid - tmp->frozenxid);
vacuum_age = (int32) (nextXid - tmp->vacuumxid);
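
(Not part of the commit.) The freeze_age/vacuum_age computations just above rely on 32-bit transaction IDs wrapping around: the "age" of an XID is the unsigned distance from it to nextXid, reinterpreted as a signed value. A tiny stand-alone illustration of that arithmetic:

#include <stdint.h>

typedef uint32_t TransactionId;     /* 32-bit counter that wraps around */

/* Sketch only: signed distance from xid to nextXid, as in the
 * freeze_age / vacuum_age expressions above. */
static int32_t
xid_age(TransactionId nextXid, TransactionId xid)
{
    return (int32_t) (nextXid - xid);
}
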
@@ -571,10 +572,10 @@ do_autovacuum(PgStat_StatDBEntry *dbentry)
/*
* Scan pg_class and determine which tables to vacuum.
*
- * The stats subsystem collects stats for toast tables independently of the
- * stats for their parent tables. We need to check those stats since in
- * cases with short, wide tables there might be proportionally much more
- * activity in the toast table than in its parent.
+ * The stats subsystem collects stats for toast tables independently of
+ * the stats for their parent tables. We need to check those stats since
+ * in cases with short, wide tables there might be proportionally much
+ * more activity in the toast table than in its parent.
*
* Since we can only issue VACUUM against the parent table, we need to
* transpose a decision to vacuum a toast table into a decision to vacuum
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index 4fea8e0e2f7..76e493dc6e6 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -13,7 +13,7 @@
*
* Copyright (c) 2001-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.111 2005/10/17 16:24:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.111.2.1 2005/11/22 18:23:15 momjian Exp $
* ----------
*/
#include "postgres.h"
@@ -268,7 +268,7 @@ pgstat_init(void)
* On some platforms, pg_getaddrinfo_all() may return multiple addresses
* only one of which will actually work (eg, both IPv6 and IPv4 addresses
* when kernel will reject IPv6). Worse, the failure may occur at the
- * bind() or perhaps even connect() stage. So we must loop through the
+ * bind() or perhaps even connect() stage. So we must loop through the
* results till we find a working combination. We will generate LOG
* messages, but no error, for bogus combinations.
*/
@@ -1493,11 +1493,11 @@ PgstatBufferMain(int argc, char *argv[])
* Start a buffering process to read from the socket, so we have a little
* more time to process incoming messages.
*
- * NOTE: the process structure is: postmaster is parent of buffer process is
- * parent of collector process. This way, the buffer can detect collector
- * failure via SIGCHLD, whereas otherwise it wouldn't notice collector
- * failure until it tried to write on the pipe. That would mean that
- * after the postmaster started a new collector, we'd have two buffer
+ * NOTE: the process structure is: postmaster is parent of buffer process
+ * is parent of collector process. This way, the buffer can detect
+ * collector failure via SIGCHLD, whereas otherwise it wouldn't notice
+ * collector failure until it tried to write on the pipe. That would mean
+ * that after the postmaster started a new collector, we'd have two buffer
* processes competing to read from the UDP socket --- not good.
*/
if (pgpipe(pgStatPipe) < 0)
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
index 3c9c39b03f1..b691e0c4eff 100644
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -37,7 +37,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.475 2005/11/05 03:04:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.475.2.1 2005/11/22 18:23:15 momjian Exp $
*
* NOTES
*
@@ -664,8 +664,8 @@ PostmasterMain(int argc, char *argv[])
/*
* Fork away from controlling terminal, if -S specified.
*
- * Must do this before we grab any interlock files, else the interlocks will
- * show the wrong PID.
+ * Must do this before we grab any interlock files, else the interlocks
+ * will show the wrong PID.
*/
if (SilentMode)
pmdaemonize();
@@ -682,7 +682,7 @@ PostmasterMain(int argc, char *argv[])
CreateDataDirLockFile(true);
/*
- * If timezone is not set, determine what the OS uses. (In theory this
+ * If timezone is not set, determine what the OS uses. (In theory this
* should be done during GUC initialization, but because it can take as
* much as several seconds, we delay it until after we've created the
* postmaster.pid file. This prevents problems with boot scripts that
@@ -906,8 +906,8 @@ PostmasterMain(int argc, char *argv[])
SysLoggerPID = SysLogger_Start();
/*
- * Reset whereToSendOutput from DestDebug (its starting state) to DestNone.
- * This stops ereport from sending log messages to stderr unless
+ * Reset whereToSendOutput from DestDebug (its starting state) to
+ * DestNone. This stops ereport from sending log messages to stderr unless
* Log_destination permits. We don't do this until the postmaster is
* fully launched, since startup failures may as well be reported to
* stderr.
@@ -998,13 +998,14 @@ checkDataDir(void)
/*
* Check if the directory has group or world access. If so, reject.
*
- * It would be possible to allow weaker constraints (for example, allow group
- * access) but we cannot make a general assumption that that is okay; for
- * example there are platforms where nearly all users customarily belong
- * to the same group. Perhaps this test should be configurable.
+ * It would be possible to allow weaker constraints (for example, allow
+ * group access) but we cannot make a general assumption that that is
+ * okay; for example there are platforms where nearly all users
+ * customarily belong to the same group. Perhaps this test should be
+ * configurable.
*
- * XXX temporarily suppress check when on Windows, because there may not be
- * proper support for Unix-y file permissions. Need to think of a
+ * XXX temporarily suppress check when on Windows, because there may not
+ * be proper support for Unix-y file permissions. Need to think of a
* reasonable check to apply on Windows.
*/
#if !defined(WIN32) && !defined(__CYGWIN__)
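
(Not part of the commit.) The checkDataDir comment above is about refusing a data directory that grants any group or world access. The test it guards amounts to a stat() plus a mask against the group/other permission bits; a sketch under that assumption:

#include <sys/stat.h>

/* Sketch only: true if 'path' has no group or world permission bits. */
static int
dir_is_private(const char *path)
{
    struct stat st;

    if (stat(path, &st) != 0)
        return 0;                   /* cannot stat: treat as not private */
    return (st.st_mode & (S_IRWXG | S_IRWXO)) == 0;
}
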
@@ -1165,9 +1166,9 @@ ServerLoop(void)
/*
* Wait for something to happen.
*
- * We wait at most one minute, or the minimum autovacuum delay, to ensure
- * that the other background tasks handled below get done even when no
- * requests are arriving.
+ * We wait at most one minute, or the minimum autovacuum delay, to
+ * ensure that the other background tasks handled below get done even
+ * when no requests are arriving.
*/
memcpy((char *) &rmask, (char *) &readmask, sizeof(fd_set));
@@ -1922,8 +1923,8 @@ pmdie(SIGNAL_ARGS)
/*
* Fast Shutdown:
*
- * Abort all children with SIGTERM (rollback active transactions and
- * exit) and shut down when they are gone.
+ * Abort all children with SIGTERM (rollback active transactions
+ * and exit) and shut down when they are gone.
*/
if (Shutdown >= FastShutdown)
break;
@@ -1948,8 +1949,8 @@ pmdie(SIGNAL_ARGS)
/*
* No children left. Begin shutdown of data base system.
*
- * Note: if we previously got SIGTERM then we may send SIGUSR2 to the
- * bgwriter a second time here. This should be harmless.
+ * Note: if we previously got SIGTERM then we may send SIGUSR2 to
+ * the bgwriter a second time here. This should be harmless.
*/
if (StartupPID != 0 || FatalError)
break; /* let reaper() handle this */
@@ -2109,10 +2110,10 @@ reaper(SIGNAL_ARGS)
* that it wrote a shutdown checkpoint. (If for some reason
* it didn't, recovery will occur on next postmaster start.)
*
- * Note: we do not wait around for exit of the archiver or stats
- * processes. They've been sent SIGQUIT by this point, and in
- * any case contain logic to commit hara-kiri if they notice
- * the postmaster is gone.
+ * Note: we do not wait around for exit of the archiver or
+ * stats processes. They've been sent SIGQUIT by this point,
+ * and in any case contain logic to commit hara-kiri if they
+ * notice the postmaster is gone.
*/
ExitPostmaster(0);
}
@@ -2333,10 +2334,10 @@ HandleChildCrash(int pid, int exitstatus, const char *procname)
* This backend is still alive. Unless we did so already, tell it
* to commit hara-kiri.
*
- * SIGQUIT is the special signal that says exit without proc_exit and
- * let the user know what's going on. But if SendStop is set (-s
- * on command line), then we send SIGSTOP instead, so that we can
- * get core dumps from all backends by hand.
+ * SIGQUIT is the special signal that says exit without proc_exit
+ * and let the user know what's going on. But if SendStop is set
+ * (-s on command line), then we send SIGSTOP instead, so that we
+ * can get core dumps from all backends by hand.
*/
if (!FatalError)
{
@@ -2653,7 +2654,7 @@ BackendRun(Port *port)
* Must do this now because authentication uses libpq to send messages.
*/
pq_init(); /* initialize libpq to talk to client */
- whereToSendOutput = DestRemote; /* now safe to ereport to client */
+ whereToSendOutput = DestRemote; /* now safe to ereport to client */
/*
* We arrange for a simple exit(0) if we receive SIGTERM or SIGQUIT during
@@ -2674,7 +2675,7 @@ BackendRun(Port *port)
if (pg_getnameinfo_all(&port->raddr.addr, port->raddr.salen,
remote_host, sizeof(remote_host),
remote_port, sizeof(remote_port),
- (log_hostname ? 0 : NI_NUMERICHOST) | NI_NUMERICSERV))
+ (log_hostname ? 0 : NI_NUMERICHOST) | NI_NUMERICSERV))
{
int ret = pg_getnameinfo_all(&port->raddr.addr, port->raddr.salen,
remote_host, sizeof(remote_host),
diff --git a/src/backend/postmaster/syslogger.c b/src/backend/postmaster/syslogger.c
index b2e3add6a8f..07c705896d6 100644
--- a/src/backend/postmaster/syslogger.c
+++ b/src/backend/postmaster/syslogger.c
@@ -18,7 +18,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/syslogger.c,v 1.20 2005/10/15 02:49:24 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/syslogger.c,v 1.20.2.1 2005/11/22 18:23:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -388,13 +388,13 @@ SysLogger_Start(void)
* If first time through, create the pipe which will receive stderr
* output.
*
- * If the syslogger crashes and needs to be restarted, we continue to use the
- * same pipe (indeed must do so, since extant backends will be writing
+ * If the syslogger crashes and needs to be restarted, we continue to use
+ * the same pipe (indeed must do so, since extant backends will be writing
* into that pipe).
*
- * This means the postmaster must continue to hold the read end of the pipe
- * open, so we can pass it down to the reincarnated syslogger. This is a
- * bit klugy but we have little choice.
+ * This means the postmaster must continue to hold the read end of the
+ * pipe open, so we can pass it down to the reincarnated syslogger. This
+ * is a bit klugy but we have little choice.
*/
#ifndef WIN32
if (syslogPipe[0] < 0)
diff --git a/src/backend/regex/regc_locale.c b/src/backend/regex/regc_locale.c
index 75f32730497..cbfc5287c60 100644
--- a/src/backend/regex/regc_locale.c
+++ b/src/backend/regex/regc_locale.c
@@ -47,7 +47,7 @@
* permission to use and distribute the software in accordance with the
* terms specified in this license.
*
- * $PostgreSQL: pgsql/src/backend/regex/regc_locale.c,v 1.7 2005/10/15 02:49:24 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/regex/regc_locale.c,v 1.7.2.1 2005/11/22 18:23:16 momjian Exp $
*/
/* ASCII character-name table */
@@ -655,7 +655,8 @@ cclass(struct vars * v, /* context */
/*
* Now compute the character class contents.
*
- * For the moment, assume that only char codes < 256 can be in these classes.
+ * For the moment, assume that only char codes < 256 can be in these
+ * classes.
*/
switch ((enum classes) index)
diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c
index 483afd395f1..55c6cc5345c 100644
--- a/src/backend/rewrite/rewriteDefine.c
+++ b/src/backend/rewrite/rewriteDefine.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteDefine.c,v 1.107 2005/10/18 01:06:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteDefine.c,v 1.107.2.1 2005/11/22 18:23:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -462,8 +462,9 @@ DefineQueryRewrite(RuleStmt *stmt)
* appropriate, also modify the 'relkind' field to show that the
* relation is now a view.
*
- * Important side effect: an SI notice is broadcast to force all backends
- * (including me!) to update relcache entries with the new rule.
+ * Important side effect: an SI notice is broadcast to force all
+ * backends (including me!) to update relcache entries with the new
+ * rule.
*/
SetRelationRuleStatus(ev_relid, true, RelisBecomingView);
}
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 3513cf67c4b..c10d4c923c4 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteHandler.c,v 1.158 2005/10/15 02:49:24 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteHandler.c,v 1.158.2.1 2005/11/22 18:23:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -121,11 +121,12 @@ AcquireRewriteLocks(Query *parsetree)
* release it until end of transaction. This protects the
* rewriter and planner against schema changes mid-query.
*
- * If the relation is the query's result relation, then we need
- * RowExclusiveLock. Otherwise, check to see if the relation
- * is accessed FOR UPDATE/SHARE or not. We can't just grab
- * AccessShareLock because then the executor would be trying
- * to upgrade the lock, leading to possible deadlocks.
+ * If the relation is the query's result relation, then we
+ * need RowExclusiveLock. Otherwise, check to see if the
+ * relation is accessed FOR UPDATE/SHARE or not. We can't
+ * just grab AccessShareLock because then the executor would
+ * be trying to upgrade the lock, leading to possible
+ * deadlocks.
*/
if (rt_index == parsetree->resultRelation)
lockmode = RowExclusiveLock;
@@ -288,8 +289,8 @@ rewriteRuleAction(Query *parsetree,
* Adjust rule action and qual to offset its varnos, so that we can merge
* its rtable with the main parsetree's rtable.
*
- * If the rule action is an INSERT...SELECT, the OLD/NEW rtable entries will
- * be in the SELECT part, and we have to modify that rather than the
+ * If the rule action is an INSERT...SELECT, the OLD/NEW rtable entries
+ * will be in the SELECT part, and we have to modify that rather than the
* top-level INSERT (kluge!).
*/
sub_action = getInsertSelectQuery(rule_action, &sub_action_ptr);
@@ -308,12 +309,12 @@ rewriteRuleAction(Query *parsetree,
* action. Some of the entries may be unused after we finish rewriting,
* but we leave them all in place for two reasons:
*
- * We'd have a much harder job to adjust the query's varnos if we selectively
- * removed RT entries.
+ * We'd have a much harder job to adjust the query's varnos if we
+ * selectively removed RT entries.
*
- * If the rule is INSTEAD, then the original query won't be executed at all,
- * and so its rtable must be preserved so that the executor will do the
- * correct permissions checks on it.
+ * If the rule is INSTEAD, then the original query won't be executed at
+ * all, and so its rtable must be preserved so that the executor will do
+ * the correct permissions checks on it.
*
* RT entries that are not referenced in the completed jointree will be
* ignored by the planner, so they do not affect query semantics. But any
@@ -322,13 +323,13 @@ rewriteRuleAction(Query *parsetree,
* caller has, say, insert-permission on a view, when the view is not
* semantically referenced at all in the resulting query.
*
- * When a rule is not INSTEAD, the permissions checks done on its copied RT
- * entries will be redundant with those done during execution of the
+ * When a rule is not INSTEAD, the permissions checks done on its copied
+ * RT entries will be redundant with those done during execution of the
* original query, but we don't bother to treat that case differently.
*
- * NOTE: because planner will destructively alter rtable, we must ensure that
- * rule action's rtable is separate and shares no substructure with the
- * main rtable. Hence do a deep copy here.
+ * NOTE: because planner will destructively alter rtable, we must ensure
+ * that rule action's rtable is separate and shares no substructure with
+ * the main rtable. Hence do a deep copy here.
*/
sub_action->rtable = list_concat((List *) copyObject(parsetree->rtable),
sub_action->rtable);
@@ -344,8 +345,8 @@ rewriteRuleAction(Query *parsetree,
* don't want the original rtindex to be joined twice, however, so avoid
* keeping it if the rule action mentions it.
*
- * As above, the action's jointree must not share substructure with the main
- * parsetree's.
+ * As above, the action's jointree must not share substructure with the
+ * main parsetree's.
*/
if (sub_action->commandType != CMD_UTILITY)
{
@@ -389,9 +390,9 @@ rewriteRuleAction(Query *parsetree,
* Rewrite new.attribute w/ right hand side of target-list entry for
* appropriate field name in insert/update.
*
- * KLUGE ALERT: since ResolveNew returns a mutated copy, we can't just apply
- * it to sub_action; we have to remember to update the sublink inside
- * rule_action, too.
+ * KLUGE ALERT: since ResolveNew returns a mutated copy, we can't just
+ * apply it to sub_action; we have to remember to update the sublink
+ * inside rule_action, too.
*/
if ((event == CMD_INSERT || event == CMD_UPDATE) &&
sub_action->commandType != CMD_UTILITY)
@@ -532,8 +533,8 @@ rewriteTargetList(Query *parsetree, Relation target_relation)
* Copy all resjunk tlist entries to junk_tlist, and assign them
* resnos above the last real resno.
*
- * Typical junk entries include ORDER BY or GROUP BY expressions (are
- * these actually possible in an INSERT or UPDATE?), system
+ * Typical junk entries include ORDER BY or GROUP BY expressions
+ * (are these actually possible in an INSERT or UPDATE?), system
* attribute references, etc.
*/
@@ -1561,8 +1562,8 @@ QueryRewrite(Query *parsetree)
/*
* Step 3
*
- * Determine which, if any, of the resulting queries is supposed to set the
- * command-result tag; and update the canSetTag fields accordingly.
+ * Determine which, if any, of the resulting queries is supposed to set
+ * the command-result tag; and update the canSetTag fields accordingly.
*
* If the original query is still in the list, it sets the command tag.
* Otherwise, the last INSTEAD query of the same kind as the original is
diff --git a/src/backend/rewrite/rewriteManip.c b/src/backend/rewrite/rewriteManip.c
index 9e6bc4808e7..9d1d1d3acbe 100644
--- a/src/backend/rewrite/rewriteManip.c
+++ b/src/backend/rewrite/rewriteManip.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteManip.c,v 1.92 2005/10/15 02:49:24 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteManip.c,v 1.92.2.1 2005/11/22 18:23:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -715,11 +715,11 @@ AddQual(Query *parsetree, Node *qual)
/*
* There's noplace to put the qual on a utility statement.
*
- * If it's a NOTIFY, silently ignore the qual; this means that the NOTIFY
- * will execute, whether or not there are any qualifying rows. While
- * clearly wrong, this is much more useful than refusing to execute
- * the rule at all, and extra NOTIFY events are harmless for typical
- * uses of NOTIFY.
+ * If it's a NOTIFY, silently ignore the qual; this means that the
+ * NOTIFY will execute, whether or not there are any qualifying rows.
+ * While clearly wrong, this is much more useful than refusing to
+ * execute the rule at all, and extra NOTIFY events are harmless for
+ * typical uses of NOTIFY.
*
* If it isn't a NOTIFY, error out, since unconditional execution of
* other utility stmts is unlikely to be wanted. (This case is not
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index c556c5d91fa..1204bbfb07d 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.198.2.1 2005/11/17 17:42:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.198.2.2 2005/11/22 18:23:17 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -86,7 +86,7 @@ static volatile BufferDesc *PinCountWaitBuf = NULL;
static bool PinBuffer(volatile BufferDesc *buf);
static void PinBuffer_Locked(volatile BufferDesc *buf);
static void UnpinBuffer(volatile BufferDesc *buf,
- bool fixOwner, bool normalAccess);
+ bool fixOwner, bool normalAccess);
static bool SyncOneBuffer(int buf_id, bool skip_pinned);
static void WaitIO(volatile BufferDesc *buf);
static bool StartBufferIO(volatile BufferDesc *buf, bool forInput);
@@ -178,11 +178,12 @@ ReadBuffer(Relation reln, BlockNumber blockNum)
* page but its contents are not yet valid. IO_IN_PROGRESS is set for it,
* if it's a shared buffer.
*
- * Note: if smgrextend fails, we will end up with a buffer that is allocated
- * but not marked BM_VALID. P_NEW will still select the same block number
- * (because the relation didn't get any longer on disk) and so future
- * attempts to extend the relation will find the same buffer (if it's not
- * been recycled) but come right back here to try smgrextend again.
+ * Note: if smgrextend fails, we will end up with a buffer that is
+ * allocated but not marked BM_VALID. P_NEW will still select the same
+ * block number (because the relation didn't get any longer on disk) and
+ * so future attempts to extend the relation will find the same buffer (if
+ * it's not been recycled) but come right back here to try smgrextend
+ * again.
*/
Assert(!(bufHdr->flags & BM_VALID)); /* spinlock not needed */
@@ -982,8 +983,8 @@ SyncOneBuffer(int buf_id, bool skip_pinned)
/*
* Check whether buffer needs writing.
*
- * We can make this check without taking the buffer content lock so long as
- * we mark pages dirty in access methods *before* logging changes with
+ * We can make this check without taking the buffer content lock so long
+ * as we mark pages dirty in access methods *before* logging changes with
* XLogInsert(): if someone marks the buffer dirty just after our check we
* don't worry because our checkpoint.redo points before log record for
* upcoming changes and so we are not required to write such dirty buffer.
@@ -1871,8 +1872,9 @@ WaitIO(volatile BufferDesc *buf)
/*
* Changed to wait until there's no IO - Inoue 01/13/2000
*
- * Note this is *necessary* because an error abort in the process doing I/O
- * could release the io_in_progress_lock prematurely. See AbortBufferIO.
+ * Note this is *necessary* because an error abort in the process doing
+ * I/O could release the io_in_progress_lock prematurely. See
+ * AbortBufferIO.
*/
for (;;)
{
diff --git a/src/backend/storage/buffer/localbuf.c b/src/backend/storage/buffer/localbuf.c
index 52750051946..38fec03419c 100644
--- a/src/backend/storage/buffer/localbuf.c
+++ b/src/backend/storage/buffer/localbuf.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/localbuf.c,v 1.70.2.1 2005/11/17 17:42:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/localbuf.c,v 1.70.2.2 2005/11/22 18:23:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -277,7 +277,7 @@ DropRelFileNodeLocalBuffers(RelFileNode rnode, BlockNumber firstDelBlock)
hresult = (LocalBufferLookupEnt *)
hash_search(LocalBufHash, (void *) &bufHdr->tag,
HASH_REMOVE, NULL);
- if (!hresult) /* shouldn't happen */
+ if (!hresult) /* shouldn't happen */
elog(ERROR, "local buffer hash table corrupted");
/* Mark buffer invalid */
CLEAR_BUFFERTAG(bufHdr->tag);
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 2db12ebd11b..f706e062ffd 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.121 2005/10/15 02:49:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.121.2.1 2005/11/22 18:23:18 momjian Exp $
*
* NOTES:
*
@@ -901,9 +901,9 @@ OpenTemporaryFile(bool interXact)
* We might need to create the pg_tempfiles subdirectory, if no one
* has yet done so.
*
- * Don't check for error from mkdir; it could fail if someone else just
- * did the same thing. If it doesn't work then we'll bomb out on the
- * second create attempt, instead.
+ * Don't check for error from mkdir; it could fail if someone else
+ * just did the same thing. If it doesn't work then we'll bomb out on
+ * the second create attempt, instead.
*/
dirpath = make_database_relative(PG_TEMP_FILES_DIR);
mkdir(dirpath, S_IRWXU);
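
(Not part of the commit.) The comment above captures a small but common idiom: when two processes may race to create the same directory, the loser's mkdir() simply fails (typically with EEXIST) and the following create attempt is what really decides success. A stand-alone sketch; the file-creation flags here are an assumption, not what OpenTemporaryFile actually passes.

#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Sketch only: ignore mkdir() failure and let the open() decide. */
static int
open_temp_in_dir(const char *dirpath, const char *filepath)
{
    (void) mkdir(dirpath, S_IRWXU);     /* may fail harmlessly if it exists */
    return open(filepath, O_RDWR | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
}
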
diff --git a/src/backend/storage/ipc/ipc.c b/src/backend/storage/ipc/ipc.c
index 39e8d3e527a..15bca1bfd78 100644
--- a/src/backend/storage/ipc/ipc.c
+++ b/src/backend/storage/ipc/ipc.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.91 2005/10/15 02:49:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.91.2.1 2005/11/22 18:23:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -100,10 +100,11 @@ proc_exit(int code)
/*
* call all the callbacks registered before calling exit().
*
- * Note that since we decrement on_proc_exit_index each time, if a callback
- * calls ereport(ERROR) or ereport(FATAL) then it won't be invoked again
- * when control comes back here (nor will the previously-completed
- * callbacks). So, an infinite loop should not be possible.
+ * Note that since we decrement on_proc_exit_index each time, if a
+ * callback calls ereport(ERROR) or ereport(FATAL) then it won't be
+ * invoked again when control comes back here (nor will the
+ * previously-completed callbacks). So, an infinite loop should not be
+ * possible.
*/
while (--on_proc_exit_index >= 0)
(*on_proc_exit_list[on_proc_exit_index].function) (code,
@@ -127,8 +128,8 @@ shmem_exit(int code)
/*
* call all the registered callbacks.
*
- * As with proc_exit(), we remove each callback from the list before calling
- * it, to avoid infinite loop in case of error.
+ * As with proc_exit(), we remove each callback from the list before
+ * calling it, to avoid infinite loop in case of error.
*/
while (--on_shmem_exit_index >= 0)
(*on_shmem_exit_list[on_shmem_exit_index].function) (code,
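
(Not part of the commit.) Both reflowed comments in this file describe the same defensive trick: the callback index is decremented before each callback runs, so a callback that throws ereport(ERROR) or ereport(FATAL) and later sends control back here is never invoked a second time. A self-contained sketch of that "pop before you call" loop:

/* Sketch only: LIFO exit callbacks, each removed before it is invoked. */
typedef void (*exit_callback) (int code, void *arg);

struct callback_entry
{
    exit_callback func;
    void       *arg;
};

static struct callback_entry callback_list[32];
static int  callback_index = 0;     /* number of registered callbacks */

static void
run_exit_callbacks(int code)
{
    while (--callback_index >= 0)
        callback_list[callback_index].func(code,
                                           callback_list[callback_index].arg);
}
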
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
index 1387ec6bd9d..2e4fc92e246 100644
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -23,7 +23,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.7 2005/10/15 02:49:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.7.2.1 2005/11/22 18:23:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -293,10 +293,10 @@ TransactionIdIsInProgress(TransactionId xid)
/*
* Step 3: have to check pg_subtrans.
*
- * At this point, we know it's either a subtransaction of one of the Xids in
- * xids[], or it's not running. If it's an already-failed subtransaction,
- * we want to say "not running" even though its parent may still be
- * running. So first, check pg_clog to see if it's been aborted.
+ * At this point, we know it's either a subtransaction of one of the Xids
+ * in xids[], or it's not running. If it's an already-failed
+ * subtransaction, we want to say "not running" even though its parent may
+ * still be running. So first, check pg_clog to see if it's been aborted.
*/
xc_slow_answer_inc();
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 443c153c90a..5e71b489d5a 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.87 2005/10/15 02:49:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.87.2.1 2005/11/22 18:23:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -350,8 +350,8 @@ ShmemInitStruct(const char *name, Size size, bool *foundPtr)
* If the shmem index doesn't exist, we are bootstrapping: we must
* be trying to init the shmem index itself.
*
- * Notice that the ShmemIndexLock is held until the shmem index has
- * been completely initialized.
+ * Notice that the ShmemIndexLock is held until the shmem index
+ * has been completely initialized.
*/
*foundPtr = FALSE;
ShmemIndexAlloc = ShmemAlloc(size);
diff --git a/src/backend/storage/ipc/sinval.c b/src/backend/storage/ipc/sinval.c
index b5efb510d7d..ca8fb8feff7 100644
--- a/src/backend/storage/ipc/sinval.c
+++ b/src/backend/storage/ipc/sinval.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/sinval.c,v 1.78 2005/10/15 02:49:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/sinval.c,v 1.78.2.1 2005/11/22 18:23:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -262,18 +262,18 @@ EnableCatchupInterrupt(void)
* steps. (A very small time window, perhaps, but Murphy's Law says you
* can hit it...) Instead, we first set the enable flag, then test the
* occurred flag. If we see an unserviced interrupt has occurred, we
- * re-clear the enable flag before going off to do the service work.
- * (That prevents re-entrant invocation of ProcessCatchupEvent() if
- * another interrupt occurs.) If an interrupt comes in between the setting
- * and clearing of catchupInterruptEnabled, then it will have done the
- * service work and left catchupInterruptOccurred zero, so we have to
- * check again after clearing enable. The whole thing has to be in a loop
- * in case another interrupt occurs while we're servicing the first. Once
- * we get out of the loop, enable is set and we know there is no
- * unserviced interrupt.
+ * re-clear the enable flag before going off to do the service work. (That
+ * prevents re-entrant invocation of ProcessCatchupEvent() if another
+ * interrupt occurs.) If an interrupt comes in between the setting and
+ * clearing of catchupInterruptEnabled, then it will have done the service
+ * work and left catchupInterruptOccurred zero, so we have to check again
+ * after clearing enable. The whole thing has to be in a loop in case
+ * another interrupt occurs while we're servicing the first. Once we get
+ * out of the loop, enable is set and we know there is no unserviced
+ * interrupt.
*
- * NB: an overenthusiastic optimizing compiler could easily break this code.
- * Hopefully, they all understand what "volatile" means these days.
+ * NB: an overenthusiastic optimizing compiler could easily break this
+ * code. Hopefully, they all understand what "volatile" means these days.
*/
for (;;)
{
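
(Not part of the commit.) The long reflowed comment above describes a specific ordering of two volatile flags. Reduced to its skeleton, with the PostgreSQL-specific service work replaced by a placeholder, it looks like the sketch below; the names mirror the comment, but the function itself is only an illustration.

/* Sketch only: set the enable flag, then test the occurred flag; if an
 * interrupt slipped in, disable again, service it, and re-check. */
static volatile int interruptEnabled = 0;
static volatile int interruptOccurred = 0;

static void
service_interrupt(void)
{
    /* placeholder for ProcessCatchupEvent()-style work */
}

static void
enable_interrupts(void)
{
    for (;;)
    {
        interruptEnabled = 1;       /* enable first ... */
        if (!interruptOccurred)
            break;                  /* ... no unserviced interrupt: done */
        interruptEnabled = 0;       /* block re-entrant servicing */
        interruptOccurred = 0;
        service_interrupt();        /* do the work, then test again */
    }
}
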
@@ -332,10 +332,10 @@ ProcessCatchupEvent(void)
* start and immediately end a transaction; the call to
* AcceptInvalidationMessages() happens down inside transaction start.
*
- * It is awfully tempting to just call AcceptInvalidationMessages() without
- * the rest of the xact start/stop overhead, and I think that would
- * actually work in the normal case; but I am not sure that things would
- * clean up nicely if we got an error partway through.
+ * It is awfully tempting to just call AcceptInvalidationMessages()
+ * without the rest of the xact start/stop overhead, and I think that
+ * would actually work in the normal case; but I am not sure that things
+ * would clean up nicely if we got an error partway through.
*/
if (IsTransactionOrTransactionBlock())
{
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 467bde6c1cc..fe983106889 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.159 2005/11/05 03:04:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.159.2.1 2005/11/22 18:23:18 momjian Exp $
*
* NOTES
* Outside modules can create a lock table and acquire/release
@@ -322,10 +322,10 @@ LockMethodTableInit(const char *tabName,
* allocate a non-shared hash table for LOCALLOCK structs. This is used
* to store lock counts and resource owner information.
*
- * The non-shared table could already exist in this process (this occurs when
- * the postmaster is recreating shared memory after a backend crash). If
- * so, delete and recreate it. (We could simply leave it, since it ought
- * to be empty in the postmaster, but for safety let's zap it.)
+ * The non-shared table could already exist in this process (this occurs
+ * when the postmaster is recreating shared memory after a backend crash).
+ * If so, delete and recreate it. (We could simply leave it, since it
+ * ought to be empty in the postmaster, but for safety let's zap it.)
*/
if (LockMethodLocalHash[lockmethodid])
hash_destroy(LockMethodLocalHash[lockmethodid]);
@@ -534,8 +534,8 @@ LockAcquire(LOCKMETHODID lockmethodid,
/*
* Find or create a lock with this tag.
*
- * Note: if the locallock object already existed, it might have a pointer to
- * the lock already ... but we probably should not assume that that
+ * Note: if the locallock object already existed, it might have a pointer
+ * to the lock already ... but we probably should not assume that that
* pointer is valid, since a lock object with no locks can go away
* anytime.
*/
@@ -818,10 +818,10 @@ LockCheckConflicts(LockMethod lockMethodTable,
* first check for global conflicts: If no locks conflict with my request,
* then I get the lock.
*
- * Checking for conflict: lock->grantMask represents the types of currently
- * held locks. conflictTable[lockmode] has a bit set for each type of
- * lock that conflicts with request. Bitwise compare tells if there is a
- * conflict.
+ * Checking for conflict: lock->grantMask represents the types of
+ * currently held locks. conflictTable[lockmode] has a bit set for each
+ * type of lock that conflicts with request. Bitwise compare tells if
+ * there is a conflict.
*/
if (!(lockMethodTable->conflictTab[lockmode] & lock->grantMask))
{
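
As a worked illustration of the bitwise conflict test in this hunk, here is a toy sketch with made-up lock modes and a two-row conflict table; it is not the backend's real lock method table, just the same grantMask/conflictTab idea:

    #include <stdio.h>

    /* Toy lock modes; each mode owns one bit in the masks below. */
    enum { NoLock = 0, ShareLock = 1, ExclusiveLock = 2 };
    #define LOCKBIT_ON(mode) (1 << (mode))

    /* conflictTab[m] has a bit set for every mode that conflicts with m. */
    static const int conflictTab[] = {
        [NoLock]        = 0,
        [ShareLock]     = LOCKBIT_ON(ExclusiveLock),
        [ExclusiveLock] = LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ExclusiveLock),
    };

    int main(void)
    {
        int grantMask = LOCKBIT_ON(ShareLock);  /* someone already holds Share */

        /* Requesting Share: it conflicts only with Exclusive -> granted. */
        printf("share vs held share: %s\n",
               (conflictTab[ShareLock] & grantMask) ? "conflict" : "granted");

        /* Requesting Exclusive: it conflicts with the held Share -> must wait. */
        printf("exclusive vs held share: %s\n",
               (conflictTab[ExclusiveLock] & grantMask) ? "conflict" : "granted");
        return 0;
    }
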
@@ -1875,8 +1875,8 @@ LockShmemSize(void)
* Note we count only one pair of hash tables, since the userlocks table
* actually overlays the main one.
*
- * Since the lockHash entry count above is only an estimate, add 10% safety
- * margin.
+ * Since the lockHash entry count above is only an estimate, add 10%
+ * safety margin.
*/
size = add_size(size, size / 10);
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 1c26a5934ba..4ea1dfe74be 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.167 2005/10/15 02:49:26 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.167.2.1 2005/11/22 18:23:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -640,15 +640,15 @@ ProcSleep(LockMethod lockMethodTable,
/*
* Determine where to add myself in the wait queue.
*
- * Normally I should go at the end of the queue. However, if I already hold
- * locks that conflict with the request of any previous waiter, put myself
- * in the queue just in front of the first such waiter. This is not a
- * necessary step, since deadlock detection would move me to before that
+ * Normally I should go at the end of the queue. However, if I already
+ * hold locks that conflict with the request of any previous waiter, put
+ * myself in the queue just in front of the first such waiter. This is not
+ * a necessary step, since deadlock detection would move me to before that
* waiter anyway; but it's relatively cheap to detect such a conflict
* immediately, and avoid delaying till deadlock timeout.
*
- * Special case: if I find I should go in front of some waiter, check to see
- * if I conflict with already-held locks or the requests before that
+ * Special case: if I find I should go in front of some waiter, check to
+ * see if I conflict with already-held locks or the requests before that
* waiter. If not, then just grant myself the requested lock immediately.
* This is the same as the test for immediate grant in LockAcquire, except
* we are only considering the part of the wait queue before my insertion
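
A minimal sketch of the insertion-point scan described above, using toy arrays in place of the real PGPROC wait queue (the names, modes, and conflict table are illustrative only):

    #include <stdio.h>

    #define LOCKBIT_ON(mode) (1 << (mode))
    enum { ShareLock = 1, ExclusiveLock = 2 };

    /* conflictTab[m]: bit set for each mode that conflicts with mode m. */
    static const int conflictTab[] = {
        [ShareLock]     = LOCKBIT_ON(ExclusiveLock),
        [ExclusiveLock] = LOCKBIT_ON(ShareLock) | LOCKBIT_ON(ExclusiveLock),
    };

    /*
     * Return the queue position to insert at: just before the first waiter
     * whose requested mode conflicts with the locks I already hold, or
     * nwaiters (the end of the queue) if there is no such waiter.
     */
    static int
    choose_insert_position(const int *waiterModes, int nwaiters, int myHeldMask)
    {
        for (int i = 0; i < nwaiters; i++)
            if (conflictTab[waiterModes[i]] & myHeldMask)
                return i;
        return nwaiters;
    }

    int main(void)
    {
        int queue[] = { ShareLock, ExclusiveLock, ShareLock };
        int myHeldMask = LOCKBIT_ON(ShareLock);   /* I already hold ShareLock */

        /* The ExclusiveLock waiter at index 1 conflicts with my held lock,
         * so I would be inserted just in front of it. */
        printf("insert at position %d\n",
               choose_insert_position(queue, 3, myHeldMask));
        return 0;
    }
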
@@ -755,8 +755,8 @@ ProcSleep(LockMethod lockMethodTable,
* sets MyProc->waitStatus = STATUS_ERROR, allowing us to know that we
* must report failure rather than success.
*
- * By delaying the check until we've waited for a bit, we can avoid running
- * the rather expensive deadlock-check code in most cases.
+ * By delaying the check until we've waited for a bit, we can avoid
+ * running the rather expensive deadlock-check code in most cases.
*/
if (!enable_sig_alarm(DeadlockTimeout, false))
elog(FATAL, "could not set timer for process wakeup");
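
To show the pattern of arming a timer so the expensive check only runs if the wait drags on, here is a small standalone POSIX sketch; the one-second delay merely stands in for DeadlockTimeout and none of this is the backend's timer machinery:

    #include <signal.h>
    #include <stdio.h>
    #include <sys/time.h>
    #include <unistd.h>

    static volatile sig_atomic_t timed_out = 0;
    static void on_alarm(int signo) { (void) signo; timed_out = 1; }

    int main(void)
    {
        struct sigaction sa;
        sa.sa_handler = on_alarm;
        sigemptyset(&sa.sa_mask);
        sa.sa_flags = 0;
        sigaction(SIGALRM, &sa, NULL);

        /* Arm a one-shot timer (1 second standing in for DeadlockTimeout). */
        struct itimerval t = { .it_value = { .tv_sec = 1 } };
        setitimer(ITIMER_REAL, &t, NULL);

        /* Simulate the wait; pause() returns when any signal arrives. */
        pause();

        if (timed_out)
            puts("timeout fired: now run the expensive deadlock check");
        else
            puts("woken early: skip the expensive check entirely");
        return 0;
    }
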
@@ -768,13 +768,13 @@ ProcSleep(LockMethod lockMethodTable,
* not detect a deadlock, PGSemaphoreLock() will continue to wait. There
* used to be a loop here, but it was useless code...
*
- * We pass interruptOK = true, which eliminates a window in which cancel/die
- * interrupts would be held off undesirably. This is a promise that we
- * don't mind losing control to a cancel/die interrupt here. We don't,
- * because we have no shared-state-change work to do after being granted
- * the lock (the grantor did it all). We do have to worry about updating
- * the locallock table, but if we lose control to an error, LockWaitCancel
- * will fix that up.
+ * We pass interruptOK = true, which eliminates a window in which
+ * cancel/die interrupts would be held off undesirably. This is a promise
+ * that we don't mind losing control to a cancel/die interrupt here. We
+ * don't, because we have no shared-state-change work to do after being
+ * granted the lock (the grantor did it all). We do have to worry about
+ * updating the locallock table, but if we lose control to an error,
+ * LockWaitCancel will fix that up.
*/
PGSemaphoreLock(&MyProc->sem, true);
@@ -931,9 +931,9 @@ CheckDeadLock(void)
/*
* Check to see if we've been awoken by anyone in the interim.
*
- * If we have we can return and resume our transaction -- happy day. Before
- * we are awoken the process releasing the lock grants it to us so we know
- * that we don't have to wait anymore.
+ * If we have we can return and resume our transaction -- happy day.
+ * Before we are awoken the process releasing the lock grants it to us so
+ * we know that we don't have to wait anymore.
*
* We check by looking to see if we've been unlinked from the wait queue.
* This is quicker than checking our semaphore's state, since no kernel
@@ -1085,10 +1085,10 @@ enable_sig_alarm(int delayms, bool is_statement_timeout)
/*
* Begin deadlock timeout with statement-level timeout active
*
- * Here, we want to interrupt at the closer of the two timeout times. If
- * fin_time >= statement_fin_time then we need not touch the existing
- * timer setting; else set up to interrupt at the deadlock timeout
- * time.
+ * Here, we want to interrupt at the closer of the two timeout times.
+ * If fin_time >= statement_fin_time then we need not touch the
+ * existing timer setting; else set up to interrupt at the deadlock
+ * timeout time.
*
* NOTE: in this case it is possible that this routine will be
* interrupted by the previously-set timer alarm. This is okay
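
The "closer of the two timeouts" decision reduces to one comparison of absolute deadlines; a tiny sketch with toy epoch times (the backend compares its own timestamp values, not time_t):

    #include <stdio.h>
    #include <time.h>

    /* Only re-arm the timer when the new deadline is strictly earlier. */
    static int
    need_rearm(time_t deadlock_fin_time, time_t statement_fin_time)
    {
        return deadlock_fin_time < statement_fin_time;
    }

    int main(void)
    {
        time_t now = 1000;                      /* arbitrary epoch for the example */
        time_t statement_fin_time = now + 60;   /* statement timeout in 60s */
        time_t deadlock_fin_time  = now + 1;    /* deadlock check due in 1s */

        if (need_rearm(deadlock_fin_time, statement_fin_time))
            puts("deadlock deadline is closer: re-arm the timer for it");
        else
            puts("statement deadline is closer or equal: leave the timer alone");
        return 0;
    }
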
diff --git a/src/backend/storage/lmgr/s_lock.c b/src/backend/storage/lmgr/s_lock.c
index f1c92d70dac..3330c48a877 100644
--- a/src/backend/storage/lmgr/s_lock.c
+++ b/src/backend/storage/lmgr/s_lock.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.40 2005/10/15 02:49:26 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.40.2.1 2005/11/22 18:23:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -58,27 +58,27 @@ s_lock(volatile slock_t *lock, const char *file, int line)
* longer than to call the kernel, so we try to adapt the spin loop count
* depending on whether we seem to be in a uniprocessor or multiprocessor.
*
- * Note: you might think MIN_SPINS_PER_DELAY should be just 1, but you'd be
- * wrong; there are platforms where that can result in a "stuck spinlock"
- * failure. This has been seen particularly on Alphas; it seems that the
- * first TAS after returning from kernel space will always fail on that
- * hardware.
+ * Note: you might think MIN_SPINS_PER_DELAY should be just 1, but you'd
+ * be wrong; there are platforms where that can result in a "stuck
+ * spinlock" failure. This has been seen particularly on Alphas; it seems
+ * that the first TAS after returning from kernel space will always fail
+ * on that hardware.
*
- * Once we do decide to block, we use randomly increasing pg_usleep() delays.
- * The first delay is 1 msec, then the delay randomly increases to about
- * one second, after which we reset to 1 msec and start again. The idea
- * here is that in the presence of heavy contention we need to increase
- * the delay, else the spinlock holder may never get to run and release
- * the lock. (Consider situation where spinlock holder has been nice'd
- * down in priority by the scheduler --- it will not get scheduled until
- * all would-be acquirers are sleeping, so if we always use a 1-msec
+ * Once we do decide to block, we use randomly increasing pg_usleep()
+ * delays. The first delay is 1 msec, then the delay randomly increases to
+ * about one second, after which we reset to 1 msec and start again. The
+ * idea here is that in the presence of heavy contention we need to
+ * increase the delay, else the spinlock holder may never get to run and
+ * release the lock. (Consider situation where spinlock holder has been
+ * nice'd down in priority by the scheduler --- it will not get scheduled
+ * until all would-be acquirers are sleeping, so if we always use a 1-msec
* sleep, there is a real possibility of starvation.) But we can't just
* clamp the delay to an upper bound, else it would take a long time to
* make a reasonable number of tries.
*
- * We time out and declare error after NUM_DELAYS delays (thus, exactly that
- * many tries). With the given settings, this will usually take 2 or so
- * minutes. It seems better to fix the total number of tries (and thus
+ * We time out and declare error after NUM_DELAYS delays (thus, exactly
+ * that many tries). With the given settings, this will usually take 2 or
+ * so minutes. It seems better to fix the total number of tries (and thus
* the probability of unintended failure) than to fix the total time
* spent.
*
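
A standalone sketch of the randomly increasing backoff the reflowed comment describes; the constants and the growth formula here are illustrative guesses, not s_lock.c's actual values:

    #include <stdio.h>
    #include <stdlib.h>

    #define MIN_DELAY_USEC   1000      /* start at 1 msec */
    #define MAX_DELAY_USEC   1000000   /* reset once past about one second */
    #define NUM_DELAYS       1000      /* give up ("stuck spinlock") after this */

    int main(void)
    {
        int cur_delay = MIN_DELAY_USEC;
        long total_usec = 0;

        srand(42);
        for (int ndelays = 0; ndelays < NUM_DELAYS; ndelays++)
        {
            /* pretend the TAS retry loop failed again; sleep before retrying */
            total_usec += cur_delay;

            /* grow the delay by a random fraction of itself ... */
            cur_delay += (int) (cur_delay * (rand() / (double) RAND_MAX) + 0.5);
            /* ... and drop back to 1 msec once it exceeds the cap */
            if (cur_delay > MAX_DELAY_USEC)
                cur_delay = MIN_DELAY_USEC;
        }
        printf("would give up after %d sleeps, about %.0f seconds asleep\n",
               NUM_DELAYS, total_usec / 1e6);
        return 0;
    }
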
@@ -251,7 +251,6 @@ _success: \n\
);
}
#endif /* __m68k__ && !__linux__ */
-
#else /* not __GNUC__ */
/*
diff --git a/src/backend/storage/lmgr/spin.c b/src/backend/storage/lmgr/spin.c
index dfec2a77694..0f9d9d47ccb 100644
--- a/src/backend/storage/lmgr/spin.c
+++ b/src/backend/storage/lmgr/spin.c
@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/spin.c,v 1.17 2005/10/15 02:49:26 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/spin.c,v 1.17.2.1 2005/11/22 18:23:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -54,8 +54,8 @@ SpinlockSemas(void)
* It would be cleaner to distribute this logic into the affected modules,
* similar to the way shmem space estimation is handled.
*
- * For now, though, we just need a few spinlocks (10 should be plenty) plus
- * one for each LWLock.
+ * For now, though, we just need a few spinlocks (10 should be plenty)
+ * plus one for each LWLock.
*/
return NumLWLocks() + 10;
}
diff --git a/src/backend/storage/page/bufpage.c b/src/backend/storage/page/bufpage.c
index fd19fd8736d..8812dfb5f68 100644
--- a/src/backend/storage/page/bufpage.c
+++ b/src/backend/storage/page/bufpage.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/page/bufpage.c,v 1.67 2005/10/15 02:49:26 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/page/bufpage.c,v 1.67.2.1 2005/11/22 18:23:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -184,8 +184,8 @@ PageAddItem(Page page,
/*
* Compute new lower and upper pointers for page, see if it'll fit.
*
- * Note: do arithmetic as signed ints, to avoid mistakes if, say, alignedSize
- * > pd_upper.
+ * Note: do arithmetic as signed ints, to avoid mistakes if, say,
+ * alignedSize > pd_upper.
*/
if (offsetNumber == limit || needshuffle)
lower = phdr->pd_lower + sizeof(ItemIdData);
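
The "do arithmetic as signed ints" note is easy to demonstrate: with unsigned 16-bit page offsets, an oversized item makes the upper pointer wrap around instead of going negative. A small sketch, with sizeof(unsigned int) standing in for sizeof(ItemIdData):

    #include <stdio.h>

    int main(void)
    {
        unsigned short pd_lower = 40;      /* page offsets are 16-bit */
        unsigned short pd_upper = 64;
        unsigned int   alignedSize = 128;  /* item bigger than the free space */

        /* Signed arithmetic: upper goes negative and the fit test fails, good. */
        int lower = pd_lower + (int) sizeof(unsigned int);
        int upper = (int) pd_upper - (int) alignedSize;
        printf("signed:   lower=%d upper=%d fits=%d\n",
               lower, upper, lower <= upper);

        /* Unsigned arithmetic: the subtraction wraps to a huge value, so a
         * naive comparison would wrongly conclude the item fits. */
        unsigned int ulower = pd_lower + sizeof(unsigned int);
        unsigned int uupper = pd_upper - alignedSize;
        printf("unsigned: lower=%u upper=%u \"fits\"=%d\n",
               ulower, uupper, ulower <= uupper);
        return 0;
    }
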
@@ -524,8 +524,8 @@ PageIndexTupleDelete(Page page, OffsetNumber offnum)
/*
* Finally, we need to adjust the linp entries that remain.
*
- * Anything that used to be before the deleted tuple's data was moved forward
- * by the size of the deleted tuple.
+ * Anything that used to be before the deleted tuple's data was moved
+ * forward by the size of the deleted tuple.
*/
if (!PageIsEmpty(page))
{
diff --git a/src/backend/storage/smgr/smgr.c b/src/backend/storage/smgr/smgr.c
index 17d14e38700..873ba1ebe2c 100644
--- a/src/backend/storage/smgr/smgr.c
+++ b/src/backend/storage/smgr/smgr.c
@@ -11,7 +11,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.93 2005/10/15 02:49:26 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.93.2.1 2005/11/22 18:23:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -344,8 +344,8 @@ smgrcreate(SMgrRelation reln, bool isTemp, bool isRedo)
* We may be using the target table space for the first time in this
* database, so create a per-database subdirectory if needed.
*
- * XXX this is a fairly ugly violation of module layering, but this seems to
- * be the best place to put the check. Maybe TablespaceCreateDbspace
+ * XXX this is a fairly ugly violation of module layering, but this seems
+ * to be the best place to put the check. Maybe TablespaceCreateDbspace
* should be here and not in commands/tablespace.c? But that would imply
* importing a lot of stuff that smgr.c oughtn't know, either.
*/
@@ -472,8 +472,8 @@ smgr_internal_unlink(RelFileNode rnode, int which, bool isTemp, bool isRedo)
/*
* And delete the physical files.
*
- * Note: we treat deletion failure as a WARNING, not an error, because we've
- * already decided to commit or abort the current xact.
+ * Note: we treat deletion failure as a WARNING, not an error, because
+ * we've already decided to commit or abort the current xact.
*/
if (!(*(smgrsw[which].smgr_unlink)) (rnode, isRedo))
ereport(WARNING,
diff --git a/src/backend/tcop/fastpath.c b/src/backend/tcop/fastpath.c
index 7c7de52e57a..a447c46bf8f 100644
--- a/src/backend/tcop/fastpath.c
+++ b/src/backend/tcop/fastpath.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/fastpath.c,v 1.83 2005/10/15 02:49:26 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/fastpath.c,v 1.83.2.1 2005/11/22 18:23:20 momjian Exp $
*
* NOTES
* This cruft is the server side of PQfn.
@@ -542,9 +542,9 @@ parse_fcall_arguments_20(StringInfo msgBuf, struct fp_info * fip,
* Copy supplied arguments into arg vector. In protocol 2.0 these are
* always assumed to be supplied in binary format.
*
- * Note: although the original protocol 2.0 code did not have any way for the
- * frontend to specify a NULL argument, we now choose to interpret length
- * == -1 as meaning a NULL.
+ * Note: although the original protocol 2.0 code did not have any way for
+ * the frontend to specify a NULL argument, we now choose to interpret
+ * length == -1 as meaning a NULL.
*/
for (i = 0; i < nargs; ++i)
{
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index c6c0402e20d..5d489a858e0 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.468.2.1 2005/11/10 00:31:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.468.2.2 2005/11/22 18:23:20 momjian Exp $
*
* NOTES
* this is the "main" module of the postgres backend and
@@ -301,8 +301,8 @@ SocketBackend(StringInfo inBuf)
* sync, better to say "command unknown" than to run out of memory because
* we used garbage as a length word.
*
- * This also gives us a place to set the doing_extended_query_message flag as
- * soon as possible.
+ * This also gives us a place to set the doing_extended_query_message flag
+ * as soon as possible.
*/
switch (qtype)
{
@@ -1423,11 +1423,11 @@ exec_bind_message(StringInfo input_message)
/*
* If we are in aborted transaction state, the only portals we can
- * actually run are those containing COMMIT or ROLLBACK commands.
- * We disallow binding anything else to avoid problems with infrastructure
- * that expects to run inside a valid transaction. We also disallow
- * binding any parameters, since we can't risk calling user-defined
- * I/O functions.
+ * actually run are those containing COMMIT or ROLLBACK commands. We
+ * disallow binding anything else to avoid problems with infrastructure
+ * that expects to run inside a valid transaction. We also disallow
+ * binding any parameters, since we can't risk calling user-defined I/O
+ * functions.
*/
if (IsAbortedTransactionBlockState() &&
(!IsTransactionExitStmtList(pstmt->query_list) ||
@@ -1490,12 +1490,11 @@ exec_bind_message(StringInfo input_message)
/*
* Rather than copying data around, we just set up a phony
- * StringInfo pointing to the correct portion of the
- * message buffer. We assume we can scribble on the
- * message buffer so as to maintain the convention that
- * StringInfos have a trailing null. This is grotty but
- * is a big win when dealing with very large parameter
- * strings.
+ * StringInfo pointing to the correct portion of the message
+ * buffer. We assume we can scribble on the message buffer so
+ * as to maintain the convention that StringInfos have a
+ * trailing null. This is grotty but is a big win when
+ * dealing with very large parameter strings.
*/
pbuf.data = (char *) pvalue;
pbuf.maxlen = plength + 1;
@@ -1514,8 +1513,8 @@ exec_bind_message(StringInfo input_message)
getTypeInputInfo(ptype, &typinput, &typioparam);
/*
- * We have to do encoding conversion before calling
- * the typinput routine.
+ * We have to do encoding conversion before calling the
+ * typinput routine.
*/
pstring = pg_client_to_server(pbuf.data, plength);
params[i].value =
@@ -1546,9 +1545,9 @@ exec_bind_message(StringInfo input_message)
/* Trouble if it didn't eat the whole buffer */
if (pbuf.cursor != pbuf.len)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("incorrect binary data format in bind parameter %d",
- i + 1)));
+ (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
+ errmsg("incorrect binary data format in bind parameter %d",
+ i + 1)));
}
else
{
@@ -2259,9 +2258,10 @@ check_stack_depth(void)
/*
* Trouble?
*
- * The test on stack_base_ptr prevents us from erroring out if called during
- * process setup or in a non-backend process. Logically it should be done
- * first, but putting it here avoids wasting cycles during normal cases.
+ * The test on stack_base_ptr prevents us from erroring out if called
+ * during process setup or in a non-backend process. Logically it should
+ * be done first, but putting it here avoids wasting cycles during normal
+ * cases.
*/
if (stack_depth > max_stack_depth_bytes &&
stack_base_ptr != NULL)
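
A self-contained sketch of the stack-depth test, with a fake 2 MB limit and a deliberately stack-hungry recursion; the pointer arithmetic mirrors the idea in check_stack_depth but is only an approximation:

    #include <stdio.h>
    #include <stdlib.h>

    static char *stack_base_ptr = NULL;
    static long  max_stack_depth_bytes = 2 * 1024 * 1024;  /* illustrative 2 MB */

    /* Approximate the current depth by the distance from a saved base address. */
    static void check_stack_depth_sketch(void)
    {
        char stack_top_loc;
        long stack_depth = (long) (stack_base_ptr - &stack_top_loc);

        if (stack_depth < 0)            /* the stack may grow either way */
            stack_depth = -stack_depth;

        /*
         * Test the base pointer last: if it was never set (process setup,
         * non-backend process), skip the check rather than erroring out.
         */
        if (stack_depth > max_stack_depth_bytes && stack_base_ptr != NULL)
        {
            fprintf(stderr, "stack depth limit exceeded\n");
            exit(1);
        }
    }

    static void recurse(int n)
    {
        char pad[65536];                /* burn some stack on purpose */
        pad[0] = (char) n;
        check_stack_depth_sketch();
        if (n > 0)
            recurse(n - 1);
        (void) pad[0];
    }

    int main(void)
    {
        char base;
        stack_base_ptr = &base;         /* record the reference point at startup */
        recurse(8);                     /* stays well under the 2 MB limit */
        puts("ok");
        return 0;
    }
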
@@ -2582,9 +2582,9 @@ PostgresMain(int argc, char *argv[], const char *username)
/*
* ignore system indexes
*
- * As of PG 7.4 this is safe to allow from the client, since it
- * only disables reading the system indexes, not writing them.
- * Worst case consequence is slowness.
+ * As of PG 7.4 this is safe to allow from the client, since
+ * it only disables reading the system indexes, not writing
+ * them. Worst case consequence is slowness.
*/
IgnoreSystemIndexes(true);
break;
@@ -2627,8 +2627,8 @@ PostgresMain(int argc, char *argv[], const char *username)
/*
* s - report usage statistics (timings) after each query
*
- * Since log options are SUSET, we need to postpone unless still
- * in secure context
+ * Since log options are SUSET, we need to postpone unless
+ * still in secure context
*/
if (ctx == PGC_BACKEND)
PendingConfigOption("log_statement_stats", "true");
@@ -2767,9 +2767,9 @@ PostgresMain(int argc, char *argv[], const char *username)
/*
* Set up signal handlers and masks.
*
- * Note that postmaster blocked all signals before forking child process, so
- * there is no race condition whereby we might receive a signal before we
- * have set up the handler.
+ * Note that postmaster blocked all signals before forking child process,
+ * so there is no race condition whereby we might receive a signal before
+ * we have set up the handler.
*
* Also note: it's best not to use any signals that are SIG_IGNored in the
* postmaster. If such a signal arrives before we are able to change the
@@ -2887,9 +2887,9 @@ PostgresMain(int argc, char *argv[], const char *username)
/*
* General initialization.
*
- * NOTE: if you are tempted to add code in this vicinity, consider putting it
- * inside InitPostgres() instead. In particular, anything that involves
- * database access should be there, not here.
+ * NOTE: if you are tempted to add code in this vicinity, consider putting
+ * it inside InitPostgres() instead. In particular, anything that
+ * involves database access should be there, not here.
*/
ereport(DEBUG3,
(errmsg_internal("InitPostgres")));
@@ -2978,13 +2978,13 @@ PostgresMain(int argc, char *argv[], const char *username)
* If an exception is encountered, processing resumes here so we abort the
* current transaction and start a new one.
*
- * You might wonder why this isn't coded as an infinite loop around a PG_TRY
- * construct. The reason is that this is the bottom of the exception
- * stack, and so with PG_TRY there would be no exception handler in force
- * at all during the CATCH part. By leaving the outermost setjmp always
- * active, we have at least some chance of recovering from an error during
- * error recovery. (If we get into an infinite loop thereby, it will soon
- * be stopped by overflow of elog.c's internal state stack.)
+ * You might wonder why this isn't coded as an infinite loop around a
+ * PG_TRY construct. The reason is that this is the bottom of the
+ * exception stack, and so with PG_TRY there would be no exception handler
+ * in force at all during the CATCH part. By leaving the outermost setjmp
+ * always active, we have at least some chance of recovering from an error
+ * during error recovery. (If we get into an infinite loop thereby, it
+ * will soon be stopped by overflow of elog.c's internal state stack.)
*/
if (sigsetjmp(local_sigjmp_buf, 1) != 0)
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index a293610bdaf..4a094048384 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.97 2005/11/03 21:35:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.97.2.1 2005/11/22 18:23:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -757,8 +757,8 @@ PortalRunSelect(Portal portal,
/*
* Force the queryDesc destination to the right thing. This supports
- * MOVE, for example, which will pass in dest = DestNone. This is okay
- * to change as long as we do it on every fetch. (The Executor must not
+ * MOVE, for example, which will pass in dest = DestNone. This is okay to
+ * change as long as we do it on every fetch. (The Executor must not
* assume that dest never changes.)
*/
if (queryDesc)
@@ -962,8 +962,8 @@ PortalRunUtility(Portal portal, Query *query,
* say, it has to update an index with expressions that invoke
* user-defined functions, then it had better have a snapshot.
*
- * Note we assume that caller will take care of restoring ActiveSnapshot on
- * exit/error.
+ * Note we assume that caller will take care of restoring ActiveSnapshot
+ * on exit/error.
*/
if (!(IsA(utilityStmt, TransactionStmt) ||
IsA(utilityStmt, LockStmt) ||
@@ -1015,11 +1015,11 @@ PortalRunMulti(Portal portal,
ListCell *planlist_item;
/*
- * If the destination is DestRemoteExecute, change to DestNone. The reason
- * is that the client won't be expecting any tuples, and indeed has no way
- * to know what they are, since there is no provision for Describe to send
- * a RowDescription message when this portal execution strategy is in
- * effect. This presently will only affect SELECT commands added to
+ * If the destination is DestRemoteExecute, change to DestNone. The
+ * reason is that the client won't be expecting any tuples, and indeed has
+ * no way to know what they are, since there is no provision for Describe
+ * to send a RowDescription message when this portal execution strategy is
+ * in effect. This presently will only affect SELECT commands added to
* non-SELECT queries by rewrite rules: such commands will be executed,
* but the results will be discarded unless you use "simple Query"
* protocol.
@@ -1101,9 +1101,9 @@ PortalRunMulti(Portal portal,
* If a command completion tag was supplied, use it. Otherwise use the
* portal's commandTag as the default completion tag.
*
- * Exception: clients will expect INSERT/UPDATE/DELETE tags to have counts,
- * so fake something up if necessary. (This could happen if the original
- * query was replaced by a DO INSTEAD rule.)
+ * Exception: clients will expect INSERT/UPDATE/DELETE tags to have
+ * counts, so fake something up if necessary. (This could happen if the
+ * original query was replaced by a DO INSTEAD rule.)
*/
if (completionTag && completionTag[0] == '\0')
{
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index 5304d47fa8a..32334ac9109 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.123 2005/10/15 02:49:27 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.123.2.1 2005/11/22 18:23:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -189,8 +189,8 @@ array_in(PG_FUNCTION_ARGS)
* Otherwise, we require the input to be in curly-brace style, and we
* prescan the input to determine dimensions.
*
- * Dimension info takes the form of one or more [n] or [m:n] items. The outer
- * loop iterates once per dimension item.
+ * Dimension info takes the form of one or more [n] or [m:n] items. The
+ * outer loop iterates once per dimension item.
*/
p = string_save;
ndim = 0;
@@ -640,8 +640,8 @@ ReadArrayStr(char *arrayStr,
* in-place within arrayStr to do this. srcptr is the current scan point,
* and dstptr is where we are copying to.
*
- * We also want to suppress leading and trailing unquoted whitespace. We use
- * the leadingspace flag to suppress leading space. Trailing space is
+ * We also want to suppress leading and trailing unquoted whitespace. We
+ * use the leadingspace flag to suppress leading space. Trailing space is
* tracked by using dstendptr to point to the last significant output
* character.
*
@@ -2290,8 +2290,8 @@ array_map(FunctionCallInfo fcinfo, Oid inpType, Oid retType,
/*
* Apply the given function to source elt and extra args.
*
- * We assume the extra args are non-NULL, so need not check whether fn()
- * is strict. Would need to do more work here to support arrays
+ * We assume the extra args are non-NULL, so need not check whether
+ * fn() is strict. Would need to do more work here to support arrays
* containing nulls, too.
*/
fcinfo->arg[0] = elt;
@@ -3158,8 +3158,8 @@ array_type_length_coerce_internal(ArrayType *src,
/*
* Use array_map to apply the function to each array element.
*
- * We pass on the desttypmod and isExplicit flags whether or not the function
- * wants them.
+ * We pass on the desttypmod and isExplicit flags whether or not the
+ * function wants them.
*/
InitFunctionCallInfoData(locfcinfo, &my_extra->coerce_finfo, 3,
NULL, NULL);
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index 5b3fc46d9c2..e8440ecce0e 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.160 2005/10/15 02:49:28 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.160.2.1 2005/11/22 18:23:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -3550,8 +3550,8 @@ EncodeDateTime(struct pg_tm * tm, fsec_t fsec, int *tzp, char **tzn, int style,
* Print fractional seconds if any. The field widths here should
* be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD, since
- * it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD,
+ * since it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -3602,8 +3602,8 @@ EncodeDateTime(struct pg_tm * tm, fsec_t fsec, int *tzp, char **tzn, int style,
* Print fractional seconds if any. The field widths here should
* be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD, since
- * it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD,
+ * since it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -3650,8 +3650,8 @@ EncodeDateTime(struct pg_tm * tm, fsec_t fsec, int *tzp, char **tzn, int style,
* Print fractional seconds if any. The field widths here should
* be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD, since
- * it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD,
+ * since it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -3706,8 +3706,8 @@ EncodeDateTime(struct pg_tm * tm, fsec_t fsec, int *tzp, char **tzn, int style,
* Print fractional seconds if any. The field widths here should
* be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD, since
- * it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD,
+ * since it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index de84afe42c9..b02fdfeff5d 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1,7 +1,7 @@
/* -----------------------------------------------------------------------
* formatting.c
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.101 2005/10/20 15:59:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.101.2.1 2005/11/22 18:23:21 momjian Exp $
*
*
* Portions Copyright (c) 1999-2005, PostgreSQL Global Development Group
@@ -1326,8 +1326,8 @@ DCH_processor(FormatNode *node, char *inout, bool is_to_char,
* The input string is shorter than format picture, so it's good
* time to break this loop...
*
- * Note: this isn't relevant for TO_CHAR mode, beacuse it use 'inout'
- * allocated by format picture length.
+	 * Note: this isn't relevant for TO_CHAR mode, because it uses
+	 * 'inout' allocated by format picture length.
*/
break;
@@ -3752,8 +3752,8 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
* We need sign detection because determine exact position of post-sign is
* difficult:
*
- * FM9999.9999999S -> 123.001- 9.9S -> .5- FM9.999999MI
- * -> 5.01-
+ * FM9999.9999999S -> 123.001- 9.9S -> .5- FM9.999999MI ->
+ * 5.01-
*/
if (*Np->number == ' ' && Np->read_pre + Np->read_post > 0)
{
@@ -3797,8 +3797,9 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
*
* FM9.999999MI -> 5.01-
*
- * if (.... && IS_LSIGN(Np->Num)==FALSE) prevents read wrong formats like
- * to_number('1 -', '9S') where sign is not anchored to last number.
+	 * if (.... && IS_LSIGN(Np->Num)==FALSE) prevents reading wrong
+	 * formats like to_number('1 -', '9S') where the sign is not anchored
+	 * to the last number.
*/
else if (isread == FALSE && IS_LSIGN(Np->Num) == FALSE &&
(IS_PLUS(Np->Num) || IS_MINUS(Np->Num)))
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index a8becf990d1..1a4dc1bf802 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -14,7 +14,7 @@
* Copyright (c) 1998-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.86 2005/10/15 02:49:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.86.2.1 2005/11/22 18:23:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -348,8 +348,8 @@ numeric_out(PG_FUNCTION_ARGS)
/*
* Get the number in the variable format.
*
- * Even if we didn't need to change format, we'd still need to copy the value
- * to have a modifiable copy for rounding. set_var_from_num() also
+ * Even if we didn't need to change format, we'd still need to copy the
+ * value to have a modifiable copy for rounding. set_var_from_num() also
* guarantees there is extra digit space in case we produce a carry out
* from rounding.
*/
@@ -459,7 +459,7 @@ numeric_send(PG_FUNCTION_ARGS)
* scale of the attribute have to be applied on the value.
*/
Datum
-numeric(PG_FUNCTION_ARGS)
+numeric (PG_FUNCTION_ARGS)
{
Numeric num = PG_GETARG_NUMERIC(0);
int32 typmod = PG_GETARG_INT32(1);
@@ -2958,10 +2958,10 @@ get_str_from_var(NumericVar *var, int dscale)
/*
* Allocate space for the result.
*
- * i is set to to # of decimal digits before decimal point. dscale is the #
- * of decimal digits we will print after decimal point. We may generate as
- * many as DEC_DIGITS-1 excess digits at the end, and in addition we need
- * room for sign, decimal point, null terminator.
+	 * i is set to # of decimal digits before decimal point. dscale is the
+ * # of decimal digits we will print after decimal point. We may generate
+ * as many as DEC_DIGITS-1 excess digits at the end, and in addition we
+ * need room for sign, decimal point, null terminator.
*/
i = (var->weight + 1) * DEC_DIGITS;
if (i <= 0)
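
The buffer-size arithmetic can be made concrete with toy values for weight and dscale; DEC_DIGITS is 4, i.e. four decimal digits per base-10000 digit, and the breakdown of the extra space is spelled out in the comments:

    #include <stdio.h>

    #define DEC_DIGITS 4   /* decimal digits per base-10000 NumericDigit */

    int main(void)
    {
        int weight = 1;    /* first digit group is worth 10000^1 */
        int dscale = 3;    /* decimal places requested by the caller */

        int i = (weight + 1) * DEC_DIGITS;  /* digits before the decimal point */
        if (i <= 0)
            i = 1;                          /* always room for a leading "0" */

        /* sign + integer digits + decimal point + fractional digits
         * + up to DEC_DIGITS-1 excess digits + trailing '\0' */
        int bufsize = i + dscale + DEC_DIGITS + 2;

        printf("integer digits reserved: %d, buffer size: %d\n", i, bufsize);
        return 0;
    }
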
@@ -3898,12 +3898,12 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
* INT_MAX is noticeably larger than NBASE*NBASE, this gives us headroom
* to avoid normalizing carries immediately.
*
- * We start with div[] containing one zero digit followed by the dividend's
- * digits (plus appended zeroes to reach the desired precision including
- * guard digits). Each step of the main loop computes an (approximate)
- * quotient digit and stores it into div[], removing one position of
- * dividend space. A final pass of carry propagation takes care of any
- * mistaken quotient digits.
+ * We start with div[] containing one zero digit followed by the
+ * dividend's digits (plus appended zeroes to reach the desired precision
+ * including guard digits). Each step of the main loop computes an
+ * (approximate) quotient digit and stores it into div[], removing one
+ * position of dividend space. A final pass of carry propagation takes
+ * care of any mistaken quotient digits.
*/
div = (int *) palloc0((div_ndigits + 1) * sizeof(int));
for (i = 0; i < var1ndigits; i++)
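
div_var's scheme is more involved than this, but the one-quotient-digit-per-dividend-position idea can be shown with plain base-10 short division; the in-place approximation and the final carry-propagation pass are deliberately omitted from this sketch:

    #include <stdio.h>

    int main(void)
    {
        int dividend[] = { 1, 2, 3, 4, 5 };   /* 12345 */
        int ndigits = 5;
        int divisor = 7;
        int quotient[5];
        int remainder = 0;

        for (int i = 0; i < ndigits; i++)
        {
            int cur = remainder * 10 + dividend[i];
            quotient[i] = cur / divisor;      /* this step's quotient digit */
            remainder = cur % divisor;        /* carried into the next step */
        }

        printf("quotient digits: ");
        for (int i = 0; i < ndigits; i++)
            printf("%d", quotient[i]);
        printf(", remainder %d\n", remainder);   /* 12345 / 7 = 1763 rem 4 */
        return 0;
    }
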
@@ -4430,8 +4430,8 @@ exp_var_internal(NumericVar *arg, NumericVar *result, int rscale)
*
* exp(x) = 1 + x + x^2/2! + x^3/3! + ...
*
- * Given the limited range of x, this should converge reasonably quickly. We
- * run the series until the terms fall below the local_rscale limit.
+ * Given the limited range of x, this should converge reasonably quickly.
+ * We run the series until the terms fall below the local_rscale limit.
*/
add_var(&const_one, &x, result);
set_var_from_var(&x, &xpow);
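
A quick numeric check of that series, using doubles and a fixed tolerance in place of local_rscale (build with -lm):

    #include <math.h>
    #include <stdio.h>

    /* Sum 1 + x + x^2/2! + ... until the next term drops below the tolerance. */
    static double exp_series(double x, double tolerance)
    {
        double result = 1.0;
        double term = 1.0;
        int n = 1;

        while (fabs(term) > tolerance)
        {
            term *= x / n;          /* builds x^n / n! incrementally */
            result += term;
            n++;
        }
        return result;
    }

    int main(void)
    {
        double x = 0.04;            /* range-reduced argument: convergence is fast */
        printf("series: %.15f\n", exp_series(x, 1e-15));
        printf("libm:   %.15f\n", exp(x));
        return 0;
    }
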
@@ -4519,11 +4519,11 @@ ln_var(NumericVar *arg, NumericVar *result, int rscale)
*
* z + z^3/3 + z^5/5 + ...
*
- * where z = (x-1)/(x+1) is in the range (approximately) -0.053 .. 0.048 due
- * to the above range-reduction of x.
+ * where z = (x-1)/(x+1) is in the range (approximately) -0.053 .. 0.048
+ * due to the above range-reduction of x.
*
- * The convergence of this is not as fast as one would like, but is tolerable
- * given that z is small.
+ * The convergence of this is not as fast as one would like, but is
+ * tolerable given that z is small.
*/
sub_var(&x, &const_one, result);
add_var(&x, &const_one, &elem);
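
The same kind of check for the logarithm series; the sum of odd powers gives ln(x)/2, so the sketch doubles it, and z stays small because of the range reduction described above (build with -lm):

    #include <math.h>
    #include <stdio.h>

    /* ln(x) = 2 * (z + z^3/3 + z^5/5 + ...)  with  z = (x-1)/(x+1). */
    static double ln_series(double x, double tolerance)
    {
        double z = (x - 1.0) / (x + 1.0);
        double zz = z * z;
        double term = z;
        double sum = z;
        int n = 3;

        while (fabs(term) > tolerance)
        {
            term *= zz;             /* next odd power of z */
            sum += term / n;
            n += 2;
        }
        return 2.0 * sum;
    }

    int main(void)
    {
        double x = 1.1;             /* after range reduction, z is about 0.048 */
        printf("series: %.15f\n", ln_series(x, 1e-18));
        printf("libm:   %.15f\n", log(x));
        return 0;
    }
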
diff --git a/src/backend/utils/adt/oid.c b/src/backend/utils/adt/oid.c
index 62db042bbde..351eeec0755 100644
--- a/src/backend/utils/adt/oid.c
+++ b/src/backend/utils/adt/oid.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.64 2005/10/15 02:49:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.64.2.1 2005/11/22 18:23:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -92,12 +92,12 @@ oidin_subr(const char *funcname, const char *s, char **endloc)
* case strtoul will not raise an error for some values that are out of
* the range of Oid.
*
- * For backwards compatibility, we want to accept inputs that are given with
- * a minus sign, so allow the input value if it matches after either
+ * For backwards compatibility, we want to accept inputs that are given
+ * with a minus sign, so allow the input value if it matches after either
* signed or unsigned extension to long.
*
- * To ensure consistent results on 32-bit and 64-bit platforms, make sure the
- * error message is the same as if strtoul() had returned ERANGE.
+ * To ensure consistent results on 32-bit and 64-bit platforms, make sure
+ * the error message is the same as if strtoul() had returned ERANGE.
*/
#if OID_MAX != ULONG_MAX
if (cvt != (unsigned long) result &&
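
A standalone demonstration of the acceptance rule on a 64-bit platform: strtoul happily consumes "-1", and the value passes because it matches after signed extension. Oid and OID_MAX are re-declared locally here only to keep the sketch self-contained:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned int Oid;            /* Oid is a 32-bit unsigned integer */
    #define OID_MAX UINT_MAX

    int main(void)
    {
        const char *s = "-1";
        char *endptr;
        unsigned long cvt = strtoul(s, &endptr, 10);   /* strtoul accepts a sign */
        Oid result = (Oid) cvt;

        /*
         * On a 64-bit platform cvt is 0xFFFFFFFFFFFFFFFF while result is
         * 0xFFFFFFFF, so a plain equality test would reject the input.  Accept
         * it if it matches after either unsigned or signed extension to long.
         */
        int ok = (cvt == (unsigned long) result) ||
                 (cvt == (unsigned long) (long) (int) result);

        printf("cvt=%lu result=%u accepted=%d\n", cvt, (unsigned) result, ok);
        return 0;
    }
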
diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c
index 48d93d0602c..6d1c9273252 100644
--- a/src/backend/utils/adt/pg_lzcompress.c
+++ b/src/backend/utils/adt/pg_lzcompress.c
@@ -1,7 +1,7 @@
/* ----------
* pg_lzcompress.c -
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/pg_lzcompress.c,v 1.20 2005/10/15 02:49:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pg_lzcompress.c,v 1.20.2.1 2005/11/22 18:23:21 momjian Exp $
*
* This is an implementation of LZ compression for PostgreSQL.
* It uses a simple history table and generates 2-3 byte tags
@@ -782,9 +782,9 @@ pglz_get_next_decomp_char_from_lzdata(PGLZ_DecompState *dstate)
* function and a difference occurs early). Otherwise, all the checks,
* needed here, cause too much overhead.
*
- * Thus we decompress the entire rest at once into the temporary buffer
- * and change the decomp state to return the prepared data from the
- * buffer by the more simple calls to
+ * Thus we decompress the entire rest at once into the temporary
+ * buffer and change the decomp state to return the prepared data from
+ * the buffer by the more simple calls to
* pglz_get_next_decomp_char_from_plain().
*/
if (dstate->cp_out - dstate->temp_buf >= 256)
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index ce04ce77e67..f40229d02b3 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/regexp.c,v 1.60 2005/10/18 20:38:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/regexp.c,v 1.60.2.1 2005/11/22 18:23:21 momjian Exp $
*
* Alistair Crooks added the code for the regex caching
* agc - cached the regular expressions used - there's a good chance
@@ -212,7 +212,7 @@ RE_compile_and_execute(text *text_re, char *dat, int dat_len,
pg_wchar *data;
size_t data_len;
int regexec_result;
- regex_t *re;
+ regex_t *re;
char errMsg[100];
/* Convert data string to wide characters */
@@ -452,7 +452,7 @@ textregexreplace_noopt(PG_FUNCTION_ARGS)
text *s = PG_GETARG_TEXT_P(0);
text *p = PG_GETARG_TEXT_P(1);
text *r = PG_GETARG_TEXT_P(2);
- regex_t *re;
+ regex_t *re;
re = RE_compile_and_cache(p, regex_flavor);
@@ -475,7 +475,7 @@ textregexreplace(PG_FUNCTION_ARGS)
int i;
bool glob = false;
bool ignorecase = false;
- regex_t *re;
+ regex_t *re;
/* parse options */
for (i = 0; i < opt_len; i++)
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 347f82d8c3d..0b925f36478 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -17,7 +17,7 @@
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.82 2005/10/29 18:39:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.82.2.1 2005/11/22 18:23:21 momjian Exp $
*
* ----------
*/
@@ -995,8 +995,8 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
/*
* Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
- * DELETE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our
+ * eventual DELETE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -1156,8 +1156,8 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
* Get the relation descriptors of the FK and PK tables and the new and
* old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
- * UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our
+ * eventual UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -1680,8 +1680,8 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
/*
* Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
- * UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our
+ * eventual UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -1849,8 +1849,8 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
/*
* Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
- * UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our
+ * eventual UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -2059,8 +2059,8 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
/*
* Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
- * UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our
+ * eventual UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -2238,8 +2238,8 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
/*
* Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
- * UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our
+ * eventual UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 04e8eb55161..216f8656afa 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -3,7 +3,7 @@
* back to source text
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.207 2005/10/15 02:49:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.207.2.1 2005/11/22 18:23:22 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
@@ -3845,8 +3845,8 @@ get_const_expr(Const *constval, deparse_context *context)
* 'NaN'). Note that strtod() and friends might accept NaN,
* so we can't use that to test.
*
- * In reality we only need to defend against infinity and NaN, so
- * we need not get too crazy about pattern matching here.
+ * In reality we only need to defend against infinity and NaN,
+ * so we need not get too crazy about pattern matching here.
*/
if (strspn(extval, "0123456789+-eE.") == strlen(extval))
{
@@ -4579,8 +4579,8 @@ quote_identifier(const char *ident)
* parser doesn't provide any easy way to test for whether an
* identifier is safe or not... so be safe not sorry.
*
- * Note: ScanKeywordLookup() does case-insensitive comparison, but that's
- * fine, since we already know we have all-lower-case.
+ * Note: ScanKeywordLookup() does case-insensitive comparison, but
+ * that's fine, since we already know we have all-lower-case.
*/
if (ScanKeywordLookup(ident) != NULL)
safe = false;
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 95980ca1e03..7d6426092db 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.191 2005/10/15 02:49:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.191.2.1 2005/11/22 18:23:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1396,11 +1396,11 @@ eqjoinsel(PG_FUNCTION_ARGS)
* the righthand relation are unique (ie, act as if it's been
* DISTINCT'd).
*
- * NOTE: it might seem that we should unique-ify the lefthand input when
- * considering JOIN_REVERSE_IN. But this is not so, because the join
- * clause we've been handed has not been commuted from the way the
- * parser originally wrote it. We know that the unique side of the IN
- * clause is *always* on the right.
+ * NOTE: it might seem that we should unique-ify the lefthand input
+ * when considering JOIN_REVERSE_IN. But this is not so, because the
+ * join clause we've been handed has not been commuted from the way
+ * the parser originally wrote it. We know that the unique side of
+ * the IN clause is *always* on the right.
*
* NOTE: it would be dangerous to try to be smart about JOIN_LEFT or
* JOIN_RIGHT here, because we do not have enough information to
@@ -2190,8 +2190,8 @@ estimate_hash_bucketsize(PlannerInfo *root, Node *hashkey, double nbuckets)
* assuming that the data distribution is affected uniformly by the
* restriction clauses!
*
- * XXX Possibly better way, but much more expensive: multiply by selectivity
- * of rel's restriction clauses that mention the target Var.
+ * XXX Possibly better way, but much more expensive: multiply by
+ * selectivity of rel's restriction clauses that mention the target Var.
*/
if (vardata.rel)
ndistinct *= vardata.rel->rows / vardata.rel->tuples;
@@ -2296,10 +2296,10 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
* declared input type(s) of the operator we are invoked for, so we just
* error out if either is not recognized.
*
- * XXX The histogram we are interpolating between points of could belong to a
- * column that's only binary-compatible with the declared type. In essence
- * we are assuming that the semantics of binary-compatible types are
- * enough alike that we can use a histogram generated with one type's
+ * XXX The histogram we are interpolating between points of could belong
+ * to a column that's only binary-compatible with the declared type. In
+ * essence we are assuming that the semantics of binary-compatible types
+ * are enough alike that we can use a histogram generated with one type's
* operators to estimate selectivity for the other's. This is outright
* wrong in some cases --- in particular signed versus unsigned
* interpretation could trip us up. But it's useful enough in the
@@ -2636,10 +2636,10 @@ convert_string_datum(Datum value, Oid typid)
* that can write past the specified buffer length in that scenario.
* So, do it the dumb way for portability.
*
- * Yet other systems (e.g., glibc) sometimes return a smaller value from
- * the second call than the first; thus the Assert must be <= not ==
- * as you'd expect. Can't any of these people program their way out
- * of a paper bag?
+ * Yet other systems (e.g., glibc) sometimes return a smaller value
+ * from the second call than the first; thus the Assert must be <= not
+ * == as you'd expect. Can't any of these people program their way
+ * out of a paper bag?
*/
xfrmlen = strxfrm(NULL, val, 0);
xfrmstr = (char *) palloc(xfrmlen + 1);
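
The dumb-but-portable two-call strxfrm pattern, with the <= assertion the comment argues for; malloc stands in for palloc in this sketch:

    #include <assert.h>
    #include <locale.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        setlocale(LC_COLLATE, "");        /* use the environment's collation */
        const char *val = "Hello, world";

        /* First call with a NULL buffer just reports the space needed. */
        size_t xfrmlen = strxfrm(NULL, val, 0);
        char *xfrmstr = malloc(xfrmlen + 1);
        if (xfrmstr == NULL)
            return 1;

        /* Second call actually writes the transformed sort key. */
        size_t xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1);

        /* Some libraries report a smaller length the second time, hence <= . */
        assert(xfrmlen2 <= xfrmlen);

        printf("needed %zu bytes, wrote %zu\n", xfrmlen, xfrmlen2);
        free(xfrmstr);
        return 0;
    }
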
@@ -3150,7 +3150,8 @@ get_variable_numdistinct(VariableStatData *vardata)
/*
* Special-case boolean columns: presumably, two distinct values.
*
- * Are there any other datatypes we should wire in special estimates for?
+ * Are there any other datatypes we should wire in special estimates
+ * for?
*/
stadistinct = 2.0;
}
@@ -3265,8 +3266,9 @@ get_variable_maximum(PlannerInfo *root, VariableStatData *vardata,
/*
* If there is a histogram, grab the last or first value as appropriate.
*
- * If there is a histogram that is sorted with some other operator than the
- * one we want, fail --- this suggests that there is data we can't use.
+ * If there is a histogram that is sorted with some other operator than
+ * the one we want, fail --- this suggests that there is data we can't
+ * use.
*/
if (get_attstatsslot(vardata->statsTuple,
vardata->atttype, vardata->atttypmod,
@@ -4214,8 +4216,8 @@ genericcostestimate(PlannerInfo *root,
* system in favor of using partial indexes where possible, which is not
* necessarily a bad thing. But it'd be nice to do better someday.
*
- * Note that index->indpred and indexQuals are both in implicit-AND form, so
- * ANDing them together just takes merging the lists. However,
+ * Note that index->indpred and indexQuals are both in implicit-AND form,
+ * so ANDing them together just takes merging the lists. However,
* eliminating duplicates is a bit trickier because indexQuals contains
* RestrictInfo nodes and the indpred does not. It is okay to pass a
* mixed list to clauselist_selectivity, but we have to work a bit to
@@ -4261,8 +4263,8 @@ genericcostestimate(PlannerInfo *root,
/*
* Estimate the number of index pages that will be retrieved.
*
- * For all currently-supported index types, the first page of the index is a
- * metadata page, and we should figure on fetching that plus a pro-rated
+ * For all currently-supported index types, the first page of the index is
+ * a metadata page, and we should figure on fetching that plus a pro-rated
* fraction of the remaining pages.
*/
if (index->pages > 1 && index->tuples > 0)
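
A worked example of the metapage-plus-pro-rated-fraction estimate; the exact formula below is an assumption reconstructed from the comment, not necessarily genericcostestimate's precise computation (build with -lm):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        double index_pages  = 101.0;   /* whole index: 1 metapage + 100 data pages */
        double index_tuples = 10000.0; /* entries in the index */
        double selected     = 250.0;   /* tuples the scan is expected to fetch */

        double pages_fetched;
        if (index_pages > 1 && index_tuples > 0)
            pages_fetched = ceil(selected * (index_pages - 1) / index_tuples) + 1;
        else
            pages_fetched = 1;         /* degenerate index: just one page */

        /* 250/10000 of 100 data pages = 2.5 -> 3 pages, plus the metapage = 4. */
        printf("estimated index pages fetched: %.0f\n", pages_fetched);
        return 0;
    }
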
@@ -4289,9 +4291,9 @@ genericcostestimate(PlannerInfo *root,
* CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
* indexqual operator.
*
- * Note: this neglects the possible costs of rechecking lossy operators and
- * OR-clause expressions. Detecting that that might be needed seems more
- * expensive than it's worth, though, considering all the other
+ * Note: this neglects the possible costs of rechecking lossy operators
+ * and OR-clause expressions. Detecting that that might be needed seems
+ * more expensive than it's worth, though, considering all the other
* inaccuracies here ...
*/
cost_qual_eval(&index_qual_cost, indexQuals);
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 43956597e31..2cf4f5878b9 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.157 2005/10/27 02:45:22 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.157.2.1 2005/11/22 18:23:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1944,30 +1944,22 @@ timestamp_mi(PG_FUNCTION_ARGS)
result->day = 0;
/*
- * This is wrong, but removing it breaks a lot of regression tests.
- * For example:
+ * This is wrong, but removing it breaks a lot of regression tests. For
+ * example:
*
- * test=> SET timezone = 'EST5EDT';
- * test=> SELECT
- * test-> ('2005-10-30 13:22:00-05'::timestamptz -
- * test(> '2005-10-29 13:22:00-04'::timestamptz);
- * ?column?
- * ----------------
- * 1 day 01:00:00
- * (1 row)
+ * test=> SET timezone = 'EST5EDT'; test=> SELECT test-> ('2005-10-30
+ * 13:22:00-05'::timestamptz - test(> '2005-10-29
+ * 13:22:00-04'::timestamptz); ?column? ---------------- 1 day 01:00:00 (1
+ * row)
*
- * so adding that to the first timestamp gets:
+ * so adding that to the first timestamp gets:
*
- * test=> SELECT
- * test-> ('2005-10-29 13:22:00-04'::timestamptz +
- * test(> ('2005-10-30 13:22:00-05'::timestamptz -
- * test(> '2005-10-29 13:22:00-04'::timestamptz)) at time zone 'EST';
- * timezone
- * --------------------
- * 2005-10-30 14:22:00
- * (1 row)
+ * test=> SELECT test-> ('2005-10-29 13:22:00-04'::timestamptz + test(>
+ * ('2005-10-30 13:22:00-05'::timestamptz - test(> '2005-10-29
+ * 13:22:00-04'::timestamptz)) at time zone 'EST'; timezone
+ * -------------------- 2005-10-30 14:22:00 (1 row)
*/
- result = DatumGetIntervalP(DirectFunctionCall1(interval_justify_hours,
+ result = DatumGetIntervalP(DirectFunctionCall1(interval_justify_hours,
IntervalPGetDatum(result)));
PG_RETURN_INTERVAL_P(result);
@@ -1986,6 +1978,7 @@ interval_justify_hours(PG_FUNCTION_ARGS)
{
Interval *span = PG_GETARG_INTERVAL_P(0);
Interval *result;
+
#ifdef HAVE_INT64_TIMESTAMP
int64 wholeday;
#else
@@ -2334,12 +2327,12 @@ interval_mul(PG_FUNCTION_ARGS)
day_remainder -= result->day;
/*
- * The above correctly handles the whole-number part of the month and
- * day products, but we have to do something with any fractional part
+ * The above correctly handles the whole-number part of the month and day
+ * products, but we have to do something with any fractional part
* resulting when the factor is nonintegral. We cascade the fractions
* down to lower units using the conversion factors DAYS_PER_MONTH and
- * SECS_PER_DAY. Note we do NOT cascade up, since we are not forced to
- * do so by the representation. The user can choose to cascade up later,
+ * SECS_PER_DAY. Note we do NOT cascade up, since we are not forced to do
+ * so by the representation. The user can choose to cascade up later,
* using justify_hours and/or justify_days.
*/
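
A worked example of cascading only downward: multiplying a 1 month 1 day interval by 2.5, with DAYS_PER_MONTH = 30 and SECS_PER_DAY = 86400, using toy doubles rather than the backend's Interval struct:

    #include <stdio.h>

    #define DAYS_PER_MONTH 30
    #define SECS_PER_DAY   86400

    int main(void)
    {
        double factor = 2.5;
        double month_product = 1 * factor;        /* 2.5 months */
        double day_product   = 1 * factor;        /* 2.5 days   */

        int    months = (int) month_product;      /* keep the whole months */
        double month_remainder = month_product - months;

        /* Cascade the fractional month down into days ... */
        double days_d = day_product + month_remainder * DAYS_PER_MONTH;
        int    days = (int) days_d;
        double day_remainder = days_d - days;

        /* ... and the fractional day down into seconds.  Nothing cascades up. */
        double seconds = day_remainder * SECS_PER_DAY;

        /* (1 mon 1 day) * 2.5  ->  2 mon 17 days 43200 secs (12:00:00) */
        printf("%d mon %d days %.0f secs\n", months, days, seconds);
        return 0;
    }
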
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 1f6c176f640..63e5d6b8dd0 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.139 2005/10/29 00:31:51 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.139.2.1 2005/11/22 18:23:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -888,8 +888,8 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
(LPWSTR) a1p, a1len / 2);
if (!r)
ereport(ERROR,
- (errmsg("could not convert string to UTF-16: error %lu",
- GetLastError())));
+ (errmsg("could not convert string to UTF-16: error %lu",
+ GetLastError())));
}
((LPWSTR) a1p)[r] = 0;
@@ -901,8 +901,8 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
(LPWSTR) a2p, a2len / 2);
if (!r)
ereport(ERROR,
- (errmsg("could not convert string to UTF-16: error %lu",
- GetLastError())));
+ (errmsg("could not convert string to UTF-16: error %lu",
+ GetLastError())));
}
((LPWSTR) a2p)[r] = 0;
@@ -2118,12 +2118,12 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
if (eml == 1)
{
for (; p < p_end && *p != '\\'; p++)
- /* nothing */ ;
+ /* nothing */ ;
}
else
{
for (; p < p_end && *p != '\\'; p += pg_mblen(p))
- /* nothing */ ;
+ /* nothing */ ;
}
/* Copy the text we just scanned over, if any. */
@@ -2168,9 +2168,9 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
else
{
/*
- * If escape char is not followed by any expected char,
- * just treat it as ordinary data to copy. (XXX would it be
- * better to throw an error?)
+ * If escape char is not followed by any expected char, just treat
+ * it as ordinary data to copy. (XXX would it be better to throw
+ * an error?)
*/
appendStringInfoChar(str, '\\');
continue;
@@ -2179,7 +2179,7 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
if (so != -1 && eo != -1)
{
/*
- * Copy the text that is back reference of regexp. Because so and
+ * Copy the text that is back reference of regexp. Because so and
* eo are counted in characters not bytes, it's easiest to use
* text_substring to pull out the correct chunk of text.
*/
@@ -2252,9 +2252,9 @@ replace_text_regexp(text *src_text, void *regexp,
break;
/*
- * Copy the text to the left of the match position. Because we
- * are working with character not byte indexes, it's easiest to
- * use text_substring to pull out the needed data.
+ * Copy the text to the left of the match position. Because we are
+ * working with character not byte indexes, it's easiest to use
+ * text_substring to pull out the needed data.
*/
if (pmatch[0].rm_so - data_pos > 0)
{
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 918ab7c081a..db7554f70b9 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.125 2005/10/15 02:49:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.125.2.1 2005/11/22 18:23:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1426,8 +1426,8 @@ SearchCatCacheList(CatCache *cache,
* relation. For each matching tuple found in the relation, use an
* existing cache entry if possible, else build a new one.
*
- * We have to bump the member refcounts temporarily to ensure they won't get
- * dropped from the cache while loading other members. We use a PG_TRY
+ * We have to bump the member refcounts temporarily to ensure they won't
+ * get dropped from the cache while loading other members. We use a PG_TRY
* block to ensure we can undo those refcounts if we get an error before
* we finish constructing the CatCList.
*/
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index 59250feac1a..b84458550ec 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -80,7 +80,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.73 2005/10/15 02:49:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.73.2.1 2005/11/22 18:23:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -564,8 +564,8 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
* is needed because other backends might possibly possess smgr cache
* but not relcache entries for the target relation.
*
- * Note: during a pg_class row update that assigns a new relfilenode or
- * reltablespace value, we will be called on both the old and new
+ * Note: during a pg_class row update that assigns a new relfilenode
+ * or reltablespace value, we will be called on both the old and new
* tuples, and thus will broadcast invalidation messages showing both
* the old and new RelFileNode values. This ensures that other
* backends will close smgr references to the old file.
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index e877c1f828b..e0616b45a2f 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.230 2005/10/15 02:49:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.230.2.1 2005/11/22 18:23:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -303,11 +303,11 @@ AllocateRelationDesc(Relation relation, Form_pg_class relp)
/*
* Copy the relation tuple form
*
- * We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE. relacl
- * is NOT stored in the relcache --- there'd be little point in it, since
- * we don't copy the tuple's nullvalues bitmap and hence wouldn't know if
- * the value is valid ... bottom line is that relacl *cannot* be retrieved
- * from the relcache. Get it from the syscache if you need it.
+ * We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE.
+ * relacl is NOT stored in the relcache --- there'd be little point in it,
+ * since we don't copy the tuple's nullvalues bitmap and hence wouldn't
+ * know if the value is valid ... bottom line is that relacl *cannot* be
+ * retrieved from the relcache. Get it from the syscache if you need it.
*/
relationForm = (Form_pg_class) palloc(CLASS_TUPLE_SIZE);
@@ -549,8 +549,8 @@ RelationBuildRuleLock(Relation relation)
/*
* open pg_rewrite and begin a scan
*
- * Note: since we scan the rules using RewriteRelRulenameIndexId, we will be
- * reading the rules in name order, except possibly during
+ * Note: since we scan the rules using RewriteRelRulenameIndexId, we will
+ * be reading the rules in name order, except possibly during
* emergency-recovery operations (ie, IsIgnoringSystemIndexes). This in
* turn ensures that rules will be fired in name order.
*/
@@ -1199,9 +1199,9 @@ formrdesc(const char *relationName, Oid relationReltype,
/*
* initialize relation tuple form
*
- * The data we insert here is pretty incomplete/bogus, but it'll serve to get
- * us launched. RelationCacheInitializePhase2() will read the real data
- * from pg_class and replace what we've done here.
+ * The data we insert here is pretty incomplete/bogus, but it'll serve to
+ * get us launched. RelationCacheInitializePhase2() will read the real
+ * data from pg_class and replace what we've done here.
*/
relation->rd_rel = (Form_pg_class) palloc0(CLASS_TUPLE_SIZE);
@@ -1453,8 +1453,8 @@ RelationReloadClassinfo(Relation relation)
/*
* Read the pg_class row
*
- * Don't try to use an indexscan of pg_class_oid_index to reload the info for
- * pg_class_oid_index ...
+ * Don't try to use an indexscan of pg_class_oid_index to reload the info
+ * for pg_class_oid_index ...
*/
indexOK = (RelationGetRelid(relation) != ClassOidIndexId);
pg_class_tuple = ScanPgRelation(RelationGetRelid(relation), indexOK);
@@ -1501,9 +1501,9 @@ RelationClearRelation(Relation relation, bool rebuild)
* got called because of a relation cache flush that was triggered by
* VACUUM.
*
- * If it's a nailed index, then we need to re-read the pg_class row to see if
- * its relfilenode changed. We can't necessarily do that here, because we
- * might be in a failed transaction. We assume it's okay to do it if
+ * If it's a nailed index, then we need to re-read the pg_class row to see
+ * if its relfilenode changed. We can't necessarily do that here, because
+ * we might be in a failed transaction. We assume it's okay to do it if
* there are open references to the relcache entry (cf notes for
* AtEOXact_RelationCache). Otherwise just mark the entry as possibly
* invalid, and it'll be fixed when next opened.
@@ -1574,8 +1574,8 @@ RelationClearRelation(Relation relation, bool rebuild)
* rd_createSubid state. Also attempt to preserve the tupledesc and
* rewrite-rule substructures in place.
*
- * Note that this process does not touch CurrentResourceOwner; which is
- * good because whatever ref counts the entry may have do not
+ * Note that this process does not touch CurrentResourceOwner; which
+ * is good because whatever ref counts the entry may have do not
* necessarily belong to that resource owner.
*/
Oid save_relid = RelationGetRelid(relation);
@@ -1934,8 +1934,8 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
/*
* Is it a relation created in the current subtransaction?
*
- * During subcommit, mark it as belonging to the parent, instead. During
- * subabort, simply delete the relcache entry.
+ * During subcommit, mark it as belonging to the parent, instead.
+ * During subabort, simply delete the relcache entry.
*/
if (relation->rd_createSubid == mySubid)
{
@@ -3077,8 +3077,8 @@ load_relcache_init_file(void)
* Rules and triggers are not saved (mainly because the internal
* format is complex and subject to change). They must be rebuilt if
* needed by RelationCacheInitializePhase2. This is not expected to
- * be a big performance hit since few system catalogs have such.
- * Ditto for index expressions and predicates.
+ * be a big performance hit since few system catalogs have such. Ditto
+ * for index expressions and predicates.
*/
rel->rd_rules = NULL;
rel->rd_rulescxt = NULL;
@@ -3321,10 +3321,10 @@ write_relcache_init_file(void)
* OK, rename the temp file to its final name, deleting any
* previously-existing init file.
*
- * Note: a failure here is possible under Cygwin, if some other backend
- * is holding open an unlinked-but-not-yet-gone init file. So treat
- * this as a noncritical failure; just remove the useless temp file on
- * failure.
+ * Note: a failure here is possible under Cygwin, if some other
+ * backend is holding open an unlinked-but-not-yet-gone init file. So
+ * treat this as a noncritical failure; just remove the useless temp
+ * file on failure.
*/
if (rename(tempfilename, finalfilename) < 0)
unlink(tempfilename);
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index ff9cc975437..b681892ca9e 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.15 2005/10/15 02:49:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.15.2.1 2005/11/22 18:23:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -238,9 +238,9 @@ lookup_type_cache(Oid type_id, int flags)
/*
* Set up fmgr lookup info as requested
*
- * Note: we tell fmgr the finfo structures live in CacheMemoryContext, which
- * is not quite right (they're really in DynaHashContext) but this will do
- * for our purposes.
+ * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
+ * which is not quite right (they're really in DynaHashContext) but this
+ * will do for our purposes.
*/
if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
typentry->eq_opr_finfo.fn_oid == InvalidOid &&
@@ -319,8 +319,8 @@ lookup_default_opclass(Oid type_id, Oid am_id)
* require the user to specify which one he wants. If we find more than
* one exact match, then someone put bogus entries in pg_opclass.
*
- * This is the same logic as GetDefaultOpClass() in indexcmds.c, except that
- * we consider all opclasses, regardless of the current search path.
+ * This is the same logic as GetDefaultOpClass() in indexcmds.c, except
+ * that we consider all opclasses, regardless of the current search path.
*/
rel = heap_open(OperatorClassRelationId, AccessShareLock);
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index 44ebac245c9..866662a65e6 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -42,7 +42,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/error/elog.c,v 1.167 2005/11/05 03:04:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/error/elog.c,v 1.167.2.1 2005/11/22 18:23:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -1158,8 +1158,8 @@ set_syslog_parameters(const char *ident, int facility)
* the connection until needed, since this routine will get called whether
* or not Log_destination actually mentions syslog.
*
- * Note that we make our own copy of the ident string rather than relying on
- * guc.c's. This may be overly paranoid, but it ensures that we cannot
+ * Note that we make our own copy of the ident string rather than relying
+ * on guc.c's. This may be overly paranoid, but it ensures that we cannot
* accidentally free a string that syslog is still using.
*/
if (syslog_ident == NULL || strcmp(syslog_ident, ident) != 0 ||
@@ -1487,7 +1487,7 @@ log_line_prefix(StringInfo buf)
if (MyProcPort)
{
const char *psdisp;
- int displen;
+ int displen;
psdisp = get_ps_display(&displen);
appendStringInfo(buf, "%.*s", displen, psdisp);
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index 4e5dcc3002b..4d9aa23be5a 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.97 2005/10/15 02:49:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.97.2.1 2005/11/22 18:23:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -608,8 +608,8 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
* backwards-compatibility wrapper). Note, however, that we'll never get
* here with NULL arguments if the function is marked strict.
*
- * We also need to detoast any TOAST-ed inputs, since it's unlikely that an
- * old-style function knows about TOASTing.
+ * We also need to detoast any TOAST-ed inputs, since it's unlikely that
+ * an old-style function knows about TOASTing.
*/
isnull = false;
for (i = 0; i < n_arguments; i++)
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 292673ac26a..395a4f36beb 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.65 2005/10/15 02:49:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.65.2.1 2005/11/22 18:23:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -118,8 +118,8 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
* For shared hash tables, we have a local hash header (HTAB struct) that
* we allocate in TopMemoryContext; all else is in shared memory.
*
- * For non-shared hash tables, everything including the hash header is in a
- * memory context created specially for the hash table --- this makes
+ * For non-shared hash tables, everything including the hash header is in
+ * a memory context created specially for the hash table --- this makes
* hash_destroy very simple. The memory context is made a child of either
* a context specified by the caller, or TopMemoryContext if nothing is
* specified.
diff --git a/src/backend/utils/init/flatfiles.c b/src/backend/utils/init/flatfiles.c
index 9906682c320..eca3182d746 100644
--- a/src/backend/utils/init/flatfiles.c
+++ b/src/backend/utils/init/flatfiles.c
@@ -23,7 +23,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/utils/init/flatfiles.c,v 1.15 2005/10/15 02:49:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/flatfiles.c,v 1.15.2.1 2005/11/22 18:23:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -593,8 +593,8 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
* Convert list of role Oids to list of role names. We must do
* this before re-sorting auth_info.
*
- * We skip the first list element (curr_role itself) since there is
- * no point in writing that a role is a member of itself.
+ * We skip the first list element (curr_role itself) since there
+ * is no point in writing that a role is a member of itself.
*/
for_each_cell(mem, lnext(list_head(roles_list)))
{
@@ -775,8 +775,8 @@ AtEOXact_UpdateFlatFiles(bool isCommit)
* likely won't have gotten a strong enough lock), so get the locks we
* need before writing anything.
*
- * For writing the auth file, it's sufficient to ExclusiveLock pg_authid; we
- * take just regular AccessShareLock on pg_auth_members.
+ * For writing the auth file, it's sufficient to ExclusiveLock pg_authid;
+ * we take just regular AccessShareLock on pg_auth_members.
*/
if (database_file_update_subid != InvalidSubTransactionId)
drel = heap_open(DatabaseRelationId, ExclusiveLock);
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index 5c6f2f95d5f..cd03ad1f752 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.150 2005/10/15 02:49:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.150.2.1 2005/11/22 18:23:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -695,8 +695,8 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Try to create the lock file --- O_EXCL makes this atomic.
*
- * Think not to make the file protection weaker than 0600. See comments
- * below.
+ * Think not to make the file protection weaker than 0600. See
+ * comments below.
*/
fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
if (fd >= 0)
@@ -757,26 +757,27 @@ CreateLockFile(const char *filename, bool amPostmaster,
* carefully then all but the immediate parent shell will be
* root-owned processes and so the kill test will fail with EPERM.
*
- * We can treat the EPERM-error case as okay because that error implies
- * that the existing process has a different userid than we do, which
- * means it cannot be a competing postmaster. A postmaster cannot
- * successfully attach to a data directory owned by a userid other
- * than its own. (This is now checked directly in checkDataDir(), but
- * has been true for a long time because of the restriction that the
- * data directory isn't group- or world-accessible.) Also, since we
- * create the lockfiles mode 600, we'd have failed above if the
- * lockfile belonged to another userid --- which means that whatever
- * process kill() is reporting about isn't the one that made the
- * lockfile. (NOTE: this last consideration is the only one that
- * keeps us from blowing away a Unix socket file belonging to an
- * instance of Postgres being run by someone else, at least on
- * machines where /tmp hasn't got a stickybit.)
+ * We can treat the EPERM-error case as okay because that error
+ * implies that the existing process has a different userid than we
+ * do, which means it cannot be a competing postmaster. A postmaster
+ * cannot successfully attach to a data directory owned by a userid
+ * other than its own. (This is now checked directly in
+ * checkDataDir(), but has been true for a long time because of the
+ * restriction that the data directory isn't group- or
+ * world-accessible.) Also, since we create the lockfiles mode 600,
+ * we'd have failed above if the lockfile belonged to another userid
+ * --- which means that whatever process kill() is reporting about
+ * isn't the one that made the lockfile. (NOTE: this last
+ * consideration is the only one that keeps us from blowing away a
+ * Unix socket file belonging to an instance of Postgres being run by
+ * someone else, at least on machines where /tmp hasn't got a
+ * stickybit.)
*
- * Windows hasn't got getppid(), but doesn't need it since it's not using
- * real kill() either...
+ * Windows hasn't got getppid(), but doesn't need it since it's not
+ * using real kill() either...
*
- * Normally kill() will fail with ESRCH if the given PID doesn't exist.
- * BeOS returns EINVAL for some silly reason, however.
+ * Normally kill() will fail with ESRCH if the given PID doesn't
+ * exist. BeOS returns EINVAL for some silly reason, however.
*/
if (other_pid != my_pid
#ifndef WIN32
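
The lock-file logic the comments above discuss boils down to: create the file atomically with O_EXCL and mode 0600, and if that fails because it already exists, probe the recorded PID with kill(pid, 0), treating ESRCH as a stale lock and EPERM as a live process owned by someone else. A simplified sketch under those assumptions (not the postmaster's actual CreateLockFile, which also handles shared memory keys, socket files, and retries):

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int
acquire_lock_file(const char *path)
{
	char	buf[32];
	int		ntries;

	for (ntries = 0; ntries < 2; ntries++)
	{
		int		fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0600);

		if (fd >= 0)
		{
			snprintf(buf, sizeof(buf), "%d\n", (int) getpid());
			if (write(fd, buf, strlen(buf)) < 0)
				perror("write");
			close(fd);
			return 0;			/* we own the lock now */
		}
		if (errno != EEXIST)
			return -1;			/* unexpected failure */

		/* Lock file already exists: is its owner still alive? */
		fd = open(path, O_RDONLY);
		if (fd < 0)
			return -1;
		memset(buf, 0, sizeof(buf));
		if (read(fd, buf, sizeof(buf) - 1) <= 0)
		{
			close(fd);
			return -1;
		}
		close(fd);

		pid_t	other = (pid_t) atol(buf);

		/*
		 * A generic utility gives up on any live process, including the
		 * EPERM case; the postmaster's reasoning quoted above lets it treat
		 * EPERM as "not a competing postmaster" instead.
		 */
		if (other <= 0 || kill(other, 0) == 0 || errno != ESRCH)
			return -1;
		unlink(path);			/* stale lock: remove and try once more */
	}
	return -1;
}

int
main(void)
{
	if (acquire_lock_file("/tmp/example.lock.pid") == 0)
		puts("lock acquired");
	else
		puts("could not acquire lock");
	return 0;
}
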
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 3c763e39292..1279e9a4c41 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.158 2005/10/15 02:49:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.158.2.1 2005/11/22 18:23:24 momjian Exp $
*
*
*-------------------------------------------------------------------------
@@ -325,8 +325,8 @@ InitPostgres(const char *dbname, const char *username)
/*
* Set up the global variables holding database id and path.
*
- * We take a shortcut in the bootstrap case, otherwise we have to look up the
- * db name in pg_database.
+ * We take a shortcut in the bootstrap case, otherwise we have to look up
+ * the db name in pg_database.
*/
if (bootstrap)
{
diff --git a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
index 0447c2a9e7d..ccc7027f657 100644
--- a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
+++ b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
@@ -7,7 +7,7 @@
*
* 1999/1/15 Tatsuo Ishii
*
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c,v 1.6 2005/10/15 02:49:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c,v 1.6.2.1 2005/11/22 18:23:24 momjian Exp $
*/
/* can be used in either frontend or backend */
@@ -19,7 +19,7 @@ typedef struct
{
unsigned short code,
peer;
-} codes_t;
+} codes_t;
/* map Big5 Level 1 to CNS 11643-1992 Plane 1 */
static codes_t big5Level1ToCnsPlane1[25] = { /* range */
@@ -205,7 +205,7 @@ static unsigned short b2c3[][2] = {
};
static unsigned short BinarySearchRange
- (codes_t * array, int high, unsigned short code)
+ (codes_t *array, int high, unsigned short code)
{
int low,
mid,
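
The function touched above searches a sorted table of code ranges; the general shape of such a range binary search is easy to show in isolation. The table below uses placeholder ranges purely for illustration, not the real Big5/CNS mapping data:

#include <stdio.h>

typedef struct
{
	unsigned short start;		/* first code covered by this range */
	unsigned short end;			/* last code covered by this range */
} range_t;

/* placeholder ranges, sorted and non-overlapping (not real mapping data) */
static const range_t ranges[] = {
	{0x0100, 0x01FF},
	{0x0300, 0x03FF},
	{0x0500, 0x05FF},
};

static int
find_range(const range_t *array, int n, unsigned short code)
{
	int		low = 0,
			high = n - 1;

	while (low <= high)
	{
		int		mid = (low + high) / 2;

		if (code < array[mid].start)
			high = mid - 1;
		else if (code > array[mid].end)
			low = mid + 1;
		else
			return mid;			/* code falls inside this range */
	}
	return -1;					/* code is not covered by any range */
}

int
main(void)
{
	printf("0x0350 -> range %d\n", find_range(ranges, 3, 0x0350));	/* 1 */
	printf("0x0200 -> range %d\n", find_range(ranges, 3, 0x0200));	/* -1 */
	return 0;
}
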
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
index 0038db58e62..f3dee67960b 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c,v 1.15 2005/10/15 02:49:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c,v 1.15.2.1 2005/11/22 18:23:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,7 +65,7 @@ typedef struct
pg_utf_to_local *map2; /* from UTF8 map name */
int size1; /* size of map1 */
int size2; /* size of map2 */
-} pg_conv_map;
+} pg_conv_map;
static pg_conv_map maps[] = {
{PG_SQL_ASCII}, /* SQL/ASCII */
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 79e162efc02..ec4baff4206 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -10,7 +10,7 @@
* Written by Peter Eisentraut <peter_e@gmx.net>.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.299 2005/11/04 23:50:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.299.2.1 2005/11/22 18:23:24 momjian Exp $
*
*--------------------------------------------------------------------
*/
@@ -466,7 +466,7 @@ static struct config_bool ConfigureNamesBool[] =
{"constraint_exclusion", PGC_USERSET, QUERY_TUNING_OTHER,
gettext_noop("Enables the planner to use constraints to optimize queries."),
gettext_noop("Child table scans will be skipped if their "
- "constraints guarantee that no rows match the query.")
+ "constraints guarantee that no rows match the query.")
},
&constraint_exclusion,
false, NULL, NULL
@@ -502,7 +502,7 @@ static struct config_bool ConfigureNamesBool[] =
{"fsync", PGC_SIGHUP, WAL_SETTINGS,
gettext_noop("Forces synchronization of updates to disk."),
gettext_noop("The server will use the fsync() system call in several places to make "
- "sure that updates are physically written to disk. This insures "
+ "sure that updates are physically written to disk. This insures "
"that a database cluster will recover to a consistent state after "
"an operating system or hardware crash.")
},
@@ -527,7 +527,7 @@ static struct config_bool ConfigureNamesBool[] =
gettext_noop("Writes full pages to WAL when first modified after a checkpoint."),
gettext_noop("A page write in process during an operating system crash might be "
"only partially written to disk. During recovery, the row changes "
- "stored in WAL are not enough to recover. This option writes "
+ "stored in WAL are not enough to recover. This option writes "
"pages when first modified after a checkpoint to WAL so full recovery "
"is possible.")
},
@@ -2771,8 +2771,8 @@ SelectConfigFiles(const char *userDoption, const char *progname)
* If the data_directory GUC variable has been set, use that as DataDir;
* otherwise use configdir if set; else punt.
*
- * Note: SetDataDir will copy and absolute-ize its argument, so we don't have
- * to.
+ * Note: SetDataDir will copy and absolute-ize its argument, so we don't
+ * have to.
*/
if (data_directory)
SetDataDir(data_directory);
@@ -3103,8 +3103,8 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
/*
* We have two cases:
*
- * If commit and HAVE_TENTATIVE, set actual value to tentative (this is
- * to override a SET LOCAL if one occurred later than SET). We keep
+ * If commit and HAVE_TENTATIVE, set actual value to tentative (this
+ * is to override a SET LOCAL if one occurred later than SET). We keep
* the tentative value and propagate HAVE_TENTATIVE to the parent
* status, allowing the SET's effect to percolate up. (But if we're
* exiting the outermost transaction, we'll drop the HAVE_TENTATIVE
@@ -3258,7 +3258,8 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
* If newval should now be freed, it'll be
* taken care of below.
*
- * See notes in set_config_option about casting
+ * See notes in set_config_option about
+ * casting
*/
newval = (char *) newstr;
}
diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c
index 878ff81e241..cfc857de0a4 100644
--- a/src/backend/utils/misc/ps_status.c
+++ b/src/backend/utils/misc/ps_status.c
@@ -5,7 +5,7 @@
* to contain some useful information. Mechanism differs wildly across
* platforms.
*
- * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.26 2005/11/05 03:04:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.26.2.1 2005/11/22 18:23:25 momjian Exp $
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
* various details abducted from various places
@@ -380,7 +380,7 @@ get_ps_display(int *displen)
/* Remove any trailing spaces to offset the effect of PS_PADDING */
offset = ps_buffer_size;
- while (offset > ps_buffer_fixed_size && ps_buffer[offset-1] == PS_PADDING)
+ while (offset > ps_buffer_fixed_size && ps_buffer[offset - 1] == PS_PADDING)
offset--;
*displen = offset - ps_buffer_fixed_size;
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index 9866e12d68c..8120720041b 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.82 2005/10/15 02:49:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.82.2.1 2005/11/22 18:23:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -320,21 +320,21 @@ PortalDrop(Portal portal, bool isTopCommit)
* Release any resources still attached to the portal. There are several
* cases being covered here:
*
- * Top transaction commit (indicated by isTopCommit): normally we should do
- * nothing here and let the regular end-of-transaction resource releasing
- * mechanism handle these resources too. However, if we have a FAILED
- * portal (eg, a cursor that got an error), we'd better clean up its
- * resources to avoid resource-leakage warning messages.
+ * Top transaction commit (indicated by isTopCommit): normally we should
+ * do nothing here and let the regular end-of-transaction resource
+ * releasing mechanism handle these resources too. However, if we have a
+ * FAILED portal (eg, a cursor that got an error), we'd better clean up
+ * its resources to avoid resource-leakage warning messages.
*
- * Sub transaction commit: never comes here at all, since we don't kill any
- * portals in AtSubCommit_Portals().
+ * Sub transaction commit: never comes here at all, since we don't kill
+ * any portals in AtSubCommit_Portals().
*
* Main or sub transaction abort: we will do nothing here because
* portal->resowner was already set NULL; the resources were already
* cleaned up in transaction abort.
*
- * Ordinary portal drop: must release resources. However, if the portal is
- * not FAILED then we do not release its locks. The locks become the
+ * Ordinary portal drop: must release resources. However, if the portal
+ * is not FAILED then we do not release its locks. The locks become the
* responsibility of the transaction's ResourceOwner (since it is the
* parent of the portal's owner) and will be released when the transaction
* eventually ends.
@@ -439,8 +439,8 @@ CommitHoldablePortals(void)
* Instead of dropping the portal, prepare it for access by later
* transactions.
*
- * Note that PersistHoldablePortal() must release all resources used
- * by the portal that are local to the creating transaction.
+ * Note that PersistHoldablePortal() must release all resources
+ * used by the portal that are local to the creating transaction.
*/
PortalCreateHoldStore(portal);
PersistHoldablePortal(portal);
@@ -698,8 +698,8 @@ AtSubAbort_Portals(SubTransactionId mySubid,
* If the portal is READY then allow it to survive into the parent
* transaction; otherwise shut it down.
*
- * Currently, we can't actually support that because the portal's query
- * might refer to objects created or changed in the failed
+ * Currently, we can't actually support that because the portal's
+ * query might refer to objects created or changed in the failed
* subtransaction, leading to crashes if execution is resumed. So,
* even READY portals are deleted. It would be nice to detect whether
* the query actually depends on any such object, instead.
diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c
index 97933de820b..3eaf64e87bc 100644
--- a/src/backend/utils/resowner/resowner.c
+++ b/src/backend/utils/resowner/resowner.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/resowner/resowner.c,v 1.14 2005/10/15 02:49:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/resowner/resowner.c,v 1.14.2.1 2005/11/22 18:23:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -199,9 +199,9 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
* buffer entry from my list, so I just have to iterate till there are
* none.
*
- * During a commit, there shouldn't be any remaining pins --- that would
- * indicate failure to clean up the executor correctly --- so issue
- * warnings. In the abort case, just clean up quietly.
+ * During a commit, there shouldn't be any remaining pins --- that
+ * would indicate failure to clean up the executor correctly --- so
+ * issue warnings. In the abort case, just clean up quietly.
*
* We are careful to do the releasing back-to-front, so as to avoid
* O(N^2) behavior in ResourceOwnerForgetBuffer().
@@ -218,8 +218,8 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
* the relref entry from my list, so I just have to iterate till there
* are none.
*
- * As with buffer pins, warn if any are left at commit time, and release
- * back-to-front for speed.
+ * As with buffer pins, warn if any are left at commit time, and
+ * release back-to-front for speed.
*/
while (owner->nrelrefs > 0)
{
@@ -261,8 +261,8 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
* the catref entry from my list, so I just have to iterate till there
* are none. Ditto for catcache lists.
*
- * As with buffer pins, warn if any are left at commit time, and release
- * back-to-front for speed.
+ * As with buffer pins, warn if any are left at commit time, and
+ * release back-to-front for speed.
*/
while (owner->ncatrefs > 0)
{
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index d38d5e2d41f..4e197fde351 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -78,7 +78,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.54 2005/10/25 13:47:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.54.2.1 2005/11/22 18:23:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -584,6 +584,7 @@ void
tuplesort_end(Tuplesortstate *state)
{
int i;
+
#ifdef TRACE_SORT
long spaceUsed;
#endif
@@ -743,8 +744,8 @@ puttuple_common(Tuplesortstate *state, void *tuple)
* and it's simplest to let writetup free each tuple as soon as
* it's written.)
*
- * Note there will always be at least one tuple in the heap at this
- * point; see dumptuples.
+ * Note there will always be at least one tuple in the heap at
+ * this point; see dumptuples.
*/
Assert(state->memtupcount > 0);
if (COMPARETUP(state, tuple, state->memtuples[0]) >= 0)
@@ -890,8 +891,8 @@ tuplesort_gettuple(Tuplesortstate *state, bool forward,
/*
* Backward.
*
- * if all tuples are fetched already then we return last tuple, else
- * - tuple before last returned.
+ * if all tuples are fetched already then we return last tuple,
+ * else - tuple before last returned.
*/
if (state->eof_reached)
{
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index d409121418e..3980bfca8fb 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.23 2005/10/15 02:49:37 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.23.2.1 2005/11/22 18:23:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -115,17 +115,17 @@ struct Tuplestorestate
/*
* These variables are used to keep track of the current position.
*
- * In state WRITEFILE, the current file seek position is the write point, and
- * the read position is remembered in readpos_xxx; in state READFILE, the
- * current file seek position is the read point, and the write position is
- * remembered in writepos_xxx. (The write position is the same as EOF,
- * but since BufFileSeek doesn't currently implement SEEK_END, we have to
- * remember it explicitly.)
+ * In state WRITEFILE, the current file seek position is the write point,
+ * and the read position is remembered in readpos_xxx; in state READFILE,
+ * the current file seek position is the read point, and the write
+ * position is remembered in writepos_xxx. (The write position is the
+ * same as EOF, but since BufFileSeek doesn't currently implement
+ * SEEK_END, we have to remember it explicitly.)
*
- * Special case: if we are in WRITEFILE state and eof_reached is true, then
- * the read position is implicitly equal to the write position (and hence
- * to the file seek position); this way we need not update the readpos_xxx
- * variables on each write.
+ * Special case: if we are in WRITEFILE state and eof_reached is true,
+ * then the read position is implicitly equal to the write position (and
+ * hence to the file seek position); this way we need not update the
+ * readpos_xxx variables on each write.
*/
bool eof_reached; /* read reached EOF (always valid) */
int current; /* next array index (valid if INMEM) */
@@ -454,11 +454,11 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
/*
* Backward.
*
- * if all tuples are fetched already then we return last tuple, else
- * - tuple before last returned.
+ * if all tuples are fetched already then we return last tuple,
+ * else - tuple before last returned.
*
- * Back up to fetch previously-returned tuple's ending length word.
- * If seek fails, assume we are at start of file.
+ * Back up to fetch previously-returned tuple's ending length
+ * word. If seek fails, assume we are at start of file.
*/
if (BufFileSeek(state->myfile, 0, -(long) sizeof(unsigned int),
SEEK_CUR) != 0)
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index fa6bd4a3c58..27b712a22ae 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -32,7 +32,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.91 2005/10/15 02:49:37 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.91.2.1 2005/11/22 18:23:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -941,12 +941,12 @@ HeapTupleSatisfiesSnapshot(HeapTupleHeader tuple, Snapshot snapshot,
* By here, the inserting transaction has committed - have to check
* when...
*
- * Note that the provided snapshot contains only top-level XIDs, so we have
- * to convert a subxact XID to its parent for comparison. However, we can
- * make first-pass range checks with the given XID, because a subxact with
- * XID < xmin has surely also got a parent with XID < xmin, while one with
- * XID >= xmax must belong to a parent that was not yet committed at the
- * time of this snapshot.
+ * Note that the provided snapshot contains only top-level XIDs, so we
+ * have to convert a subxact XID to its parent for comparison. However, we
+ * can make first-pass range checks with the given XID, because a subxact
+ * with XID < xmin has surely also got a parent with XID < xmin, while one
+ * with XID >= xmax must belong to a parent that was not yet committed at
+ * the time of this snapshot.
*/
if (TransactionIdFollowsOrEquals(HeapTupleHeaderGetXmin(tuple),
snapshot->xmin))
@@ -1070,8 +1070,8 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
/*
* Has inserting transaction committed?
*
- * If the inserting transaction aborted, then the tuple was never visible to
- * any other transaction, so we can delete it immediately.
+ * If the inserting transaction aborted, then the tuple was never visible
+ * to any other transaction, so we can delete it immediately.
*/
if (!(tuple->t_infomask & HEAP_XMIN_COMMITTED))
{
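
The visibility comments above hinge on two cheap first-pass range checks against the snapshot before any per-XID lookup. A deliberately simplified sketch of just those checks, ignoring XID wraparound arithmetic and the subtransaction-to-parent conversion the real code performs:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int Xid;

typedef struct
{
	Xid		xmin;				/* all XIDs < xmin finished before the snapshot */
	Xid		xmax;				/* all XIDs >= xmax started after the snapshot */
	Xid	   *xip;				/* in-progress XIDs within [xmin, xmax) */
	int		xcnt;
} ToySnapshot;

/* true if xid was still in progress as of the snapshot */
static bool
xid_in_snapshot(Xid xid, const ToySnapshot *snap)
{
	if (xid < snap->xmin)
		return false;			/* surely finished before the snapshot */
	if (xid >= snap->xmax)
		return true;			/* surely not yet committed at snapshot time */
	for (int i = 0; i < snap->xcnt; i++)
		if (snap->xip[i] == xid)
			return true;		/* explicitly listed as in progress */
	return false;
}

int
main(void)
{
	Xid			running[] = {105, 108};
	ToySnapshot snap = {100, 110, running, 2};

	printf("xid 90:  %s\n", xid_in_snapshot(90, &snap) ? "in progress" : "done");
	printf("xid 105: %s\n", xid_in_snapshot(105, &snap) ? "in progress" : "done");
	printf("xid 120: %s\n", xid_in_snapshot(120, &snap) ? "in progress" : "done");
	return 0;
}
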
diff --git a/src/bin/initdb/initdb.c b/src/bin/initdb/initdb.c
index d79e4985a7a..6d27546d322 100644
--- a/src/bin/initdb/initdb.c
+++ b/src/bin/initdb/initdb.c
@@ -42,7 +42,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
* Portions taken from FreeBSD.
*
- * $PostgreSQL: pgsql/src/bin/initdb/initdb.c,v 1.99 2005/10/15 02:49:37 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/initdb/initdb.c,v 1.99.2.1 2005/11/22 18:23:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -537,10 +537,11 @@ mkdir_p(char *path, mode_t omode)
* existing directory, effects equivalent to those caused by the
* following command shall occur:
*
- * mkdir -p -m $(umask -S),u+wx $(dirname dir) && mkdir [-m mode] dir
+ * mkdir -p -m $(umask -S),u+wx $(dirname dir) && mkdir [-m mode]
+ * dir
*
- * We change the user's umask and then restore it, instead of doing
- * chmod's.
+ * We change the user's umask and then restore it, instead of
+ * doing chmod's.
*/
oumask = umask(0);
numask = oumask & ~(S_IWUSR | S_IXUSR);
@@ -1329,8 +1330,8 @@ bootstrap_template1(char *short_version)
/*
* Pass correct LC_xxx environment to bootstrap.
*
- * The shell script arranged to restore the LC settings afterwards, but there
- * doesn't seem to be any compelling reason to do that.
+ * The shell script arranged to restore the LC settings afterwards, but
+ * there doesn't seem to be any compelling reason to do that.
*/
snprintf(cmd, sizeof(cmd), "LC_COLLATE=%s", lc_collate);
putenv(xstrdup(cmd));
@@ -1555,8 +1556,8 @@ setup_depend(void)
* for instance) but generating only the minimum required set of
* dependencies seems hard.
*
- * Note that we deliberately do not pin the system views, which haven't
- * been created yet.
+ * Note that we deliberately do not pin the system views, which
+ * haven't been created yet.
*
* First delete any already-made entries; PINs override all else, and
* must be the only entries for their objects.
@@ -2651,8 +2652,8 @@ main(int argc, char *argv[])
/*
* Determine platform-specific config settings
*
- * Use reasonable values if kernel will let us, else scale back. Probe for
- * max_connections first since it is subject to more constraints than
+ * Use reasonable values if kernel will let us, else scale back. Probe
+ * for max_connections first since it is subject to more constraints than
* shared_buffers.
*/
diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c
index 7c12a72c6db..79e3b11da4d 100644
--- a/src/bin/pg_ctl/pg_ctl.c
+++ b/src/bin/pg_ctl/pg_ctl.c
@@ -4,7 +4,7 @@
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/pg_ctl/pg_ctl.c,v 1.61 2005/10/15 02:49:38 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_ctl/pg_ctl.c,v 1.61.2.1 2005/11/22 18:23:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -334,9 +334,9 @@ start_postmaster(void)
/*
* Win32 needs START /B rather than "&".
*
- * Win32 has a problem with START and quoted executable names. You must add a
- * "" as the title at the beginning so you can quote the executable name:
- * http://www.winnetmag.com/Article/ArticleID/14589/14589.html
+ * Win32 has a problem with START and quoted executable names. You must
+ * add a "" as the title at the beginning so you can quote the executable
+ * name: http://www.winnetmag.com/Article/ArticleID/14589/14589.html
* http://dev.remotenetworktechnology.com/cmd/cmdfaq.htm
*/
if (log_file != NULL)
diff --git a/src/bin/pg_dump/dumputils.c b/src/bin/pg_dump/dumputils.c
index aa999173b2b..962489a2876 100644
--- a/src/bin/pg_dump/dumputils.c
+++ b/src/bin/pg_dump/dumputils.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.c,v 1.20 2005/10/15 02:49:38 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.c,v 1.20.2.1 2005/11/22 18:23:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -261,10 +261,10 @@ parsePGArray(const char *atext, char ***itemarray, int *nitems)
* either raw data, or surrounded by double quotes (in which case embedded
* characters including backslashes and quotes are backslashed).
*
- * We build the result as an array of pointers followed by the actual string
- * data, all in one malloc block for convenience of deallocation. The
- * worst-case storage need is not more than one pointer and one character
- * for each input character (consider "{,,,,,,,,,,}").
+ * We build the result as an array of pointers followed by the actual
+ * string data, all in one malloc block for convenience of deallocation.
+ * The worst-case storage need is not more than one pointer and one
+ * character for each input character (consider "{,,,,,,,,,,}").
*/
*itemarray = NULL;
*nitems = 0;
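
The allocation strategy described in the comment above (one malloc block holding the pointer array up front and the string data behind it, so a single free releases everything) can be shown with a toy splitter. This version only splits on commas; the real parsePGArray also handles quotes and backslashes, so treat it as an illustration of the memory layout only:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int
split_csv(const char *text, char ***itemarray)
{
	int			nitems = 1;
	const char *p;

	for (p = text; *p; p++)
		if (*p == ',')
			nitems++;

	/* worst case: one pointer plus one byte per input character */
	char	  **items = malloc(nitems * sizeof(char *) + strlen(text) + 1);

	if (items == NULL)
		return -1;

	char	   *data = (char *) (items + nitems);	/* string area after pointers */
	int			i = 0;

	strcpy(data, text);
	items[i++] = data;
	for (char *q = data; *q; q++)
	{
		if (*q == ',')
		{
			*q = '\0';
			items[i++] = q + 1;
		}
	}
	*itemarray = items;
	return nitems;
}

int
main(void)
{
	char	  **items;
	int			n = split_csv("red,green,blue", &items);

	for (int i = 0; i < n; i++)
		printf("item %d: %s\n", i, items[i]);
	free(items);				/* one block frees pointers and data alike */
	return 0;
}
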
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 54eb9769200..d62c542206a 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.117 2005/10/15 02:49:38 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.117.2.1 2005/11/22 18:23:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -325,8 +325,9 @@ RestoreArchive(Archive *AHX, RestoreOptions *ropt)
* within a database connection. Pre 1.3 archives can
* not use DB connections and are sent to output only.
*
- * For V1.3+, the table data MUST have a copy statement
- * so that we can go into appropriate mode with libpq.
+ * For V1.3+, the table data MUST have a copy
+ * statement so that we can go into appropriate mode
+ * with libpq.
*/
if (te->copyStmt && strlen(te->copyStmt) > 0)
ahprintf(AH, "%s", te->copyStmt);
@@ -1276,8 +1277,8 @@ ReadOffset(ArchiveHandle *AH, off_t *o)
* Read the flag indicating the state of the data pointer. Check if valid
* and die if not.
*
- * This used to be handled by a negative or zero pointer, now we use an extra
- * byte specifically for the state.
+ * This used to be handled by a negative or zero pointer, now we use an
+ * extra byte specifically for the state.
*/
offsetFlg = (*AH->ReadBytePtr) (AH) & 0xFF;
@@ -1566,8 +1567,8 @@ _allocAH(const char *FileSpec, const ArchiveFormat fmt,
/*
* Not used; maybe later....
*
- * AH->workDir = strdup(FileSpec); for(i=strlen(FileSpec) ; i > 0 ; i--)
- * if (AH->workDir[i-1] == '/')
+ * AH->workDir = strdup(FileSpec); for(i=strlen(FileSpec) ; i > 0 ;
+ * i--) if (AH->workDir[i-1] == '/')
*/
}
else
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 895d156317e..37c9bd6c193 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -12,7 +12,7 @@
* by PostgreSQL
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.422 2005/10/15 02:49:38 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.422.2.1 2005/11/22 18:23:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -865,26 +865,26 @@ dumpTableData_copy(Archive *fout, void *dcontext)
/*
* THROTTLE:
*
- * There was considerable discussion in late July, 2000 regarding slowing
- * down pg_dump when backing up large tables. Users with both slow &
- * fast (muti-processor) machines experienced performance degradation
- * when doing a backup.
+ * There was considerable discussion in late July, 2000 regarding
+ * slowing down pg_dump when backing up large tables. Users with both
+ * slow & fast (muti-processor) machines experienced performance
+ * degradation when doing a backup.
*
- * Initial attempts based on sleeping for a number of ms for each ms of
- * work were deemed too complex, then a simple 'sleep in each loop'
+ * Initial attempts based on sleeping for a number of ms for each ms
+ * of work were deemed too complex, then a simple 'sleep in each loop'
* implementation was suggested. The latter failed because the loop
* was too tight. Finally, the following was implemented:
*
- * If throttle is non-zero, then See how long since the last sleep. Work
- * out how long to sleep (based on ratio). If sleep is more than
+ * If throttle is non-zero, then See how long since the last sleep.
+ * Work out how long to sleep (based on ratio). If sleep is more than
* 100ms, then sleep reset timer EndIf EndIf
*
- * where the throttle value was the number of ms to sleep per ms of work.
- * The calculation was done in each loop.
+ * where the throttle value was the number of ms to sleep per ms of
+ * work. The calculation was done in each loop.
*
- * Most of the hard work is done in the backend, and this solution still
- * did not work particularly well: on slow machines, the ratio was
- * 50:1, and on medium paced machines, 1:1, and on fast
+ * Most of the hard work is done in the backend, and this solution
+ * still did not work particularly well: on slow machines, the ratio
+ * was 50:1, and on medium paced machines, 1:1, and on fast
* multi-processor machines, it had little or no effect, for reasons
* that were unclear.
*
@@ -1015,9 +1015,9 @@ dumpTableData_insert(Archive *fout, void *dcontext)
* strtod() and friends might accept NaN, so we
* can't use that to test.
*
- * In reality we only need to defend against infinity
- * and NaN, so we need not get too crazy about
- * pattern matching here.
+ * In reality we only need to defend against
+ * infinity and NaN, so we need not get too crazy
+ * about pattern matching here.
*/
const char *s = PQgetvalue(res, tuple, field);
@@ -2435,21 +2435,21 @@ getTables(int *numTables)
/*
* Find all the tables (including views and sequences).
*
- * We include system catalogs, so that we can work if a user table is defined
- * to inherit from a system catalog (pretty weird, but...)
+ * We include system catalogs, so that we can work if a user table is
+ * defined to inherit from a system catalog (pretty weird, but...)
*
* We ignore tables that are not type 'r' (ordinary relation), 'S'
* (sequence), 'v' (view), or 'c' (composite type).
*
- * Composite-type table entries won't be dumped as such, but we have to make
- * a DumpableObject for them so that we can track dependencies of the
+ * Composite-type table entries won't be dumped as such, but we have to
+ * make a DumpableObject for them so that we can track dependencies of the
* composite type (pg_depend entries for columns of the composite type
* link to the pg_class entry not the pg_type entry).
*
- * Note: in this phase we should collect only a minimal amount of information
- * about each table, basically just enough to decide if it is interesting.
- * We must fetch all tables in this phase because otherwise we cannot
- * correctly identify inherited columns, serial columns, etc.
+ * Note: in this phase we should collect only a minimal amount of
+ * information about each table, basically just enough to decide if it is
+ * interesting. We must fetch all tables in this phase because otherwise
+ * we cannot correctly identify inherited columns, serial columns, etc.
*/
if (g_fout->remoteVersion >= 80000)
@@ -6907,8 +6907,8 @@ dumpTableSchema(Archive *fout, TableInfo *tbinfo)
/*
* Not Null constraint --- suppress if inherited
*
- * Note: we could suppress this for serial columns since SERIAL
- * implies NOT NULL. We choose not to for forward
+ * Note: we could suppress this for serial columns since
+ * SERIAL implies NOT NULL. We choose not to for forward
* compatibility, since there has been some talk of making
* SERIAL not imply NOT NULL, in which case the explicit
* specification would be needed.
@@ -7575,12 +7575,12 @@ dumpSequence(Archive *fout, TableInfo *tbinfo)
/*
* The logic we use for restoring sequences is as follows:
*
- * Add a basic CREATE SEQUENCE statement (use last_val for start if called is
- * false, else use min_val for start_val). Skip this if the sequence came
- * from a SERIAL column.
+ * Add a basic CREATE SEQUENCE statement (use last_val for start if called
+ * is false, else use min_val for start_val). Skip this if the sequence
+ * came from a SERIAL column.
*
- * Add a 'SETVAL(seq, last_val, iscalled)' at restore-time iff we load data.
- * We do this for serial sequences too.
+ * Add a 'SETVAL(seq, last_val, iscalled)' at restore-time iff we load
+ * data. We do this for serial sequences too.
*/
if (!dataOnly && !OidIsValid(tbinfo->owning_tab))
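
The throttle scheme recounted in the comment above reduces to: measure the work time elapsed since the last sleep, owe throttle times that much sleep, and only pay it once it exceeds 100 ms. A standalone sketch under those assumptions (names are illustrative; this is not pg_dump's code path):

#include <stdio.h>
#include <time.h>

static double			throttle = 0.5;		/* ms of sleep per ms of work */
static struct timespec	last_sleep;

static double
elapsed_ms(const struct timespec *since)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - since->tv_sec) * 1000.0 +
		   (now.tv_nsec - since->tv_nsec) / 1.0e6;
}

static void
maybe_throttle(void)
{
	if (throttle <= 0.0)
		return;

	double		sleep_ms = elapsed_ms(&last_sleep) * throttle;

	if (sleep_ms > 100.0)
	{
		struct timespec ts;

		ts.tv_sec = (time_t) (sleep_ms / 1000.0);
		ts.tv_nsec = (long) ((sleep_ms - ts.tv_sec * 1000.0) * 1.0e6);
		nanosleep(&ts, NULL);
		clock_gettime(CLOCK_MONOTONIC, &last_sleep);	/* reset timer */
	}
}

int
main(void)
{
	clock_gettime(CLOCK_MONOTONIC, &last_sleep);
	for (int i = 0; i < 1000000; i++)
		maybe_throttle();		/* call once per unit of real work */
	puts("done");
	return 0;
}
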
diff --git a/src/bin/pg_dump/pg_dump_sort.c b/src/bin/pg_dump/pg_dump_sort.c
index 7e91d9bb79b..1a15b68c282 100644
--- a/src/bin/pg_dump/pg_dump_sort.c
+++ b/src/bin/pg_dump/pg_dump_sort.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump_sort.c,v 1.11 2005/10/15 02:49:39 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump_sort.c,v 1.11.2.1 2005/11/22 18:23:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -305,10 +305,10 @@ TopoSort(DumpableObject **objs,
* Now initialize the heap of items-ready-to-output by filling it with the
* indexes of items that already have beforeConstraints[id] == 0.
*
- * The essential property of a heap is heap[(j-1)/2] >= heap[j] for each j in
- * the range 1..heapLength-1 (note we are using 0-based subscripts here,
- * while the discussion in Knuth assumes 1-based subscripts). So, if we
- * simply enter the indexes into pendingHeap[] in decreasing order, we
+ * The essential property of a heap is heap[(j-1)/2] >= heap[j] for each j
+ * in the range 1..heapLength-1 (note we are using 0-based subscripts
+ * here, while the discussion in Knuth assumes 1-based subscripts). So, if
+ * we simply enter the indexes into pendingHeap[] in decreasing order, we
* a-fortiori have the heap invariant satisfied at completion of this
* loop, and don't need to do any sift-up comparisons.
*/
@@ -469,8 +469,8 @@ findDependencyLoops(DumpableObject **objs, int nObjs, int totObjs)
* representation. After we identify and process a loop, we can add it to
* the initial part of the workspace just by moving the boundary pointer.
*
- * When we determine that an object is not part of any interesting loop, we
- * also add it to the initial part of the workspace. This is not
+ * When we determine that an object is not part of any interesting loop,
+ * we also add it to the initial part of the workspace. This is not
* necessary for correctness, but saves later invocations of findLoop()
* from uselessly chasing references to such an object.
*
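
The heap invariant quoted above, heap[(j-1)/2] >= heap[j] for j >= 1 with 0-based subscripts, is easy to check in isolation, and filling the array in decreasing order satisfies it with no sift-up work, which is exactly the point the comment makes. A small illustrative max-heap (not the pg_dump_sort code itself):

#include <assert.h>
#include <stdio.h>

static int	heap[16];
static int	heapLength = 0;

static int
remove_max(void)
{
	int			top = heap[0];
	int			val = heap[--heapLength];
	int			i = 0;

	/* sift the former last element down from the root */
	for (;;)
	{
		int			child = 2 * i + 1;

		if (child >= heapLength)
			break;
		if (child + 1 < heapLength && heap[child + 1] > heap[child])
			child++;
		if (val >= heap[child])
			break;
		heap[i] = heap[child];
		i = child;
	}
	heap[i] = val;
	return top;
}

int
main(void)
{
	/* enter values in decreasing order: the invariant holds a fortiori */
	for (int v = 9; v >= 0; v--)
		heap[heapLength++] = v;
	for (int j = 1; j < heapLength; j++)
		assert(heap[(j - 1) / 2] >= heap[j]);

	while (heapLength > 0)
		printf("%d ", remove_max());
	printf("\n");
	return 0;
}
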
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index bddbd7e539f..80d4c91c009 100644
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/common.c,v 1.110 2005/11/04 18:35:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/common.c,v 1.110.2.1 2005/11/22 18:23:27 momjian Exp $
*/
#include "postgres_fe.h"
#include "common.h"
@@ -268,7 +268,6 @@ handle_sigint(SIGNAL_ARGS)
}
errno = save_errno; /* just in case the write changed it */
}
-
#else /* WIN32 */
static BOOL WINAPI
@@ -323,7 +322,6 @@ setup_cancel_handler(void)
done = true;
}
}
-
#endif /* WIN32 */
@@ -531,9 +529,9 @@ ReportSyntaxErrorPosition(const PGresult *result, const char *query)
* want to think about coping with their variable screen width, but
* not today.)
*
- * Extract line number and begin and end indexes of line containing error
- * location. There will not be any newlines or carriage returns in
- * the selected extract.
+ * Extract line number and begin and end indexes of line containing
+ * error location. There will not be any newlines or carriage returns
+ * in the selected extract.
*/
for (i = 0; i < clen; i++)
{
@@ -1217,8 +1215,8 @@ command_no_begin(const char *query)
* gives rise to a TransactionStmt in the backend grammar, except for the
* savepoint-related commands.
*
- * (We assume that START must be START TRANSACTION, since there is presently
- * no other "START foo" command.)
+ * (We assume that START must be START TRANSACTION, since there is
+ * presently no other "START foo" command.)
*/
if (wordlen == 5 && pg_strncasecmp(query, "abort", 5) == 0)
return true;
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index aefb6041abc..74a3c8afe0c 100644
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/describe.c,v 1.129 2005/10/27 13:34:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/describe.c,v 1.129.2.1 2005/11/22 18:23:27 momjian Exp $
*/
#include "postgres_fe.h"
#include "describe.h"
@@ -1843,8 +1843,8 @@ processNamePattern(PQExpBuffer buf, const char *pattern,
/*
* Ordinary data character, transfer to pattern
*
- * Inside double quotes, or at all times if parsing an operator name,
- * quote regexp special characters with a backslash to avoid
+ * Inside double quotes, or at all times if parsing an operator
+ * name, quote regexp special characters with a backslash to avoid
* regexp errors. Outside quotes, however, let them pass through
* as-is; this lets knowledgeable users build regexp expressions
* that are more powerful than shell-style patterns.
diff --git a/src/bin/psql/startup.c b/src/bin/psql/startup.c
index 7c374db77c9..5dc5aa65e97 100644
--- a/src/bin/psql/startup.c
+++ b/src/bin/psql/startup.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/startup.c,v 1.126.2.1 2005/11/17 23:49:44 adunstan Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/startup.c,v 1.126.2.2 2005/11/22 18:23:27 momjian Exp $
*/
#include "postgres_fe.h"
@@ -206,7 +206,7 @@ main(int argc, char *argv[])
{
need_pass = false;
pset.db = PQsetdbLogin(options.host, options.port, NULL, NULL,
- options.action == ACT_LIST_DB && options.dbname == NULL ?
+ options.action == ACT_LIST_DB && options.dbname == NULL ?
"postgres" : options.dbname,
username, password);
diff --git a/src/include/access/tuptoaster.h b/src/include/access/tuptoaster.h
index d45ff3ac42a..04bb7da18d4 100644
--- a/src/include/access/tuptoaster.h
+++ b/src/include/access/tuptoaster.h
@@ -6,7 +6,7 @@
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/access/tuptoaster.h,v 1.23.2.1 2005/11/20 18:38:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/tuptoaster.h,v 1.23.2.2 2005/11/22 18:23:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -72,7 +72,7 @@
* ----------
*/
extern HeapTuple toast_insert_or_update(Relation rel,
- HeapTuple newtup, HeapTuple oldtup);
+ HeapTuple newtup, HeapTuple oldtup);
/* ----------
* toast_delete -
diff --git a/src/include/catalog/pg_constraint.h b/src/include/catalog/pg_constraint.h
index 71fb126638b..fdfc006a7c0 100644
--- a/src/include/catalog/pg_constraint.h
+++ b/src/include/catalog/pg_constraint.h
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_constraint.h,v 1.18 2005/10/15 02:49:42 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_constraint.h,v 1.18.2.1 2005/11/22 18:23:27 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
@@ -61,8 +61,8 @@ CATALOG(pg_constraint,2606)
* contypid links to the pg_type row for a domain if this is a domain
* constraint. Otherwise it's 0.
*
- * For SQL-style global ASSERTIONs, both conrelid and contypid would be zero.
- * This is not presently supported, however.
+ * For SQL-style global ASSERTIONs, both conrelid and contypid would be
+ * zero. This is not presently supported, however.
*/
Oid contypid; /* domain this constraint constrains */
diff --git a/src/include/catalog/pg_control.h b/src/include/catalog/pg_control.h
index 847ad08b2ef..c8fff121f56 100644
--- a/src/include/catalog/pg_control.h
+++ b/src/include/catalog/pg_control.h
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_control.h,v 1.25 2005/10/15 02:49:42 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_control.h,v 1.25.2.1 2005/11/22 18:23:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -87,9 +87,9 @@ typedef struct ControlFileData
* pg_control_version identifies the format of pg_control itself.
* catalog_version_no identifies the format of the system catalogs.
*
- * There are additional version identifiers in individual files; for example,
- * WAL logs contain per-page magic numbers that can serve as version cues
- * for the WAL log.
+ * There are additional version identifiers in individual files; for
+ * example, WAL logs contain per-page magic numbers that can serve as
+ * version cues for the WAL log.
*/
uint32 pg_control_version; /* PG_CONTROL_VERSION */
uint32 catalog_version_no; /* see catversion.h */
diff --git a/src/include/catalog/pg_shdepend.h b/src/include/catalog/pg_shdepend.h
index de4f6eb0d46..9049383cc0a 100644
--- a/src/include/catalog/pg_shdepend.h
+++ b/src/include/catalog/pg_shdepend.h
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_shdepend.h,v 1.2 2005/10/15 02:49:44 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_shdepend.h,v 1.2.2.1 2005/11/22 18:23:27 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
@@ -37,8 +37,8 @@ CATALOG(pg_shdepend,1214) BKI_SHARED_RELATION BKI_WITHOUT_OIDS
/*
* Identification of the dependent (referencing) object.
*
- * These fields are all zeroes for a DEPENDENCY_PIN entry. Also, dbid can be
- * zero to denote a shared object.
+ * These fields are all zeroes for a DEPENDENCY_PIN entry. Also, dbid can
+ * be zero to denote a shared object.
*/
Oid dbid; /* OID of database containing object */
Oid classid; /* OID of table containing object */
diff --git a/src/include/catalog/pg_type.h b/src/include/catalog/pg_type.h
index 0e3dd006c43..ba3dc0d5979 100644
--- a/src/include/catalog/pg_type.h
+++ b/src/include/catalog/pg_type.h
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_type.h,v 1.166 2005/10/15 02:49:44 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_type.h,v 1.166.2.1 2005/11/22 18:23:27 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
@@ -148,8 +148,8 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP
/*
* This flag represents a "NOT NULL" constraint against this datatype.
*
- * If true, the attnotnull column for a corresponding table column using this
- * datatype will always enforce the NOT NULL constraint.
+ * If true, the attnotnull column for a corresponding table column using
+ * this datatype will always enforce the NOT NULL constraint.
*
* Used primarily for domain types.
*/
diff --git a/src/include/funcapi.h b/src/include/funcapi.h
index 8357cdd6ede..c4193ba5af8 100644
--- a/src/include/funcapi.h
+++ b/src/include/funcapi.h
@@ -9,7 +9,7 @@
*
* Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/funcapi.h,v 1.20 2005/10/15 02:49:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/funcapi.h,v 1.20.2.1 2005/11/22 18:23:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -67,9 +67,9 @@ typedef struct FuncCallContext
/*
* OPTIONAL maximum number of calls
*
- * max_calls is here for convenience only and setting it is optional. If not
- * set, you must provide alternative means to know when the function is
- * done.
+ * max_calls is here for convenience only and setting it is optional. If
+ * not set, you must provide alternative means to know when the function
+ * is done.
*/
uint32 max_calls;
@@ -84,25 +84,25 @@ typedef struct FuncCallContext
/*
* OPTIONAL pointer to miscellaneous user-provided context information
*
- * user_fctx is for use as a pointer to your own struct to retain arbitrary
- * context information between calls of your function.
+ * user_fctx is for use as a pointer to your own struct to retain
+ * arbitrary context information between calls of your function.
*/
void *user_fctx;
/*
* OPTIONAL pointer to struct containing attribute type input metadata
*
- * attinmeta is for use when returning tuples (i.e. composite data types) and
- * is not used when returning base data types. It is only needed if you
- * intend to use BuildTupleFromCStrings() to create the return tuple.
+ * attinmeta is for use when returning tuples (i.e. composite data types)
+ * and is not used when returning base data types. It is only needed if
+ * you intend to use BuildTupleFromCStrings() to create the return tuple.
*/
AttInMetadata *attinmeta;
/*
* memory context used for structures that must live for multiple calls
*
- * multi_call_memory_ctx is set by SRF_FIRSTCALL_INIT() for you, and used by
- * SRF_RETURN_DONE() for cleanup. It is the most appropriate memory
+ * multi_call_memory_ctx is set by SRF_FIRSTCALL_INIT() for you, and used
+ * by SRF_RETURN_DONE() for cleanup. It is the most appropriate memory
* context for any memory that is to be reused across multiple calls of
* the SRF.
*/
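For orientation while reading the re-wrapped FuncCallContext comments, here is a minimal set-returning function that uses those fields. It is a generic sketch built from the standard funcapi.h SRF macros (SRF_FIRSTCALL_INIT, SRF_PERCALL_SETUP, SRF_RETURN_NEXT, SRF_RETURN_DONE); the function itself is hypothetical and not part of this commit.

#include "postgres.h"
#include "funcapi.h"

PG_FUNCTION_INFO_V1(count_to_n);

/* Hypothetical SRF: returns the integers 1..n, one per call. */
Datum
count_to_n(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;

    if (SRF_IS_FIRSTCALL())
    {
        MemoryContext oldcontext;

        funcctx = SRF_FIRSTCALL_INIT();

        /* Any state that must survive across calls would be allocated in
         * multi_call_memory_ctx; here we only record max_calls. */
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
        funcctx->max_calls = PG_GETARG_INT32(0);
        MemoryContextSwitchTo(oldcontext);
    }

    funcctx = SRF_PERCALL_SETUP();

    if (funcctx->call_cntr < funcctx->max_calls)
        SRF_RETURN_NEXT(funcctx, Int32GetDatum((int32) funcctx->call_cntr + 1));
    else
        SRF_RETURN_DONE(funcctx);
}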
diff --git a/src/include/libpq/crypt.h b/src/include/libpq/crypt.h
index aeb86b4c76d..8beff9ba9fd 100644
--- a/src/include/libpq/crypt.h
+++ b/src/include/libpq/crypt.h
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/libpq/crypt.h,v 1.32 2005/10/17 16:24:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/crypt.h,v 1.32.2.1 2005/11/22 18:23:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,6 +28,6 @@ extern int md5_crypt_verify(const Port *port, const char *user,
/* in md5.c --- these are also present in frontend libpq */
extern bool pg_md5_hash(const void *buff, size_t len, char *hexsum);
extern bool pg_md5_encrypt(const char *passwd, const char *salt,
- size_t salt_len, char *buf);
+ size_t salt_len, char *buf);
#endif
diff --git a/src/include/libpq/ip.h b/src/include/libpq/ip.h
index 57858934bd2..395f21a0f5e 100644
--- a/src/include/libpq/ip.h
+++ b/src/include/libpq/ip.h
@@ -5,7 +5,7 @@
*
* Copyright (c) 2003-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/libpq/ip.h,v 1.15 2005/10/17 16:24:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/ip.h,v 1.15.2.1 2005/11/22 18:23:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -17,25 +17,25 @@
extern int pg_getaddrinfo_all(const char *hostname, const char *servname,
- const struct addrinfo *hintp,
- struct addrinfo **result);
-extern void pg_freeaddrinfo_all(int hint_ai_family, struct addrinfo *ai);
+ const struct addrinfo * hintp,
+ struct addrinfo ** result);
+extern void pg_freeaddrinfo_all(int hint_ai_family, struct addrinfo * ai);
-extern int pg_getnameinfo_all(const struct sockaddr_storage *addr, int salen,
- char *node, int nodelen,
- char *service, int servicelen,
- int flags);
+extern int pg_getnameinfo_all(const struct sockaddr_storage * addr, int salen,
+ char *node, int nodelen,
+ char *service, int servicelen,
+ int flags);
-extern int pg_range_sockaddr(const struct sockaddr_storage *addr,
- const struct sockaddr_storage *netaddr,
- const struct sockaddr_storage *netmask);
+extern int pg_range_sockaddr(const struct sockaddr_storage * addr,
+ const struct sockaddr_storage * netaddr,
+ const struct sockaddr_storage * netmask);
-extern int pg_sockaddr_cidr_mask(struct sockaddr_storage *mask,
- char *numbits, int family);
+extern int pg_sockaddr_cidr_mask(struct sockaddr_storage * mask,
+ char *numbits, int family);
#ifdef HAVE_IPV6
-extern void pg_promote_v4_to_v6_addr(struct sockaddr_storage *addr);
-extern void pg_promote_v4_to_v6_mask(struct sockaddr_storage *addr);
+extern void pg_promote_v4_to_v6_addr(struct sockaddr_storage * addr);
+extern void pg_promote_v4_to_v6_mask(struct sockaddr_storage * addr);
#endif
#ifdef HAVE_UNIX_SOCKETS
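Since this hunk only changes pointer spacing in the prototypes, a short usage sketch may help readers unfamiliar with them. The calling pattern below is an assumption based on the declarations shown (hypothetical function, trimmed error handling), not code from this commit.

#include "postgres.h"
#include <sys/socket.h>
#include "libpq/ip.h"

/* Hypothetical caller: resolve a host/port and walk the results. */
static int
resolve_listen_address(const char *host, const char *port)
{
    struct addrinfo hint;
    struct addrinfo *addrs = NULL;
    struct addrinfo *addr;

    MemSet(&hint, 0, sizeof(hint));
    hint.ai_family = AF_UNSPEC;
    hint.ai_socktype = SOCK_STREAM;

    if (pg_getaddrinfo_all(host, port, &hint, &addrs) != 0 || addrs == NULL)
        return -1;              /* resolution failed */

    for (addr = addrs; addr != NULL; addr = addr->ai_next)
    {
        /* a real caller would socket()/bind()/connect() here */
    }

    pg_freeaddrinfo_all(hint.ai_family, addrs);
    return 0;
}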
diff --git a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h
index 8d7f88d1352..3a7e5b35218 100644
--- a/src/include/libpq/libpq-be.h
+++ b/src/include/libpq/libpq-be.h
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/libpq/libpq-be.h,v 1.53 2005/11/05 03:04:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/libpq-be.h,v 1.53.2.1 2005/11/22 18:23:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -85,9 +85,9 @@ typedef struct Port
/*
* TCP keepalive settings.
*
- * default values are 0 if AF_UNIX or not yet known; current values are 0 if
- * AF_UNIX or using the default. Also, -1 in a default value means we were
- * unable to find out the default (getsockopt failed).
+ * default values are 0 if AF_UNIX or not yet known; current values are 0
+ * if AF_UNIX or using the default. Also, -1 in a default value means we
+ * were unable to find out the default (getsockopt failed).
*/
int default_keepalives_idle;
int default_keepalives_interval;
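A hedged sketch of the convention this comment documents (0 for AF_UNIX/unknown/default, -1 when getsockopt() fails). The option name TCP_KEEPIDLE is platform-dependent and the helper is hypothetical; it is not the backend's actual implementation.

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

/* Hypothetical helper: fetch the kernel default for keepalive idle time,
 * returning -1 if getsockopt() fails, matching the convention above. */
static int
fetch_default_keepalives_idle(int sock)
{
#ifdef TCP_KEEPIDLE
    int         value;
    socklen_t   size = sizeof(value);

    if (getsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE, (char *) &value, &size) < 0)
        return -1;              /* could not find out the default */
    return value;
#else
    return 0;                   /* option not available on this platform */
#endif
}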
diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h
index fc13891e414..b7050bb35fe 100644
--- a/src/include/miscadmin.h
+++ b/src/include/miscadmin.h
@@ -13,7 +13,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/miscadmin.h,v 1.183 2005/10/25 15:15:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/miscadmin.h,v 1.183.2.1 2005/11/22 18:23:27 momjian Exp $
*
* NOTES
* some of the information in this file should be moved to other files.
@@ -83,7 +83,6 @@ do { \
if (InterruptPending) \
ProcessInterrupts(); \
} while(0)
-
#else /* WIN32 */
#define CHECK_FOR_INTERRUPTS() \
@@ -93,7 +92,6 @@ do { \
if (InterruptPending) \
ProcessInterrupts(); \
} while(0)
-
#endif /* WIN32 */
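The CHECK_FOR_INTERRUPTS() macro being re-indented here is typically called inside long-running loops; a generic usage sketch follows (hypothetical function, not from this commit).

#include "postgres.h"
#include "miscadmin.h"

/* Hypothetical long-running loop that stays responsive to cancel/die. */
static void
process_many_items(int nitems)
{
    int         i;

    for (i = 0; i < nitems; i++)
    {
        CHECK_FOR_INTERRUPTS(); /* service any pending cancel/die request */
        /* ... do one unit of work here ... */
    }
}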
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 0c96e7545f9..0aead5234c0 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/execnodes.h,v 1.139.2.1 2005/11/14 17:43:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/execnodes.h,v 1.139.2.2 2005/11/22 18:23:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -343,10 +343,10 @@ typedef struct EState
bool es_useEvalPlan; /* evaluating EPQ tuples? */
/*
- * this field added at end of struct to avoid post-release ABI breakage
- * in 8.1 series. It'll be in a more logical place in 8.2.
+ * this field added at end of struct to avoid post-release ABI breakage in
+ * 8.1 series. It'll be in a more logical place in 8.2.
*/
- TupleTableSlot *es_trig_tuple_slot; /* for trigger output tuples */
+ TupleTableSlot *es_trig_tuple_slot; /* for trigger output tuples */
} EState;
@@ -495,9 +495,9 @@ typedef struct FuncExprState
* We also need to store argument values across calls when evaluating a
* function-returning-set.
*
- * setArgsValid is true when we are evaluating a set-valued function and we
- * are in the middle of a call series; we want to pass the same argument
- * values to the function again (and again, until it returns
+ * setArgsValid is true when we are evaluating a set-valued function and
+ * we are in the middle of a call series; we want to pass the same
+ * argument values to the function again (and again, until it returns
* ExprEndResult).
*/
bool setArgsValid;
diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h
index 327e4301ff9..ac789b8d118 100644
--- a/src/include/nodes/nodes.h
+++ b/src/include/nodes/nodes.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/nodes.h,v 1.176 2005/10/15 02:49:45 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/nodes.h,v 1.176.2.1 2005/11/22 18:23:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -141,8 +141,8 @@ typedef enum NodeTag
/*
* TAGS FOR EXPRESSION STATE NODES (execnodes.h)
*
- * These correspond (not always one-for-one) to primitive nodes derived from
- * Expr.
+ * These correspond (not always one-for-one) to primitive nodes derived
+ * from Expr.
*/
T_ExprState = 400,
T_GenericExprState,
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 5adb79ef6bc..4fff844db81 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/parsenodes.h,v 1.292 2005/10/26 19:21:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/parsenodes.h,v 1.292.2.1 2005/11/22 18:23:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -533,9 +533,9 @@ typedef struct RangeTblEntry
/*
* Fields valid for a join RTE (else NULL/zero):
*
- * joinaliasvars is a list of Vars or COALESCE expressions corresponding to
- * the columns of the join result. An alias Var referencing column K of
- * the join result can be replaced by the K'th element of joinaliasvars
+ * joinaliasvars is a list of Vars or COALESCE expressions corresponding
+ * to the columns of the join result. An alias Var referencing column K
+ * of the join result can be replaced by the K'th element of joinaliasvars
* --- but to simplify the task of reverse-listing aliases correctly, we
* do not do that until planning time. In a Query loaded from a stored
* rule, it is also possible for joinaliasvars items to be NULL Consts,
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 4a0ff51afde..7a8c31b31d3 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/plannodes.h,v 1.80 2005/10/15 02:49:45 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/plannodes.h,v 1.80.2.1 2005/11/22 18:23:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,13 +65,13 @@ typedef struct Plan
/*
* Information for management of parameter-change-driven rescanning
*
- * extParam includes the paramIDs of all external PARAM_EXEC params affecting
- * this plan node or its children. setParam params from the node's
- * initPlans are not included, but their extParams are.
+ * extParam includes the paramIDs of all external PARAM_EXEC params
+ * affecting this plan node or its children. setParam params from the
+ * node's initPlans are not included, but their extParams are.
*
- * allParam includes all the extParam paramIDs, plus the IDs of local params
- * that affect the node (i.e., the setParams of its initplans). These are
- * _all_ the PARAM_EXEC params that affect this node.
+ * allParam includes all the extParam paramIDs, plus the IDs of local
+ * params that affect the node (i.e., the setParams of its initplans).
+ * These are _all_ the PARAM_EXEC params that affect this node.
*/
Bitmapset *extParam;
Bitmapset *allParam;
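A hedged illustration of the extParam/allParam relationship described in the re-wrapped comment: allParam is extParam plus the setParams of the node's own initplans. The helper below is hypothetical (the real bookkeeping happens during planning) and assumes the initplan setParam IDs are supplied as a list of integers.

#include "postgres.h"
#include "nodes/bitmapset.h"
#include "nodes/pg_list.h"

/* Hypothetical: combine extParam with initplan setParam IDs into allParam. */
static Bitmapset *
compute_allparam(const Bitmapset *extParam, List *initplan_setparams)
{
    Bitmapset  *allParam = bms_copy(extParam);
    ListCell   *lc;

    foreach(lc, initplan_setparams)
        allParam = bms_add_member(allParam, lfirst_int(lc));

    return allParam;
}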
diff --git a/src/include/storage/buf_internals.h b/src/include/storage/buf_internals.h
index d1b0cc6dfde..8def60853f3 100644
--- a/src/include/storage/buf_internals.h
+++ b/src/include/storage/buf_internals.h
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/buf_internals.h,v 1.81.2.1 2005/11/17 17:42:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/buf_internals.h,v 1.81.2.2 2005/11/22 18:23:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -199,7 +199,7 @@ extern BufferDesc *LocalBufferAlloc(Relation reln, BlockNumber blockNum,
bool *foundPtr);
extern void WriteLocalBuffer(Buffer buffer, bool release);
extern void DropRelFileNodeLocalBuffers(RelFileNode rnode,
- BlockNumber firstDelBlock);
+ BlockNumber firstDelBlock);
extern void AtEOXact_LocalBuffers(bool isCommit);
#endif /* BUFMGR_INTERNALS_H */
diff --git a/src/include/tcop/dest.h b/src/include/tcop/dest.h
index 68c926a9a64..7a103618d8b 100644
--- a/src/include/tcop/dest.h
+++ b/src/include/tcop/dest.h
@@ -54,7 +54,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/tcop/dest.h,v 1.48 2005/11/03 17:11:40 alvherre Exp $
+ * $PostgreSQL: pgsql/src/include/tcop/dest.h,v 1.48.2.1 2005/11/22 18:23:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,12 +79,12 @@
*/
typedef enum
{
- DestNone, /* results are discarded */
- DestDebug, /* results go to debugging output */
- DestRemote, /* results sent to frontend process */
- DestRemoteExecute, /* sent to frontend, in Execute command */
- DestSPI, /* results sent to SPI manager */
- DestTuplestore /* results sent to Tuplestore */
+ DestNone, /* results are discarded */
+ DestDebug, /* results go to debugging output */
+ DestRemote, /* results sent to frontend process */
+ DestRemoteExecute, /* sent to frontend, in Execute command */
+ DestSPI, /* results sent to SPI manager */
+ DestTuplestore /* results sent to Tuplestore */
} CommandDest;
/* ----------------
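The CommandDest values realigned above are usually consumed by a simple switch; a hedged sketch (hypothetical helper, not the backend's dispatch code):

#include "postgres.h"
#include "tcop/dest.h"

/* Hypothetical helper mapping a CommandDest to a human-readable label. */
static const char *
command_dest_label(CommandDest dest)
{
    switch (dest)
    {
        case DestNone:
            return "results discarded";
        case DestDebug:
            return "debugging output";
        case DestRemote:
        case DestRemoteExecute:
            return "frontend";
        case DestSPI:
            return "SPI manager";
        case DestTuplestore:
            return "tuplestore";
    }
    return "unknown";           /* not reached; keeps compilers quiet */
}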
diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h
index 1ec358f3849..b4961f13111 100644
--- a/src/include/utils/builtins.h
+++ b/src/include/utils/builtins.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/builtins.h,v 1.267 2005/10/18 20:38:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/builtins.h,v 1.267.2.1 2005/11/22 18:23:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -594,7 +594,7 @@ extern bool SplitIdentifierString(char *rawstring, char separator,
List **namelist);
extern Datum replace_text(PG_FUNCTION_ARGS);
extern text *replace_text_regexp(text *src_text, void *regexp,
- text *replace_text, bool glob);
+ text *replace_text, bool glob);
extern Datum split_text(PG_FUNCTION_ARGS);
extern Datum text_to_array(PG_FUNCTION_ARGS);
extern Datum array_to_text(PG_FUNCTION_ARGS);
@@ -753,7 +753,7 @@ extern Datum numeric_in(PG_FUNCTION_ARGS);
extern Datum numeric_out(PG_FUNCTION_ARGS);
extern Datum numeric_recv(PG_FUNCTION_ARGS);
extern Datum numeric_send(PG_FUNCTION_ARGS);
-extern Datum numeric(PG_FUNCTION_ARGS);
+extern Datum numeric (PG_FUNCTION_ARGS);
extern Datum numeric_abs(PG_FUNCTION_ARGS);
extern Datum numeric_uminus(PG_FUNCTION_ARGS);
extern Datum numeric_uplus(PG_FUNCTION_ARGS);
diff --git a/src/include/utils/catcache.h b/src/include/utils/catcache.h
index 6fb358b8135..214b2df468e 100644
--- a/src/include/utils/catcache.h
+++ b/src/include/utils/catcache.h
@@ -13,7 +13,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/catcache.h,v 1.56 2005/10/15 02:49:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/catcache.h,v 1.56.2.1 2005/11/22 18:23:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -98,8 +98,8 @@ typedef struct catctup
* refcount must go to zero, too; also, remember to mark the list dead at
* the same time the tuple is marked.)
*
- * A negative cache entry is an assertion that there is no tuple matching a
- * particular key. This is just as useful as a normal entry so far as
+ * A negative cache entry is an assertion that there is no tuple matching
+ * a particular key. This is just as useful as a normal entry so far as
* avoiding catalog searches is concerned. Management of positive and
* negative entries is identical.
*/
@@ -125,14 +125,14 @@ typedef struct catclist
* table rows satisfying the partial key. (Note: none of these will be
* negative cache entries.)
*
- * A CatCList is only a member of a per-cache list; we do not do separate LRU
- * management for CatCLists. See CatalogCacheCleanup() for the details of
- * the management algorithm.
+ * A CatCList is only a member of a per-cache list; we do not do separate
+ * LRU management for CatCLists. See CatalogCacheCleanup() for the
+ * details of the management algorithm.
*
- * A list marked "dead" must not be returned by subsequent searches. However,
- * it won't be physically deleted from the cache until its refcount goes
- * to zero. (A list should be marked dead if any of its member entries
- * are dead.)
+ * A list marked "dead" must not be returned by subsequent searches.
+ * However, it won't be physically deleted from the cache until its
+ * refcount goes to zero. (A list should be marked dead if any of its
+ * member entries are dead.)
*
* If "ordered" is true then the member tuples appear in the order of the
* cache's underlying index. This will be true in normal operation, but
diff --git a/src/include/utils/typcache.h b/src/include/utils/typcache.h
index 64fe33d81d6..b4e6bbc80cf 100644
--- a/src/include/utils/typcache.h
+++ b/src/include/utils/typcache.h
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/typcache.h,v 1.8 2005/10/15 02:49:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/typcache.h,v 1.8.2.1 2005/11/22 18:23:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,8 +35,8 @@ typedef struct TypeCacheEntry
/*
* Information obtained from opclass entries
*
- * These will be InvalidOid if no match could be found, or if the information
- * hasn't yet been requested.
+ * These will be InvalidOid if no match could be found, or if the
+ * information hasn't yet been requested.
*/
Oid btree_opc; /* OID of the default btree opclass */
Oid hash_opc; /* OID of the default hash opclass */
diff --git a/src/interfaces/ecpg/compatlib/informix.c b/src/interfaces/ecpg/compatlib/informix.c
index d0e884f93b9..0cb45100429 100644
--- a/src/interfaces/ecpg/compatlib/informix.c
+++ b/src/interfaces/ecpg/compatlib/informix.c
@@ -15,7 +15,7 @@
char *ECPGalloc(long, int);
static int
-deccall2(decimal * arg1, decimal * arg2, int (*ptr) (numeric *, numeric *))
+deccall2(decimal *arg1, decimal *arg2, int (*ptr) (numeric *, numeric *))
{
numeric *a1,
*a2;
@@ -53,7 +53,7 @@ deccall2(decimal * arg1, decimal * arg2, int (*ptr) (numeric *, numeric *))
}
static int
-deccall3(decimal * arg1, decimal * arg2, decimal * result, int (*ptr) (numeric *, numeric *, numeric *))
+deccall3(decimal *arg1, decimal *arg2, decimal *result, int (*ptr) (numeric *, numeric *, numeric *))
{
numeric *a1,
*a2,
@@ -118,7 +118,7 @@ deccall3(decimal * arg1, decimal * arg2, decimal * result, int (*ptr) (numeric *
/* we start with the numeric functions */
int
-decadd(decimal * arg1, decimal * arg2, decimal * sum)
+decadd(decimal *arg1, decimal *arg2, decimal *sum)
{
deccall3(arg1, arg2, sum, PGTYPESnumeric_add);
@@ -131,13 +131,13 @@ decadd(decimal * arg1, decimal * arg2, decimal * sum)
}
int
-deccmp(decimal * arg1, decimal * arg2)
+deccmp(decimal *arg1, decimal *arg2)
{
return (deccall2(arg1, arg2, PGTYPESnumeric_cmp));
}
void
-deccopy(decimal * src, decimal * target)
+deccopy(decimal *src, decimal *target)
{
memcpy(target, src, sizeof(decimal));
}
@@ -162,7 +162,7 @@ ecpg_strndup(const char *str, size_t len)
}
int
-deccvasc(char *cp, int len, decimal * np)
+deccvasc(char *cp, int len, decimal *np)
{
char *str = ecpg_strndup(cp, len); /* decimal_in always converts
* the complete string */
@@ -207,7 +207,7 @@ deccvasc(char *cp, int len, decimal * np)
}
int
-deccvdbl(double dbl, decimal * np)
+deccvdbl(double dbl, decimal *np)
{
numeric *nres = PGTYPESnumeric_new();
int result = 1;
@@ -228,7 +228,7 @@ deccvdbl(double dbl, decimal * np)
}
int
-deccvint(int in, decimal * np)
+deccvint(int in, decimal *np)
{
numeric *nres = PGTYPESnumeric_new();
int result = 1;
@@ -249,7 +249,7 @@ deccvint(int in, decimal * np)
}
int
-deccvlong(long lng, decimal * np)
+deccvlong(long lng, decimal *np)
{
numeric *nres = PGTYPESnumeric_new();
int result = 1;
@@ -270,7 +270,7 @@ deccvlong(long lng, decimal * np)
}
int
-decdiv(decimal * n1, decimal * n2, decimal * result)
+decdiv(decimal *n1, decimal *n2, decimal *result)
{
int i;
@@ -295,7 +295,7 @@ decdiv(decimal * n1, decimal * n2, decimal * result)
}
int
-decmul(decimal * n1, decimal * n2, decimal * result)
+decmul(decimal *n1, decimal *n2, decimal *result)
{
int i;
@@ -316,7 +316,7 @@ decmul(decimal * n1, decimal * n2, decimal * result)
}
int
-decsub(decimal * n1, decimal * n2, decimal * result)
+decsub(decimal *n1, decimal *n2, decimal *result)
{
int i;
@@ -337,7 +337,7 @@ decsub(decimal * n1, decimal * n2, decimal * result)
}
int
-dectoasc(decimal * np, char *cp, int len, int right)
+dectoasc(decimal *np, char *cp, int len, int right)
{
char *str;
numeric *nres = PGTYPESnumeric_new();
@@ -372,7 +372,7 @@ dectoasc(decimal * np, char *cp, int len, int right)
}
int
-dectodbl(decimal * np, double *dblp)
+dectodbl(decimal *np, double *dblp)
{
numeric *nres = PGTYPESnumeric_new();
int i;
@@ -390,7 +390,7 @@ dectodbl(decimal * np, double *dblp)
}
int
-dectoint(decimal * np, int *ip)
+dectoint(decimal *np, int *ip)
{
int ret;
numeric *nres = PGTYPESnumeric_new();
@@ -410,7 +410,7 @@ dectoint(decimal * np, int *ip)
}
int
-dectolong(decimal * np, long *lngp)
+dectolong(decimal *np, long *lngp)
{
int ret;
numeric *nres = PGTYPESnumeric_new();
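Because these hunks only adjust pointer spacing, here is a brief usage sketch of the Informix-compat decimal functions whose signatures appear above. The declarations and the 0-means-success convention are taken from context; the demo function itself is hypothetical and not part of this commit.

#include <stdio.h>
#include "pgtypes_numeric.h"    /* defines the decimal type */

extern int  deccvasc(char *cp, int len, decimal *np);
extern int  decadd(decimal *arg1, decimal *arg2, decimal *sum);
extern int  dectoasc(decimal *np, char *cp, int len, int right);

/* Hypothetical demo: parse two decimals, add them, print the result. */
static void
demo_decadd(void)
{
    decimal     a,
                b,
                sum;
    char        buf[64];

    if (deccvasc("1.50", 4, &a) == 0 &&
        deccvasc("2.25", 4, &b) == 0 &&
        decadd(&a, &b, &sum) == 0 &&
        dectoasc(&sum, buf, sizeof(buf), -1) == 0)
        printf("sum = %s\n", buf);
}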
diff --git a/src/interfaces/ecpg/include/pgtypes_numeric.h b/src/interfaces/ecpg/include/pgtypes_numeric.h
index 8d391db123a..51b70a529bf 100644
--- a/src/interfaces/ecpg/include/pgtypes_numeric.h
+++ b/src/interfaces/ecpg/include/pgtypes_numeric.h
@@ -21,7 +21,7 @@ typedef struct
int sign; /* NUMERIC_POS, NUMERIC_NEG, or NUMERIC_NAN */
NumericDigit *buf; /* start of alloc'd space for digits[] */
NumericDigit *digits; /* decimal digits */
-} numeric;
+} numeric;
typedef struct
{
@@ -31,14 +31,14 @@ typedef struct
int dscale; /* display scale */
int sign; /* NUMERIC_POS, NUMERIC_NEG, or NUMERIC_NAN */
NumericDigit digits[DECSIZE]; /* decimal digits */
-} decimal;
+} decimal;
#ifdef __cplusplus
extern "C"
{
#endif
- numeric * PGTYPESnumeric_new(void);
+numeric *PGTYPESnumeric_new(void);
void PGTYPESnumeric_free(numeric *);
numeric *PGTYPESnumeric_from_asc(char *, char **);
char *PGTYPESnumeric_to_asc(numeric *, int);
diff --git a/src/interfaces/ecpg/pgtypeslib/datetime.c b/src/interfaces/ecpg/pgtypeslib/datetime.c
index 5cb0dca0123..bf636ba2405 100644
--- a/src/interfaces/ecpg/pgtypeslib/datetime.c
+++ b/src/interfaces/ecpg/pgtypeslib/datetime.c
@@ -633,11 +633,11 @@ PGTYPESdate_defmt_asc(date * d, char *fmt, char *str)
* here we found a month. token[token_count] and
* token_values[token_count] reflect the month's details.
*
- * only the month can be specified with a literal. Here we can do a quick
- * check if the month is at the right position according to the format
- * string because we can check if the token that we expect to be the
- * month is at the position of the only token that already has a
- * value. If we wouldn't check here we could say "December 4 1990"
+ * only the month can be specified with a literal. Here we can do a
+ * quick check if the month is at the right position according to the
+ * format string because we can check if the token that we expect to
+ * be the month is at the position of the only token that already has
+ * a value. If we wouldn't check here we could say "December 4 1990"
* with a fmt string of "dd mm yy" for 12 April 1990.
*/
if (fmt_token_order[token_count] != 'm')
diff --git a/src/interfaces/ecpg/pgtypeslib/dt_common.c b/src/interfaces/ecpg/pgtypeslib/dt_common.c
index bd10e2dbd0f..dc3f55c6665 100644
--- a/src/interfaces/ecpg/pgtypeslib/dt_common.c
+++ b/src/interfaces/ecpg/pgtypeslib/dt_common.c
@@ -784,8 +784,8 @@ EncodeDateTime(struct tm * tm, fsec_t fsec, int *tzp, char **tzn, int style, cha
* Print fractional seconds if any. The field widths here should
* be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD, since
- * it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD,
+ * since it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -834,8 +834,8 @@ EncodeDateTime(struct tm * tm, fsec_t fsec, int *tzp, char **tzn, int style, cha
* Print fractional seconds if any. The field widths here should
* be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD, since
- * it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD,
+ * since it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -880,8 +880,8 @@ EncodeDateTime(struct tm * tm, fsec_t fsec, int *tzp, char **tzn, int style, cha
* Print fractional seconds if any. The field widths here should
* be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD, since
- * it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD,
+ * since it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -934,8 +934,8 @@ EncodeDateTime(struct tm * tm, fsec_t fsec, int *tzp, char **tzn, int style, cha
* Print fractional seconds if any. The field widths here should
* be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD, since
- * it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD,
+ * since it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -1182,8 +1182,8 @@ DetermineLocalTimeZone(struct tm * tm)
* localtime() call and delta calculation. We may have to do it
* twice before we have a trustworthy delta.
*
- * Note: think not to put a loop here, since if we've been given an
- * "impossible" local time (in the gap during a spring-forward
+ * Note: think not to put a loop here, since if we've been given
+ * an "impossible" local time (in the gap during a spring-forward
* transition) we'd never get out of the loop. Twice is enough to
* give the behavior we want, which is that "impossible" times are
* taken as standard time, while at a fall-back boundary ambiguous
@@ -2542,12 +2542,13 @@ find_end_token(char *str, char *fmt)
* functions gets called as find_end_token("28the day12the hour", "the
* day%hthehour")
*
- * fmt points to "the day%hthehour", next_percent points to %hthehour and we
- * have to find a match for everything between these positions ("the
+ * fmt points to "the day%hthehour", next_percent points to %hthehour and
+ * we have to find a match for everything between these positions ("the
* day"). We look for "the day" in str and know that the pattern we are
* about to scan ends where this string starts (right after the "28")
*
- * At the end, *fmt is '\0' and *str isn't. end_position then is unchanged.
+ * At the end, *fmt is '\0' and *str isn't. end_position then is
+ * unchanged.
*/
char *end_position = NULL;
char *next_percent,
@@ -2627,8 +2628,8 @@ find_end_token(char *str, char *fmt)
*
* and have set fmt to " " because overwrote the % sign with a NULL
*
- * In this case where we would have to match a space but can't find it,
- * set end_position to the end of the string
+ * In this case where we would have to match a space but can't find
+ * it, set end_position to the end of the string
*/
if ((fmt + scan_offset)[0] == ' ' && fmt + scan_offset + 1 == subst_location)
end_position = str + strlen(str);
diff --git a/src/interfaces/ecpg/pgtypeslib/numeric.c b/src/interfaces/ecpg/pgtypeslib/numeric.c
index 575d6d0c97e..8ba0a093ecb 100644
--- a/src/interfaces/ecpg/pgtypeslib/numeric.c
+++ b/src/interfaces/ecpg/pgtypeslib/numeric.c
@@ -28,7 +28,7 @@
* ----------
*/
static int
-apply_typmod(numeric * var, long typmod)
+apply_typmod(numeric *var, long typmod)
{
int precision;
int scale;
@@ -108,7 +108,7 @@ apply_typmod(numeric * var, long typmod)
* ----------
*/
static int
-alloc_var(numeric * var, int ndigits)
+alloc_var(numeric *var, int ndigits)
{
digitbuf_free(var->buf);
var->buf = digitbuf_alloc(ndigits + 1);
@@ -141,7 +141,7 @@ PGTYPESnumeric_new(void)
* ----------
*/
static int
-set_var_from_str(char *str, char **ptr, numeric * dest)
+set_var_from_str(char *str, char **ptr, numeric *dest)
{
bool have_dp = FALSE;
int i = 0;
@@ -271,7 +271,7 @@ set_var_from_str(char *str, char **ptr, numeric * dest)
* ----------
*/
static char *
-get_str_from_var(numeric * var, int dscale)
+get_str_from_var(numeric *var, int dscale)
{
char *str;
char *cp;
@@ -384,7 +384,7 @@ PGTYPESnumeric_from_asc(char *str, char **endptr)
}
char *
-PGTYPESnumeric_to_asc(numeric * num, int dscale)
+PGTYPESnumeric_to_asc(numeric *num, int dscale)
{
if (dscale < 0)
dscale = num->dscale;
@@ -400,7 +400,7 @@ PGTYPESnumeric_to_asc(numeric * num, int dscale)
* ----------
*/
static void
-zero_var(numeric * var)
+zero_var(numeric *var)
{
digitbuf_free(var->buf);
var->buf = NULL;
@@ -411,7 +411,7 @@ zero_var(numeric * var)
}
void
-PGTYPESnumeric_free(numeric * var)
+PGTYPESnumeric_free(numeric *var)
{
digitbuf_free(var->buf);
free(var);
@@ -427,7 +427,7 @@ PGTYPESnumeric_free(numeric * var)
* ----------
*/
static int
-cmp_abs(numeric * var1, numeric * var2)
+cmp_abs(numeric *var1, numeric *var2)
{
int i1 = 0;
int i2 = 0;
@@ -485,7 +485,7 @@ cmp_abs(numeric * var1, numeric * var2)
* ----------
*/
static int
-add_abs(numeric * var1, numeric * var2, numeric * result)
+add_abs(numeric *var1, numeric *var2, numeric *result)
{
NumericDigit *res_buf;
NumericDigit *res_digits;
@@ -573,7 +573,7 @@ add_abs(numeric * var1, numeric * var2, numeric * result)
* ----------
*/
static int
-sub_abs(numeric * var1, numeric * var2, numeric * result)
+sub_abs(numeric *var1, numeric *var2, numeric *result)
{
NumericDigit *res_buf;
NumericDigit *res_digits;
@@ -657,7 +657,7 @@ sub_abs(numeric * var1, numeric * var2, numeric * result)
* ----------
*/
int
-PGTYPESnumeric_add(numeric * var1, numeric * var2, numeric * result)
+PGTYPESnumeric_add(numeric *var1, numeric *var2, numeric *result)
{
/*
* Decide on the signs of the two variables what to do
@@ -785,7 +785,7 @@ PGTYPESnumeric_add(numeric * var1, numeric * var2, numeric * result)
* ----------
*/
int
-PGTYPESnumeric_sub(numeric * var1, numeric * var2, numeric * result)
+PGTYPESnumeric_sub(numeric *var1, numeric *var2, numeric *result)
{
/*
* Decide on the signs of the two variables what to do
@@ -916,7 +916,7 @@ PGTYPESnumeric_sub(numeric * var1, numeric * var2, numeric * result)
* ----------
*/
int
-PGTYPESnumeric_mul(numeric * var1, numeric * var2, numeric * result)
+PGTYPESnumeric_mul(numeric *var1, numeric *var2, numeric *result)
{
NumericDigit *res_buf;
NumericDigit *res_digits;
@@ -1007,7 +1007,7 @@ PGTYPESnumeric_mul(numeric * var1, numeric * var2, numeric * result)
* Note that this must be called before div_var.
*/
static int
-select_div_scale(numeric * var1, numeric * var2, int *rscale)
+select_div_scale(numeric *var1, numeric *var2, int *rscale)
{
int weight1,
weight2,
@@ -1074,7 +1074,7 @@ select_div_scale(numeric * var1, numeric * var2, int *rscale)
}
int
-PGTYPESnumeric_div(numeric * var1, numeric * var2, numeric * result)
+PGTYPESnumeric_div(numeric *var1, numeric *var2, numeric *result)
{
NumericDigit *res_digits;
int res_ndigits;
@@ -1303,7 +1303,7 @@ done:
int
-PGTYPESnumeric_cmp(numeric * var1, numeric * var2)
+PGTYPESnumeric_cmp(numeric *var1, numeric *var2)
{
/* use cmp_abs function to calculate the result */
@@ -1333,7 +1333,7 @@ PGTYPESnumeric_cmp(numeric * var1, numeric * var2)
}
int
-PGTYPESnumeric_from_int(signed int int_val, numeric * var)
+PGTYPESnumeric_from_int(signed int int_val, numeric *var)
{
/* implicit conversion */
signed long int long_int = int_val;
@@ -1342,7 +1342,7 @@ PGTYPESnumeric_from_int(signed int int_val, numeric * var)
}
int
-PGTYPESnumeric_from_long(signed long int long_val, numeric * var)
+PGTYPESnumeric_from_long(signed long int long_val, numeric *var)
{
/* calculate the size of the long int number */
/* a number n needs log_10 n digits */
@@ -1412,7 +1412,7 @@ PGTYPESnumeric_from_long(signed long int long_val, numeric * var)
}
int
-PGTYPESnumeric_copy(numeric * src, numeric * dst)
+PGTYPESnumeric_copy(numeric *src, numeric *dst)
{
int i;
@@ -1435,7 +1435,7 @@ PGTYPESnumeric_copy(numeric * src, numeric * dst)
}
int
-PGTYPESnumeric_from_double(double d, numeric * dst)
+PGTYPESnumeric_from_double(double d, numeric *dst)
{
char buffer[100];
numeric *tmp;
@@ -1452,7 +1452,7 @@ PGTYPESnumeric_from_double(double d, numeric * dst)
}
static int
-numericvar_to_double_no_overflow(numeric * var, double *dp)
+numericvar_to_double_no_overflow(numeric *var, double *dp)
{
char *tmp;
double val;
@@ -1476,7 +1476,7 @@ numericvar_to_double_no_overflow(numeric * var, double *dp)
}
int
-PGTYPESnumeric_to_double(numeric * nv, double *dp)
+PGTYPESnumeric_to_double(numeric *nv, double *dp)
{
double tmp;
int i;
@@ -1488,7 +1488,7 @@ PGTYPESnumeric_to_double(numeric * nv, double *dp)
}
int
-PGTYPESnumeric_to_int(numeric * nv, int *ip)
+PGTYPESnumeric_to_int(numeric *nv, int *ip)
{
long l;
int i;
@@ -1507,7 +1507,7 @@ PGTYPESnumeric_to_int(numeric * nv, int *ip)
}
int
-PGTYPESnumeric_to_long(numeric * nv, long *lp)
+PGTYPESnumeric_to_long(numeric *nv, long *lp)
{
int i;
long l = 0;
@@ -1535,7 +1535,7 @@ PGTYPESnumeric_to_long(numeric * nv, long *lp)
}
int
-PGTYPESnumeric_to_decimal(numeric * src, decimal * dst)
+PGTYPESnumeric_to_decimal(numeric *src, decimal *dst)
{
int i;
@@ -1558,7 +1558,7 @@ PGTYPESnumeric_to_decimal(numeric * src, decimal * dst)
}
int
-PGTYPESnumeric_from_decimal(decimal * src, numeric * dst)
+PGTYPESnumeric_from_decimal(decimal *src, numeric *dst)
{
int i;
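A short, hedged usage sketch of the pgtypeslib numeric API re-spaced above (parse, add, print, free). The demo function is hypothetical and not part of this commit; it assumes 0 is the success return of PGTYPESnumeric_add(), per the library's convention.

#include <stdio.h>
#include <stdlib.h>
#include "pgtypes_numeric.h"

/* Hypothetical demo: add two numerics parsed from strings. */
static void
demo_pgtypes_numeric(void)
{
    numeric    *a = PGTYPESnumeric_from_asc("19.95", NULL);
    numeric    *b = PGTYPESnumeric_from_asc("0.05", NULL);
    numeric    *sum = PGTYPESnumeric_new();

    if (a && b && sum && PGTYPESnumeric_add(a, b, sum) == 0)
    {
        char       *text = PGTYPESnumeric_to_asc(sum, -1); /* -1: use dscale */

        printf("sum = %s\n", text);
        free(text);
    }

    if (a)
        PGTYPESnumeric_free(a);
    if (b)
        PGTYPESnumeric_free(b);
    if (sum)
        PGTYPESnumeric_free(sum);
}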
diff --git a/src/interfaces/libpq/fe-auth.c b/src/interfaces/libpq/fe-auth.c
index bca9f46830c..abfdda2252e 100644
--- a/src/interfaces/libpq/fe-auth.c
+++ b/src/interfaces/libpq/fe-auth.c
@@ -10,7 +10,7 @@
* exceed INITIAL_EXPBUFFER_SIZE (currently 256 bytes).
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-auth.c,v 1.107 2005/10/24 15:38:37 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-auth.c,v 1.107.2.1 2005/11/22 18:23:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -278,7 +278,6 @@ pg_krb5_sendauth(char *PQerrormsg, int sock, const char *hostname, const char *s
return ret;
}
-
#endif /* KRB5 */
@@ -501,14 +500,13 @@ pg_fe_getauthname(char *PQerrormsg)
#endif
/*
- * pglock_thread() really only needs to be called around
- * pg_krb5_authname(), but some users are using configure
- * --enable-thread-safety-force, so we might as well do
- * the locking within our library to protect pqGetpwuid().
- * In fact, application developers can use getpwuid()
- * in their application if they use the locking call we
- * provide, or install their own locking function using
- * PQregisterThreadLock().
+ * pglock_thread() really only needs to be called around
+ * pg_krb5_authname(), but some users are using configure
+ * --enable-thread-safety-force, so we might as well do the locking within
+ * our library to protect pqGetpwuid(). In fact, application developers
+ * can use getpwuid() in their application if they use the locking call we
+ * provide, or install their own locking function using
+ * PQregisterThreadLock().
*/
pglock_thread();
diff --git a/src/interfaces/libpq/fe-auth.h b/src/interfaces/libpq/fe-auth.h
index 01b2fcc9d92..e69efb5ca31 100644
--- a/src/interfaces/libpq/fe-auth.h
+++ b/src/interfaces/libpq/fe-auth.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-auth.h,v 1.23 2005/10/17 16:24:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-auth.h,v 1.23.2.1 2005/11/22 18:23:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -19,7 +19,7 @@
extern int pg_fe_sendauth(AuthRequest areq, PGconn *conn, const char *hostname,
- const char *password, char *PQerrormsg);
+ const char *password, char *PQerrormsg);
extern char *pg_fe_getauthname(char *PQerrormsg);
#endif /* FE_AUTH_H */
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
index b378b65c82e..298918589aa 100644
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-connect.c,v 1.323 2005/10/17 16:24:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-connect.c,v 1.323.2.1 2005/11/22 18:23:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -370,8 +370,8 @@ connectOptions1(PGconn *conn, const char *conninfo)
/*
* Move option values into conn structure
*
- * Don't put anything cute here --- intelligence should be in connectOptions2
- * ...
+ * Don't put anything cute here --- intelligence should be in
+ * connectOptions2 ...
*
* XXX: probably worth checking strdup() return value here...
*/
@@ -687,7 +687,7 @@ connectFailureMessage(PGconn *conn, int errorno)
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not connect to server: %s\n"
"\tIs the server running locally and accepting\n"
- "\tconnections on Unix domain socket \"%s\"?\n"),
+ "\tconnections on Unix domain socket \"%s\"?\n"),
SOCK_STRERROR(errorno, sebuf, sizeof(sebuf)),
service);
}
@@ -1003,7 +1003,7 @@ keep_going: /* We will come back to here until there is
/*
* Try to initiate a connection to one of the addresses
* returned by pg_getaddrinfo_all(). conn->addr_cur is the
- * next one to try. We fail when we run out of addresses
+ * next one to try. We fail when we run out of addresses
* (reporting the error returned for the *last* alternative,
* which may not be what users expect :-().
*/
@@ -1226,8 +1226,9 @@ keep_going: /* We will come back to here until there is
/*
* Send the SSL request packet.
*
- * Theoretically, this could block, but it really shouldn't
- * since we only got here if the socket is write-ready.
+ * Theoretically, this could block, but it really
+ * shouldn't since we only got here if the socket is
+ * write-ready.
*/
pv = htonl(NEGOTIATE_SSL_CODE);
if (pqPacketSend(conn, 0, &pv, sizeof(pv)) != STATUS_OK)
@@ -1262,8 +1263,8 @@ keep_going: /* We will come back to here until there is
/*
* Send the startup packet.
*
- * Theoretically, this could block, but it really shouldn't since
- * we only got here if the socket is write-ready.
+ * Theoretically, this could block, but it really shouldn't
+ * since we only got here if the socket is write-ready.
*/
if (pqPacketSend(conn, 0, startpacket, packetlen) != STATUS_OK)
{
@@ -1500,8 +1501,8 @@ keep_going: /* We will come back to here until there is
/*
* Can't process if message body isn't all here yet.
*
- * (In protocol 2.0 case, we are assuming messages carry at least
- * 4 bytes of data.)
+ * (In protocol 2.0 case, we are assuming messages carry at
+ * least 4 bytes of data.)
*/
msgLength -= 4;
avail = conn->inEnd - conn->inCursor;
@@ -1829,8 +1830,8 @@ makeEmptyPGconn(void)
* bufferloads. The output buffer is initially made 16K in size, and we
* try to dump it after accumulating 8K.
*
- * With the same goal of minimizing context swaps, the input buffer will be
- * enlarged anytime it has less than 8K free, so we initially allocate
+ * With the same goal of minimizing context swaps, the input buffer will
+ * be enlarged anytime it has less than 8K free, so we initially allocate
* twice that.
*/
conn->inBufSize = 16 * 1024;
diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c
index 84637072bc2..c13f32836de 100644
--- a/src/interfaces/libpq/fe-exec.c
+++ b/src/interfaces/libpq/fe-exec.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-exec.c,v 1.176 2005/10/15 02:49:48 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-exec.c,v 1.176.2.1 2005/11/22 18:23:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -454,7 +454,7 @@ pqPrepareAsyncResult(PGconn *conn)
* a trailing newline, and should not be more than one line).
*/
void
-pqInternalNotice(const PGNoticeHooks * hooks, const char *fmt,...)
+pqInternalNotice(const PGNoticeHooks *hooks, const char *fmt,...)
{
char msgBuf[1024];
va_list args;
@@ -505,20 +505,20 @@ pqInternalNotice(const PGNoticeHooks * hooks, const char *fmt,...)
* Returns TRUE if OK, FALSE if not enough memory to add the row
*/
int
-pqAddTuple(PGresult *res, PGresAttValue * tup)
+pqAddTuple(PGresult *res, PGresAttValue *tup)
{
if (res->ntups >= res->tupArrSize)
{
/*
* Try to grow the array.
*
- * We can use realloc because shallow copying of the structure is okay.
- * Note that the first time through, res->tuples is NULL. While ANSI
- * says that realloc() should act like malloc() in that case, some old
- * C libraries (like SunOS 4.1.x) coredump instead. On failure realloc
- * is supposed to return NULL without damaging the existing
- * allocation. Note that the positions beyond res->ntups are garbage,
- * not necessarily NULL.
+ * We can use realloc because shallow copying of the structure is
+ * okay. Note that the first time through, res->tuples is NULL. While
+ * ANSI says that realloc() should act like malloc() in that case,
+ * some old C libraries (like SunOS 4.1.x) coredump instead. On
+ * failure realloc is supposed to return NULL without damaging the
+ * existing allocation. Note that the positions beyond res->ntups are
+ * garbage, not necessarily NULL.
*/
int newSize = (res->tupArrSize > 0) ? res->tupArrSize * 2 : 128;
PGresAttValue **newTuples;
@@ -594,7 +594,7 @@ pqSaveParameterStatus(PGconn *conn, const char *name, const char *value)
* Store new info as a single malloc block
*/
pstatus = (pgParameterStatus *) malloc(sizeof(pgParameterStatus) +
- strlen(name) + strlen(value) + 2);
+ strlen(name) +strlen(value) + 2);
if (pstatus)
{
char *ptr;
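The growth strategy described in the re-wrapped pqAddTuple() comment (double the array, start at 128, avoid realloc(NULL, ...) for very old C libraries, keep the old allocation on failure) generalizes to a small sketch. This is a generic helper written for illustration, not libpq's code.

#include <stdlib.h>

/* Illustrative only: ensure room for one more pointer, doubling capacity.
 * Returns the (possibly moved) array, or NULL on out-of-memory while
 * leaving the caller's original allocation untouched. */
static void **
grow_ptr_array(void **items, int nitems, int *capacity)
{
    if (nitems >= *capacity)
    {
        int         newcap = (*capacity > 0) ? *capacity * 2 : 128;
        void      **newitems;

        if (items == NULL)      /* don't rely on realloc(NULL, ...) */
            newitems = (void **) malloc(newcap * sizeof(void *));
        else
            newitems = (void **) realloc(items, newcap * sizeof(void *));
        if (newitems == NULL)
            return NULL;
        *capacity = newcap;
        items = newitems;
    }
    return items;
}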
diff --git a/src/interfaces/libpq/fe-misc.c b/src/interfaces/libpq/fe-misc.c
index c78d8b3c662..cd9f56fe38b 100644
--- a/src/interfaces/libpq/fe-misc.c
+++ b/src/interfaces/libpq/fe-misc.c
@@ -23,7 +23,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-misc.c,v 1.122 2005/10/15 02:49:48 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-misc.c,v 1.122.2.1 2005/11/22 18:23:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -566,8 +566,8 @@ pqReadData(PGconn *conn)
/*
* If the buffer is fairly full, enlarge it. We need to be able to enlarge
- * the buffer in case a single message exceeds the initial buffer size.
- * We enlarge before filling the buffer entirely so as to avoid asking the
+ * the buffer in case a single message exceeds the initial buffer size. We
+ * enlarge before filling the buffer entirely so as to avoid asking the
* kernel for a partial packet. The magic constant here should be large
* enough for a TCP packet or Unix pipe bufferload. 8K is the usual pipe
* buffer size, so...
@@ -623,9 +623,9 @@ retry3:
* buffer space. Without this, the block-and-restart behavior of
* libpq's higher levels leads to O(N^2) performance on long messages.
*
- * Since we left-justified the data above, conn->inEnd gives the amount
- * of data already read in the current message. We consider the
- * message "long" once we have acquired 32k ...
+ * Since we left-justified the data above, conn->inEnd gives the
+ * amount of data already read in the current message. We consider
+ * the message "long" once we have acquired 32k ...
*/
if (conn->inEnd > 32768 &&
(conn->inBufSize - conn->inEnd) >= 8192)
@@ -648,10 +648,10 @@ retry3:
* since in normal practice we should not be trying to read data unless
* the file selected for reading already.
*
- * In SSL mode it's even worse: SSL_read() could say WANT_READ and then data
- * could arrive before we make the pqReadReady() test. So we must play
- * dumb and assume there is more data, relying on the SSL layer to detect
- * true EOF.
+ * In SSL mode it's even worse: SSL_read() could say WANT_READ and then
+ * data could arrive before we make the pqReadReady() test. So we must
+ * play dumb and assume there is more data, relying on the SSL layer to
+ * detect true EOF.
*/
#ifdef USE_SSL
diff --git a/src/interfaces/libpq/fe-protocol2.c b/src/interfaces/libpq/fe-protocol2.c
index 8e3614ac070..bb0ac9205c1 100644
--- a/src/interfaces/libpq/fe-protocol2.c
+++ b/src/interfaces/libpq/fe-protocol2.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-protocol2.c,v 1.19 2005/10/15 02:49:48 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-protocol2.c,v 1.19.2.1 2005/11/22 18:23:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -379,9 +379,9 @@ pqParseInput2(PGconn *conn)
* NOTIFY and NOTICE messages can happen in any state besides COPY
* OUT; always process them right away.
*
- * Most other messages should only be processed while in BUSY state. (In
- * particular, in READY state we hold off further parsing until the
- * application collects the current PGresult.)
+ * Most other messages should only be processed while in BUSY state.
+ * (In particular, in READY state we hold off further parsing until
+ * the application collects the current PGresult.)
*
* However, if the state is IDLE then we got trouble; we need to deal
* with the unexpected message somehow.
@@ -1420,7 +1420,7 @@ pqFunctionCall2(PGconn *conn, Oid fnid,
*/
char *
pqBuildStartupPacket2(PGconn *conn, int *packetlen,
- const PQEnvironmentOption * options)
+ const PQEnvironmentOption *options)
{
StartupPacket *startpacket;
diff --git a/src/interfaces/libpq/fe-protocol3.c b/src/interfaces/libpq/fe-protocol3.c
index d3af5ad447c..7ec6513ca6c 100644
--- a/src/interfaces/libpq/fe-protocol3.c
+++ b/src/interfaces/libpq/fe-protocol3.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-protocol3.c,v 1.22 2005/10/15 02:49:48 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-protocol3.c,v 1.22.2.1 2005/11/22 18:23:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -52,7 +52,7 @@ static int getNotify(PGconn *conn);
static int getCopyStart(PGconn *conn, ExecStatusType copytype);
static int getReadyForQuery(PGconn *conn);
static int build_startup_packet(const PGconn *conn, char *packet,
- const PQEnvironmentOption * options);
+ const PQEnvironmentOption *options);
/*
@@ -130,9 +130,9 @@ pqParseInput3(PGconn *conn)
* NOTIFY and NOTICE messages can happen in any state; always process
* them right away.
*
- * Most other messages should only be processed while in BUSY state. (In
- * particular, in READY state we hold off further parsing until the
- * application collects the current PGresult.)
+ * Most other messages should only be processed while in BUSY state.
+ * (In particular, in READY state we hold off further parsing until
+ * the application collects the current PGresult.)
*
* However, if the state is IDLE then we got trouble; we need to deal
* with the unexpected message somehow.
@@ -1430,7 +1430,7 @@ pqFunctionCall3(PGconn *conn, Oid fnid,
*/
char *
pqBuildStartupPacket3(PGconn *conn, int *packetlen,
- const PQEnvironmentOption * options)
+ const PQEnvironmentOption *options)
{
char *startpacket;
@@ -1453,7 +1453,7 @@ pqBuildStartupPacket3(PGconn *conn, int *packetlen,
*/
static int
build_startup_packet(const PGconn *conn, char *packet,
- const PQEnvironmentOption * options)
+ const PQEnvironmentOption *options)
{
int packet_len = 0;
const PQEnvironmentOption *next_eo;
diff --git a/src/interfaces/libpq/libpq-fe.h b/src/interfaces/libpq/libpq-fe.h
index a26721e9f63..7642dda9a24 100644
--- a/src/interfaces/libpq/libpq-fe.h
+++ b/src/interfaces/libpq/libpq-fe.h
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-fe.h,v 1.120 2005/10/15 02:49:48 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-fe.h,v 1.120.2.1 2005/11/22 18:23:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,7 +35,7 @@ extern "C"
/* Application-visible enum types */
-typedef enum
+ typedef enum
{
/*
* Although it is okay to add to this list, values which become unused
diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h
index d2ee44753bc..9092f9c4741 100644
--- a/src/interfaces/libpq/libpq-int.h
+++ b/src/interfaces/libpq/libpq-int.h
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-int.h,v 1.108 2005/10/15 02:49:49 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-int.h,v 1.108.2.1 2005/11/22 18:23:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -92,7 +92,7 @@ typedef struct pgresAttDesc
Oid typid; /* type id */
int typlen; /* type size */
int atttypmod; /* type-specific modifier info */
-} PGresAttDesc;
+} PGresAttDesc;
/*
* Data for a single attribute of a single tuple
@@ -118,7 +118,7 @@ typedef struct pgresAttValue
{
int len; /* length in bytes of the value */
char *value; /* actual value, plus terminating zero byte */
-} PGresAttValue;
+} PGresAttValue;
/* Typedef for message-field list entries */
typedef struct pgMessageField
@@ -126,7 +126,7 @@ typedef struct pgMessageField
struct pgMessageField *next; /* list link */
char code; /* field code */
char contents[1]; /* field value (VARIABLE LENGTH) */
-} PGMessageField;
+} PGMessageField;
/* Fields needed for notice handling */
typedef struct
@@ -135,7 +135,7 @@ typedef struct
void *noticeRecArg;
PQnoticeProcessor noticeProc; /* notice message processor */
void *noticeProcArg;
-} PGNoticeHooks;
+} PGNoticeHooks;
struct pg_result
{
@@ -186,7 +186,7 @@ typedef enum
PGASYNC_READY, /* result ready for PQgetResult */
PGASYNC_COPY_IN, /* Copy In data transfer in progress */
PGASYNC_COPY_OUT /* Copy Out data transfer in progress */
-} PGAsyncStatusType;
+} PGAsyncStatusType;
/* PGQueryClass tracks which query protocol we are now executing */
typedef enum
@@ -194,7 +194,7 @@ typedef enum
PGQUERY_SIMPLE, /* simple Query protocol (PQexec) */
PGQUERY_EXTENDED, /* full Extended protocol (PQexecParams) */
PGQUERY_PREPARE /* Parse only (PQprepare) */
-} PGQueryClass;
+} PGQueryClass;
/* PGSetenvStatusType defines the state of the PQSetenv state machine */
/* (this is used only for 2.0-protocol connections) */
@@ -207,14 +207,14 @@ typedef enum
SETENV_STATE_QUERY2_SEND, /* About to send a status query */
SETENV_STATE_QUERY2_WAIT, /* Waiting for query to complete */
SETENV_STATE_IDLE
-} PGSetenvStatusType;
+} PGSetenvStatusType;
/* Typedef for the EnvironmentOptions[] array */
typedef struct PQEnvironmentOption
{
const char *envName, /* name of an environment variable */
*pgName; /* name of corresponding SET variable */
-} PQEnvironmentOption;
+} PQEnvironmentOption;
/* Typedef for parameter-status list entries */
typedef struct pgParameterStatus
@@ -223,7 +223,7 @@ typedef struct pgParameterStatus
char *name; /* parameter name */
char *value; /* parameter value */
/* Note: name and value are stored in same malloc block as struct is */
-} pgParameterStatus;
+} pgParameterStatus;
/* large-object-access data ... allocated only if large-object code is used. */
typedef struct pgLobjfuncs
@@ -237,7 +237,7 @@ typedef struct pgLobjfuncs
Oid fn_lo_tell; /* OID of backend function lo_tell */
Oid fn_lo_read; /* OID of backend function LOread */
Oid fn_lo_write; /* OID of backend function LOwrite */
-} PGlobjfuncs;
+} PGlobjfuncs;
/*
* PGconn stores all the state data associated with a single connection
@@ -402,10 +402,10 @@ extern void pqClearAsyncResult(PGconn *conn);
extern void pqSaveErrorResult(PGconn *conn);
extern PGresult *pqPrepareAsyncResult(PGconn *conn);
extern void
-pqInternalNotice(const PGNoticeHooks * hooks, const char *fmt,...)
+pqInternalNotice(const PGNoticeHooks *hooks, const char *fmt,...)
/* This lets gcc check the format string for consistency. */
__attribute__((format(printf, 2, 3)));
-extern int pqAddTuple(PGresult *res, PGresAttValue * tup);
+extern int pqAddTuple(PGresult *res, PGresAttValue *tup);
extern void pqSaveMessageField(PGresult *res, char code,
const char *value);
extern void pqSaveParameterStatus(PGconn *conn, const char *name,
@@ -417,7 +417,7 @@ extern void pqHandleSendFailure(PGconn *conn);
extern PostgresPollingStatusType pqSetenvPoll(PGconn *conn);
extern char *pqBuildStartupPacket2(PGconn *conn, int *packetlen,
- const PQEnvironmentOption * options);
+ const PQEnvironmentOption *options);
extern void pqParseInput2(PGconn *conn);
extern int pqGetCopyData2(PGconn *conn, char **buffer, int async);
extern int pqGetline2(PGconn *conn, char *s, int maxlen);
@@ -431,7 +431,7 @@ extern PGresult *pqFunctionCall2(PGconn *conn, Oid fnid,
/* === in fe-protocol3.c === */
extern char *pqBuildStartupPacket3(PGconn *conn, int *packetlen,
- const PQEnvironmentOption * options);
+ const PQEnvironmentOption *options);
extern void pqParseInput3(PGconn *conn);
extern int pqGetErrorNotice3(PGconn *conn, bool isError);
extern int pqGetCopyData3(PGconn *conn, char **buffer, int async);
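The libpq-int.h hunk above keeps the __attribute__((format(printf, 2, 3))) annotation on pqInternalNotice, which tells gcc that argument 2 is a printf-style format string consumed by the varargs beginning at argument 3, so mismatched calls are caught at compile time. A small self-contained sketch of the same idiom on a hypothetical logging helper (log_notice is illustrative, not a libpq symbol):

#include <stdarg.h>
#include <stdio.h>

static void log_notice(const char *fmt,...) __attribute__((format(printf, 1, 2)));

static void
log_notice(const char *fmt,...)
{
	va_list		ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int
main(void)
{
	log_notice("connected after %d attempts\n", 3);
	/* log_notice("connected after %d attempts\n", "three");  <- gcc warns: format mismatch */
	return 0;
}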
diff --git a/src/interfaces/libpq/pthread-win32.h b/src/interfaces/libpq/pthread-win32.h
index c30eaeb291f..dfcdd328ec0 100644
--- a/src/interfaces/libpq/pthread-win32.h
+++ b/src/interfaces/libpq/pthread-win32.h
@@ -13,7 +13,7 @@ void *pthread_getspecific(pthread_key_t);
void pthread_mutex_init(pthread_mutex_t *, void *attr);
void pthread_mutex_lock(pthread_mutex_t *);
-//blocking
+/* blocking */
void pthread_mutex_unlock(pthread_mutex_t *);
#endif
diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c
index 01d7b2453b1..935cf94d168 100644
--- a/src/pl/plperl/plperl.c
+++ b/src/pl/plperl/plperl.c
@@ -33,7 +33,7 @@
* ENHANCEMENTS, OR MODIFICATIONS.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plperl/plperl.c,v 1.94 2005/10/18 17:13:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plperl/plperl.c,v 1.94.2.1 2005/11/22 18:23:30 momjian Exp $
*
**********************************************************************/
@@ -90,7 +90,7 @@ typedef struct plperl_proc_desc
FmgrInfo arg_out_func[FUNC_MAX_ARGS];
bool arg_is_rowtype[FUNC_MAX_ARGS];
SV *reference;
-} plperl_proc_desc;
+} plperl_proc_desc;
/**********************************************************************
@@ -315,7 +315,7 @@ strip_trailing_ws(const char *msg)
/* Build a tuple from a hash. */
static HeapTuple
-plperl_build_tuple_result(HV * perlhash, AttInMetadata *attinmeta)
+plperl_build_tuple_result(HV *perlhash, AttInMetadata *attinmeta)
{
TupleDesc td = attinmeta->tupdesc;
char **values;
@@ -350,7 +350,7 @@ plperl_build_tuple_result(HV * perlhash, AttInMetadata *attinmeta)
* convert perl array to postgres string representation
*/
static SV *
-plperl_convert_to_pg_array(SV * src)
+plperl_convert_to_pg_array(SV *src)
{
SV *rv;
int count;
@@ -474,7 +474,7 @@ plperl_trigger_build_args(FunctionCallInfo fcinfo)
/* Set up the new tuple returned from a trigger. */
static HeapTuple
-plperl_modify_tuple(HV * hvTD, TriggerData *tdata, HeapTuple otup)
+plperl_modify_tuple(HV *hvTD, TriggerData *tdata, HeapTuple otup)
{
SV **svp;
HV *hvNew;
@@ -743,8 +743,8 @@ plperl_create_sub(char *s, bool trusted)
*
**********************************************************************/
-EXTERN_C void boot_DynaLoader(pTHX_ CV * cv);
-EXTERN_C void boot_SPI(pTHX_ CV * cv);
+EXTERN_C void boot_DynaLoader(pTHX_ CV *cv);
+EXTERN_C void boot_SPI(pTHX_ CV *cv);
static void
plperl_init_shared_libs(pTHX)
@@ -757,7 +757,7 @@ plperl_init_shared_libs(pTHX)
static SV *
-plperl_call_perl_func(plperl_proc_desc * desc, FunctionCallInfo fcinfo)
+plperl_call_perl_func(plperl_proc_desc *desc, FunctionCallInfo fcinfo)
{
dSP;
SV *retval;
@@ -850,8 +850,8 @@ plperl_call_perl_func(plperl_proc_desc * desc, FunctionCallInfo fcinfo)
static SV *
-plperl_call_perl_trigger_func(plperl_proc_desc * desc, FunctionCallInfo fcinfo,
- SV * td)
+plperl_call_perl_trigger_func(plperl_proc_desc *desc, FunctionCallInfo fcinfo,
+ SV *td)
{
dSP;
SV *retval;
@@ -1549,7 +1549,7 @@ plperl_spi_execute_fetch_result(SPITupleTable *tuptable, int processed,
/*
* Note: plperl_return_next is called both in Postgres and Perl contexts.
- * We report any errors in Postgres fashion (via ereport). If called in
+ * We report any errors in Postgres fashion (via ereport). If called in
* Perl context, it is SPI.xs's responsibility to catch the error and
* convert to a Perl error. We assume (perhaps without adequate justification)
* that we need not abort the current transaction if the Perl code traps the
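The plperl.c hunks above touch boot_DynaLoader/boot_SPI and plperl_init_shared_libs, which is PL/Perl's variant of the standard perlembed bootstrap hook passed to the interpreter at startup. For reference, the generic form of that hook in an embedding application looks roughly like this (a sketch of the documented perlembed pattern, not PostgreSQL code):

#include <EXTERN.h>
#include <perl.h>
#include <XSUB.h>

EXTERN_C void boot_DynaLoader(pTHX_ CV *cv);

/*
 * Passed to perl_parse() as the xs_init callback; registering DynaLoader
 * lets the embedded interpreter load further compiled XS modules.
 */
EXTERN_C void
xs_init(pTHX)
{
	char	   *file = __FILE__;

	newXS("DynaLoader::boot_DynaLoader", boot_DynaLoader, file);
}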
diff --git a/src/pl/plperl/ppport.h b/src/pl/plperl/ppport.h
index d9c64deabf4..e89ab4fc066 100644
--- a/src/pl/plperl/ppport.h
+++ b/src/pl/plperl/ppport.h
@@ -344,7 +344,7 @@ typedef NVTYPE NV;
#else
#if defined(USE_THREADS)
static SV *
-newRV_noinc(SV * sv)
+newRV_noinc(SV *sv)
{
SV *nsv = (SV *) newRV(sv);
@@ -366,7 +366,7 @@ newRV_noinc(SV * sv)
#if defined(NEED_newCONSTSUB)
static
#else
-extern void newCONSTSUB(HV * stash, char *name, SV * sv);
+extern void newCONSTSUB(HV *stash, char *name, SV *sv);
#endif
#if defined(NEED_newCONSTSUB) || defined(NEED_newCONSTSUB_GLOBAL)
@@ -543,7 +543,7 @@ SV *sv;
((SvFLAGS(sv) & (SVf_POK|SVf_UTF8)) == (SVf_POK) \
? ((lp = SvCUR(sv)), SvPVX(sv)) : my_sv_2pvbyte(aTHX_ sv, &lp))
static char *
-my_sv_2pvbyte(pTHX_ register SV * sv, STRLEN * lp)
+my_sv_2pvbyte(pTHX_ register SV *sv, STRLEN *lp)
{
sv_utf8_downgrade(sv, 0);
return SvPV(sv, *lp);
@@ -558,7 +558,7 @@ my_sv_2pvbyte(pTHX_ register SV * sv, STRLEN * lp)
((SvFLAGS(sv) & (SVf_POK)) == SVf_POK \
? SvPVX(sv) : sv_2pv_nolen(sv))
static char *
-sv_2pv_nolen(pTHX_ register SV * sv)
+sv_2pv_nolen(pTHX_ register SV *sv)
{
STRLEN n_a;
diff --git a/src/pl/plpgsql/src/pl_comp.c b/src/pl/plpgsql/src/pl_comp.c
index 2c84899519b..18292a367d8 100644
--- a/src/pl/plpgsql/src/pl_comp.c
+++ b/src/pl/plpgsql/src/pl_comp.c
@@ -3,7 +3,7 @@
* procedural language
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_comp.c,v 1.94 2005/10/15 02:49:49 momjian Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_comp.c,v 1.94.2.1 2005/11/22 18:23:30 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
@@ -95,7 +95,7 @@ typedef struct plpgsql_hashent
{
PLpgSQL_func_hashkey key;
PLpgSQL_function *function;
-} plpgsql_HashEnt;
+} plpgsql_HashEnt;
#define FUNCS_PER_USER 128 /* initial table size */
@@ -107,7 +107,7 @@ typedef struct
{
const char *label;
int sqlerrstate;
-} ExceptionLabelMap;
+} ExceptionLabelMap;
static const ExceptionLabelMap exception_label_map[] = {
#include "plerrcodes.h"
@@ -121,27 +121,27 @@ static const ExceptionLabelMap exception_label_map[] = {
*/
static PLpgSQL_function *do_compile(FunctionCallInfo fcinfo,
HeapTuple procTup,
- PLpgSQL_func_hashkey * hashkey,
+ PLpgSQL_func_hashkey *hashkey,
bool forValidator);
static int fetchArgInfo(HeapTuple procTup,
Oid **p_argtypes, char ***p_argnames,
char **p_argmodes);
static PLpgSQL_row *build_row_from_class(Oid classOid);
-static PLpgSQL_row *build_row_from_vars(PLpgSQL_variable ** vars, int numvars);
+static PLpgSQL_row *build_row_from_vars(PLpgSQL_variable **vars, int numvars);
static PLpgSQL_type *build_datatype(HeapTuple typeTup, int32 typmod);
static void compute_function_hashkey(FunctionCallInfo fcinfo,
Form_pg_proc procStruct,
- PLpgSQL_func_hashkey * hashkey,
+ PLpgSQL_func_hashkey *hashkey,
bool forValidator);
static void plpgsql_resolve_polymorphic_argtypes(int numargs,
Oid *argtypes, char *argmodes,
Node *call_expr, bool forValidator,
const char *proname);
-static PLpgSQL_function *plpgsql_HashTableLookup(PLpgSQL_func_hashkey * func_key);
-static void plpgsql_HashTableInsert(PLpgSQL_function * function,
- PLpgSQL_func_hashkey * func_key);
-static void plpgsql_HashTableDelete(PLpgSQL_function * function);
-static void delete_function(PLpgSQL_function * func);
+static PLpgSQL_function *plpgsql_HashTableLookup(PLpgSQL_func_hashkey *func_key);
+static void plpgsql_HashTableInsert(PLpgSQL_function *function,
+ PLpgSQL_func_hashkey *func_key);
+static void plpgsql_HashTableDelete(PLpgSQL_function *function);
+static void delete_function(PLpgSQL_function *func);
/* ----------
* plpgsql_compile Make an execution tree for a PL/pgSQL function.
@@ -252,7 +252,7 @@ plpgsql_compile(FunctionCallInfo fcinfo, bool forValidator)
static PLpgSQL_function *
do_compile(FunctionCallInfo fcinfo,
HeapTuple procTup,
- PLpgSQL_func_hashkey * hashkey,
+ PLpgSQL_func_hashkey *hashkey,
bool forValidator)
{
Form_pg_proc procStruct = (Form_pg_proc) GETSTRUCT(procTup);
@@ -352,8 +352,8 @@ do_compile(FunctionCallInfo fcinfo,
* Fetch info about the procedure's parameters. Allocations aren't
* needed permanently, so make them in tmp cxt.
*
- * We also need to resolve any polymorphic input or output argument
- * types. In validation mode we won't be able to, so we
+ * We also need to resolve any polymorphic input or output
+ * argument types. In validation mode we won't be able to, so we
* arbitrarily assume we are dealing with integers.
*/
MemoryContextSwitchTo(compile_tmp_cxt);
@@ -1558,7 +1558,7 @@ plpgsql_parse_dblwordrowtype(char *word)
* array, and optionally to the current namespace.
*/
PLpgSQL_variable *
-plpgsql_build_variable(const char *refname, int lineno, PLpgSQL_type * dtype,
+plpgsql_build_variable(const char *refname, int lineno, PLpgSQL_type *dtype,
bool add2namespace)
{
PLpgSQL_variable *result;
@@ -1705,12 +1705,12 @@ build_row_from_class(Oid classOid)
/*
* Create the internal variable for the field
*
- * We know if the table definitions contain a default value or if the
- * field is declared in the table as NOT NULL. But it's possible
- * to create a table field as NOT NULL without a default value and
- * that would lead to problems later when initializing the
- * variables due to entering a block at execution time. Thus we
- * ignore this information for now.
+ * We know if the table definitions contain a default value or if
+ * the field is declared in the table as NOT NULL. But it's
+ * possible to create a table field as NOT NULL without a default
+ * value and that would lead to problems later when initializing
+ * the variables due to entering a block at execution time. Thus
+ * we ignore this information for now.
*/
var = plpgsql_build_variable(refname, 0,
plpgsql_build_datatype(attrStruct->atttypid,
@@ -1738,7 +1738,7 @@ build_row_from_class(Oid classOid)
* Build a row-variable data structure given the component variables.
*/
static PLpgSQL_row *
-build_row_from_vars(PLpgSQL_variable ** vars, int numvars)
+build_row_from_vars(PLpgSQL_variable **vars, int numvars)
{
PLpgSQL_row *row;
int i;
@@ -1942,7 +1942,7 @@ plpgsql_parse_err_condition(char *condname)
* ----------
*/
void
-plpgsql_adddatum(PLpgSQL_datum * new)
+plpgsql_adddatum(PLpgSQL_datum *new)
{
if (plpgsql_nDatums == datums_alloc)
{
@@ -2018,7 +2018,7 @@ plpgsql_add_initdatums(int **varnos)
static void
compute_function_hashkey(FunctionCallInfo fcinfo,
Form_pg_proc procStruct,
- PLpgSQL_func_hashkey * hashkey,
+ PLpgSQL_func_hashkey *hashkey,
bool forValidator)
{
/* Make sure any unused bytes of the struct are zero */
@@ -2101,7 +2101,7 @@ plpgsql_resolve_polymorphic_argtypes(int numargs,
}
static void
-delete_function(PLpgSQL_function * func)
+delete_function(PLpgSQL_function *func)
{
/* remove function from hash table */
plpgsql_HashTableDelete(func);
@@ -2135,7 +2135,7 @@ plpgsql_HashTableInit(void)
}
static PLpgSQL_function *
-plpgsql_HashTableLookup(PLpgSQL_func_hashkey * func_key)
+plpgsql_HashTableLookup(PLpgSQL_func_hashkey *func_key)
{
plpgsql_HashEnt *hentry;
@@ -2150,8 +2150,8 @@ plpgsql_HashTableLookup(PLpgSQL_func_hashkey * func_key)
}
static void
-plpgsql_HashTableInsert(PLpgSQL_function * function,
- PLpgSQL_func_hashkey * func_key)
+plpgsql_HashTableInsert(PLpgSQL_function *function,
+ PLpgSQL_func_hashkey *func_key)
{
plpgsql_HashEnt *hentry;
bool found;
@@ -2169,7 +2169,7 @@ plpgsql_HashTableInsert(PLpgSQL_function * function,
}
static void
-plpgsql_HashTableDelete(PLpgSQL_function * function)
+plpgsql_HashTableDelete(PLpgSQL_function *function)
{
plpgsql_HashEnt *hentry;
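As the declarations in this file show, compiled functions are cached in a hash table keyed by a PLpgSQL_func_hashkey, and compute_function_hashkey starts by zeroing the key struct so that padding bytes and unused argtypes[] slots cannot make otherwise-equal keys compare unequal. A standalone illustration of that rule with made-up types (demo_hashkey and make_key are hypothetical, not plpgsql symbols):

#include <string.h>

typedef struct
{
	unsigned int funcOid;
	int			nargs;
	unsigned int argtypes[8];	/* only the first nargs slots are meaningful */
} demo_hashkey;

static void
make_key(demo_hashkey *key, unsigned int funcOid,
		 int nargs, const unsigned int *argtypes)
{
	/* zero padding bytes and unused array slots before filling the key */
	memset(key, 0, sizeof(*key));
	key->funcOid = funcOid;
	key->nargs = nargs;
	memcpy(key->argtypes, argtypes, nargs * sizeof(unsigned int));
}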
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index df82dd3dc1b..95acb3a9d8c 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -3,7 +3,7 @@
* procedural language
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_exec.c,v 1.154 2005/10/24 15:10:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_exec.c,v 1.154.2.1 2005/11/22 18:23:30 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
@@ -75,97 +75,97 @@ static PLpgSQL_expr *active_simple_exprs = NULL;
* Local function forward declarations
************************************************************/
static void plpgsql_exec_error_callback(void *arg);
-static PLpgSQL_datum *copy_plpgsql_datum(PLpgSQL_datum * datum);
+static PLpgSQL_datum *copy_plpgsql_datum(PLpgSQL_datum *datum);
-static int exec_stmt_block(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_block * block);
-static int exec_stmts(PLpgSQL_execstate * estate,
+static int exec_stmt_block(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_block *block);
+static int exec_stmts(PLpgSQL_execstate *estate,
List *stmts);
-static int exec_stmt(PLpgSQL_execstate * estate,
- PLpgSQL_stmt * stmt);
-static int exec_stmt_assign(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_assign * stmt);
-static int exec_stmt_perform(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_perform * stmt);
-static int exec_stmt_getdiag(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_getdiag * stmt);
-static int exec_stmt_if(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_if * stmt);
-static int exec_stmt_loop(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_loop * stmt);
-static int exec_stmt_while(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_while * stmt);
-static int exec_stmt_fori(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_fori * stmt);
-static int exec_stmt_fors(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_fors * stmt);
-static int exec_stmt_select(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_select * stmt);
-static int exec_stmt_open(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_open * stmt);
-static int exec_stmt_fetch(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_fetch * stmt);
-static int exec_stmt_close(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_close * stmt);
-static int exec_stmt_exit(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_exit * stmt);
-static int exec_stmt_return(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_return * stmt);
-static int exec_stmt_return_next(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_return_next * stmt);
-static int exec_stmt_raise(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_raise * stmt);
-static int exec_stmt_execsql(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_execsql * stmt);
-static int exec_stmt_dynexecute(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_dynexecute * stmt);
-static int exec_stmt_dynfors(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_dynfors * stmt);
-
-static void plpgsql_estate_setup(PLpgSQL_execstate * estate,
- PLpgSQL_function * func,
+static int exec_stmt(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt *stmt);
+static int exec_stmt_assign(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_assign *stmt);
+static int exec_stmt_perform(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_perform *stmt);
+static int exec_stmt_getdiag(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_getdiag *stmt);
+static int exec_stmt_if(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_if *stmt);
+static int exec_stmt_loop(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_loop *stmt);
+static int exec_stmt_while(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_while *stmt);
+static int exec_stmt_fori(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_fori *stmt);
+static int exec_stmt_fors(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_fors *stmt);
+static int exec_stmt_select(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_select *stmt);
+static int exec_stmt_open(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_open *stmt);
+static int exec_stmt_fetch(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_fetch *stmt);
+static int exec_stmt_close(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_close *stmt);
+static int exec_stmt_exit(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_exit *stmt);
+static int exec_stmt_return(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_return *stmt);
+static int exec_stmt_return_next(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_return_next *stmt);
+static int exec_stmt_raise(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_raise *stmt);
+static int exec_stmt_execsql(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_execsql *stmt);
+static int exec_stmt_dynexecute(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_dynexecute *stmt);
+static int exec_stmt_dynfors(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_dynfors *stmt);
+
+static void plpgsql_estate_setup(PLpgSQL_execstate *estate,
+ PLpgSQL_function *func,
ReturnSetInfo *rsi);
-static void exec_eval_cleanup(PLpgSQL_execstate * estate);
+static void exec_eval_cleanup(PLpgSQL_execstate *estate);
-static void exec_prepare_plan(PLpgSQL_execstate * estate,
- PLpgSQL_expr * expr);
+static void exec_prepare_plan(PLpgSQL_execstate *estate,
+ PLpgSQL_expr *expr);
static bool exec_simple_check_node(Node *node);
-static void exec_simple_check_plan(PLpgSQL_expr * expr);
-static Datum exec_eval_simple_expr(PLpgSQL_execstate * estate,
- PLpgSQL_expr * expr,
+static void exec_simple_check_plan(PLpgSQL_expr *expr);
+static Datum exec_eval_simple_expr(PLpgSQL_execstate *estate,
+ PLpgSQL_expr *expr,
bool *isNull,
Oid *rettype);
-static void exec_assign_expr(PLpgSQL_execstate * estate,
- PLpgSQL_datum * target,
- PLpgSQL_expr * expr);
-static void exec_assign_value(PLpgSQL_execstate * estate,
- PLpgSQL_datum * target,
+static void exec_assign_expr(PLpgSQL_execstate *estate,
+ PLpgSQL_datum *target,
+ PLpgSQL_expr *expr);
+static void exec_assign_value(PLpgSQL_execstate *estate,
+ PLpgSQL_datum *target,
Datum value, Oid valtype, bool *isNull);
-static void exec_eval_datum(PLpgSQL_execstate * estate,
- PLpgSQL_datum * datum,
+static void exec_eval_datum(PLpgSQL_execstate *estate,
+ PLpgSQL_datum *datum,
Oid expectedtypeid,
Oid *typeid,
Datum *value,
bool *isnull);
-static int exec_eval_integer(PLpgSQL_execstate * estate,
- PLpgSQL_expr * expr,
+static int exec_eval_integer(PLpgSQL_execstate *estate,
+ PLpgSQL_expr *expr,
bool *isNull);
-static bool exec_eval_boolean(PLpgSQL_execstate * estate,
- PLpgSQL_expr * expr,
+static bool exec_eval_boolean(PLpgSQL_execstate *estate,
+ PLpgSQL_expr *expr,
bool *isNull);
-static Datum exec_eval_expr(PLpgSQL_execstate * estate,
- PLpgSQL_expr * expr,
+static Datum exec_eval_expr(PLpgSQL_execstate *estate,
+ PLpgSQL_expr *expr,
bool *isNull,
Oid *rettype);
-static int exec_run_select(PLpgSQL_execstate * estate,
- PLpgSQL_expr * expr, long maxtuples, Portal *portalP);
-static void exec_move_row(PLpgSQL_execstate * estate,
- PLpgSQL_rec * rec,
- PLpgSQL_row * row,
+static int exec_run_select(PLpgSQL_execstate *estate,
+ PLpgSQL_expr *expr, long maxtuples, Portal *portalP);
+static void exec_move_row(PLpgSQL_execstate *estate,
+ PLpgSQL_rec *rec,
+ PLpgSQL_row *row,
HeapTuple tup, TupleDesc tupdesc);
-static HeapTuple make_tuple_from_row(PLpgSQL_execstate * estate,
- PLpgSQL_row * row,
+static HeapTuple make_tuple_from_row(PLpgSQL_execstate *estate,
+ PLpgSQL_row *row,
TupleDesc tupdesc);
static char *convert_value_to_string(Datum value, Oid valtype);
static Datum exec_cast_value(Datum value, Oid valtype,
@@ -177,10 +177,10 @@ static Datum exec_cast_value(Datum value, Oid valtype,
static Datum exec_simple_cast_value(Datum value, Oid valtype,
Oid reqtype, int32 reqtypmod,
bool isnull);
-static void exec_init_tuple_store(PLpgSQL_execstate * estate);
+static void exec_init_tuple_store(PLpgSQL_execstate *estate);
static bool compatible_tupdesc(TupleDesc td1, TupleDesc td2);
-static void exec_set_found(PLpgSQL_execstate * estate, bool state);
-static void free_var(PLpgSQL_var * var);
+static void exec_set_found(PLpgSQL_execstate *estate, bool state);
+static void free_var(PLpgSQL_var *var);
/* ----------
@@ -189,7 +189,7 @@ static void free_var(PLpgSQL_var * var);
* ----------
*/
Datum
-plpgsql_exec_function(PLpgSQL_function * func, FunctionCallInfo fcinfo)
+plpgsql_exec_function(PLpgSQL_function *func, FunctionCallInfo fcinfo)
{
PLpgSQL_execstate estate;
ErrorContextCallback plerrcontext;
@@ -399,7 +399,7 @@ plpgsql_exec_function(PLpgSQL_function * func, FunctionCallInfo fcinfo)
* ----------
*/
HeapTuple
-plpgsql_exec_trigger(PLpgSQL_function * func,
+plpgsql_exec_trigger(PLpgSQL_function *func,
TriggerData *trigdata)
{
PLpgSQL_execstate estate;
@@ -677,7 +677,7 @@ plpgsql_exec_error_callback(void *arg)
* ----------
*/
static PLpgSQL_datum *
-copy_plpgsql_datum(PLpgSQL_datum * datum)
+copy_plpgsql_datum(PLpgSQL_datum *datum)
{
PLpgSQL_datum *result;
@@ -735,7 +735,7 @@ copy_plpgsql_datum(PLpgSQL_datum * datum)
static bool
-exception_matches_conditions(ErrorData *edata, PLpgSQL_condition * cond)
+exception_matches_conditions(ErrorData *edata, PLpgSQL_condition *cond)
{
for (; cond != NULL; cond = cond->next)
{
@@ -767,7 +767,7 @@ exception_matches_conditions(ErrorData *edata, PLpgSQL_condition * cond)
* ----------
*/
static int
-exec_stmt_block(PLpgSQL_execstate * estate, PLpgSQL_stmt_block * block)
+exec_stmt_block(PLpgSQL_execstate *estate, PLpgSQL_stmt_block *block)
{
volatile int rc = -1;
int i;
@@ -970,7 +970,7 @@ exec_stmt_block(PLpgSQL_execstate * estate, PLpgSQL_stmt_block * block)
* ----------
*/
static int
-exec_stmts(PLpgSQL_execstate * estate, List *stmts)
+exec_stmts(PLpgSQL_execstate *estate, List *stmts)
{
ListCell *s;
@@ -1004,7 +1004,7 @@ exec_stmts(PLpgSQL_execstate * estate, List *stmts)
* ----------
*/
static int
-exec_stmt(PLpgSQL_execstate * estate, PLpgSQL_stmt * stmt)
+exec_stmt(PLpgSQL_execstate *estate, PLpgSQL_stmt *stmt)
{
PLpgSQL_stmt *save_estmt;
int rc = -1;
@@ -1113,7 +1113,7 @@ exec_stmt(PLpgSQL_execstate * estate, PLpgSQL_stmt * stmt)
* ----------
*/
static int
-exec_stmt_assign(PLpgSQL_execstate * estate, PLpgSQL_stmt_assign * stmt)
+exec_stmt_assign(PLpgSQL_execstate *estate, PLpgSQL_stmt_assign *stmt)
{
Assert(stmt->varno >= 0);
@@ -1129,7 +1129,7 @@ exec_stmt_assign(PLpgSQL_execstate * estate, PLpgSQL_stmt_assign * stmt)
* ----------
*/
static int
-exec_stmt_perform(PLpgSQL_execstate * estate, PLpgSQL_stmt_perform * stmt)
+exec_stmt_perform(PLpgSQL_execstate *estate, PLpgSQL_stmt_perform *stmt)
{
PLpgSQL_expr *expr = stmt->expr;
@@ -1146,7 +1146,7 @@ exec_stmt_perform(PLpgSQL_execstate * estate, PLpgSQL_stmt_perform * stmt)
* ----------
*/
static int
-exec_stmt_getdiag(PLpgSQL_execstate * estate, PLpgSQL_stmt_getdiag * stmt)
+exec_stmt_getdiag(PLpgSQL_execstate *estate, PLpgSQL_stmt_getdiag *stmt)
{
ListCell *lc;
@@ -1196,7 +1196,7 @@ exec_stmt_getdiag(PLpgSQL_execstate * estate, PLpgSQL_stmt_getdiag * stmt)
* ----------
*/
static int
-exec_stmt_if(PLpgSQL_execstate * estate, PLpgSQL_stmt_if * stmt)
+exec_stmt_if(PLpgSQL_execstate *estate, PLpgSQL_stmt_if *stmt)
{
bool value;
bool isnull;
@@ -1225,7 +1225,7 @@ exec_stmt_if(PLpgSQL_execstate * estate, PLpgSQL_stmt_if * stmt)
* ----------
*/
static int
-exec_stmt_loop(PLpgSQL_execstate * estate, PLpgSQL_stmt_loop * stmt)
+exec_stmt_loop(PLpgSQL_execstate *estate, PLpgSQL_stmt_loop *stmt)
{
for (;;)
{
@@ -1278,7 +1278,7 @@ exec_stmt_loop(PLpgSQL_execstate * estate, PLpgSQL_stmt_loop * stmt)
* ----------
*/
static int
-exec_stmt_while(PLpgSQL_execstate * estate, PLpgSQL_stmt_while * stmt)
+exec_stmt_while(PLpgSQL_execstate *estate, PLpgSQL_stmt_while *stmt)
{
for (;;)
{
@@ -1341,7 +1341,7 @@ exec_stmt_while(PLpgSQL_execstate * estate, PLpgSQL_stmt_while * stmt)
* ----------
*/
static int
-exec_stmt_fori(PLpgSQL_execstate * estate, PLpgSQL_stmt_fori * stmt)
+exec_stmt_fori(PLpgSQL_execstate *estate, PLpgSQL_stmt_fori *stmt)
{
PLpgSQL_var *var;
Datum value;
@@ -1483,7 +1483,7 @@ exec_stmt_fori(PLpgSQL_execstate * estate, PLpgSQL_stmt_fori * stmt)
* ----------
*/
static int
-exec_stmt_fors(PLpgSQL_execstate * estate, PLpgSQL_stmt_fors * stmt)
+exec_stmt_fors(PLpgSQL_execstate *estate, PLpgSQL_stmt_fors *stmt)
{
PLpgSQL_rec *rec = NULL;
PLpgSQL_row *row = NULL;
@@ -1635,7 +1635,7 @@ exec_stmt_fors(PLpgSQL_execstate * estate, PLpgSQL_stmt_fors * stmt)
* ----------
*/
static int
-exec_stmt_select(PLpgSQL_execstate * estate, PLpgSQL_stmt_select * stmt)
+exec_stmt_select(PLpgSQL_execstate *estate, PLpgSQL_stmt_select *stmt)
{
PLpgSQL_rec *rec = NULL;
PLpgSQL_row *row = NULL;
@@ -1693,7 +1693,7 @@ exec_stmt_select(PLpgSQL_execstate * estate, PLpgSQL_stmt_select * stmt)
* ----------
*/
static int
-exec_stmt_exit(PLpgSQL_execstate * estate, PLpgSQL_stmt_exit * stmt)
+exec_stmt_exit(PLpgSQL_execstate *estate, PLpgSQL_stmt_exit *stmt)
{
/*
* If the exit / continue has a condition, evaluate it
@@ -1723,7 +1723,7 @@ exec_stmt_exit(PLpgSQL_execstate * estate, PLpgSQL_stmt_exit * stmt)
* ----------
*/
static int
-exec_stmt_return(PLpgSQL_execstate * estate, PLpgSQL_stmt_return * stmt)
+exec_stmt_return(PLpgSQL_execstate *estate, PLpgSQL_stmt_return *stmt)
{
/*
* If processing a set-returning PL/PgSQL function, the final RETURN
@@ -1834,8 +1834,8 @@ exec_stmt_return(PLpgSQL_execstate * estate, PLpgSQL_stmt_return * stmt)
* ----------
*/
static int
-exec_stmt_return_next(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_return_next * stmt)
+exec_stmt_return_next(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_return_next *stmt)
{
TupleDesc tupdesc;
int natts;
@@ -1974,7 +1974,7 @@ exec_stmt_return_next(PLpgSQL_execstate * estate,
}
static void
-exec_init_tuple_store(PLpgSQL_execstate * estate)
+exec_init_tuple_store(PLpgSQL_execstate *estate)
{
ReturnSetInfo *rsi = estate->rsi;
MemoryContext oldcxt;
@@ -2003,7 +2003,7 @@ exec_init_tuple_store(PLpgSQL_execstate * estate)
* ----------
*/
static int
-exec_stmt_raise(PLpgSQL_execstate * estate, PLpgSQL_stmt_raise * stmt)
+exec_stmt_raise(PLpgSQL_execstate *estate, PLpgSQL_stmt_raise *stmt)
{
char *cp;
PLpgSQL_dstring ds;
@@ -2086,8 +2086,8 @@ exec_stmt_raise(PLpgSQL_execstate * estate, PLpgSQL_stmt_raise * stmt)
* ----------
*/
static void
-plpgsql_estate_setup(PLpgSQL_execstate * estate,
- PLpgSQL_function * func,
+plpgsql_estate_setup(PLpgSQL_execstate *estate,
+ PLpgSQL_function *func,
ReturnSetInfo *rsi)
{
estate->retval = (Datum) 0;
@@ -2152,7 +2152,7 @@ plpgsql_estate_setup(PLpgSQL_execstate * estate,
* ----------
*/
static void
-exec_eval_cleanup(PLpgSQL_execstate * estate)
+exec_eval_cleanup(PLpgSQL_execstate *estate)
{
/* Clear result of a full SPI_execute */
if (estate->eval_tuptable != NULL)
@@ -2170,8 +2170,8 @@ exec_eval_cleanup(PLpgSQL_execstate * estate)
* ----------
*/
static void
-exec_prepare_plan(PLpgSQL_execstate * estate,
- PLpgSQL_expr * expr)
+exec_prepare_plan(PLpgSQL_execstate *estate,
+ PLpgSQL_expr *expr)
{
int i;
_SPI_plan *spi_plan;
@@ -2239,8 +2239,8 @@ exec_prepare_plan(PLpgSQL_execstate * estate,
* ----------
*/
static int
-exec_stmt_execsql(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_execsql * stmt)
+exec_stmt_execsql(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_execsql *stmt)
{
int i;
Datum *values;
@@ -2331,8 +2331,8 @@ exec_stmt_execsql(PLpgSQL_execstate * estate,
* ----------
*/
static int
-exec_stmt_dynexecute(PLpgSQL_execstate * estate,
- PLpgSQL_stmt_dynexecute * stmt)
+exec_stmt_dynexecute(PLpgSQL_execstate *estate,
+ PLpgSQL_stmt_dynexecute *stmt)
{
Datum query;
bool isnull = false;
@@ -2469,7 +2469,7 @@ exec_stmt_dynexecute(PLpgSQL_execstate * estate,
* ----------
*/
static int
-exec_stmt_dynfors(PLpgSQL_execstate * estate, PLpgSQL_stmt_dynfors * stmt)
+exec_stmt_dynfors(PLpgSQL_execstate *estate, PLpgSQL_stmt_dynfors *stmt)
{
Datum query;
bool isnull;
@@ -2650,7 +2650,7 @@ exec_stmt_dynfors(PLpgSQL_execstate * estate, PLpgSQL_stmt_dynfors * stmt)
* ----------
*/
static int
-exec_stmt_open(PLpgSQL_execstate * estate, PLpgSQL_stmt_open * stmt)
+exec_stmt_open(PLpgSQL_execstate *estate, PLpgSQL_stmt_open *stmt)
{
PLpgSQL_var *curvar = NULL;
char *curname = NULL;
@@ -2852,7 +2852,7 @@ exec_stmt_open(PLpgSQL_execstate * estate, PLpgSQL_stmt_open * stmt)
* ----------
*/
static int
-exec_stmt_fetch(PLpgSQL_execstate * estate, PLpgSQL_stmt_fetch * stmt)
+exec_stmt_fetch(PLpgSQL_execstate *estate, PLpgSQL_stmt_fetch *stmt)
{
PLpgSQL_var *curvar = NULL;
PLpgSQL_rec *rec = NULL;
@@ -2925,7 +2925,7 @@ exec_stmt_fetch(PLpgSQL_execstate * estate, PLpgSQL_stmt_fetch * stmt)
* ----------
*/
static int
-exec_stmt_close(PLpgSQL_execstate * estate, PLpgSQL_stmt_close * stmt)
+exec_stmt_close(PLpgSQL_execstate *estate, PLpgSQL_stmt_close *stmt)
{
PLpgSQL_var *curvar = NULL;
Portal portal;
@@ -2965,8 +2965,8 @@ exec_stmt_close(PLpgSQL_execstate * estate, PLpgSQL_stmt_close * stmt)
* ----------
*/
static void
-exec_assign_expr(PLpgSQL_execstate * estate, PLpgSQL_datum * target,
- PLpgSQL_expr * expr)
+exec_assign_expr(PLpgSQL_execstate *estate, PLpgSQL_datum *target,
+ PLpgSQL_expr *expr)
{
Datum value;
Oid valtype;
@@ -2983,8 +2983,8 @@ exec_assign_expr(PLpgSQL_execstate * estate, PLpgSQL_datum * target,
* ----------
*/
static void
-exec_assign_value(PLpgSQL_execstate * estate,
- PLpgSQL_datum * target,
+exec_assign_value(PLpgSQL_execstate *estate,
+ PLpgSQL_datum *target,
Datum value, Oid valtype, bool *isNull)
{
switch (target->dtype)
@@ -3257,8 +3257,8 @@ exec_assign_value(PLpgSQL_execstate * estate,
/*
* Target is an element of an array
*
- * To handle constructs like x[1][2] := something, we have to be
- * prepared to deal with a chain of arrayelem datums. Chase
+ * To handle constructs like x[1][2] := something, we have to
+ * be prepared to deal with a chain of arrayelem datums. Chase
* back to find the base array datum, and save the subscript
* expressions as we go. (We are scanning right to left here,
* but want to evaluate the subscripts left-to-right to
@@ -3401,8 +3401,8 @@ exec_assign_value(PLpgSQL_execstate * estate,
* the estate's short-term memory context.
*/
static void
-exec_eval_datum(PLpgSQL_execstate * estate,
- PLpgSQL_datum * datum,
+exec_eval_datum(PLpgSQL_execstate *estate,
+ PLpgSQL_datum *datum,
Oid expectedtypeid,
Oid *typeid,
Datum *value,
@@ -3559,8 +3559,8 @@ exec_eval_datum(PLpgSQL_execstate * estate,
* ----------
*/
static int
-exec_eval_integer(PLpgSQL_execstate * estate,
- PLpgSQL_expr * expr,
+exec_eval_integer(PLpgSQL_execstate *estate,
+ PLpgSQL_expr *expr,
bool *isNull)
{
Datum exprdatum;
@@ -3581,8 +3581,8 @@ exec_eval_integer(PLpgSQL_execstate * estate,
* ----------
*/
static bool
-exec_eval_boolean(PLpgSQL_execstate * estate,
- PLpgSQL_expr * expr,
+exec_eval_boolean(PLpgSQL_execstate *estate,
+ PLpgSQL_expr *expr,
bool *isNull)
{
Datum exprdatum;
@@ -3603,8 +3603,8 @@ exec_eval_boolean(PLpgSQL_execstate * estate,
* ----------
*/
static Datum
-exec_eval_expr(PLpgSQL_execstate * estate,
- PLpgSQL_expr * expr,
+exec_eval_expr(PLpgSQL_execstate *estate,
+ PLpgSQL_expr *expr,
bool *isNull,
Oid *rettype)
{
@@ -3666,8 +3666,8 @@ exec_eval_expr(PLpgSQL_execstate * estate,
* ----------
*/
static int
-exec_run_select(PLpgSQL_execstate * estate,
- PLpgSQL_expr * expr, long maxtuples, Portal *portalP)
+exec_run_select(PLpgSQL_execstate *estate,
+ PLpgSQL_expr *expr, long maxtuples, Portal *portalP)
{
int i;
Datum *values;
@@ -3748,8 +3748,8 @@ exec_run_select(PLpgSQL_execstate * estate,
* ----------
*/
static Datum
-exec_eval_simple_expr(PLpgSQL_execstate * estate,
- PLpgSQL_expr * expr,
+exec_eval_simple_expr(PLpgSQL_execstate *estate,
+ PLpgSQL_expr *expr,
bool *isNull,
Oid *rettype)
{
@@ -3780,10 +3780,10 @@ exec_eval_simple_expr(PLpgSQL_execstate * estate,
/*
* Param list can live in econtext's temporary memory context.
*
- * XXX think about avoiding repeated palloc's for param lists? Beware however
- * that this routine is re-entrant: exec_eval_datum() can call it back for
- * subscript evaluation, and so there can be a need to have more than one
- * active param list.
+ * XXX think about avoiding repeated palloc's for param lists? Beware
+ * however that this routine is re-entrant: exec_eval_datum() can call it
+ * back for subscript evaluation, and so there can be a need to have more
+ * than one active param list.
*/
paramLI = (ParamListInfo)
MemoryContextAlloc(econtext->ecxt_per_tuple_memory,
@@ -3861,9 +3861,9 @@ exec_eval_simple_expr(PLpgSQL_execstate * estate,
* ----------
*/
static void
-exec_move_row(PLpgSQL_execstate * estate,
- PLpgSQL_rec * rec,
- PLpgSQL_row * row,
+exec_move_row(PLpgSQL_execstate *estate,
+ PLpgSQL_rec *rec,
+ PLpgSQL_row *row,
HeapTuple tup, TupleDesc tupdesc)
{
/*
@@ -3927,16 +3927,16 @@ exec_move_row(PLpgSQL_execstate * estate,
* Row is a bit more complicated in that we assign the individual
* attributes of the tuple to the variables the row points to.
*
- * NOTE: this code used to demand row->nfields == tup->t_data->t_natts, but
- * that's wrong. The tuple might have more fields than we expected if
+ * NOTE: this code used to demand row->nfields == tup->t_data->t_natts,
+ * but that's wrong. The tuple might have more fields than we expected if
* it's from an inheritance-child table of the current table, or it might
* have fewer if the table has had columns added by ALTER TABLE. Ignore
* extra columns and assume NULL for missing columns, the same as
* heap_getattr would do. We also have to skip over dropped columns in
* either the source or destination.
*
- * If we have no tuple data at all, we'll assign NULL to all columns of the
- * row variable.
+ * If we have no tuple data at all, we'll assign NULL to all columns of
+ * the row variable.
*/
if (row != NULL)
{
@@ -3995,8 +3995,8 @@ exec_move_row(PLpgSQL_execstate * estate,
* ----------
*/
static HeapTuple
-make_tuple_from_row(PLpgSQL_execstate * estate,
- PLpgSQL_row * row,
+make_tuple_from_row(PLpgSQL_execstate *estate,
+ PLpgSQL_row *row,
TupleDesc tupdesc)
{
int natts = tupdesc->natts;
@@ -4362,7 +4362,7 @@ exec_simple_check_node(Node *node)
* ----------
*/
static void
-exec_simple_check_plan(PLpgSQL_expr * expr)
+exec_simple_check_plan(PLpgSQL_expr *expr)
{
_SPI_plan *spi_plan = (_SPI_plan *) expr->plan;
Plan *plan;
@@ -4449,7 +4449,7 @@ compatible_tupdesc(TupleDesc td1, TupleDesc td2)
* ----------
*/
static void
-exec_set_found(PLpgSQL_execstate * estate, bool state)
+exec_set_found(PLpgSQL_execstate *estate, bool state)
{
PLpgSQL_var *var;
@@ -4496,7 +4496,7 @@ plpgsql_xact_cb(XactEvent event, void *arg)
}
static void
-free_var(PLpgSQL_var * var)
+free_var(PLpgSQL_var *var)
{
if (var->freeval)
{
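plpgsql_exec_function and plpgsql_exec_trigger (shown above) register plpgsql_exec_error_callback in an ErrorContextCallback around statement execution, so any ereport() raised inside gets a "PL/pgSQL function ..." CONTEXT line. The push/pop pattern, sketched here with a made-up callback and assuming the usual backend headers (this is backend-internal code, not a standalone program):

#include "postgres.h"

static void
my_error_callback(void *arg)
{
	const char *fname = (const char *) arg;

	errcontext("PL/pgSQL function \"%s\"", fname);
}

static void
run_with_context(const char *fname)
{
	ErrorContextCallback myerrcontext;

	/* push our callback onto the error-context stack */
	myerrcontext.callback = my_error_callback;
	myerrcontext.arg = (void *) fname;
	myerrcontext.previous = error_context_stack;
	error_context_stack = &myerrcontext;

	/* ... run statements; any ereport() now carries our context line ... */

	/* pop */
	error_context_stack = myerrcontext.previous;
}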
diff --git a/src/pl/plpgsql/src/pl_funcs.c b/src/pl/plpgsql/src/pl_funcs.c
index dd12a061f34..b03dc5ec797 100644
--- a/src/pl/plpgsql/src/pl_funcs.c
+++ b/src/pl/plpgsql/src/pl_funcs.c
@@ -3,7 +3,7 @@
* procedural language
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_funcs.c,v 1.46 2005/10/15 02:49:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_funcs.c,v 1.46.2.1 2005/11/22 18:23:30 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
@@ -56,7 +56,7 @@ static bool ns_localmode = false;
* ----------
*/
void
-plpgsql_dstring_init(PLpgSQL_dstring * ds)
+plpgsql_dstring_init(PLpgSQL_dstring *ds)
{
ds->value = palloc(ds->alloc = 512);
ds->used = 1;
@@ -69,13 +69,13 @@ plpgsql_dstring_init(PLpgSQL_dstring * ds)
* ----------
*/
void
-plpgsql_dstring_free(PLpgSQL_dstring * ds)
+plpgsql_dstring_free(PLpgSQL_dstring *ds)
{
pfree(ds->value);
}
static void
-plpgsql_dstring_expand(PLpgSQL_dstring * ds, int needed)
+plpgsql_dstring_expand(PLpgSQL_dstring *ds, int needed)
{
/* Don't allow truncating the string */
Assert(needed > ds->alloc);
@@ -94,7 +94,7 @@ plpgsql_dstring_expand(PLpgSQL_dstring * ds, int needed)
* ----------
*/
void
-plpgsql_dstring_append(PLpgSQL_dstring * ds, const char *str)
+plpgsql_dstring_append(PLpgSQL_dstring *ds, const char *str)
{
int len = strlen(str);
int needed = ds->used + len;
@@ -113,7 +113,7 @@ plpgsql_dstring_append(PLpgSQL_dstring * ds, const char *str)
* ----------
*/
void
-plpgsql_dstring_append_char(PLpgSQL_dstring * ds, char c)
+plpgsql_dstring_append_char(PLpgSQL_dstring *ds, char c)
{
if (ds->used == ds->alloc)
plpgsql_dstring_expand(ds, ds->used + 1);
@@ -129,7 +129,7 @@ plpgsql_dstring_append_char(PLpgSQL_dstring * ds, char c)
* ----------
*/
char *
-plpgsql_dstring_get(PLpgSQL_dstring * ds)
+plpgsql_dstring_get(PLpgSQL_dstring *ds)
{
return ds->value;
}
@@ -443,7 +443,7 @@ plpgsql_convert_ident(const char *s, char **output, int numidents)
* Statement type as a string, for use in error messages etc.
*/
const char *
-plpgsql_stmt_typename(PLpgSQL_stmt * stmt)
+plpgsql_stmt_typename(PLpgSQL_stmt *stmt)
{
switch (stmt->cmd_type)
{
@@ -499,28 +499,28 @@ plpgsql_stmt_typename(PLpgSQL_stmt * stmt)
static int dump_indent;
static void dump_ind();
-static void dump_stmt(PLpgSQL_stmt * stmt);
-static void dump_block(PLpgSQL_stmt_block * block);
-static void dump_assign(PLpgSQL_stmt_assign * stmt);
-static void dump_if(PLpgSQL_stmt_if * stmt);
-static void dump_loop(PLpgSQL_stmt_loop * stmt);
-static void dump_while(PLpgSQL_stmt_while * stmt);
-static void dump_fori(PLpgSQL_stmt_fori * stmt);
-static void dump_fors(PLpgSQL_stmt_fors * stmt);
-static void dump_select(PLpgSQL_stmt_select * stmt);
-static void dump_exit(PLpgSQL_stmt_exit * stmt);
-static void dump_return(PLpgSQL_stmt_return * stmt);
-static void dump_return_next(PLpgSQL_stmt_return_next * stmt);
-static void dump_raise(PLpgSQL_stmt_raise * stmt);
-static void dump_execsql(PLpgSQL_stmt_execsql * stmt);
-static void dump_dynexecute(PLpgSQL_stmt_dynexecute * stmt);
-static void dump_dynfors(PLpgSQL_stmt_dynfors * stmt);
-static void dump_getdiag(PLpgSQL_stmt_getdiag * stmt);
-static void dump_open(PLpgSQL_stmt_open * stmt);
-static void dump_fetch(PLpgSQL_stmt_fetch * stmt);
-static void dump_close(PLpgSQL_stmt_close * stmt);
-static void dump_perform(PLpgSQL_stmt_perform * stmt);
-static void dump_expr(PLpgSQL_expr * expr);
+static void dump_stmt(PLpgSQL_stmt *stmt);
+static void dump_block(PLpgSQL_stmt_block *block);
+static void dump_assign(PLpgSQL_stmt_assign *stmt);
+static void dump_if(PLpgSQL_stmt_if *stmt);
+static void dump_loop(PLpgSQL_stmt_loop *stmt);
+static void dump_while(PLpgSQL_stmt_while *stmt);
+static void dump_fori(PLpgSQL_stmt_fori *stmt);
+static void dump_fors(PLpgSQL_stmt_fors *stmt);
+static void dump_select(PLpgSQL_stmt_select *stmt);
+static void dump_exit(PLpgSQL_stmt_exit *stmt);
+static void dump_return(PLpgSQL_stmt_return *stmt);
+static void dump_return_next(PLpgSQL_stmt_return_next *stmt);
+static void dump_raise(PLpgSQL_stmt_raise *stmt);
+static void dump_execsql(PLpgSQL_stmt_execsql *stmt);
+static void dump_dynexecute(PLpgSQL_stmt_dynexecute *stmt);
+static void dump_dynfors(PLpgSQL_stmt_dynfors *stmt);
+static void dump_getdiag(PLpgSQL_stmt_getdiag *stmt);
+static void dump_open(PLpgSQL_stmt_open *stmt);
+static void dump_fetch(PLpgSQL_stmt_fetch *stmt);
+static void dump_close(PLpgSQL_stmt_close *stmt);
+static void dump_perform(PLpgSQL_stmt_perform *stmt);
+static void dump_expr(PLpgSQL_expr *expr);
static void
@@ -533,7 +533,7 @@ dump_ind(void)
}
static void
-dump_stmt(PLpgSQL_stmt * stmt)
+dump_stmt(PLpgSQL_stmt *stmt)
{
printf("%3d:", stmt->lineno);
switch (stmt->cmd_type)
@@ -616,7 +616,7 @@ dump_stmts(List *stmts)
}
static void
-dump_block(PLpgSQL_stmt_block * block)
+dump_block(PLpgSQL_stmt_block *block)
{
char *name;
@@ -657,7 +657,7 @@ dump_block(PLpgSQL_stmt_block * block)
}
static void
-dump_assign(PLpgSQL_stmt_assign * stmt)
+dump_assign(PLpgSQL_stmt_assign *stmt)
{
dump_ind();
printf("ASSIGN var %d := ", stmt->varno);
@@ -666,7 +666,7 @@ dump_assign(PLpgSQL_stmt_assign * stmt)
}
static void
-dump_if(PLpgSQL_stmt_if * stmt)
+dump_if(PLpgSQL_stmt_if *stmt)
{
dump_ind();
printf("IF ");
@@ -687,7 +687,7 @@ dump_if(PLpgSQL_stmt_if * stmt)
}
static void
-dump_loop(PLpgSQL_stmt_loop * stmt)
+dump_loop(PLpgSQL_stmt_loop *stmt)
{
dump_ind();
printf("LOOP\n");
@@ -699,7 +699,7 @@ dump_loop(PLpgSQL_stmt_loop * stmt)
}
static void
-dump_while(PLpgSQL_stmt_while * stmt)
+dump_while(PLpgSQL_stmt_while *stmt)
{
dump_ind();
printf("WHILE ");
@@ -713,7 +713,7 @@ dump_while(PLpgSQL_stmt_while * stmt)
}
static void
-dump_fori(PLpgSQL_stmt_fori * stmt)
+dump_fori(PLpgSQL_stmt_fori *stmt)
{
dump_ind();
printf("FORI %s %s\n", stmt->var->refname, (stmt->reverse) ? "REVERSE" : "NORMAL");
@@ -736,7 +736,7 @@ dump_fori(PLpgSQL_stmt_fori * stmt)
}
static void
-dump_fors(PLpgSQL_stmt_fors * stmt)
+dump_fors(PLpgSQL_stmt_fors *stmt)
{
dump_ind();
printf("FORS %s ", (stmt->rec != NULL) ? stmt->rec->refname : stmt->row->refname);
@@ -750,7 +750,7 @@ dump_fors(PLpgSQL_stmt_fors * stmt)
}
static void
-dump_select(PLpgSQL_stmt_select * stmt)
+dump_select(PLpgSQL_stmt_select *stmt)
{
dump_ind();
printf("SELECT ");
@@ -773,7 +773,7 @@ dump_select(PLpgSQL_stmt_select * stmt)
}
static void
-dump_open(PLpgSQL_stmt_open * stmt)
+dump_open(PLpgSQL_stmt_open *stmt)
{
dump_ind();
printf("OPEN curvar=%d\n", stmt->curvar);
@@ -805,7 +805,7 @@ dump_open(PLpgSQL_stmt_open * stmt)
}
static void
-dump_fetch(PLpgSQL_stmt_fetch * stmt)
+dump_fetch(PLpgSQL_stmt_fetch *stmt)
{
dump_ind();
printf("FETCH curvar=%d\n", stmt->curvar);
@@ -826,14 +826,14 @@ dump_fetch(PLpgSQL_stmt_fetch * stmt)
}
static void
-dump_close(PLpgSQL_stmt_close * stmt)
+dump_close(PLpgSQL_stmt_close *stmt)
{
dump_ind();
printf("CLOSE curvar=%d\n", stmt->curvar);
}
static void
-dump_perform(PLpgSQL_stmt_perform * stmt)
+dump_perform(PLpgSQL_stmt_perform *stmt)
{
dump_ind();
printf("PERFORM expr = ");
@@ -842,7 +842,7 @@ dump_perform(PLpgSQL_stmt_perform * stmt)
}
static void
-dump_exit(PLpgSQL_stmt_exit * stmt)
+dump_exit(PLpgSQL_stmt_exit *stmt)
{
dump_ind();
printf("%s label='%s'",
@@ -856,7 +856,7 @@ dump_exit(PLpgSQL_stmt_exit * stmt)
}
static void
-dump_return(PLpgSQL_stmt_return * stmt)
+dump_return(PLpgSQL_stmt_return *stmt)
{
dump_ind();
printf("RETURN ");
@@ -870,7 +870,7 @@ dump_return(PLpgSQL_stmt_return * stmt)
}
static void
-dump_return_next(PLpgSQL_stmt_return_next * stmt)
+dump_return_next(PLpgSQL_stmt_return_next *stmt)
{
dump_ind();
printf("RETURN NEXT ");
@@ -884,7 +884,7 @@ dump_return_next(PLpgSQL_stmt_return_next * stmt)
}
static void
-dump_raise(PLpgSQL_stmt_raise * stmt)
+dump_raise(PLpgSQL_stmt_raise *stmt)
{
ListCell *lc;
int i = 0;
@@ -903,7 +903,7 @@ dump_raise(PLpgSQL_stmt_raise * stmt)
}
static void
-dump_execsql(PLpgSQL_stmt_execsql * stmt)
+dump_execsql(PLpgSQL_stmt_execsql *stmt)
{
dump_ind();
printf("EXECSQL ");
@@ -912,7 +912,7 @@ dump_execsql(PLpgSQL_stmt_execsql * stmt)
}
static void
-dump_dynexecute(PLpgSQL_stmt_dynexecute * stmt)
+dump_dynexecute(PLpgSQL_stmt_dynexecute *stmt)
{
dump_ind();
printf("EXECUTE ");
@@ -934,7 +934,7 @@ dump_dynexecute(PLpgSQL_stmt_dynexecute * stmt)
}
static void
-dump_dynfors(PLpgSQL_stmt_dynfors * stmt)
+dump_dynfors(PLpgSQL_stmt_dynfors *stmt)
{
dump_ind();
printf("FORS %s EXECUTE ", (stmt->rec != NULL) ? stmt->rec->refname : stmt->row->refname);
@@ -948,7 +948,7 @@ dump_dynfors(PLpgSQL_stmt_dynfors * stmt)
}
static void
-dump_getdiag(PLpgSQL_stmt_getdiag * stmt)
+dump_getdiag(PLpgSQL_stmt_getdiag *stmt)
{
ListCell *lc;
@@ -982,7 +982,7 @@ dump_getdiag(PLpgSQL_stmt_getdiag * stmt)
}
static void
-dump_expr(PLpgSQL_expr * expr)
+dump_expr(PLpgSQL_expr *expr)
{
int i;
@@ -1002,7 +1002,7 @@ dump_expr(PLpgSQL_expr * expr)
}
void
-plpgsql_dumptree(PLpgSQL_function * func)
+plpgsql_dumptree(PLpgSQL_function *func)
{
int i;
PLpgSQL_datum *d;
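The pl_funcs.c hunks above cover the PLpgSQL_dstring helpers: a buffer holding alloc bytes, of which used are occupied (including the NUL terminator), grown on demand by plpgsql_dstring_expand. A standalone re-creation of the same pattern using malloc instead of palloc; this is illustrative only, and the growth policy here (simple doubling) may differ from the original's.

#include <stdlib.h>
#include <string.h>

typedef struct
{
	int			alloc;			/* bytes allocated */
	int			used;			/* bytes in use, including NUL terminator */
	char	   *value;
} dstring;

static void
dstring_init(dstring *ds)
{
	ds->value = malloc(ds->alloc = 512);
	ds->used = 1;
	ds->value[0] = '\0';
}

static void
dstring_append(dstring *ds, const char *str)
{
	int			len = strlen(str);
	int			needed = ds->used + len;

	if (needed > ds->alloc)
	{
		do
		{
			ds->alloc *= 2;		/* double until large enough (growth policy is illustrative) */
		} while (needed > ds->alloc);
		ds->value = realloc(ds->value, ds->alloc);
	}
	memcpy(ds->value + ds->used - 1, str, len + 1);		/* overwrite old NUL */
	ds->used += len;
}

static char *
dstring_get(dstring *ds)
{
	return ds->value;
}

static void
dstring_free(dstring *ds)
{
	free(ds->value);
}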
diff --git a/src/pl/plpgsql/src/plpgsql.h b/src/pl/plpgsql/src/plpgsql.h
index 38b1aa1329b..c5da41bf63b 100644
--- a/src/pl/plpgsql/src/plpgsql.h
+++ b/src/pl/plpgsql/src/plpgsql.h
@@ -3,7 +3,7 @@
* procedural language
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/plpgsql.h,v 1.65 2005/10/15 02:49:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/plpgsql.h,v 1.65.2.1 2005/11/22 18:23:30 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
@@ -150,7 +150,7 @@ typedef struct
int alloc;
int used; /* Including NUL terminator */
char *value;
-} PLpgSQL_dstring;
+} PLpgSQL_dstring;
typedef struct
@@ -164,7 +164,7 @@ typedef struct
Oid typioparam;
FmgrInfo typinput; /* lookup info for typinput function */
int32 atttypmod; /* typmod (taken from someplace else) */
-} PLpgSQL_type;
+} PLpgSQL_type;
/*
@@ -176,7 +176,7 @@ typedef struct
{ /* Generic datum array item */
int dtype;
int dno;
-} PLpgSQL_datum;
+} PLpgSQL_datum;
/*
* The variants PLpgSQL_var, PLpgSQL_row, and PLpgSQL_rec share these
@@ -188,7 +188,7 @@ typedef struct
int dno;
char *refname;
int lineno;
-} PLpgSQL_variable;
+} PLpgSQL_variable;
typedef struct PLpgSQL_expr
{ /* SQL Query to plan and execute */
@@ -206,7 +206,7 @@ typedef struct PLpgSQL_expr
/* params to pass to expr */
int nparams;
int params[1]; /* VARIABLE SIZE ARRAY ... must be last */
-} PLpgSQL_expr;
+} PLpgSQL_expr;
typedef struct
@@ -226,7 +226,7 @@ typedef struct
Datum value;
bool isnull;
bool freeval;
-} PLpgSQL_var;
+} PLpgSQL_var;
typedef struct
@@ -248,7 +248,7 @@ typedef struct
int nfields;
char **fieldnames;
int *varnos;
-} PLpgSQL_row;
+} PLpgSQL_row;
typedef struct
@@ -262,7 +262,7 @@ typedef struct
TupleDesc tupdesc;
bool freetup;
bool freetupdesc;
-} PLpgSQL_rec;
+} PLpgSQL_rec;
typedef struct
@@ -271,7 +271,7 @@ typedef struct
int rfno;
char *fieldname;
int recparentno; /* dno of parent record */
-} PLpgSQL_recfield;
+} PLpgSQL_recfield;
typedef struct
@@ -280,7 +280,7 @@ typedef struct
int dno;
PLpgSQL_expr *subscript;
int arrayparentno; /* dno of parent array variable */
-} PLpgSQL_arrayelem;
+} PLpgSQL_arrayelem;
typedef struct
@@ -288,7 +288,7 @@ typedef struct
int dtype;
int dno;
PLpgSQL_expr *argnum;
-} PLpgSQL_trigarg;
+} PLpgSQL_trigarg;
typedef struct
@@ -296,7 +296,7 @@ typedef struct
int itemtype;
int itemno;
char name[1];
-} PLpgSQL_nsitem;
+} PLpgSQL_nsitem;
/* XXX: consider adapting this to use List */
@@ -306,14 +306,14 @@ typedef struct PLpgSQL_ns
int items_used;
PLpgSQL_nsitem **items;
struct PLpgSQL_ns *upper;
-} PLpgSQL_ns;
+} PLpgSQL_ns;
typedef struct
{ /* Generic execution node */
int cmd_type;
int lineno;
-} PLpgSQL_stmt;
+} PLpgSQL_stmt;
typedef struct PLpgSQL_condition
@@ -321,21 +321,21 @@ typedef struct PLpgSQL_condition
int sqlerrstate; /* SQLSTATE code */
char *condname; /* condition name (for debugging) */
struct PLpgSQL_condition *next;
-} PLpgSQL_condition;
+} PLpgSQL_condition;
typedef struct
{
int sqlstate_varno;
int sqlerrm_varno;
List *exc_list; /* List of WHEN clauses */
-} PLpgSQL_exception_block;
+} PLpgSQL_exception_block;
typedef struct
{ /* One EXCEPTION ... WHEN clause */
int lineno;
PLpgSQL_condition *conditions;
List *action; /* List of statements */
-} PLpgSQL_exception;
+} PLpgSQL_exception;
typedef struct
@@ -347,7 +347,7 @@ typedef struct
int n_initvars;
int *initvarnos;
PLpgSQL_exception_block *exceptions;
-} PLpgSQL_stmt_block;
+} PLpgSQL_stmt_block;
typedef struct
@@ -356,27 +356,27 @@ typedef struct
int lineno;
int varno;
PLpgSQL_expr *expr;
-} PLpgSQL_stmt_assign;
+} PLpgSQL_stmt_assign;
typedef struct
{ /* PERFORM statement */
int cmd_type;
int lineno;
PLpgSQL_expr *expr;
-} PLpgSQL_stmt_perform;
+} PLpgSQL_stmt_perform;
typedef struct
{ /* Get Diagnostics item */
int kind; /* id for diagnostic value desired */
int target; /* where to assign it */
-} PLpgSQL_diag_item;
+} PLpgSQL_diag_item;
typedef struct
{ /* Get Diagnostics statement */
int cmd_type;
int lineno;
List *diag_items; /* List of PLpgSQL_diag_item */
-} PLpgSQL_stmt_getdiag;
+} PLpgSQL_stmt_getdiag;
typedef struct
@@ -386,7 +386,7 @@ typedef struct
PLpgSQL_expr *cond;
List *true_body; /* List of statements */
List *false_body; /* List of statements */
-} PLpgSQL_stmt_if;
+} PLpgSQL_stmt_if;
typedef struct
@@ -395,7 +395,7 @@ typedef struct
int lineno;
char *label;
List *body; /* List of statements */
-} PLpgSQL_stmt_loop;
+} PLpgSQL_stmt_loop;
typedef struct
@@ -405,7 +405,7 @@ typedef struct
char *label;
PLpgSQL_expr *cond;
List *body; /* List of statements */
-} PLpgSQL_stmt_while;
+} PLpgSQL_stmt_while;
typedef struct
@@ -418,7 +418,7 @@ typedef struct
PLpgSQL_expr *upper;
int reverse;
List *body; /* List of statements */
-} PLpgSQL_stmt_fori;
+} PLpgSQL_stmt_fori;
typedef struct
@@ -430,7 +430,7 @@ typedef struct
PLpgSQL_row *row;
PLpgSQL_expr *query;
List *body; /* List of statements */
-} PLpgSQL_stmt_fors;
+} PLpgSQL_stmt_fors;
typedef struct
@@ -442,7 +442,7 @@ typedef struct
PLpgSQL_row *row;
PLpgSQL_expr *query;
List *body; /* List of statements */
-} PLpgSQL_stmt_dynfors;
+} PLpgSQL_stmt_dynfors;
typedef struct
@@ -452,7 +452,7 @@ typedef struct
PLpgSQL_rec *rec;
PLpgSQL_row *row;
PLpgSQL_expr *query;
-} PLpgSQL_stmt_select;
+} PLpgSQL_stmt_select;
typedef struct
@@ -464,7 +464,7 @@ typedef struct
PLpgSQL_expr *argquery;
PLpgSQL_expr *query;
PLpgSQL_expr *dynquery;
-} PLpgSQL_stmt_open;
+} PLpgSQL_stmt_open;
typedef struct
@@ -474,7 +474,7 @@ typedef struct
PLpgSQL_rec *rec;
PLpgSQL_row *row;
int curvar;
-} PLpgSQL_stmt_fetch;
+} PLpgSQL_stmt_fetch;
typedef struct
@@ -482,7 +482,7 @@ typedef struct
int cmd_type;
int lineno;
int curvar;
-} PLpgSQL_stmt_close;
+} PLpgSQL_stmt_close;
typedef struct
@@ -492,7 +492,7 @@ typedef struct
bool is_exit; /* Is this an exit or a continue? */
char *label;
PLpgSQL_expr *cond;
-} PLpgSQL_stmt_exit;
+} PLpgSQL_stmt_exit;
typedef struct
@@ -501,7 +501,7 @@ typedef struct
int lineno;
PLpgSQL_expr *expr;
int retvarno;
-} PLpgSQL_stmt_return;
+} PLpgSQL_stmt_return;
typedef struct
{ /* RETURN NEXT statement */
@@ -509,7 +509,7 @@ typedef struct
int lineno;
PLpgSQL_expr *expr;
int retvarno;
-} PLpgSQL_stmt_return_next;
+} PLpgSQL_stmt_return_next;
typedef struct
{ /* RAISE statement */
@@ -518,7 +518,7 @@ typedef struct
int elog_level;
char *message;
List *params; /* list of expressions */
-} PLpgSQL_stmt_raise;
+} PLpgSQL_stmt_raise;
typedef struct
@@ -526,7 +526,7 @@ typedef struct
int cmd_type;
int lineno;
PLpgSQL_expr *sqlstmt;
-} PLpgSQL_stmt_execsql;
+} PLpgSQL_stmt_execsql;
typedef struct
@@ -536,7 +536,7 @@ typedef struct
PLpgSQL_rec *rec; /* INTO record or row variable */
PLpgSQL_row *row;
PLpgSQL_expr *query;
-} PLpgSQL_stmt_dynexecute;
+} PLpgSQL_stmt_dynexecute;
typedef struct PLpgSQL_func_hashkey
@@ -556,7 +556,7 @@ typedef struct PLpgSQL_func_hashkey
* PLpgSQL functions. Be careful that extra positions are zeroed!
*/
Oid argtypes[FUNC_MAX_ARGS];
-} PLpgSQL_func_hashkey;
+} PLpgSQL_func_hashkey;
typedef struct PLpgSQL_function
@@ -595,7 +595,7 @@ typedef struct PLpgSQL_function
int ndatums;
PLpgSQL_datum **datums;
PLpgSQL_stmt_block *action;
-} PLpgSQL_function;
+} PLpgSQL_function;
typedef struct
@@ -635,7 +635,7 @@ typedef struct
PLpgSQL_function *err_func; /* current func */
PLpgSQL_stmt *err_stmt; /* current stmt */
const char *err_text; /* additional state info */
-} PLpgSQL_execstate;
+} PLpgSQL_execstate;
/**********************************************************************
@@ -680,10 +680,10 @@ extern int plpgsql_parse_dblwordrowtype(char *word);
extern PLpgSQL_type *plpgsql_parse_datatype(const char *string);
extern PLpgSQL_type *plpgsql_build_datatype(Oid typeOid, int32 typmod);
extern PLpgSQL_variable *plpgsql_build_variable(const char *refname, int lineno,
- PLpgSQL_type * dtype,
+ PLpgSQL_type *dtype,
bool add2namespace);
extern PLpgSQL_condition *plpgsql_parse_err_condition(char *condname);
-extern void plpgsql_adddatum(PLpgSQL_datum * new);
+extern void plpgsql_adddatum(PLpgSQL_datum *new);
extern int plpgsql_add_initdatums(int **varnos);
extern void plpgsql_HashTableInit(void);
extern void plpgsql_compile_error_callback(void *arg);
@@ -700,9 +700,9 @@ extern Datum plpgsql_validator(PG_FUNCTION_ARGS);
* Functions in pl_exec.c
* ----------
*/
-extern Datum plpgsql_exec_function(PLpgSQL_function * func,
+extern Datum plpgsql_exec_function(PLpgSQL_function *func,
FunctionCallInfo fcinfo);
-extern HeapTuple plpgsql_exec_trigger(PLpgSQL_function * func,
+extern HeapTuple plpgsql_exec_trigger(PLpgSQL_function *func,
TriggerData *trigdata);
extern void plpgsql_xact_cb(XactEvent event, void *arg);
@@ -710,11 +710,11 @@ extern void plpgsql_xact_cb(XactEvent event, void *arg);
* Functions for the dynamic string handling in pl_funcs.c
* ----------
*/
-extern void plpgsql_dstring_init(PLpgSQL_dstring * ds);
-extern void plpgsql_dstring_free(PLpgSQL_dstring * ds);
-extern void plpgsql_dstring_append(PLpgSQL_dstring * ds, const char *str);
-extern void plpgsql_dstring_append_char(PLpgSQL_dstring * ds, char c);
-extern char *plpgsql_dstring_get(PLpgSQL_dstring * ds);
+extern void plpgsql_dstring_init(PLpgSQL_dstring *ds);
+extern void plpgsql_dstring_free(PLpgSQL_dstring *ds);
+extern void plpgsql_dstring_append(PLpgSQL_dstring *ds, const char *str);
+extern void plpgsql_dstring_append_char(PLpgSQL_dstring *ds, char c);
+extern char *plpgsql_dstring_get(PLpgSQL_dstring *ds);
/* ----------
* Functions for the namestack handling in pl_funcs.c
@@ -733,8 +733,8 @@ extern void plpgsql_ns_rename(char *oldname, char *newname);
* ----------
*/
extern void plpgsql_convert_ident(const char *s, char **output, int numidents);
-extern const char *plpgsql_stmt_typename(PLpgSQL_stmt * stmt);
-extern void plpgsql_dumptree(PLpgSQL_function * func);
+extern const char *plpgsql_stmt_typename(PLpgSQL_stmt *stmt);
+extern void plpgsql_dumptree(PLpgSQL_function *func);
/* ----------
* Externs in gram.y and scan.l
diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c
index fe4e9f030ec..3f3d6acd45a 100644
--- a/src/pl/tcl/pltcl.c
+++ b/src/pl/tcl/pltcl.c
@@ -31,7 +31,7 @@
* ENHANCEMENTS, OR MODIFICATIONS.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/tcl/pltcl.c,v 1.98 2005/10/15 02:49:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/pl/tcl/pltcl.c,v 1.98.2.1 2005/11/22 18:23:31 momjian Exp $
*
**********************************************************************/
@@ -112,7 +112,7 @@ typedef struct pltcl_proc_desc
int nargs;
FmgrInfo arg_out_func[FUNC_MAX_ARGS];
bool arg_is_rowtype[FUNC_MAX_ARGS];
-} pltcl_proc_desc;
+} pltcl_proc_desc;
/**********************************************************************
@@ -126,7 +126,7 @@ typedef struct pltcl_query_desc
Oid *argtypes;
FmgrInfo *arginfuncs;
Oid *argtypioparams;
-} pltcl_query_desc;
+} pltcl_query_desc;
/**********************************************************************
@@ -149,9 +149,9 @@ static pltcl_proc_desc *pltcl_current_prodesc = NULL;
* Forward declarations
**********************************************************************/
static void pltcl_init_all(void);
-static void pltcl_init_interp(Tcl_Interp * interp);
+static void pltcl_init_interp(Tcl_Interp *interp);
-static void pltcl_init_load_unknown(Tcl_Interp * interp);
+static void pltcl_init_load_unknown(Tcl_Interp *interp);
Datum pltcl_call_handler(PG_FUNCTION_ARGS);
Datum pltclu_call_handler(PG_FUNCTION_ARGS);
@@ -163,34 +163,34 @@ static HeapTuple pltcl_trigger_handler(PG_FUNCTION_ARGS);
static pltcl_proc_desc *compile_pltcl_function(Oid fn_oid, Oid tgreloid);
-static int pltcl_elog(ClientData cdata, Tcl_Interp * interp,
+static int pltcl_elog(ClientData cdata, Tcl_Interp *interp,
int argc, CONST84 char *argv[]);
-static int pltcl_quote(ClientData cdata, Tcl_Interp * interp,
+static int pltcl_quote(ClientData cdata, Tcl_Interp *interp,
int argc, CONST84 char *argv[]);
-static int pltcl_argisnull(ClientData cdata, Tcl_Interp * interp,
+static int pltcl_argisnull(ClientData cdata, Tcl_Interp *interp,
int argc, CONST84 char *argv[]);
-static int pltcl_returnnull(ClientData cdata, Tcl_Interp * interp,
+static int pltcl_returnnull(ClientData cdata, Tcl_Interp *interp,
int argc, CONST84 char *argv[]);
-static int pltcl_SPI_execute(ClientData cdata, Tcl_Interp * interp,
+static int pltcl_SPI_execute(ClientData cdata, Tcl_Interp *interp,
int argc, CONST84 char *argv[]);
-static int pltcl_process_SPI_result(Tcl_Interp * interp,
+static int pltcl_process_SPI_result(Tcl_Interp *interp,
CONST84 char *arrayname,
CONST84 char *loop_body,
int spi_rc,
SPITupleTable *tuptable,
int ntuples);
-static int pltcl_SPI_prepare(ClientData cdata, Tcl_Interp * interp,
+static int pltcl_SPI_prepare(ClientData cdata, Tcl_Interp *interp,
int argc, CONST84 char *argv[]);
-static int pltcl_SPI_execute_plan(ClientData cdata, Tcl_Interp * interp,
+static int pltcl_SPI_execute_plan(ClientData cdata, Tcl_Interp *interp,
int argc, CONST84 char *argv[]);
-static int pltcl_SPI_lastoid(ClientData cdata, Tcl_Interp * interp,
+static int pltcl_SPI_lastoid(ClientData cdata, Tcl_Interp *interp,
int argc, CONST84 char *argv[]);
-static void pltcl_set_tuple_values(Tcl_Interp * interp, CONST84 char *arrayname,
+static void pltcl_set_tuple_values(Tcl_Interp *interp, CONST84 char *arrayname,
int tupno, HeapTuple tuple, TupleDesc tupdesc);
static void pltcl_build_tuple_argument(HeapTuple tuple, TupleDesc tupdesc,
- Tcl_DString * retval);
+ Tcl_DString *retval);
/*
@@ -296,7 +296,7 @@ pltcl_init_all(void)
* pltcl_init_interp() - initialize a Tcl interpreter
**********************************************************************/
static void
-pltcl_init_interp(Tcl_Interp * interp)
+pltcl_init_interp(Tcl_Interp *interp)
{
/************************************************************
* Install the commands for SPI support in the interpreter
@@ -326,7 +326,7 @@ pltcl_init_interp(Tcl_Interp * interp)
* table pltcl_modules (if it exists)
**********************************************************************/
static void
-pltcl_init_load_unknown(Tcl_Interp * interp)
+pltcl_init_load_unknown(Tcl_Interp *interp)
{
int spi_rc;
int tcl_rc;
@@ -1270,7 +1270,7 @@ compile_pltcl_function(Oid fn_oid, Oid tgreloid)
* pltcl_elog() - elog() support for PLTcl
**********************************************************************/
static int
-pltcl_elog(ClientData cdata, Tcl_Interp * interp,
+pltcl_elog(ClientData cdata, Tcl_Interp *interp,
int argc, CONST84 char *argv[])
{
volatile int level;
@@ -1342,7 +1342,7 @@ pltcl_elog(ClientData cdata, Tcl_Interp * interp,
* be used in SPI_execute query strings
**********************************************************************/
static int
-pltcl_quote(ClientData cdata, Tcl_Interp * interp,
+pltcl_quote(ClientData cdata, Tcl_Interp *interp,
int argc, CONST84 char *argv[])
{
char *tmp;
@@ -1395,7 +1395,7 @@ pltcl_quote(ClientData cdata, Tcl_Interp * interp,
* pltcl_argisnull() - determine if a specific argument is NULL
**********************************************************************/
static int
-pltcl_argisnull(ClientData cdata, Tcl_Interp * interp,
+pltcl_argisnull(ClientData cdata, Tcl_Interp *interp,
int argc, CONST84 char *argv[])
{
int argno;
@@ -1452,7 +1452,7 @@ pltcl_argisnull(ClientData cdata, Tcl_Interp * interp,
* pltcl_returnnull() - Cause a NULL return from a function
**********************************************************************/
static int
-pltcl_returnnull(ClientData cdata, Tcl_Interp * interp,
+pltcl_returnnull(ClientData cdata, Tcl_Interp *interp,
int argc, CONST84 char *argv[])
{
FunctionCallInfo fcinfo = pltcl_current_fcinfo;
@@ -1535,7 +1535,7 @@ pltcl_subtrans_commit(MemoryContext oldcontext, ResourceOwner oldowner)
}
static void
-pltcl_subtrans_abort(Tcl_Interp * interp,
+pltcl_subtrans_abort(Tcl_Interp *interp,
MemoryContext oldcontext, ResourceOwner oldowner)
{
ErrorData *edata;
@@ -1568,7 +1568,7 @@ pltcl_subtrans_abort(Tcl_Interp * interp,
* for the Tcl interpreter
**********************************************************************/
static int
-pltcl_SPI_execute(ClientData cdata, Tcl_Interp * interp,
+pltcl_SPI_execute(ClientData cdata, Tcl_Interp *interp,
int argc, CONST84 char *argv[])
{
int my_rc;
@@ -1671,7 +1671,7 @@ pltcl_SPI_execute(ClientData cdata, Tcl_Interp * interp,
* Shared code between pltcl_SPI_execute and pltcl_SPI_execute_plan
*/
static int
-pltcl_process_SPI_result(Tcl_Interp * interp,
+pltcl_process_SPI_result(Tcl_Interp *interp,
CONST84 char *arrayname,
CONST84 char *loop_body,
int spi_rc,
@@ -1775,7 +1775,7 @@ pltcl_process_SPI_result(Tcl_Interp * interp,
* and not save the plan currently.
**********************************************************************/
static int
-pltcl_SPI_prepare(ClientData cdata, Tcl_Interp * interp,
+pltcl_SPI_prepare(ClientData cdata, Tcl_Interp *interp,
int argc, CONST84 char *argv[])
{
int nargs;
@@ -1920,7 +1920,7 @@ pltcl_SPI_prepare(ClientData cdata, Tcl_Interp * interp,
* pltcl_SPI_execute_plan() - Execute a prepared plan
**********************************************************************/
static int
-pltcl_SPI_execute_plan(ClientData cdata, Tcl_Interp * interp,
+pltcl_SPI_execute_plan(ClientData cdata, Tcl_Interp *interp,
int argc, CONST84 char *argv[])
{
int my_rc;
@@ -2140,7 +2140,7 @@ pltcl_SPI_execute_plan(ClientData cdata, Tcl_Interp * interp,
* be used after insert queries
**********************************************************************/
static int
-pltcl_SPI_lastoid(ClientData cdata, Tcl_Interp * interp,
+pltcl_SPI_lastoid(ClientData cdata, Tcl_Interp *interp,
int argc, CONST84 char *argv[])
{
char buf[64];
@@ -2156,7 +2156,7 @@ pltcl_SPI_lastoid(ClientData cdata, Tcl_Interp * interp,
* of a given tuple
**********************************************************************/
static void
-pltcl_set_tuple_values(Tcl_Interp * interp, CONST84 char *arrayname,
+pltcl_set_tuple_values(Tcl_Interp *interp, CONST84 char *arrayname,
int tupno, HeapTuple tuple, TupleDesc tupdesc)
{
int i;
@@ -2249,7 +2249,7 @@ pltcl_set_tuple_values(Tcl_Interp * interp, CONST84 char *arrayname,
**********************************************************************/
static void
pltcl_build_tuple_argument(HeapTuple tuple, TupleDesc tupdesc,
- Tcl_DString * retval)
+ Tcl_DString *retval)
{
int i;
char *outputstr;
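
The pltcl.c hunks above only normalize pointer declarators ("Tcl_Interp * interp" becomes "Tcl_Interp *interp") in the forward declarations of the Tcl command procedures, but the declarations also show the calling convention those handlers share. For illustration only, here is a minimal, self-contained sketch of a command written against that same Tcl_CmdProc shape and registered with Tcl_CreateCommand(); it assumes Tcl 8.4 headers (where CONST84 is defined) and is not code taken from pltcl.c.

#include <stdio.h>
#include <tcl.h>

/*
 * A command procedure with the same shape as the pltcl_* handlers above:
 * (ClientData, Tcl_Interp *, int argc, CONST84 char *argv[]).
 * It simply echoes its first argument back as the interpreter result.
 */
static int
demo_echo(ClientData cdata, Tcl_Interp *interp,
          int argc, CONST84 char *argv[])
{
    if (argc != 2)
    {
        Tcl_SetResult(interp, (char *) "usage: demo_echo string", TCL_STATIC);
        return TCL_ERROR;
    }
    /* TCL_VOLATILE tells Tcl to make its own copy of the string */
    Tcl_SetResult(interp, (char *) argv[1], TCL_VOLATILE);
    return TCL_OK;
}

int
main(void)
{
    Tcl_Interp *interp = Tcl_CreateInterp();

    /* Registration mirrors what an init_interp-style function would do */
    Tcl_CreateCommand(interp, "demo_echo", demo_echo, NULL, NULL);

    if (Tcl_Eval(interp, "demo_echo {hello from C}") == TCL_OK)
        printf("%s\n", Tcl_GetStringResult(interp));

    Tcl_DeleteInterp(interp);
    return 0;
}

Build with something like "cc demo.c -ltcl"; the exact library name depends on the Tcl installation.
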
diff --git a/src/port/exec.c b/src/port/exec.c
index e754b182157..5f0f6959f54 100644
--- a/src/port/exec.c
+++ b/src/port/exec.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/exec.c,v 1.39 2005/10/15 02:49:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/exec.c,v 1.39.2.1 2005/11/22 18:23:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -95,8 +95,8 @@ validate_exec(const char *path)
/*
* Ensure that the file exists and is a regular file.
*
- * XXX if you have a broken system where stat() looks at the symlink instead
- * of the underlying file, you lose.
+ * XXX if you have a broken system where stat() looks at the symlink
+ * instead of the underlying file, you lose.
*/
if (stat(path, &buf) < 0)
return -1;
@@ -297,9 +297,9 @@ resolve_symlinks(char *path)
* points, for example). After following the final symlink, we use
* getcwd() to figure out where the heck we're at.
*
- * One might think we could skip all this if path doesn't point to a symlink
- * to start with, but that's wrong. We also want to get rid of any
- * directory symlinks that are present in the given path. We expect
+ * One might think we could skip all this if path doesn't point to a
+ * symlink to start with, but that's wrong. We also want to get rid of
+ * any directory symlinks that are present in the given path. We expect
* getcwd() to give us an accurate, symlink-free path.
*/
if (!getcwd(orig_wd, MAXPGPATH))
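
The rewrapped comment in resolve_symlinks() describes the approach itself: step into the directory in question, follow the final symlink, and then let getcwd() report a symlink-free absolute location. The following is a rough sketch of that idea for a single directory argument only; the real function loops over link levels, handles relative link targets, restores the original working directory, and reports errors to its caller, so this is an illustration of the technique, not the PostgreSQL implementation.

#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
    char    link_buf[4096];
    char    cwd[4096];
    ssize_t rllen;

    if (argc != 2)
    {
        fprintf(stderr, "usage: %s directory\n", argv[0]);
        return 1;
    }

    /* Show whether the argument itself is a symlink, and where it points */
    rllen = readlink(argv[1], link_buf, sizeof(link_buf) - 1);
    if (rllen >= 0)
    {
        link_buf[rllen] = '\0';
        printf("argument is a symlink to: %s\n", link_buf);
    }

    /* chdir follows the link; getcwd then gives the symlink-free path */
    if (chdir(argv[1]) == 0 && getcwd(cwd, sizeof(cwd)) != NULL)
        printf("symlink-free location: %s\n", cwd);
    else
        perror("chdir/getcwd");

    return 0;
}
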
diff --git a/src/port/path.c b/src/port/path.c
index c6137bd0a13..c4a33521abc 100644
--- a/src/port/path.c
+++ b/src/port/path.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/path.c,v 1.61 2005/10/15 02:49:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/path.c,v 1.61.2.1 2005/11/22 18:23:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -280,8 +280,8 @@ canonicalize_path(char *path)
/*
* Remove any trailing uses of "." and process ".." ourselves
*
- * Note that "/../.." should reduce to just "/", while "../.." has to be kept
- * as-is. In the latter case we put back mistakenly trimmed ".."
+ * Note that "/../.." should reduce to just "/", while "../.." has to be
+ * kept as-is. In the latter case we put back mistakenly trimmed ".."
* components below. Also note that we want a Windows drive spec to be
* visible to trim_directory(), but it's not part of the logic that's
* looking at the name components; hence distinction between path and
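
The rewrapped comment in canonicalize_path() is about how trailing ".." components collapse: "/../.." must reduce to just "/", while a purely relative "../.." must survive untouched. A small usage sketch, assuming it is compiled with the PostgreSQL include path and linked against libpgport (canonicalize_path() is declared in src/include/port.h and edits its argument in place):

#include "postgres_fe.h"        /* pulls in port.h for canonicalize_path() */

int
main(void)
{
    char    a[] = "/usr/local/pgsql/bin/..";
    char    b[] = "/../..";
    char    c[] = "../..";

    canonicalize_path(a);       /* trailing ".." cancels "bin":  /usr/local/pgsql */
    canonicalize_path(b);       /* cannot go above the root:     /               */
    canonicalize_path(c);       /* nothing to cancel against:    ../..  (kept)   */

    printf("%s\n%s\n%s\n", a, b, c);
    return 0;
}
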
diff --git a/src/port/strtol.c b/src/port/strtol.c
index a948489390e..a103b446e62 100644
--- a/src/port/strtol.c
+++ b/src/port/strtol.c
@@ -103,7 +103,8 @@ int base;
* digit is > 7 (or 8), the number is too big, and we will return a range
* error.
*
- * Set any if any `digits' consumed; make it negative to indicate overflow.
+ * Set any if any `digits' consumed; make it negative to indicate
+ * overflow.
*/
cutoff = neg ? -(unsigned long) LONG_MIN : LONG_MAX;
cutlim = cutoff % (unsigned long) base;
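
The reflowed comment documents the classic BSD overflow guard that the surrounding code uses: cutoff is the largest legal magnitude divided by the base, cutlim is the remainder, and "any" records whether digits were consumed, going negative on overflow. As a worked illustration, for base 10 and a 32-bit long (LONG_MAX = 2147483647) that gives cutoff = 214748364 and cutlim = 7. The sketch below applies the same test to non-negative decimal input; it is a standalone example, not the code from strtol.c.

#include <limits.h>
#include <stdio.h>

/* Accumulate decimal digits with the BSD-style overflow guard. */
static long
checked_atol(const char *s, int *overflow)
{
    unsigned long acc = 0;
    unsigned long cutoff = LONG_MAX;            /* positive numbers only here */
    int           cutlim = (int) (cutoff % 10); /* 7 when LONG_MAX is 2147483647 */
    int           any = 0;                      /* <0 overflow, >0 digits seen */

    cutoff /= 10;                               /* 214748364 on 32-bit long */

    for (; *s >= '0' && *s <= '9'; s++)
    {
        int c = *s - '0';

        if (any < 0 || acc > cutoff || (acc == cutoff && c > cutlim))
            any = -1;                           /* one more digit would overflow */
        else
        {
            any = 1;
            acc = acc * 10 + c;
        }
    }
    *overflow = (any < 0);
    return (any < 0) ? LONG_MAX : (long) acc;
}

int
main(void)
{
    int ovf;

    /* On an LP64 platform long is 64 bits, so neither input overflows there. */
    printf("%ld (overflow=%d)\n", checked_atol("2147483647", &ovf), ovf);
    printf("%ld (overflow=%d)\n", checked_atol("2147483648", &ovf), ovf);
    return 0;
}
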
diff --git a/src/port/unsetenv.c b/src/port/unsetenv.c
index 6509ff79f3e..e387b507991 100644
--- a/src/port/unsetenv.c
+++ b/src/port/unsetenv.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/unsetenv.c,v 1.5 2005/10/15 02:49:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/unsetenv.c,v 1.5.2.1 2005/11/22 18:23:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -33,8 +33,8 @@ unsetenv(const char *name)
* presented string. This method fails on such platforms. Hopefully all
* such platforms have unsetenv() and thus won't be using this hack.
*
- * Note that repeatedly setting and unsetting a var using this code will leak
- * memory.
+ * Note that repeatedly setting and unsetting a var using this code will
+ * leak memory.
*/
envstr = (char *) malloc(strlen(name) + 2);
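
The rewrapped comment is about the portability trick this file relies on when a platform has no unsetenv(): putenv() a private "NAME=" string and then scribble on that same string, which works only if libc keeps a pointer to the caller's buffer rather than copying it. A rough sketch of that trick is below; it is hedged, not the file's exact code, it deliberately never frees the malloc'd string (the leak the comment warns about), and on a libc that copies the putenv() argument the old value will simply remain visible.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
unsetenv_hack(const char *name)
{
    char *envstr;

    if (getenv(name) == NULL)
        return;                     /* nothing to do */

    envstr = malloc(strlen(name) + 2);
    if (envstr == NULL)
        return;

    sprintf(envstr, "%s=", name);   /* force the var to an empty value ... */
    putenv(envstr);

    strcpy(envstr, "=");            /* ... then clobber our own string, which
                                     * also clobbers the environ entry if libc
                                     * kept our pointer */
    putenv(envstr);
}

int
main(void)
{
    putenv("DEMO_VAR=hello");
    printf("before: %s\n", getenv("DEMO_VAR") ? getenv("DEMO_VAR") : "(unset)");

    unsetenv_hack("DEMO_VAR");
    printf("after:  %s\n", getenv("DEMO_VAR") ? getenv("DEMO_VAR") : "(unset)");
    return 0;
}

On any platform that does provide unsetenv(), the real function should of course be used instead of this fallback.
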
diff --git a/src/test/examples/testlibpq3.c b/src/test/examples/testlibpq3.c
index c7e4e097322..97ed023c14a 100644
--- a/src/test/examples/testlibpq3.c
+++ b/src/test/examples/testlibpq3.c
@@ -18,9 +18,9 @@
* b = (5 bytes) \000\001\002\003\004
*
* tuple 0: got
- * i = (4 bytes) 2
- * t = (8 bytes) 'ho there'
- * b = (5 bytes) \004\003\002\001\000
+ * i = (4 bytes) 2
+ * t = (8 bytes) 'ho there'
+ * b = (5 bytes) \004\003\002\001\000
*/
#include <stdio.h>
#include <stdlib.h>
@@ -137,10 +137,10 @@ main(int argc, char **argv)
* out-of-line parameters, as well as binary transmission of data.
*
* This first example transmits the parameters as text, but receives the
- * results in binary format. By using out-of-line parameters we can
- * avoid a lot of tedious mucking about with quoting and escaping, even
- * though the data is text. Notice how we don't have to do anything
- * special with the quote mark in the parameter value.
+ * results in binary format. By using out-of-line parameters we can avoid
+ * a lot of tedious mucking about with quoting and escaping, even though
+ * the data is text. Notice how we don't have to do anything special with
+ * the quote mark in the parameter value.
*/
/* Here is our out-of-line parameter value */
@@ -167,8 +167,8 @@ main(int argc, char **argv)
PQclear(res);
/*
- * In this second example we transmit an integer parameter in binary
- * form, and again retrieve the results in binary form.
+ * In this second example we transmit an integer parameter in binary form,
+ * and again retrieve the results in binary form.
*
* Although we tell PQexecParams we are letting the backend deduce
* parameter type, we really force the decision by casting the parameter
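
The reindented comments in testlibpq3.c describe what that example demonstrates: PQexecParams() with out-of-line parameters, so a text value containing a quote mark needs no escaping, plus binary transmission of parameters and results. Below is a hedged, stripped-down sketch along the same lines; the connection string is a placeholder, and it requests text results to keep the output trivially printable (passing 1 as the last argument switches to the binary results that testlibpq3.c decodes).

#include <stdio.h>
#include <stdlib.h>
#include <libpq-fe.h>

int
main(void)
{
    /* placeholder connection string; adjust for your environment */
    PGconn     *conn = PQconnectdb("dbname = postgres");
    const char *paramValues[1];
    PGresult   *res;

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    /*
     * Out-of-line text parameter: the embedded quote mark needs no special
     * treatment because the value never passes through the SQL lexer.
     */
    paramValues[0] = "joe's place";

    res = PQexecParams(conn,
                       "SELECT $1::text",
                       1,           /* one parameter */
                       NULL,        /* let the server deduce parameter types */
                       paramValues,
                       NULL,        /* text parameters need no lengths */
                       NULL,        /* all parameters in text format */
                       0);          /* text results; use 1 for binary */

    if (PQresultStatus(res) == PGRES_TUPLES_OK && PQntuples(res) == 1)
        printf("round-tripped value: %s\n", PQgetvalue(res, 0, 0));
    else
        fprintf(stderr, "query failed: %s", PQerrorMessage(conn));

    PQclear(res);
    PQfinish(conn);
    return 0;
}
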
diff --git a/src/timezone/pgtz.c b/src/timezone/pgtz.c
index 23165062bf7..b3381c28e65 100644
--- a/src/timezone/pgtz.c
+++ b/src/timezone/pgtz.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/pgtz.c,v 1.38 2005/10/15 02:49:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/timezone/pgtz.c,v 1.38.2.1 2005/11/22 18:23:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -789,7 +789,7 @@ static const struct
"Australia/Perth"
}, /* (GMT+08:00) Perth */
/* {"W. Central Africa Standard Time", "W. Central Africa Daylight Time",
- * * * ""}, Could not find a match for this one. Excluded for now. *//* (
+ * * * * ""}, Could not find a match for this one. Excluded for now. *//* (
* G MT+01:00) West Central Africa */
{
"W. Europe Standard Time", "W. Europe Daylight Time",