Diffstat (limited to 'src/backend/utils')
-rw-r--r--  src/backend/utils/adt/acl.c | 266
-rw-r--r--  src/backend/utils/adt/array_userfuncs.c | 43
-rw-r--r--  src/backend/utils/adt/arrayfuncs.c | 342
-rw-r--r--  src/backend/utils/adt/ascii.c | 6
-rw-r--r--  src/backend/utils/adt/cash.c | 24
-rw-r--r--  src/backend/utils/adt/char.c | 8
-rw-r--r--  src/backend/utils/adt/date.c | 195
-rw-r--r--  src/backend/utils/adt/datetime.c | 337
-rw-r--r--  src/backend/utils/adt/datum.c | 11
-rw-r--r--  src/backend/utils/adt/dbsize.c | 142
-rw-r--r--  src/backend/utils/adt/encode.c | 8
-rw-r--r--  src/backend/utils/adt/float.c | 122
-rw-r--r--  src/backend/utils/adt/format_type.c | 42
-rw-r--r--  src/backend/utils/adt/formatting.c | 339
-rw-r--r--  src/backend/utils/adt/genfile.c | 57
-rw-r--r--  src/backend/utils/adt/geo_ops.c | 135
-rw-r--r--  src/backend/utils/adt/inet_net_ntop.c | 20
-rw-r--r--  src/backend/utils/adt/inet_net_pton.c | 5
-rw-r--r--  src/backend/utils/adt/int.c | 148
-rw-r--r--  src/backend/utils/adt/int8.c | 140
-rw-r--r--  src/backend/utils/adt/like.c | 46
-rw-r--r--  src/backend/utils/adt/like_match.c | 42
-rw-r--r--  src/backend/utils/adt/lockfuncs.c | 29
-rw-r--r--  src/backend/utils/adt/mac.c | 6
-rw-r--r--  src/backend/utils/adt/misc.c | 21
-rw-r--r--  src/backend/utils/adt/nabstime.c | 147
-rw-r--r--  src/backend/utils/adt/name.c | 6
-rw-r--r--  src/backend/utils/adt/network.c | 28
-rw-r--r--  src/backend/utils/adt/numeric.c | 328
-rw-r--r--  src/backend/utils/adt/numutils.c | 16
-rw-r--r--  src/backend/utils/adt/oid.c | 28
-rw-r--r--  src/backend/utils/adt/oracle_compat.c | 74
-rw-r--r--  src/backend/utils/adt/pg_locale.c | 27
-rw-r--r--  src/backend/utils/adt/pg_lzcompress.c | 102
-rw-r--r--  src/backend/utils/adt/pgstatfuncs.c | 12
-rw-r--r--  src/backend/utils/adt/quote.c | 6
-rw-r--r--  src/backend/utils/adt/regexp.c | 63
-rw-r--r--  src/backend/utils/adt/regproc.c | 133
-rw-r--r--  src/backend/utils/adt/ri_triggers.c | 504
-rw-r--r--  src/backend/utils/adt/rowtypes.c | 61
-rw-r--r--  src/backend/utils/adt/ruleutils.c | 367
-rw-r--r--  src/backend/utils/adt/selfuncs.c | 748
-rw-r--r--  src/backend/utils/adt/timestamp.c | 509
-rw-r--r--  src/backend/utils/adt/varbit.c | 50
-rw-r--r--  src/backend/utils/adt/varchar.c | 44
-rw-r--r--  src/backend/utils/adt/varlena.c | 214
-rw-r--r--  src/backend/utils/cache/catcache.c | 195
-rw-r--r--  src/backend/utils/cache/inval.c | 93
-rw-r--r--  src/backend/utils/cache/lsyscache.c | 40
-rw-r--r--  src/backend/utils/cache/relcache.c | 408
-rw-r--r--  src/backend/utils/cache/syscache.c | 77
-rw-r--r--  src/backend/utils/cache/typcache.c | 41
-rw-r--r--  src/backend/utils/error/assert.c | 6
-rw-r--r--  src/backend/utils/error/elog.c | 198
-rw-r--r--  src/backend/utils/fmgr/dfmgr.c | 16
-rw-r--r--  src/backend/utils/fmgr/fmgr.c | 99
-rw-r--r--  src/backend/utils/fmgr/funcapi.c | 83
-rw-r--r--  src/backend/utils/hash/dynahash.c | 78
-rw-r--r--  src/backend/utils/hash/hashfn.c | 8
-rw-r--r--  src/backend/utils/hash/pg_crc.c | 6
-rw-r--r--  src/backend/utils/init/flatfiles.c | 193
-rw-r--r--  src/backend/utils/init/miscinit.c | 153
-rw-r--r--  src/backend/utils/init/postinit.c | 115
-rw-r--r--  src/backend/utils/mb/conv.c | 13
-rw-r--r--  src/backend/utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c | 14
-rw-r--r--  src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c | 35
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c | 6
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c | 4
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c | 4
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c | 4
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c | 4
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c | 4
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c | 4
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c | 4
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_win1250/utf8_and_win1250.c | 4
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_win1252/utf8_and_win1252.c | 4
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_win1256/utf8_and_win1256.c | 4
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_win1258/utf8_and_win1258.c | 4
-rw-r--r--  src/backend/utils/mb/conversion_procs/utf8_and_win874/utf8_and_win874.c | 4
-rw-r--r--  src/backend/utils/mb/encnames.c | 22
-rw-r--r--  src/backend/utils/mb/mbutils.c | 30
-rw-r--r--  src/backend/utils/mb/wchar.c | 123
-rw-r--r--  src/backend/utils/misc/guc.c | 436
-rw-r--r--  src/backend/utils/misc/pg_rusage.c | 6
-rw-r--r--  src/backend/utils/misc/ps_status.c | 48
-rw-r--r--  src/backend/utils/misc/superuser.c | 8
-rw-r--r--  src/backend/utils/mmgr/aset.c | 128
-rw-r--r--  src/backend/utils/mmgr/mcxt.c | 39
-rw-r--r--  src/backend/utils/mmgr/portalmem.c | 107
-rw-r--r--  src/backend/utils/resowner/resowner.c | 68
-rw-r--r--  src/backend/utils/sort/logtape.c | 104
-rw-r--r--  src/backend/utils/sort/tuplesort.c | 326
-rw-r--r--  src/backend/utils/sort/tuplestore.c | 90
-rw-r--r--  src/backend/utils/time/tqual.c | 64
94 files changed, 4688 insertions, 4837 deletions
diff --git a/src/backend/utils/adt/acl.c b/src/backend/utils/adt/acl.c
index 9909640ad4a..5fcb9b25fc4 100644
--- a/src/backend/utils/adt/acl.c
+++ b/src/backend/utils/adt/acl.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/acl.c,v 1.125 2005/10/10 18:49:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/acl.c,v 1.126 2005/10/15 02:49:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,9 +59,9 @@
* The cache is valid if cached_member_role is not InvalidOid.
*/
static Oid cached_privs_role = InvalidOid;
-static List *cached_privs_roles = NIL;
+static List *cached_privs_roles = NIL;
static Oid cached_member_role = InvalidOid;
-static List *cached_membership_roles = NIL;
+static List *cached_membership_roles = NIL;
static const char *getid(const char *s, char *n);
@@ -73,7 +73,7 @@ static void check_circularity(const Acl *old_acl, const AclItem *mod_aip,
Oid ownerId);
static Acl *recursive_revoke(Acl *acl, Oid grantee, AclMode revoke_privs,
Oid ownerId, DropBehavior behavior);
-static int oidComparator(const void *arg1, const void *arg2);
+static int oidComparator(const void *arg1, const void *arg2);
static AclMode convert_priv_string(text *priv_type_text);
@@ -143,8 +143,8 @@ getid(const char *s, char *n)
ereport(ERROR,
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("identifier too long"),
- errdetail("Identifier must be less than %d characters.",
- NAMEDATALEN)));
+ errdetail("Identifier must be less than %d characters.",
+ NAMEDATALEN)));
n[len++] = *s;
}
@@ -230,7 +230,7 @@ aclparse(const char *s, AclItem *aip)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("unrecognized key word: \"%s\"", name),
- errhint("ACL key word must be \"group\" or \"user\".")));
+ errhint("ACL key word must be \"group\" or \"user\".")));
s = getid(s, name); /* move s to the name beyond the keyword */
if (name[0] == '\0')
ereport(ERROR,
@@ -289,8 +289,8 @@ aclparse(const char *s, AclItem *aip)
default:
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid mode character: must be one of \"%s\"",
- ACL_ALL_RIGHTS_STR)));
+ errmsg("invalid mode character: must be one of \"%s\"",
+ ACL_ALL_RIGHTS_STR)));
}
privs |= read;
@@ -302,8 +302,8 @@ aclparse(const char *s, AclItem *aip)
aip->ai_grantee = get_roleid_checked(name);
/*
- * XXX Allow a degree of backward compatibility by defaulting the
- * grantor to the superuser.
+ * XXX Allow a degree of backward compatibility by defaulting the grantor
+ * to the superuser.
*/
if (*s == '/')
{
@@ -380,7 +380,7 @@ aclitemin(PG_FUNCTION_ARGS)
if (*s)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("extra garbage at the end of the ACL specification")));
+ errmsg("extra garbage at the end of the ACL specification")));
PG_RETURN_ACLITEM_P(aip);
}
@@ -565,14 +565,14 @@ acldefault(GrantObjectType objtype, Oid ownerId)
}
/*
- * Note that the owner's entry shows all ordinary privileges but no
- * grant options. This is because his grant options come "from the
- * system" and not from his own efforts. (The SQL spec says that the
- * owner's rights come from a "_SYSTEM" authid.) However, we do
- * consider that the owner's ordinary privileges are self-granted;
- * this lets him revoke them. We implement the owner's grant options
- * without any explicit "_SYSTEM"-like ACL entry, by internally
- * special-casing the owner whereever we are testing grant options.
+ * Note that the owner's entry shows all ordinary privileges but no grant
+ * options. This is because his grant options come "from the system" and
+ * not from his own efforts. (The SQL spec says that the owner's rights
+ * come from a "_SYSTEM" authid.) However, we do consider that the
+ * owner's ordinary privileges are self-granted; this lets him revoke
+ * them. We implement the owner's grant options without any explicit
+ * "_SYSTEM"-like ACL entry, by internally special-casing the owner
+ * whereever we are testing grant options.
*/
aip->ai_grantee = ownerId;
aip->ai_grantor = ownerId;
@@ -631,10 +631,10 @@ aclupdate(const Acl *old_acl, const AclItem *mod_aip,
old_aip = ACL_DAT(old_acl);
/*
- * Search the ACL for an existing entry for this grantee and grantor.
- * If one exists, just modify the entry in-place (well, in the same
- * position, since we actually return a copy); otherwise, insert the
- * new entry at the end.
+ * Search the ACL for an existing entry for this grantee and grantor. If
+ * one exists, just modify the entry in-place (well, in the same position,
+ * since we actually return a copy); otherwise, insert the new entry at
+ * the end.
*/
for (dst = 0; dst < num; ++dst)
@@ -676,7 +676,7 @@ aclupdate(const Acl *old_acl, const AclItem *mod_aip,
break;
case ACL_MODECHG_DEL:
ACLITEM_SET_RIGHTS(new_aip[dst],
- old_rights & ~ACLITEM_GET_RIGHTS(*mod_aip));
+ old_rights & ~ACLITEM_GET_RIGHTS(*mod_aip));
break;
case ACL_MODECHG_EQL:
ACLITEM_SET_RIGHTS(new_aip[dst],
@@ -700,8 +700,8 @@ aclupdate(const Acl *old_acl, const AclItem *mod_aip,
}
/*
- * Remove abandoned privileges (cascading revoke). Currently we can
- * only handle this when the grantee is not PUBLIC.
+ * Remove abandoned privileges (cascading revoke). Currently we can only
+ * handle this when the grantee is not PUBLIC.
*/
if ((old_goptions & ~new_goptions) != 0)
{
@@ -742,8 +742,8 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
/*
* Make a copy of the given ACL, substituting new owner ID for old
- * wherever it appears as either grantor or grantee. Also note if the
- * new owner ID is already present.
+ * wherever it appears as either grantor or grantee. Also note if the new
+ * owner ID is already present.
*/
num = ACL_NUM(old_acl);
old_aip = ACL_DAT(old_acl);
@@ -763,21 +763,20 @@ aclnewowner(const Acl *old_acl, Oid oldOwnerId, Oid newOwnerId)
}
/*
- * If the old ACL contained any references to the new owner, then we
- * may now have generated an ACL containing duplicate entries. Find
- * them and merge them so that there are not duplicates. (This is
- * relatively expensive since we use a stupid O(N^2) algorithm, but
- * it's unlikely to be the normal case.)
+ * If the old ACL contained any references to the new owner, then we may
+ * now have generated an ACL containing duplicate entries. Find them and
+ * merge them so that there are not duplicates. (This is relatively
+ * expensive since we use a stupid O(N^2) algorithm, but it's unlikely to
+ * be the normal case.)
*
- * To simplify deletion of duplicate entries, we temporarily leave them
- * in the array but set their privilege masks to zero; when we reach
- * such an entry it's just skipped. (Thus, a side effect of this code
- * will be to remove privilege-free entries, should there be any in
- * the input.) dst is the next output slot, targ is the currently
- * considered input slot (always >= dst), and src scans entries to the
- * right of targ looking for duplicates. Once an entry has been
- * emitted to dst it is known duplicate-free and need not be
- * considered anymore.
+ * To simplify deletion of duplicate entries, we temporarily leave them in
+ * the array but set their privilege masks to zero; when we reach such an
+ * entry it's just skipped. (Thus, a side effect of this code will be to
+ * remove privilege-free entries, should there be any in the input.) dst
+ * is the next output slot, targ is the currently considered input slot
+ * (always >= dst), and src scans entries to the right of targ looking for
+ * duplicates. Once an entry has been emitted to dst it is known
+ * duplicate-free and need not be considered anymore.
*/
if (newpresent)
{
@@ -877,14 +876,14 @@ cc_restart:
own_privs = aclmask(acl,
mod_aip->ai_grantor,
ownerId,
- ACL_GRANT_OPTION_FOR(ACLITEM_GET_GOPTIONS(*mod_aip)),
+ ACL_GRANT_OPTION_FOR(ACLITEM_GET_GOPTIONS(*mod_aip)),
ACLMASK_ALL);
own_privs = ACL_OPTION_TO_PRIVS(own_privs);
if ((ACLITEM_GET_GOPTIONS(*mod_aip) & ~own_privs) != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
- errmsg("grant options cannot be granted back to your own grantor")));
+ errmsg("grant options cannot be granted back to your own grantor")));
pfree(acl);
}
@@ -1041,11 +1040,11 @@ aclmask(const Acl *acl, Oid roleid, Oid ownerId,
}
/*
- * Check privileges granted indirectly via role memberships.
- * We do this in a separate pass to minimize expensive indirect
- * membership tests. In particular, it's worth testing whether
- * a given ACL entry grants any privileges still of interest before
- * we perform the has_privs_of_role test.
+ * Check privileges granted indirectly via role memberships. We do this in
+ * a separate pass to minimize expensive indirect membership tests. In
+ * particular, it's worth testing whether a given ACL entry grants any
+ * privileges still of interest before we perform the has_privs_of_role
+ * test.
*/
remaining = mask & ~result;
for (i = 0; i < num; i++)
@@ -1140,11 +1139,11 @@ aclmask_direct(const Acl *acl, Oid roleid, Oid ownerId,
int
aclmembers(const Acl *acl, Oid **roleids)
{
- Oid *list;
+ Oid *list;
const AclItem *acldat;
- int i,
- j,
- k;
+ int i,
+ j,
+ k;
if (acl == NULL || ACL_NUM(acl) == 0)
{
@@ -1183,8 +1182,8 @@ aclmembers(const Acl *acl, Oid **roleids)
}
/*
- * We could repalloc the array down to minimum size, but it's hardly
- * worth it since it's only transient memory.
+ * We could repalloc the array down to minimum size, but it's hardly worth
+ * it since it's only transient memory.
*/
*roleids = list;
@@ -1198,8 +1197,8 @@ aclmembers(const Acl *acl, Oid **roleids)
static int
oidComparator(const void *arg1, const void *arg2)
{
- Oid oid1 = * (const Oid *) arg1;
- Oid oid2 = * (const Oid *) arg2;
+ Oid oid1 = *(const Oid *) arg1;
+ Oid oid2 = *(const Oid *) arg2;
if (oid1 > oid2)
return 1;
@@ -1257,7 +1256,7 @@ Datum
makeaclitem(PG_FUNCTION_ARGS)
{
Oid grantee = PG_GETARG_OID(0);
- Oid grantor = PG_GETARG_OID(1);
+ Oid grantor = PG_GETARG_OID(1);
text *privtext = PG_GETARG_TEXT_P(2);
bool goption = PG_GETARG_BOOL(3);
AclItem *result;
@@ -1282,7 +1281,7 @@ convert_priv_string(text *priv_type_text)
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
if (pg_strcasecmp(priv_type, "SELECT") == 0)
return ACL_SELECT;
@@ -1410,7 +1409,7 @@ has_table_privilege_id(PG_FUNCTION_ARGS)
{
Oid tableoid = PG_GETARG_OID(0);
text *priv_type_text = PG_GETARG_TEXT_P(1);
- Oid roleid;
+ Oid roleid;
AclMode mode;
AclResult aclresult;
@@ -1493,7 +1492,7 @@ convert_table_priv_string(text *priv_type_text)
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
@@ -1704,7 +1703,7 @@ convert_database_name(text *databasename)
Oid oid;
dbname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(databasename)));
+ PointerGetDatum(databasename)));
oid = get_database_oid(dbname);
if (!OidIsValid(oid))
@@ -1725,7 +1724,7 @@ convert_database_priv_string(text *priv_type_text)
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
@@ -1916,10 +1915,10 @@ convert_function_name(text *functionname)
Oid oid;
funcname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(functionname)));
+ PointerGetDatum(functionname)));
oid = DatumGetObjectId(DirectFunctionCall1(regprocedurein,
- CStringGetDatum(funcname)));
+ CStringGetDatum(funcname)));
if (!OidIsValid(oid))
ereport(ERROR,
@@ -1939,7 +1938,7 @@ convert_function_priv_string(text *priv_type_text)
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
@@ -2120,7 +2119,7 @@ convert_language_name(text *languagename)
Oid oid;
langname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(languagename)));
+ PointerGetDatum(languagename)));
oid = GetSysCacheOid(LANGNAME,
CStringGetDatum(langname),
@@ -2143,7 +2142,7 @@ convert_language_priv_string(text *priv_type_text)
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
@@ -2324,7 +2323,7 @@ convert_schema_name(text *schemaname)
Oid oid;
nspname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(schemaname)));
+ PointerGetDatum(schemaname)));
oid = GetSysCacheOid(NAMESPACENAME,
CStringGetDatum(nspname),
@@ -2347,7 +2346,7 @@ convert_schema_priv_string(text *priv_type_text)
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
@@ -2462,7 +2461,7 @@ has_tablespace_privilege_id(PG_FUNCTION_ARGS)
{
Oid tablespaceoid = PG_GETARG_OID(0);
text *priv_type_text = PG_GETARG_TEXT_P(1);
- Oid roleid;
+ Oid roleid;
AclMode mode;
AclResult aclresult;
@@ -2532,7 +2531,7 @@ convert_tablespace_name(text *tablespacename)
Oid oid;
spcname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(tablespacename)));
+ PointerGetDatum(tablespacename)));
oid = get_tablespace_oid(spcname);
if (!OidIsValid(oid))
@@ -2553,7 +2552,7 @@ convert_tablespace_priv_string(text *priv_type_text)
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
@@ -2663,7 +2662,7 @@ pg_has_role_id(PG_FUNCTION_ARGS)
{
Oid roleoid = PG_GETARG_OID(0);
text *priv_type_text = PG_GETARG_TEXT_P(1);
- Oid roleid;
+ Oid roleid;
AclMode mode;
AclResult aclresult;
@@ -2739,7 +2738,7 @@ convert_role_priv_string(text *priv_type_text)
char *priv_type;
priv_type = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(priv_type_text)));
+ PointerGetDatum(priv_type_text)));
/*
* Return mode from priv_type string
@@ -2795,8 +2794,8 @@ initialize_acl(void)
if (!IsBootstrapProcessingMode())
{
/*
- * In normal mode, set a callback on any syscache
- * invalidation of pg_auth_members rows
+ * In normal mode, set a callback on any syscache invalidation of
+ * pg_auth_members rows
*/
CacheRegisterSyscacheCallback(AUTHMEMROLEMEM,
RoleMembershipCacheCallback,
@@ -2806,7 +2805,7 @@ initialize_acl(void)
/*
* RoleMembershipCacheCallback
- * Syscache inval callback function
+ * Syscache inval callback function
*/
static void
RoleMembershipCacheCallback(Datum arg, Oid relid)
@@ -2853,19 +2852,19 @@ has_rolinherit(Oid roleid)
static List *
roles_has_privs_of(Oid roleid)
{
- List *roles_list;
- ListCell *l;
- List *new_cached_privs_roles;
- MemoryContext oldctx;
+ List *roles_list;
+ ListCell *l;
+ List *new_cached_privs_roles;
+ MemoryContext oldctx;
/* If cache is already valid, just return the list */
if (OidIsValid(cached_privs_role) && cached_privs_role == roleid)
return cached_privs_roles;
- /*
- * Find all the roles that roleid is a member of,
- * including multi-level recursion. The role itself will always
- * be the first element of the resulting list.
+ /*
+ * Find all the roles that roleid is a member of, including multi-level
+ * recursion. The role itself will always be the first element of the
+ * resulting list.
*
* Each element of the list is scanned to see if it adds any indirect
* memberships. We can use a single list as both the record of
@@ -2877,9 +2876,9 @@ roles_has_privs_of(Oid roleid)
foreach(l, roles_list)
{
- Oid memberid = lfirst_oid(l);
- CatCList *memlist;
- int i;
+ Oid memberid = lfirst_oid(l);
+ CatCList *memlist;
+ int i;
/* Ignore non-inheriting roles */
if (!has_rolinherit(memberid))
@@ -2892,12 +2891,12 @@ roles_has_privs_of(Oid roleid)
for (i = 0; i < memlist->n_members; i++)
{
HeapTuple tup = &memlist->members[i]->tuple;
- Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
+ Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
/*
* Even though there shouldn't be any loops in the membership
- * graph, we must test for having already seen this role.
- * It is legal for instance to have both A->B and A->C->B.
+ * graph, we must test for having already seen this role. It is
+ * legal for instance to have both A->B and A->C->B.
*/
roles_list = list_append_unique_oid(roles_list, otherid);
}
@@ -2915,7 +2914,7 @@ roles_has_privs_of(Oid roleid)
/*
* Now safe to assign to state variable
*/
- cached_privs_role = InvalidOid; /* just paranoia */
+ cached_privs_role = InvalidOid; /* just paranoia */
list_free(cached_privs_roles);
cached_privs_roles = new_cached_privs_roles;
cached_privs_role = roleid;
@@ -2937,19 +2936,19 @@ roles_has_privs_of(Oid roleid)
static List *
roles_is_member_of(Oid roleid)
{
- List *roles_list;
- ListCell *l;
- List *new_cached_membership_roles;
- MemoryContext oldctx;
+ List *roles_list;
+ ListCell *l;
+ List *new_cached_membership_roles;
+ MemoryContext oldctx;
/* If cache is already valid, just return the list */
if (OidIsValid(cached_member_role) && cached_member_role == roleid)
return cached_membership_roles;
- /*
- * Find all the roles that roleid is a member of,
- * including multi-level recursion. The role itself will always
- * be the first element of the resulting list.
+ /*
+ * Find all the roles that roleid is a member of, including multi-level
+ * recursion. The role itself will always be the first element of the
+ * resulting list.
*
* Each element of the list is scanned to see if it adds any indirect
* memberships. We can use a single list as both the record of
@@ -2961,9 +2960,9 @@ roles_is_member_of(Oid roleid)
foreach(l, roles_list)
{
- Oid memberid = lfirst_oid(l);
- CatCList *memlist;
- int i;
+ Oid memberid = lfirst_oid(l);
+ CatCList *memlist;
+ int i;
/* Find roles that memberid is directly a member of */
memlist = SearchSysCacheList(AUTHMEMMEMROLE, 1,
@@ -2972,12 +2971,12 @@ roles_is_member_of(Oid roleid)
for (i = 0; i < memlist->n_members; i++)
{
HeapTuple tup = &memlist->members[i]->tuple;
- Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
+ Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
/*
* Even though there shouldn't be any loops in the membership
- * graph, we must test for having already seen this role.
- * It is legal for instance to have both A->B and A->C->B.
+ * graph, we must test for having already seen this role. It is
+ * legal for instance to have both A->B and A->C->B.
*/
roles_list = list_append_unique_oid(roles_list, otherid);
}
@@ -3023,7 +3022,7 @@ has_privs_of_role(Oid member, Oid role)
if (superuser_arg(member))
return true;
- /*
+ /*
* Find all the roles that member has the privileges of, including
* multi-level recursion, then see if target role is any one of them.
*/
@@ -3047,7 +3046,7 @@ is_member_of_role(Oid member, Oid role)
if (superuser_arg(member))
return true;
- /*
+ /*
* Find all the roles that member is a member of, including multi-level
* recursion, then see if target role is any one of them.
*/
@@ -3080,8 +3079,8 @@ bool
is_admin_of_role(Oid member, Oid role)
{
bool result = false;
- List *roles_list;
- ListCell *l;
+ List *roles_list;
+ ListCell *l;
/* Fast path for simple case */
if (member == role)
@@ -3091,18 +3090,18 @@ is_admin_of_role(Oid member, Oid role)
if (superuser_arg(member))
return true;
- /*
- * Find all the roles that member is a member of,
- * including multi-level recursion. We build a list in the same way
- * that is_member_of_role does to track visited and unvisited roles.
+ /*
+ * Find all the roles that member is a member of, including multi-level
+ * recursion. We build a list in the same way that is_member_of_role does
+ * to track visited and unvisited roles.
*/
roles_list = list_make1_oid(member);
foreach(l, roles_list)
{
- Oid memberid = lfirst_oid(l);
- CatCList *memlist;
- int i;
+ Oid memberid = lfirst_oid(l);
+ CatCList *memlist;
+ int i;
/* Find roles that memberid is directly a member of */
memlist = SearchSysCacheList(AUTHMEMMEMROLE, 1,
@@ -3111,7 +3110,7 @@ is_admin_of_role(Oid member, Oid role)
for (i = 0; i < memlist->n_members; i++)
{
HeapTuple tup = &memlist->members[i]->tuple;
- Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
+ Oid otherid = ((Form_pg_auth_members) GETSTRUCT(tup))->roleid;
if (otherid == role &&
((Form_pg_auth_members) GETSTRUCT(tup))->admin_option)
@@ -3138,7 +3137,7 @@ is_admin_of_role(Oid member, Oid role)
static int
count_one_bits(AclMode mask)
{
- int nbits = 0;
+ int nbits = 0;
/* this code relies on AclMode being an unsigned type */
while (mask)
@@ -3157,14 +3156,14 @@ count_one_bits(AclMode mask)
* The grantor must always be either the object owner or some role that has
* been explicitly granted grant options. This ensures that all granted
* privileges appear to flow from the object owner, and there are never
- * multiple "original sources" of a privilege. Therefore, if the would-be
+ * multiple "original sources" of a privilege. Therefore, if the would-be
* grantor is a member of a role that has the needed grant options, we have
* to do the grant as that role instead.
*
* It is possible that the would-be grantor is a member of several roles
* that have different subsets of the desired grant options, but no one
* role has 'em all. In this case we pick a role with the largest number
- * of desired options. Ties are broken in favor of closer ancestors.
+ * of desired options. Ties are broken in favor of closer ancestors.
*
* roleId: the role attempting to do the GRANT/REVOKE
* privileges: the privileges to be granted/revoked
@@ -3181,15 +3180,15 @@ select_best_grantor(Oid roleId, AclMode privileges,
Oid *grantorId, AclMode *grantOptions)
{
AclMode needed_goptions = ACL_GRANT_OPTION_FOR(privileges);
- List *roles_list;
+ List *roles_list;
int nrights;
ListCell *l;
/*
- * The object owner is always treated as having all grant options,
- * so if roleId is the owner it's easy. Also, if roleId is a superuser
- * it's easy: superusers are implicitly members of every role, so they
- * act as the object owner.
+ * The object owner is always treated as having all grant options, so if
+ * roleId is the owner it's easy. Also, if roleId is a superuser it's
+ * easy: superusers are implicitly members of every role, so they act as
+ * the object owner.
*/
if (roleId == ownerId || superuser_arg(roleId))
{
@@ -3200,8 +3199,8 @@ select_best_grantor(Oid roleId, AclMode privileges,
/*
* Otherwise we have to do a careful search to see if roleId has the
- * privileges of any suitable role. Note: we can hang onto the result
- * of roles_has_privs_of() throughout this loop, because aclmask_direct()
+ * privileges of any suitable role. Note: we can hang onto the result of
+ * roles_has_privs_of() throughout this loop, because aclmask_direct()
* doesn't query any role memberships.
*/
roles_list = roles_has_privs_of(roleId);
@@ -3213,8 +3212,8 @@ select_best_grantor(Oid roleId, AclMode privileges,
foreach(l, roles_list)
{
- Oid otherrole = lfirst_oid(l);
- AclMode otherprivs;
+ Oid otherrole = lfirst_oid(l);
+ AclMode otherprivs;
otherprivs = aclmask_direct(acl, otherrole, ownerId,
needed_goptions, ACLMASK_ALL);
@@ -3225,13 +3224,14 @@ select_best_grantor(Oid roleId, AclMode privileges,
*grantOptions = otherprivs;
return;
}
+
/*
* If it has just some of the needed privileges, remember best
* candidate.
*/
if (otherprivs != ACL_NO_RIGHTS)
{
- int nnewrights = count_one_bits(otherprivs);
+ int nnewrights = count_one_bits(otherprivs);
if (nnewrights > nrights)
{
diff --git a/src/backend/utils/adt/array_userfuncs.c b/src/backend/utils/adt/array_userfuncs.c
index fd83025d6e2..08a7072634c 100644
--- a/src/backend/utils/adt/array_userfuncs.c
+++ b/src/backend/utils/adt/array_userfuncs.c
@@ -6,7 +6,7 @@
* Copyright (c) 2003-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/array_userfuncs.c,v 1.15 2005/01/01 20:44:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/array_userfuncs.c,v 1.16 2005/10/15 02:49:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -96,17 +96,17 @@ array_push(PG_FUNCTION_ARGS)
else
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
- errmsg("argument must be empty or one-dimensional array")));
+ errmsg("argument must be empty or one-dimensional array")));
/*
- * We arrange to look up info about element type only once per series
- * of calls, assuming the element type doesn't change underneath us.
+ * We arrange to look up info about element type only once per series of
+ * calls, assuming the element type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
@@ -194,8 +194,8 @@ array_cat(PG_FUNCTION_ARGS)
ndims2 = ARR_NDIM(v2);
/*
- * short circuit - if one input array is empty, and the other is not,
- * we return the non-empty one as the result
+ * short circuit - if one input array is empty, and the other is not, we
+ * return the non-empty one as the result
*
* if both are empty, return the first one
*/
@@ -245,8 +245,8 @@ array_cat(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("cannot concatenate incompatible arrays"),
- errdetail("Arrays with differing element dimensions are "
- "not compatible for concatenation.")));
+ errdetail("Arrays with differing element dimensions are "
+ "not compatible for concatenation.")));
dims[i] = dims1[i];
lbs[i] = lbs1[i];
@@ -255,9 +255,8 @@ array_cat(PG_FUNCTION_ARGS)
else if (ndims1 == ndims2 - 1)
{
/*
- * resulting array has the second argument as the outer array,
- * with the first argument appended to the front of the outer
- * dimension
+ * resulting array has the second argument as the outer array, with
+ * the first argument appended to the front of the outer dimension
*/
ndims = ndims2;
dims = (int *) palloc(ndims * sizeof(int));
@@ -278,8 +277,8 @@ array_cat(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("cannot concatenate incompatible arrays"),
- errdetail("Arrays with differing dimensions are not "
- "compatible for concatenation.")));
+ errdetail("Arrays with differing dimensions are not "
+ "compatible for concatenation.")));
}
}
else
@@ -287,8 +286,8 @@ array_cat(PG_FUNCTION_ARGS)
/*
* (ndims1 == ndims2 + 1)
*
- * resulting array has the first argument as the outer array, with
- * the second argument appended to the end of the outer dimension
+ * resulting array has the first argument as the outer array, with the
+ * second argument appended to the end of the outer dimension
*/
ndims = ndims1;
dims = (int *) palloc(ndims * sizeof(int));
@@ -306,8 +305,8 @@ array_cat(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("cannot concatenate incompatible arrays"),
- errdetail("Arrays with differing dimensions are not "
- "compatible for concatenation.")));
+ errdetail("Arrays with differing dimensions are not "
+ "compatible for concatenation.")));
}
}
@@ -351,7 +350,7 @@ create_singleton_array(FunctionCallInfo fcinfo,
if (element_type == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid array element type OID: %u", element_type)));
+ errmsg("invalid array element type OID: %u", element_type)));
if (ndims < 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
@@ -371,14 +370,14 @@ create_singleton_array(FunctionCallInfo fcinfo,
}
/*
- * We arrange to look up info about element type only once per series
- * of calls, assuming the element type doesn't change underneath us.
+ * We arrange to look up info about element type only once per series of
+ * calls, assuming the element type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index efb4ea9dc14..5304d47fa8a 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.122 2005/08/15 19:40:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.123 2005/10/15 02:49:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -130,8 +130,7 @@ array_in(PG_FUNCTION_ARGS)
char *string = PG_GETARG_CSTRING(0); /* external form */
Oid element_type = PG_GETARG_OID(1); /* type of an array
* element */
- int32 typmod = PG_GETARG_INT32(2); /* typmod for array
- * elements */
+ int32 typmod = PG_GETARG_INT32(2); /* typmod for array elements */
int typlen;
bool typbyval;
char typalign;
@@ -151,14 +150,14 @@ array_in(PG_FUNCTION_ARGS)
/*
* We arrange to look up info about element type, including its input
- * conversion proc, only once per series of calls, assuming the
- * element type doesn't change underneath us.
+ * conversion proc, only once per series of calls, assuming the element
+ * type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = ~element_type;
}
@@ -166,8 +165,7 @@ array_in(PG_FUNCTION_ARGS)
if (my_extra->element_type != element_type)
{
/*
- * Get info about element type, including its input conversion
- * proc
+ * Get info about element type, including its input conversion proc
*/
get_type_io_data(element_type, IOFunc_input,
&my_extra->typlen, &my_extra->typbyval,
@@ -191,8 +189,8 @@ array_in(PG_FUNCTION_ARGS)
* Otherwise, we require the input to be in curly-brace style, and we
* prescan the input to determine dimensions.
*
- * Dimension info takes the form of one or more [n] or [m:n] items. The
- * outer loop iterates once per dimension item.
+ * Dimension info takes the form of one or more [n] or [m:n] items. The outer
+ * loop iterates once per dimension item.
*/
p = string_save;
ndim = 0;
@@ -250,7 +248,7 @@ array_in(PG_FUNCTION_ARGS)
if (ub < lBound[ndim])
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("upper bound cannot be less than lower bound")));
+ errmsg("upper bound cannot be less than lower bound")));
dim[ndim] = ub - lBound[ndim] + 1;
ndim++;
@@ -282,8 +280,8 @@ array_in(PG_FUNCTION_ARGS)
p++;
/*
- * intuit dimensions from brace structure -- it better match what
- * we were given
+ * intuit dimensions from brace structure -- it better match what we
+ * were given
*/
if (*p != '{')
ereport(ERROR,
@@ -293,13 +291,13 @@ array_in(PG_FUNCTION_ARGS)
if (ndim_braces != ndim)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("array dimensions incompatible with array literal")));
+ errmsg("array dimensions incompatible with array literal")));
for (i = 0; i < ndim; ++i)
{
if (dim[i] != dim_braces[i])
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("array dimensions incompatible with array literal")));
+ errmsg("array dimensions incompatible with array literal")));
}
}
@@ -406,22 +404,22 @@ ArrayCount(char *str, int *dim, char typdelim)
/* Signal a premature end of the string */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ errmsg("malformed array literal: \"%s\"", str)));
break;
case '\\':
/*
- * An escape must be after a level start, after an
- * element start, or after an element delimiter. In
- * any case we now must be past an element start.
+ * An escape must be after a level start, after an element
+ * start, or after an element delimiter. In any case we
+ * now must be past an element start.
*/
if (parse_state != ARRAY_LEVEL_STARTED &&
parse_state != ARRAY_ELEM_STARTED &&
parse_state != ARRAY_QUOTED_ELEM_STARTED &&
parse_state != ARRAY_ELEM_DELIMITED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
if (parse_state != ARRAY_QUOTED_ELEM_STARTED)
parse_state = ARRAY_ELEM_STARTED;
/* skip the escaped character */
@@ -429,22 +427,22 @@ ArrayCount(char *str, int *dim, char typdelim)
ptr++;
else
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
break;
case '\"':
/*
* A quote must be after a level start, after a quoted
- * element start, or after an element delimiter. In
- * any case we now must be past an element start.
+ * element start, or after an element delimiter. In any
+ * case we now must be past an element start.
*/
if (parse_state != ARRAY_LEVEL_STARTED &&
parse_state != ARRAY_QUOTED_ELEM_STARTED &&
parse_state != ARRAY_ELEM_DELIMITED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
in_quotes = !in_quotes;
if (in_quotes)
parse_state = ARRAY_QUOTED_ELEM_STARTED;
@@ -455,22 +453,22 @@ ArrayCount(char *str, int *dim, char typdelim)
if (!in_quotes)
{
/*
- * A left brace can occur if no nesting has
- * occurred yet, after a level start, or after a
- * level delimiter.
+ * A left brace can occur if no nesting has occurred
+ * yet, after a level start, or after a level
+ * delimiter.
*/
if (parse_state != ARRAY_NO_LEVEL &&
parse_state != ARRAY_LEVEL_STARTED &&
parse_state != ARRAY_LEVEL_DELIMITED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
parse_state = ARRAY_LEVEL_STARTED;
if (nest_level >= MAXDIM)
ereport(ERROR,
- (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("number of array dimensions (%d) exceeds the maximum allowed (%d)",
- nest_level, MAXDIM)));
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("number of array dimensions (%d) exceeds the maximum allowed (%d)",
+ nest_level, MAXDIM)));
temp[nest_level] = 0;
nest_level++;
if (ndim < nest_level)
@@ -481,9 +479,9 @@ ArrayCount(char *str, int *dim, char typdelim)
if (!in_quotes)
{
/*
- * A right brace can occur after an element start,
- * an element completion, a quoted element
- * completion, or a level completion.
+ * A right brace can occur after an element start, an
+ * element completion, a quoted element completion, or
+ * a level completion.
*/
if (parse_state != ARRAY_ELEM_STARTED &&
parse_state != ARRAY_ELEM_COMPLETED &&
@@ -491,22 +489,22 @@ ArrayCount(char *str, int *dim, char typdelim)
parse_state != ARRAY_LEVEL_COMPLETED &&
!(nest_level == 1 && parse_state == ARRAY_LEVEL_STARTED))
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
parse_state = ARRAY_LEVEL_COMPLETED;
if (nest_level == 0)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
nest_level--;
if ((nelems_last[nest_level] != 1) &&
- (nelems[nest_level] != nelems_last[nest_level]))
+ (nelems[nest_level] != nelems_last[nest_level]))
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("multidimensional arrays must have "
- "array expressions with matching "
- "dimensions")));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("multidimensional arrays must have "
+ "array expressions with matching "
+ "dimensions")));
nelems_last[nest_level] = nelems[nest_level];
nelems[nest_level] = 1;
if (nest_level == 0)
@@ -527,17 +525,17 @@ ArrayCount(char *str, int *dim, char typdelim)
if (*ptr == typdelim)
{
/*
- * Delimiters can occur after an element
- * start, an element completion, a quoted
- * element completion, or a level completion.
+ * Delimiters can occur after an element start, an
+ * element completion, a quoted element
+ * completion, or a level completion.
*/
if (parse_state != ARRAY_ELEM_STARTED &&
parse_state != ARRAY_ELEM_COMPLETED &&
- parse_state != ARRAY_QUOTED_ELEM_COMPLETED &&
+ parse_state != ARRAY_QUOTED_ELEM_COMPLETED &&
parse_state != ARRAY_LEVEL_COMPLETED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
if (parse_state == ARRAY_LEVEL_COMPLETED)
parse_state = ARRAY_LEVEL_DELIMITED;
else
@@ -549,16 +547,16 @@ ArrayCount(char *str, int *dim, char typdelim)
{
/*
* Other non-space characters must be after a
- * level start, after an element start, or
- * after an element delimiter. In any case we
- * now must be past an element start.
+ * level start, after an element start, or after
+ * an element delimiter. In any case we now must
+ * be past an element start.
*/
if (parse_state != ARRAY_LEVEL_STARTED &&
parse_state != ARRAY_ELEM_STARTED &&
parse_state != ARRAY_ELEM_DELIMITED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
parse_state = ARRAY_ELEM_STARTED;
}
}
@@ -637,18 +635,18 @@ ReadArrayStr(char *arrayStr,
MemSet(indx, 0, sizeof(indx));
/*
- * We have to remove " and \ characters to create a clean item value
- * to pass to the datatype input routine. We overwrite each item
- * value in-place within arrayStr to do this. srcptr is the current
- * scan point, and dstptr is where we are copying to.
+ * We have to remove " and \ characters to create a clean item value to
+ * pass to the datatype input routine. We overwrite each item value
+ * in-place within arrayStr to do this. srcptr is the current scan point,
+ * and dstptr is where we are copying to.
*
- * We also want to suppress leading and trailing unquoted whitespace.
- * We use the leadingspace flag to suppress leading space. Trailing
- * space is tracked by using dstendptr to point to the last significant
- * output character.
+ * We also want to suppress leading and trailing unquoted whitespace. We use
+ * the leadingspace flag to suppress leading space. Trailing space is
+ * tracked by using dstendptr to point to the last significant output
+ * character.
*
- * The error checking in this routine is mostly pro-forma, since we
- * expect that ArrayCount() already validated the string.
+ * The error checking in this routine is mostly pro-forma, since we expect
+ * that ArrayCount() already validated the string.
*/
srcptr = arrayStr;
while (!eoArray)
@@ -706,9 +704,9 @@ ReadArrayStr(char *arrayStr,
{
if (nest_level >= ndim)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"",
- origStr)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"",
+ origStr)));
nest_level++;
indx[nest_level - 1] = 0;
srcptr++;
@@ -721,9 +719,9 @@ ReadArrayStr(char *arrayStr,
{
if (nest_level == 0)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"",
- origStr)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"",
+ origStr)));
if (i == -1)
i = ArrayGetOffset0(ndim, indx, prod);
indx[nest_level - 1] = 0;
@@ -751,8 +749,8 @@ ReadArrayStr(char *arrayStr,
else if (isspace((unsigned char) *srcptr))
{
/*
- * If leading space, drop it immediately. Else,
- * copy but don't advance dstendptr.
+ * If leading space, drop it immediately. Else, copy
+ * but don't advance dstendptr.
*/
if (leadingspace)
srcptr++;
@@ -913,14 +911,14 @@ array_out(PG_FUNCTION_ARGS)
/*
* We arrange to look up info about element type, including its output
- * conversion proc, only once per series of calls, assuming the
- * element type doesn't change underneath us.
+ * conversion proc, only once per series of calls, assuming the element
+ * type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
@@ -928,8 +926,7 @@ array_out(PG_FUNCTION_ARGS)
if (my_extra->element_type != element_type)
{
/*
- * Get info about element type, including its output conversion
- * proc
+ * Get info about element type, including its output conversion proc
*/
get_type_io_data(element_type, IOFunc_output,
&my_extra->typlen, &my_extra->typbyval,
@@ -956,8 +953,8 @@ array_out(PG_FUNCTION_ARGS)
}
/*
- * we will need to add explicit dimensions if any dimension has a
- * lower bound other than one
+ * we will need to add explicit dimensions if any dimension has a lower
+ * bound other than one
*/
for (i = 0; i < ndim; i++)
{
@@ -969,9 +966,9 @@ array_out(PG_FUNCTION_ARGS)
}
/*
- * Convert all values to string form, count total space needed
- * (including any overhead such as escaping backslashes), and detect
- * whether each item needs double quotes.
+ * Convert all values to string form, count total space needed (including
+ * any overhead such as escaping backslashes), and detect whether each
+ * item needs double quotes.
*/
values = (char **) palloc(nitems * sizeof(char *));
needquotes = (bool *) palloc(nitems * sizeof(bool));
@@ -991,7 +988,7 @@ array_out(PG_FUNCTION_ARGS)
/* count data plus backslashes; detect chars needing quotes */
if (values[i][0] == '\0')
- needquote = true; /* force quotes for empty string */
+ needquote = true; /* force quotes for empty string */
else
needquote = false;
@@ -1121,8 +1118,7 @@ array_recv(PG_FUNCTION_ARGS)
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
Oid spec_element_type = PG_GETARG_OID(1); /* type of an array
* element */
- int32 typmod = PG_GETARG_INT32(2); /* typmod for array
- * elements */
+ int32 typmod = PG_GETARG_INT32(2); /* typmod for array elements */
Oid element_type;
int typlen;
bool typbyval;
@@ -1174,15 +1170,15 @@ array_recv(PG_FUNCTION_ARGS)
nitems = ArrayGetNItems(ndim, dim);
/*
- * We arrange to look up info about element type, including its
- * receive conversion proc, only once per series of calls, assuming
- * the element type doesn't change underneath us.
+ * We arrange to look up info about element type, including its receive
+ * conversion proc, only once per series of calls, assuming the element
+ * type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = ~element_type;
}
@@ -1197,8 +1193,8 @@ array_recv(PG_FUNCTION_ARGS)
if (!OidIsValid(my_extra->typiofunc))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("no binary input function available for type %s",
- format_type_be(element_type))));
+ errmsg("no binary input function available for type %s",
+ format_type_be(element_type))));
fmgr_info_cxt(my_extra->typiofunc, &my_extra->proc,
fcinfo->flinfo->fn_mcxt);
my_extra->element_type = element_type;
@@ -1278,10 +1274,10 @@ ReadArrayBinary(StringInfo buf,
errmsg("insufficient data left in message")));
/*
- * Rather than copying data around, we just set up a phony
- * StringInfo pointing to the correct portion of the input buffer.
- * We assume we can scribble on the input buffer so as to maintain
- * the convention that StringInfos have a trailing null.
+ * Rather than copying data around, we just set up a phony StringInfo
+ * pointing to the correct portion of the input buffer. We assume we
+ * can scribble on the input buffer so as to maintain the convention
+ * that StringInfos have a trailing null.
*/
elem_buf.data = &buf->data[buf->cursor];
elem_buf.maxlen = itemlen + 1;
@@ -1359,14 +1355,14 @@ array_send(PG_FUNCTION_ARGS)
/*
* We arrange to look up info about element type, including its send
- * conversion proc, only once per series of calls, assuming the
- * element type doesn't change underneath us.
+ * conversion proc, only once per series of calls, assuming the element
+ * type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
@@ -1381,8 +1377,8 @@ array_send(PG_FUNCTION_ARGS)
if (!OidIsValid(my_extra->typiofunc))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("no binary output function available for type %s",
- format_type_be(element_type))));
+ errmsg("no binary output function available for type %s",
+ format_type_be(element_type))));
fmgr_info_cxt(my_extra->typiofunc, &my_extra->proc,
fcinfo->flinfo->fn_mcxt);
my_extra->element_type = element_type;
@@ -1646,14 +1642,14 @@ array_get_slice(ArrayType *array,
if (arraylen > 0)
{
/*
- * fixed-length arrays -- currently, cannot slice these because
- * parser labels output as being of the fixed-length array type!
- * Code below shows how we could support it if the parser were
- * changed to label output as a suitable varlena array type.
+ * fixed-length arrays -- currently, cannot slice these because parser
+ * labels output as being of the fixed-length array type! Code below
+ * shows how we could support it if the parser were changed to label
+ * output as a suitable varlena array type.
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("slices of fixed-length arrays not implemented")));
+ errmsg("slices of fixed-length arrays not implemented")));
/*
* fixed-length arrays -- these are assumed to be 1-d, 0-based XXX
@@ -1678,10 +1674,9 @@ array_get_slice(ArrayType *array,
}
/*
- * Check provided subscripts. A slice exceeding the current array
- * limits is silently truncated to the array limits. If we end up
- * with an empty slice, return NULL (should it be an empty array
- * instead?)
+ * Check provided subscripts. A slice exceeding the current array limits
+ * is silently truncated to the array limits. If we end up with an empty
+ * slice, return NULL (should it be an empty array instead?)
*/
if (ndim < nSubscripts || ndim <= 0 || ndim > MAXDIM)
RETURN_NULL(ArrayType *);
@@ -1719,8 +1714,8 @@ array_get_slice(ArrayType *array,
memcpy(ARR_DIMS(newarray), span, ndim * sizeof(int));
/*
- * Lower bounds of the new array are set to 1. Formerly (before 7.3)
- * we copied the given lowerIndx values ... but that seems confusing.
+ * Lower bounds of the new array are set to 1. Formerly (before 7.3) we
+ * copied the given lowerIndx values ... but that seems confusing.
*/
newlb = ARR_LBOUND(newarray);
for (i = 0; i < ndim; i++)
@@ -1815,9 +1810,9 @@ array_set(ArrayType *array,
ndim = ARR_NDIM(array);
/*
- * if number of dims is zero, i.e. an empty array, create an array
- * with nSubscripts dimensions, and set the lower bounds to the
- * supplied subscripts
+ * if number of dims is zero, i.e. an empty array, create an array with
+ * nSubscripts dimensions, and set the lower bounds to the supplied
+ * subscripts
*/
if (ndim == 0)
{
@@ -1987,7 +1982,7 @@ array_set_slice(ArrayType *array,
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("updates on slices of fixed-length arrays not implemented")));
+ errmsg("updates on slices of fixed-length arrays not implemented")));
}
/* detoast arrays if necessary */
@@ -1999,9 +1994,9 @@ array_set_slice(ArrayType *array,
ndim = ARR_NDIM(array);
/*
- * if number of dims is zero, i.e. an empty array, create an array
- * with nSubscripts dimensions, and set the upper and lower bounds to
- * the supplied subscripts
+ * if number of dims is zero, i.e. an empty array, create an array with
+ * nSubscripts dimensions, and set the upper and lower bounds to the
+ * supplied subscripts
*/
if (ndim == 0)
{
@@ -2038,10 +2033,9 @@ array_set_slice(ArrayType *array,
memcpy(lb, ARR_LBOUND(array), ndim * sizeof(int));
/*
- * Check provided subscripts. A slice exceeding the current array
- * limits throws an error, *except* in the 1-D case where we will
- * extend the array as long as no hole is created. An empty slice is
- * an error, too.
+ * Check provided subscripts. A slice exceeding the current array limits
+ * throws an error, *except* in the 1-D case where we will extend the
+ * array as long as no hole is created. An empty slice is an error, too.
*/
for (i = 0; i < nSubscripts; i++)
{
@@ -2083,8 +2077,8 @@ array_set_slice(ArrayType *array,
}
/*
- * Make sure source array has enough entries. Note we ignore the
- * shape of the source array and just read entries serially.
+ * Make sure source array has enough entries. Note we ignore the shape of
+ * the source array and just read entries serially.
*/
mda_get_range(ndim, span, lowerIndx, upperIndx);
nsrcitems = ArrayGetNItems(ndim, span);
@@ -2104,8 +2098,8 @@ array_set_slice(ArrayType *array,
if (ndim > 1)
{
/*
- * here we do not need to cope with extension of the array; it
- * would be a lot more complicated if we had to do so...
+ * here we do not need to cope with extension of the array; it would
+ * be a lot more complicated if we had to do so...
*/
olditemsize = array_slice_size(ndim, dim, lb, ARR_DATA_PTR(array),
lowerIndx, upperIndx,
@@ -2115,8 +2109,7 @@ array_set_slice(ArrayType *array,
else
{
/*
- * here we must allow for possibility of slice larger than orig
- * array
+ * here we must allow for possibility of slice larger than orig array
*/
int oldlb = ARR_LBOUND(array)[0];
int oldub = oldlb + ARR_DIMS(array)[0] - 1;
@@ -2148,8 +2141,8 @@ array_set_slice(ArrayType *array,
if (ndim > 1)
{
/*
- * here we do not need to cope with extension of the array; it
- * would be a lot more complicated if we had to do so...
+ * here we do not need to cope with extension of the array; it would
+ * be a lot more complicated if we had to do so...
*/
array_insert_slice(ndim, dim, lb, ARR_DATA_PTR(array), olddatasize,
ARR_DATA_PTR(newarray),
@@ -2192,7 +2185,7 @@ array_set_slice(ArrayType *array,
* or binary-compatible with, the first argument type of fn().
* * retType: OID of element type of output array. This must be the same as,
* or binary-compatible with, the result type of fn().
- * * amstate: workspace for array_map. Must be zeroed by caller before
+ * * amstate: workspace for array_map. Must be zeroed by caller before
* first call, and not touched after that.
*
* It is legitimate to pass a freshly-zeroed ArrayMapState on each call,
@@ -2250,9 +2243,9 @@ array_map(FunctionCallInfo fcinfo, Oid inpType, Oid retType,
}
/*
- * We arrange to look up info about input and return element types
- * only once per series of calls, assuming the element type doesn't
- * change underneath us.
+ * We arrange to look up info about input and return element types only
+ * once per series of calls, assuming the element type doesn't change
+ * underneath us.
*/
inp_extra = &amstate->inp_extra;
ret_extra = &amstate->ret_extra;
@@ -2297,9 +2290,9 @@ array_map(FunctionCallInfo fcinfo, Oid inpType, Oid retType,
/*
* Apply the given function to source elt and extra args.
*
- * We assume the extra args are non-NULL, so need not check whether
- * fn() is strict. Would need to do more work here to support
- * arrays containing nulls, too.
+ * We assume the extra args are non-NULL, so need not check whether fn()
+ * is strict. Would need to do more work here to support arrays
+ * containing nulls, too.
*/
fcinfo->arg[0] = elt;
fcinfo->argnull[0] = false;
@@ -2329,8 +2322,7 @@ array_map(FunctionCallInfo fcinfo, Oid inpType, Oid retType,
memcpy(ARR_DIMS(result), ARR_DIMS(v), 2 * ndim * sizeof(int));
/*
- * Note: do not risk trying to pfree the results of the called
- * function
+ * Note: do not risk trying to pfree the results of the called function
*/
CopyArrayEls(ARR_DATA_PTR(result), values, nitems,
typlen, typbyval, typalign, false);
@@ -2543,7 +2535,7 @@ array_eq(PG_FUNCTION_ARGS)
if (element_type != ARR_ELEMTYPE(array2))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot compare arrays of different element types")));
+ errmsg("cannot compare arrays of different element types")));
/* fast path if the arrays do not have the same number of elements */
if (nitems1 != nitems2)
@@ -2551,10 +2543,10 @@ array_eq(PG_FUNCTION_ARGS)
else
{
/*
- * We arrange to look up the equality function only once per
- * series of calls, assuming the element type doesn't change
- * underneath us. The typcache is used so that we have no memory
- * leakage when being used as an index support function.
+ * We arrange to look up the equality function only once per series of
+ * calls, assuming the element type doesn't change underneath us. The
+ * typcache is used so that we have no memory leakage when being used
+ * as an index support function.
*/
typentry = (TypeCacheEntry *) fcinfo->flinfo->fn_extra;
if (typentry == NULL ||
@@ -2565,8 +2557,8 @@ array_eq(PG_FUNCTION_ARGS)
if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(element_type))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(element_type))));
fcinfo->flinfo->fn_extra = (void *) typentry;
}
typlen = typentry->typlen;
@@ -2697,13 +2689,13 @@ array_cmp(FunctionCallInfo fcinfo)
if (element_type != ARR_ELEMTYPE(array2))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot compare arrays of different element types")));
+ errmsg("cannot compare arrays of different element types")));
/*
- * We arrange to look up the comparison function only once per series
- * of calls, assuming the element type doesn't change underneath us.
- * The typcache is used so that we have no memory leakage when being
- * used as an index support function.
+ * We arrange to look up the comparison function only once per series of
+ * calls, assuming the element type doesn't change underneath us. The
+ * typcache is used so that we have no memory leakage when being used as
+ * an index support function.
*/
typentry = (TypeCacheEntry *) fcinfo->flinfo->fn_extra;
if (typentry == NULL ||
@@ -2714,8 +2706,8 @@ array_cmp(FunctionCallInfo fcinfo)
if (!OidIsValid(typentry->cmp_proc_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify a comparison function for type %s",
- format_type_be(element_type))));
+ errmsg("could not identify a comparison function for type %s",
+ format_type_be(element_type))));
fcinfo->flinfo->fn_extra = (void *) typentry;
}
typlen = typentry->typlen;
@@ -3121,11 +3113,11 @@ array_type_length_coerce_internal(ArrayType *src,
errmsg("target type is not an array")));
/*
- * We don't deal with domain constraints yet, so bail out. This
- * isn't currently a problem, because we also don't support arrays
- * of domain type elements either. But in the future we might. At
- * that point consideration should be given to removing the check
- * below and adding a domain constraints check to the coercion.
+ * We don't deal with domain constraints yet, so bail out. This isn't
+ * currently a problem, because we also don't support arrays of domain
+ * type elements either. But in the future we might. At that point
+ * consideration should be given to removing the check below and
+ * adding a domain constraints check to the coercion.
*/
if (getBaseType(tgt_elem_type) != tgt_elem_type)
ereport(ERROR,
@@ -3150,8 +3142,8 @@ array_type_length_coerce_internal(ArrayType *src,
}
/*
- * If it's binary-compatible, modify the element type in the array
- * header, but otherwise leave the array as we received it.
+ * If it's binary-compatible, modify the element type in the array header,
+ * but otherwise leave the array as we received it.
*/
if (my_extra->coerce_finfo.fn_oid == InvalidOid)
{
@@ -3166,8 +3158,8 @@ array_type_length_coerce_internal(ArrayType *src,
/*
* Use array_map to apply the function to each array element.
*
- * We pass on the desttypmod and isExplicit flags whether or not the
- * function wants them.
+ * We pass on the desttypmod and isExplicit flags whether or not the function
+ * wants them.
*/
InitFunctionCallInfoData(locfcinfo, &my_extra->coerce_finfo, 3,
NULL, NULL);
@@ -3207,8 +3199,8 @@ array_length_coerce(PG_FUNCTION_ARGS)
PG_RETURN_ARRAYTYPE_P(v);
/*
- * We arrange to look up the element type's coercion function only
- * once per series of calls, assuming the element type doesn't change
+ * We arrange to look up the element type's coercion function only once
+ * per series of calls, assuming the element type doesn't change
* underneath us.
*/
my_extra = (alc_extra *) fmgr_info->fn_extra;
@@ -3303,7 +3295,7 @@ accumArrayResult(ArrayBuildState *astate,
if ((astate->nelems % ARRAY_ELEMS_CHUNKSIZE) == 0)
astate->dvalues = (Datum *)
repalloc(astate->dvalues,
- (astate->nelems + ARRAY_ELEMS_CHUNKSIZE) * sizeof(Datum));
+ (astate->nelems + ARRAY_ELEMS_CHUNKSIZE) * sizeof(Datum));
}
if (disnull)
@@ -3381,9 +3373,9 @@ makeMdArrayResult(ArrayBuildState *astate,
Datum
array_larger(PG_FUNCTION_ARGS)
{
- ArrayType *v1,
- *v2,
- *result;
+ ArrayType *v1,
+ *v2,
+ *result;
v1 = PG_GETARG_ARRAYTYPE_P(0);
v2 = PG_GETARG_ARRAYTYPE_P(1);
@@ -3396,9 +3388,9 @@ array_larger(PG_FUNCTION_ARGS)
Datum
array_smaller(PG_FUNCTION_ARGS)
{
- ArrayType *v1,
- *v2,
- *result;
+ ArrayType *v1,
+ *v2,
+ *result;
v1 = PG_GETARG_ARRAYTYPE_P(0);
v2 = PG_GETARG_ARRAYTYPE_P(1);
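/*
 * Illustrative sketch, not part of the patch above: the array_eq/array_cmp
 * hunks reflow the comment describing the fn_extra caching idiom, in which
 * the element type's support function is looked up via the typcache once per
 * series of calls and remembered in flinfo->fn_extra.  The helper name
 * elements_equal is hypothetical; the lookup pattern follows the code shown
 * in the hunks.
 */
static bool
elements_equal(FunctionCallInfo fcinfo, Oid element_type, Datum a, Datum b)
{
	TypeCacheEntry *typentry = (TypeCacheEntry *) fcinfo->flinfo->fn_extra;

	/* Look up the equality function only when the element type changes. */
	if (typentry == NULL || typentry->type_id != element_type)
	{
		typentry = lookup_type_cache(element_type, TYPECACHE_EQ_OPR_FINFO);
		if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_FUNCTION),
					 errmsg("could not identify an equality operator for type %s",
							format_type_be(element_type))));
		fcinfo->flinfo->fn_extra = (void *) typentry;
	}

	/* Apply the cached equality function to the two element datums. */
	return DatumGetBool(FunctionCall2(&typentry->eq_opr_finfo, a, b));
}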
diff --git a/src/backend/utils/adt/ascii.c b/src/backend/utils/adt/ascii.c
index 361dec59f57..599b37b1f39 100644
--- a/src/backend/utils/adt/ascii.c
+++ b/src/backend/utils/adt/ascii.c
@@ -5,7 +5,7 @@
* Portions Copyright (c) 1999-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/ascii.c,v 1.25 2005/09/24 17:53:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ascii.c,v 1.26 2005/10/15 02:49:28 momjian Exp $
*
*-----------------------------------------------------------------------
*/
@@ -73,8 +73,8 @@ pg_to_ascii(unsigned char *src, unsigned char *src_end, unsigned char *dest, int
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("encoding conversion from %s to ASCII not supported",
- pg_encoding_to_char(enc))));
+ errmsg("encoding conversion from %s to ASCII not supported",
+ pg_encoding_to_char(enc))));
return; /* keep compiler quiet */
}
diff --git a/src/backend/utils/adt/cash.c b/src/backend/utils/adt/cash.c
index 8788af9f87e..f9e2f10325a 100644
--- a/src/backend/utils/adt/cash.c
+++ b/src/backend/utils/adt/cash.c
@@ -9,7 +9,7 @@
* workings can be found in the book "Software Solutions in C" by
* Dale Schumacher, Academic Press, ISBN: 0-12-632360-7.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/cash.c,v 1.65 2005/07/21 04:41:43 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/cash.c,v 1.66 2005/10/15 02:49:28 momjian Exp $
*/
#include "postgres.h"
@@ -85,14 +85,14 @@ cash_in(PG_FUNCTION_ARGS)
struct lconv *lconvert = PGLC_localeconv();
/*
- * frac_digits will be CHAR_MAX in some locales, notably C. However,
- * just testing for == CHAR_MAX is risky, because of compilers like
- * gcc that "helpfully" let you alter the platform-standard definition
- * of whether char is signed or not. If we are so unfortunate as to
- * get compiled with a nonstandard -fsigned-char or -funsigned-char
- * switch, then our idea of CHAR_MAX will not agree with libc's. The
- * safest course is not to test for CHAR_MAX at all, but to impose a
- * range check for plausible frac_digits values.
+ * frac_digits will be CHAR_MAX in some locales, notably C. However, just
+ * testing for == CHAR_MAX is risky, because of compilers like gcc that
+ * "helpfully" let you alter the platform-standard definition of whether
+ * char is signed or not. If we are so unfortunate as to get compiled
+ * with a nonstandard -fsigned-char or -funsigned-char switch, then our
+ * idea of CHAR_MAX will not agree with libc's. The safest course is not
+ * to test for CHAR_MAX at all, but to impose a range check for plausible
+ * frac_digits values.
*/
fpoint = lconvert->frac_digits;
if (fpoint < 0 || fpoint > 10)
@@ -195,7 +195,7 @@ cash_in(PG_FUNCTION_ARGS)
if (*s != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type money: \"%s\"", str)));
+ errmsg("invalid input syntax for type money: \"%s\"", str)));
result = value * sgn;
@@ -238,8 +238,8 @@ cash_out(PG_FUNCTION_ARGS)
points = 2; /* best guess in this case, I think */
/*
- * As with frac_digits, must apply a range check to mon_grouping to
- * avoid being fooled by variant CHAR_MAX values.
+ * As with frac_digits, must apply a range check to mon_grouping to avoid
+ * being fooled by variant CHAR_MAX values.
*/
mon_group = *lconvert->mon_grouping;
if (mon_group <= 0 || mon_group > 6)
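/*
 * Illustrative sketch, not part of the patch above: the reflowed cash_in and
 * cash_out comments describe range-checking locale values instead of testing
 * for CHAR_MAX, since a nonstandard signed/unsigned char setting can make the
 * compiled-in CHAR_MAX disagree with libc's.  The helper name and fallback
 * value are hypothetical, following the bounds visible in the hunk.
 */
static int
plausible_frac_digits(int frac_digits)
{
	/* Out of range (including CHAR_MAX meaning "unspecified") falls back to 2. */
	if (frac_digits < 0 || frac_digits > 10)
		return 2;
	return frac_digits;
}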
diff --git a/src/backend/utils/adt/char.c b/src/backend/utils/adt/char.c
index bc208164c1f..663fac909e6 100644
--- a/src/backend/utils/adt/char.c
+++ b/src/backend/utils/adt/char.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/char.c,v 1.42 2004/12/31 22:01:21 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/char.c,v 1.43 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -187,9 +187,9 @@ text_char(PG_FUNCTION_ARGS)
char result;
/*
- * An empty input string is converted to \0 (for consistency with
- * charin). If the input is longer than one character, the excess data
- * is silently discarded.
+ * An empty input string is converted to \0 (for consistency with charin).
+ * If the input is longer than one character, the excess data is silently
+ * discarded.
*/
if (VARSIZE(arg1) > VARHDRSZ)
result = *(VARDATA(arg1));
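/*
 * Illustrative sketch, not part of the patch above: the behavior the reflowed
 * text_char comment spells out.  An empty input maps to '\0', and only the
 * first byte of a longer value is kept; the helper name is hypothetical.
 */
static char
first_byte_or_nul(text *arg1)
{
	if (VARSIZE(arg1) > VARHDRSZ)
		return *(VARDATA(arg1));
	return '\0';
}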
diff --git a/src/backend/utils/adt/date.c b/src/backend/utils/adt/date.c
index ec1d808544b..619a099b654 100644
--- a/src/backend/utils/adt/date.c
+++ b/src/backend/utils/adt/date.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/date.c,v 1.121 2005/10/09 17:21:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/date.c,v 1.122 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -18,7 +18,7 @@
#include <ctype.h>
#include <limits.h>
#include <float.h>
-#include <time.h>
+#include <time.h>
#include "access/hash.h"
#include "libpq/pqformat.h"
@@ -38,10 +38,10 @@
#endif
-static int time2tm(TimeADT time, struct pg_tm *tm, fsec_t *fsec);
-static int timetz2tm(TimeTzADT *time, struct pg_tm *tm, fsec_t *fsec, int *tzp);
-static int tm2time(struct pg_tm *tm, fsec_t fsec, TimeADT *result);
-static int tm2timetz(struct pg_tm *tm, fsec_t fsec, int tz, TimeTzADT *result);
+static int time2tm(TimeADT time, struct pg_tm * tm, fsec_t *fsec);
+static int timetz2tm(TimeTzADT *time, struct pg_tm * tm, fsec_t *fsec, int *tzp);
+static int tm2time(struct pg_tm * tm, fsec_t fsec, TimeADT *result);
+static int tm2timetz(struct pg_tm * tm, fsec_t fsec, int tz, TimeTzADT *result);
static void AdjustTimeForTypmod(TimeADT *time, int32 typmod);
/*****************************************************************************
@@ -56,7 +56,7 @@ Datum
date_in(PG_FUNCTION_ARGS)
{
char *str = PG_GETARG_CSTRING(0);
- DateADT date;
+ DateADT date;
fsec_t fsec;
struct pg_tm tt,
*tm = &tt;
@@ -83,7 +83,7 @@ date_in(PG_FUNCTION_ARGS)
case DTK_CURRENT:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"current\" is no longer supported")));
+ errmsg("date/time value \"current\" is no longer supported")));
GetCurrentDateTime(tm);
break;
@@ -108,13 +108,13 @@ date_in(PG_FUNCTION_ARGS)
Datum
date_out(PG_FUNCTION_ARGS)
{
- DateADT date = PG_GETARG_DATEADT(0);
+ DateADT date = PG_GETARG_DATEADT(0);
char *result;
struct pg_tm tt,
*tm = &tt;
char buf[MAXDATELEN + 1];
- j2date(date +POSTGRES_EPOCH_JDATE,
+ j2date(date + POSTGRES_EPOCH_JDATE,
&(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));
EncodeDateOnly(tm, DateStyle, buf);
@@ -140,7 +140,7 @@ date_recv(PG_FUNCTION_ARGS)
Datum
date_send(PG_FUNCTION_ARGS)
{
- DateADT date = PG_GETARG_DATEADT(0);
+ DateADT date = PG_GETARG_DATEADT(0);
StringInfoData buf;
pq_begintypsend(&buf);
@@ -306,7 +306,7 @@ date2timestamptz(DateADT dateVal)
#ifdef HAVE_INT64_TIMESTAMP
result = dateVal * USECS_PER_DAY + tz * USECS_PER_SEC;
#else
- result = dateVal * (double)SECS_PER_DAY + tz;
+ result = dateVal * (double) SECS_PER_DAY + tz;
#endif
return result;
@@ -715,7 +715,7 @@ date_timestamp(PG_FUNCTION_ARGS)
Datum
timestamp_date(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
DateADT result;
struct pg_tm tt,
*tm = &tt;
@@ -797,11 +797,11 @@ abstime_date(PG_FUNCTION_ARGS)
case NOEND_ABSTIME:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert reserved abstime value to date")));
+ errmsg("cannot convert reserved abstime value to date")));
/*
- * pretend to drop through to make compiler think that result
- * will be set
+ * pretend to drop through to make compiler think that result will
+ * be set
*/
default:
@@ -821,7 +821,7 @@ Datum
date_text(PG_FUNCTION_ARGS)
{
/* Input is a Date, but may as well leave it in Datum form */
- Datum date = PG_GETARG_DATUM(0);
+ Datum date = PG_GETARG_DATUM(0);
text *result;
char *str;
int len;
@@ -914,11 +914,11 @@ time_in(PG_FUNCTION_ARGS)
* Convert a tm structure to a time data type.
*/
static int
-tm2time(struct pg_tm *tm, fsec_t fsec, TimeADT *result)
+tm2time(struct pg_tm * tm, fsec_t fsec, TimeADT *result)
{
#ifdef HAVE_INT64_TIMESTAMP
*result = ((((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec)
- * USECS_PER_SEC) + fsec;
+ * USECS_PER_SEC) + fsec;
#else
*result = ((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec + fsec;
#endif
@@ -931,7 +931,7 @@ tm2time(struct pg_tm *tm, fsec_t fsec, TimeADT *result)
* local time zone. If out of this range, leave as GMT. - tgl 97/05/27
*/
static int
-time2tm(TimeADT time, struct pg_tm *tm, fsec_t *fsec)
+time2tm(TimeADT time, struct pg_tm * tm, fsec_t *fsec)
{
#ifdef HAVE_INT64_TIMESTAMP
tm->tm_hour = time / USECS_PER_HOUR;
@@ -946,8 +946,8 @@ time2tm(TimeADT time, struct pg_tm *tm, fsec_t *fsec)
recalc:
trem = time;
- TMODULO(trem, tm->tm_hour, (double)SECS_PER_HOUR);
- TMODULO(trem, tm->tm_min, (double)SECS_PER_MINUTE);
+ TMODULO(trem, tm->tm_hour, (double) SECS_PER_HOUR);
+ TMODULO(trem, tm->tm_min, (double) SECS_PER_MINUTE);
TMODULO(trem, tm->tm_sec, 1.0);
trem = TIMEROUND(trem);
/* roundoff may need to propagate to higher-order fields */
@@ -989,6 +989,7 @@ Datum
time_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -1072,7 +1073,6 @@ AdjustTimeForTypmod(TimeADT *time, int32 typmod)
INT64CONST(5),
INT64CONST(0)
};
-
#else
/* note MAX_TIME_PRECISION differs in this case */
static const double TimeScales[MAX_TIME_PRECISION + 1] = {
@@ -1093,21 +1093,21 @@ AdjustTimeForTypmod(TimeADT *time, int32 typmod)
if (typmod >= 0 && typmod <= MAX_TIME_PRECISION)
{
/*
- * Note: this round-to-nearest code is not completely consistent
- * about rounding values that are exactly halfway between integral
- * values. On most platforms, rint() will implement
- * round-to-nearest-even, but the integer code always rounds up
- * (away from zero). Is it worth trying to be consistent?
+ * Note: this round-to-nearest code is not completely consistent about
+ * rounding values that are exactly halfway between integral values.
+ * On most platforms, rint() will implement round-to-nearest-even, but
+ * the integer code always rounds up (away from zero). Is it worth
+ * trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
if (*time >= INT64CONST(0))
*time = ((*time + TimeOffsets[typmod]) / TimeScales[typmod]) *
- TimeScales[typmod];
+ TimeScales[typmod];
else
*time = -((((-*time) + TimeOffsets[typmod]) / TimeScales[typmod]) *
- TimeScales[typmod]);
+ TimeScales[typmod]);
#else
- *time = rint((double) * time * TimeScales[typmod]) / TimeScales[typmod];
+ *time = rint((double) *time * TimeScales[typmod]) / TimeScales[typmod];
#endif
}
}
@@ -1208,8 +1208,8 @@ Datum
overlaps_time(PG_FUNCTION_ARGS)
{
/*
- * The arguments are TimeADT, but we leave them as generic Datums to
- * avoid dereferencing nulls (TimeADT is pass-by-reference!)
+ * The arguments are TimeADT, but we leave them as generic Datums to avoid
+ * dereferencing nulls (TimeADT is pass-by-reference!)
*/
Datum ts1 = PG_GETARG_DATUM(0);
Datum te1 = PG_GETARG_DATUM(1);
@@ -1226,9 +1226,9 @@ overlaps_time(PG_FUNCTION_ARGS)
(DatumGetTimeADT(t1) < DatumGetTimeADT(t2))
/*
- * If both endpoints of interval 1 are null, the result is null
- * (unknown). If just one endpoint is null, take ts1 as the non-null
- * one. Otherwise, take ts1 as the lesser endpoint.
+ * If both endpoints of interval 1 are null, the result is null (unknown).
+ * If just one endpoint is null, take ts1 as the non-null one. Otherwise,
+ * take ts1 as the lesser endpoint.
*/
if (ts1IsNull)
{
@@ -1276,8 +1276,8 @@ overlaps_time(PG_FUNCTION_ARGS)
if (TIMEADT_GT(ts1, ts2))
{
/*
- * This case is ts1 < te2 OR te1 < te2, which may look redundant
- * but in the presence of nulls it's not quite completely so.
+ * This case is ts1 < te2 OR te1 < te2, which may look redundant but
+ * in the presence of nulls it's not quite completely so.
*/
if (te2IsNull)
PG_RETURN_NULL();
@@ -1287,8 +1287,8 @@ overlaps_time(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
/*
- * If te1 is not null then we had ts1 <= te1 above, and we just
- * found ts1 >= te2, hence te1 >= te2.
+ * If te1 is not null then we had ts1 <= te1 above, and we just found
+ * ts1 >= te2, hence te1 >= te2.
*/
PG_RETURN_BOOL(false);
}
@@ -1303,8 +1303,8 @@ overlaps_time(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
/*
- * If te2 is not null then we had ts2 <= te2 above, and we just
- * found ts2 >= te1, hence te2 >= te1.
+ * If te2 is not null then we had ts2 <= te2 above, and we just found
+ * ts2 >= te1, hence te2 >= te1.
*/
PG_RETURN_BOOL(false);
}
@@ -1312,8 +1312,7 @@ overlaps_time(PG_FUNCTION_ARGS)
{
/*
* For ts1 = ts2 the spec says te1 <> te2 OR te1 = te2, which is a
- * rather silly way of saying "true if both are nonnull, else
- * null".
+ * rather silly way of saying "true if both are nonnull, else null".
*/
if (te1IsNull || te2IsNull)
PG_RETURN_NULL();
@@ -1330,7 +1329,7 @@ overlaps_time(PG_FUNCTION_ARGS)
Datum
timestamp_time(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
TimeADT result;
struct pg_tm tt,
*tm = &tt;
@@ -1351,7 +1350,7 @@ timestamp_time(PG_FUNCTION_ARGS)
* USECS_PER_DAY) - timestamp;
*/
result = ((((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec) *
- USECS_PER_SEC) + fsec;
+ USECS_PER_SEC) + fsec;
#else
result = ((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec + fsec;
#endif
@@ -1388,7 +1387,7 @@ timestamptz_time(PG_FUNCTION_ARGS)
* USECS_PER_DAY) - timestamp;
*/
result = ((((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec) *
- USECS_PER_SEC) + fsec;
+ USECS_PER_SEC) + fsec;
#else
result = ((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec + fsec;
#endif
@@ -1402,12 +1401,12 @@ timestamptz_time(PG_FUNCTION_ARGS)
Datum
datetime_timestamp(PG_FUNCTION_ARGS)
{
- DateADT date = PG_GETARG_DATEADT(0);
+ DateADT date = PG_GETARG_DATEADT(0);
TimeADT time = PG_GETARG_TIMEADT(1);
Timestamp result;
result = DatumGetTimestamp(DirectFunctionCall1(date_timestamp,
- DateADTGetDatum(date)));
+ DateADTGetDatum(date)));
result += time;
PG_RETURN_TIMESTAMP(result);
@@ -1461,8 +1460,8 @@ interval_time(PG_FUNCTION_ARGS)
}
#else
result = span->time;
- if (result >= (double)SECS_PER_DAY || result < 0)
- result -= floor(result / (double)SECS_PER_DAY) * (double)SECS_PER_DAY;
+ if (result >= (double) SECS_PER_DAY || result < 0)
+ result -= floor(result / (double) SECS_PER_DAY) * (double) SECS_PER_DAY;
#endif
PG_RETURN_TIMEADT(result);
@@ -1506,7 +1505,7 @@ time_pl_interval(PG_FUNCTION_ARGS)
TimeADT time1;
result = time + span->time;
- TMODULO(result, time1, (double)SECS_PER_DAY);
+ TMODULO(result, time1, (double) SECS_PER_DAY);
if (result < 0)
result += SECS_PER_DAY;
#endif
@@ -1533,7 +1532,7 @@ time_mi_interval(PG_FUNCTION_ARGS)
TimeADT time1;
result = time - span->time;
- TMODULO(result, time1, (double)SECS_PER_DAY);
+ TMODULO(result, time1, (double) SECS_PER_DAY);
if (result < 0)
result += SECS_PER_DAY;
#endif
@@ -1678,8 +1677,8 @@ time_part(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("\"time\" units \"%s\" not recognized",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
result = 0;
}
@@ -1698,7 +1697,7 @@ time_part(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("\"time\" units \"%s\" not recognized",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ PointerGetDatum(units))))));
result = 0;
}
@@ -1714,7 +1713,7 @@ time_part(PG_FUNCTION_ARGS)
* Convert a tm structure to a time data type.
*/
static int
-tm2timetz(struct pg_tm *tm, fsec_t fsec, int tz, TimeTzADT *result)
+tm2timetz(struct pg_tm * tm, fsec_t fsec, int tz, TimeTzADT *result)
{
#ifdef HAVE_INT64_TIMESTAMP
result->time = ((((tm->tm_hour * MINS_PER_HOUR + tm->tm_min) * SECS_PER_MINUTE) + tm->tm_sec) *
@@ -1787,6 +1786,7 @@ Datum
timetz_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -1831,7 +1831,7 @@ timetz_send(PG_FUNCTION_ARGS)
* Convert TIME WITH TIME ZONE data type to POSIX time structure.
*/
static int
-timetz2tm(TimeTzADT *time, struct pg_tm *tm, fsec_t *fsec, int *tzp)
+timetz2tm(TimeTzADT *time, struct pg_tm * tm, fsec_t *fsec, int *tzp)
{
#ifdef HAVE_INT64_TIMESTAMP
int64 trem = time->time;
@@ -1846,8 +1846,8 @@ timetz2tm(TimeTzADT *time, struct pg_tm *tm, fsec_t *fsec, int *tzp)
double trem = time->time;
recalc:
- TMODULO(trem, tm->tm_hour, (double)SECS_PER_HOUR);
- TMODULO(trem, tm->tm_min, (double)SECS_PER_MINUTE);
+ TMODULO(trem, tm->tm_hour, (double) SECS_PER_HOUR);
+ TMODULO(trem, tm->tm_min, (double) SECS_PER_MINUTE);
TMODULO(trem, tm->tm_sec, 1.0);
trem = TIMEROUND(trem);
/* roundoff may need to propagate to higher-order fields */
@@ -1995,8 +1995,8 @@ timetz_hash(PG_FUNCTION_ARGS)
/*
* Specify hash length as sizeof(double) + sizeof(int4), not as
- * sizeof(TimeTzADT), so that any garbage pad bytes in the structure
- * won't be included in the hash!
+ * sizeof(TimeTzADT), so that any garbage pad bytes in the structure won't
+ * be included in the hash!
*/
return hash_any((unsigned char *) key, sizeof(key->time) + sizeof(key->zone));
}
@@ -2052,7 +2052,7 @@ timetz_pl_interval(PG_FUNCTION_ARGS)
result->time += USECS_PER_DAY;
#else
result->time = time->time + span->time;
- TMODULO(result->time, time1.time, (double)SECS_PER_DAY);
+ TMODULO(result->time, time1.time, (double) SECS_PER_DAY);
if (result->time < 0)
result->time += SECS_PER_DAY;
#endif
@@ -2085,7 +2085,7 @@ timetz_mi_interval(PG_FUNCTION_ARGS)
result->time += USECS_PER_DAY;
#else
result->time = time->time - span->time;
- TMODULO(result->time, time1.time, (double)SECS_PER_DAY);
+ TMODULO(result->time, time1.time, (double) SECS_PER_DAY);
if (result->time < 0)
result->time += SECS_PER_DAY;
#endif
@@ -2105,8 +2105,8 @@ Datum
overlaps_timetz(PG_FUNCTION_ARGS)
{
/*
- * The arguments are TimeTzADT *, but we leave them as generic Datums
- * for convenience of notation --- and to avoid dereferencing nulls.
+ * The arguments are TimeTzADT *, but we leave them as generic Datums for
+ * convenience of notation --- and to avoid dereferencing nulls.
*/
Datum ts1 = PG_GETARG_DATUM(0);
Datum te1 = PG_GETARG_DATUM(1);
@@ -2123,9 +2123,9 @@ overlaps_timetz(PG_FUNCTION_ARGS)
DatumGetBool(DirectFunctionCall2(timetz_lt,t1,t2))
/*
- * If both endpoints of interval 1 are null, the result is null
- * (unknown). If just one endpoint is null, take ts1 as the non-null
- * one. Otherwise, take ts1 as the lesser endpoint.
+ * If both endpoints of interval 1 are null, the result is null (unknown).
+ * If just one endpoint is null, take ts1 as the non-null one. Otherwise,
+ * take ts1 as the lesser endpoint.
*/
if (ts1IsNull)
{
@@ -2173,8 +2173,8 @@ overlaps_timetz(PG_FUNCTION_ARGS)
if (TIMETZ_GT(ts1, ts2))
{
/*
- * This case is ts1 < te2 OR te1 < te2, which may look redundant
- * but in the presence of nulls it's not quite completely so.
+ * This case is ts1 < te2 OR te1 < te2, which may look redundant but
+ * in the presence of nulls it's not quite completely so.
*/
if (te2IsNull)
PG_RETURN_NULL();
@@ -2184,8 +2184,8 @@ overlaps_timetz(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
/*
- * If te1 is not null then we had ts1 <= te1 above, and we just
- * found ts1 >= te2, hence te1 >= te2.
+ * If te1 is not null then we had ts1 <= te1 above, and we just found
+ * ts1 >= te2, hence te1 >= te2.
*/
PG_RETURN_BOOL(false);
}
@@ -2200,8 +2200,8 @@ overlaps_timetz(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
/*
- * If te2 is not null then we had ts2 <= te2 above, and we just
- * found ts2 >= te1, hence te2 >= te1.
+ * If te2 is not null then we had ts2 <= te2 above, and we just found
+ * ts2 >= te1, hence te2 >= te1.
*/
PG_RETURN_BOOL(false);
}
@@ -2209,8 +2209,7 @@ overlaps_timetz(PG_FUNCTION_ARGS)
{
/*
* For ts1 = ts2 the spec says te1 <> te2 OR te1 = te2, which is a
- * rather silly way of saying "true if both are nonnull, else
- * null".
+ * rather silly way of saying "true if both are nonnull, else null".
*/
if (te1IsNull || te2IsNull)
PG_RETURN_NULL();
@@ -2297,14 +2296,14 @@ timestamptz_timetz(PG_FUNCTION_ARGS)
Datum
datetimetz_timestamptz(PG_FUNCTION_ARGS)
{
- DateADT date = PG_GETARG_DATEADT(0);
+ DateADT date = PG_GETARG_DATEADT(0);
TimeTzADT *time = PG_GETARG_TIMETZADT_P(1);
TimestampTz result;
#ifdef HAVE_INT64_TIMESTAMP
result = date * USECS_PER_DAY + time->time + time->zone * USECS_PER_SEC;
#else
- result = date * (double)SECS_PER_DAY + time->time + time->zone;
+ result = date * (double) SECS_PER_DAY + time->time + time->zone;
#endif
PG_RETURN_TIMESTAMP(result);
@@ -2355,8 +2354,8 @@ text_timetz(PG_FUNCTION_ARGS)
if (VARSIZE(str) - VARHDRSZ > MAXDATELEN)
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("invalid input syntax for type time with time zone: \"%s\"",
- VARDATA(str))));
+ errmsg("invalid input syntax for type time with time zone: \"%s\"",
+ VARDATA(str))));
sp = VARDATA(str);
dp = dstr;
@@ -2410,12 +2409,12 @@ timetz_part(PG_FUNCTION_ARGS)
case DTK_TZ_MINUTE:
result = -tz;
result /= SECS_PER_MINUTE;
- FMODULO(result, dummy, (double)SECS_PER_MINUTE);
+ FMODULO(result, dummy, (double) SECS_PER_MINUTE);
break;
case DTK_TZ_HOUR:
dummy = -tz;
- FMODULO(dummy, result, (double)SECS_PER_HOUR);
+ FMODULO(dummy, result, (double) SECS_PER_HOUR);
break;
case DTK_MICROSEC:
@@ -2460,9 +2459,9 @@ timetz_part(PG_FUNCTION_ARGS)
default:
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("\"time with time zone\" units \"%s\" not recognized",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ errmsg("\"time with time zone\" units \"%s\" not recognized",
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
result = 0;
}
@@ -2479,9 +2478,9 @@ timetz_part(PG_FUNCTION_ARGS)
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("\"time with time zone\" units \"%s\" not recognized",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ errmsg("\"time with time zone\" units \"%s\" not recognized",
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
result = 0;
}
@@ -2500,15 +2499,15 @@ timetz_zone(PG_FUNCTION_ARGS)
TimeTzADT *t = PG_GETARG_TIMETZADT_P(1);
TimeTzADT *result;
int tz;
- char tzname[TZ_STRLEN_MAX + 1];
- int len;
+ char tzname[TZ_STRLEN_MAX + 1];
+ int len;
pg_tz *tzp;
/*
- * Look up the requested timezone. First we look in the timezone
- * database (to handle cases like "America/New_York"), and if that
- * fails, we look in the date token table (to handle cases like "EST").
- */
+ * Look up the requested timezone. First we look in the timezone database
+ * (to handle cases like "America/New_York"), and if that fails, we look
+ * in the date token table (to handle cases like "EST").
+ */
len = Min(VARSIZE(zone) - VARHDRSZ, TZ_STRLEN_MAX);
memcpy(tzname, VARDATA(zone), len);
tzname[len] = '\0';
@@ -2516,7 +2515,7 @@ timetz_zone(PG_FUNCTION_ARGS)
if (tzp)
{
/* Get the offset-from-GMT that is valid today for the selected zone */
- pg_time_t now;
+ pg_time_t now;
struct pg_tm *tm;
now = time(NULL);
@@ -2546,7 +2545,7 @@ timetz_zone(PG_FUNCTION_ARGS)
}
result = (TimeTzADT *) palloc(sizeof(TimeTzADT));
-
+
#ifdef HAVE_INT64_TIMESTAMP
result->time = t->time + (t->zone - tz) * USECS_PER_SEC;
while (result->time < INT64CONST(0))
@@ -2582,7 +2581,7 @@ timetz_izone(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("\"interval\" time zone \"%s\" not valid",
DatumGetCString(DirectFunctionCall1(interval_out,
- PointerGetDatum(zone))))));
+ PointerGetDatum(zone))))));
#ifdef HAVE_INT64_TIMESTAMP
tz = -(zone->time / USECS_PER_SEC);
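/*
 * Illustrative sketch, not part of the patch above: the rounding note
 * reflowed in AdjustTimeForTypmod contrasts the integer path, which rounds
 * halfway cases away from zero, with rint(), which usually rounds halfway
 * cases to even.  The helper below shows the integer behavior for a positive
 * scale; its name is hypothetical, and the offset of half the scale (rounded
 * down) follows the TimeOffsets convention visible in the hunk.
 */
static int64
round_away_from_zero(int64 value, int64 scale)
{
	int64		offset = scale / 2;

	if (value >= 0)
		return ((value + offset) / scale) * scale;
	return -(((-value + offset) / scale) * scale);
}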
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
index faacdb2eba4..5b3fc46d9c2 100644
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.159 2005/10/14 11:47:57 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.160 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,16 +28,16 @@
static int DecodeNumber(int flen, char *field, bool haveTextMonth,
int fmask, int *tmask,
- struct pg_tm *tm, fsec_t *fsec, int *is2digits);
+ struct pg_tm * tm, fsec_t *fsec, int *is2digits);
static int DecodeNumberField(int len, char *str,
int fmask, int *tmask,
- struct pg_tm *tm, fsec_t *fsec, int *is2digits);
+ struct pg_tm * tm, fsec_t *fsec, int *is2digits);
static int DecodeTime(char *str, int fmask, int *tmask,
- struct pg_tm *tm, fsec_t *fsec);
+ struct pg_tm * tm, fsec_t *fsec);
static int DecodeTimezone(char *str, int *tzp);
static int DecodePosixTimezone(char *str, int *tzp);
static datetkn *datebsearch(char *key, datetkn *base, unsigned int nel);
-static int DecodeDate(char *str, int fmask, int *tmask, struct pg_tm *tm);
+static int DecodeDate(char *str, int fmask, int *tmask, struct pg_tm * tm);
static void TrimTrailingZeros(char *str);
@@ -308,8 +308,7 @@ static datetkn datetktbl[] = {
{"lhdt", DTZ, POS(44)}, /* Lord Howe Daylight Time, Australia */
{"lhst", TZ, POS(42)}, /* Lord Howe Standard Time, Australia */
{"ligt", TZ, POS(40)}, /* From Melbourne, Australia */
- {"lint", TZ, POS(56)}, /* Line Islands Time (Kiribati; +14
- * hours!) */
+ {"lint", TZ, POS(56)}, /* Line Islands Time (Kiribati; +14 hours!) */
{"lkt", TZ, POS(24)}, /* Lanka Time */
{"m", UNITS, DTK_MONTH}, /* "month" for ISO input */
{"magst", DTZ, POS(48)}, /* Magadan Summer Time */
@@ -681,7 +680,7 @@ j2day(int date)
* Get the transaction start time ("now()") broken down as a struct pg_tm.
*/
void
-GetCurrentDateTime(struct pg_tm *tm)
+GetCurrentDateTime(struct pg_tm * tm)
{
int tz;
fsec_t fsec;
@@ -698,7 +697,7 @@ GetCurrentDateTime(struct pg_tm *tm)
* including fractional seconds and timezone offset.
*/
void
-GetCurrentTimeUsec(struct pg_tm *tm, fsec_t *fsec, int *tzp)
+GetCurrentTimeUsec(struct pg_tm * tm, fsec_t *fsec, int *tzp)
{
int tz;
@@ -741,8 +740,8 @@ TrimTrailingZeros(char *str)
*
* timestr - the input string
* workbuf - workspace for field string storage. This must be
- * larger than the largest legal input for this datetime type --
- * some additional space will be needed to NUL terminate fields.
+ * larger than the largest legal input for this datetime type --
+ * some additional space will be needed to NUL terminate fields.
* buflen - the size of workbuf
* field[] - pointers to field strings are returned in this array
* ftype[] - field type indicators are returned in this array
@@ -776,10 +775,10 @@ ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
const char *bufend = workbuf + buflen;
/*
- * Set the character pointed-to by "bufptr" to "newchar", and
- * increment "bufptr". "end" gives the end of the buffer -- we
- * return an error if there is no space left to append a character
- * to the buffer. Note that "bufptr" is evaluated twice.
+ * Set the character pointed-to by "bufptr" to "newchar", and increment
+ * "bufptr". "end" gives the end of the buffer -- we return an error if
+ * there is no space left to append a character to the buffer. Note that
+ * "bufptr" is evaluated twice.
*/
#define APPEND_CHAR(bufptr, end, newchar) \
do \
@@ -835,8 +834,8 @@ ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
APPEND_CHAR(bufp, bufend, *cp++);
/*
- * insist that the delimiters match to get a
- * three-field date.
+ * insist that the delimiters match to get a three-field
+ * date.
*/
if (*cp == delim)
{
@@ -855,8 +854,8 @@ ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
}
/*
- * otherwise, number only and will determine year, month, day,
- * or concatenated fields later...
+ * otherwise, number only and will determine year, month, day, or
+ * concatenated fields later...
*/
else
ftype[nf] = DTK_NUMBER;
@@ -872,8 +871,7 @@ ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
}
/*
- * text? then date string, month, day of week, special, or
- * timezone
+ * text? then date string, month, day of week, special, or timezone
*/
else if (isalpha((unsigned char) *cp))
{
@@ -883,8 +881,8 @@ ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
APPEND_CHAR(bufp, bufend, pg_tolower((unsigned char) *cp++));
/*
- * Full date string with leading text month? Could also be a
- * POSIX time zone...
+ * Full date string with leading text month? Could also be a POSIX
+ * time zone...
*/
if (*cp == '-' || *cp == '/' || *cp == '.')
{
@@ -969,13 +967,12 @@ ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
*/
int
DecodeDateTime(char **field, int *ftype, int nf,
- int *dtype, struct pg_tm *tm, fsec_t *fsec, int *tzp)
+ int *dtype, struct pg_tm * tm, fsec_t *fsec, int *tzp)
{
int fmask = 0,
tmask,
type;
- int ptype = 0; /* "prefix type" for ISO y2001m02d04
- * format */
+ int ptype = 0; /* "prefix type" for ISO y2001m02d04 format */
int i;
int val;
int dterr;
@@ -1054,8 +1051,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
/*
* Starts with a digit but we already have a time
- * field? Then we are in trouble with a date and
- * time already...
+ * field? Then we are in trouble with a date and time
+ * already...
*/
if ((fmask & DTK_TIME_M) == DTK_TIME_M)
return DTERR_BAD_FORMAT;
@@ -1070,8 +1067,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
*cp = '\0';
/*
- * Then read the rest of the field as a
- * concatenated time
+ * Then read the rest of the field as a concatenated
+ * time
*/
dterr = DecodeNumberField(strlen(field[i]), field[i],
fmask,
@@ -1115,8 +1112,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
* DecodeTime()
*/
/* test for > 24:00:00 */
- if (tm->tm_hour > 24 ||
- (tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0)))
+ if (tm->tm_hour > 24 ||
+ (tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0)))
return DTERR_FIELD_OVERFLOW;
break;
@@ -1132,9 +1129,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
return dterr;
/*
- * Already have a time zone? Then maybe this is the
- * second field of a POSIX time: EST+3 (equivalent to
- * PST)
+ * Already have a time zone? Then maybe this is the second
+ * field of a POSIX time: EST+3 (equivalent to PST)
*/
if (i > 0 && (fmask & DTK_M(TZ)) != 0 &&
ftype[i - 1] == DTK_TZ &&
@@ -1278,7 +1274,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
case DTK_TIME:
/* previous field was "t" for ISO time */
dterr = DecodeNumberField(strlen(field[i]), field[i],
- (fmask | DTK_DATE_M),
+ (fmask | DTK_DATE_M),
&tmask, tm,
fsec, &is2digits);
if (dterr < 0)
@@ -1316,9 +1312,9 @@ DecodeDateTime(char **field, int *ftype, int nf,
else if (cp != NULL && flen - strlen(cp) > 2)
{
/*
- * Interpret as a concatenated date or time Set
- * the type field to allow decoding other fields
- * later. Example: 20011223 or 040506
+ * Interpret as a concatenated date or time Set the
+ * type field to allow decoding other fields later.
+ * Example: 20011223 or 040506
*/
dterr = DecodeNumberField(flen, field[i], fmask,
&tmask, tm,
@@ -1363,8 +1359,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
{
case DTK_CURRENT:
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"current\" is no longer supported")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("date/time value \"current\" is no longer supported")));
return DTERR_BAD_FORMAT;
break;
@@ -1380,7 +1376,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
*dtype = DTK_DATE;
GetCurrentDateTime(tm);
j2date(date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - 1,
- &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
+ &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
tm->tm_hour = 0;
tm->tm_min = 0;
tm->tm_sec = 0;
@@ -1400,7 +1396,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
*dtype = DTK_DATE;
GetCurrentDateTime(tm);
j2date(date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) + 1,
- &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
+ &tm->tm_year, &tm->tm_mon, &tm->tm_mday);
tm->tm_hour = 0;
tm->tm_min = 0;
tm->tm_sec = 0;
@@ -1425,8 +1421,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
case MONTH:
/*
- * already have a (numeric) month? then see if we
- * can substitute...
+ * already have a (numeric) month? then see if we can
+ * substitute...
*/
if ((fmask & DTK_M(MONTH)) && !haveTextMonth &&
!(fmask & DTK_M(DAY)) && tm->tm_mon >= 1 &&
@@ -1442,8 +1438,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
case DTZMOD:
/*
- * daylight savings time modifier (solves "MET
- * DST" syntax)
+ * daylight savings time modifier (solves "MET DST"
+ * syntax)
*/
tmask |= DTK_M(DTZ);
tm->tm_isdst = 1;
@@ -1455,8 +1451,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
case DTZ:
/*
- * set mask for TZ here _or_ check for DTZ later
- * when getting default timezone
+ * set mask for TZ here _or_ check for DTZ later when
+ * getting default timezone
*/
tmask |= DTK_M(TZ);
tm->tm_isdst = 1;
@@ -1497,9 +1493,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
case ISOTIME:
/*
- * This is a filler field "t" indicating that the
- * next field is time. Try to verify that this is
- * sensible.
+ * This is a filler field "t" indicating that the next
+ * field is time. Try to verify that this is sensible.
*/
tmask = 0;
@@ -1546,8 +1541,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("inconsistent use of year %04d and \"BC\"",
- tm->tm_year)));
+ errmsg("inconsistent use of year %04d and \"BC\"",
+ tm->tm_year)));
}
else if (is2digits)
{
@@ -1597,9 +1592,9 @@ DecodeDateTime(char **field, int *ftype, int nf,
}
/*
- * Check for valid day of month, now that we know for sure the
- * month and year. Note we don't use MD_FIELD_OVERFLOW here,
- * since it seems unlikely that "Feb 29" is a YMD-order error.
+ * Check for valid day of month, now that we know for sure the month
+ * and year. Note we don't use MD_FIELD_OVERFLOW here, since it seems
+ * unlikely that "Feb 29" is a YMD-order error.
*/
if (tm->tm_mday > day_tab[isleap(tm->tm_year)][tm->tm_mon - 1])
return DTERR_FIELD_OVERFLOW;
@@ -1608,8 +1603,8 @@ DecodeDateTime(char **field, int *ftype, int nf,
if (tzp != NULL && !(fmask & DTK_M(TZ)))
{
/*
- * daylight savings time modifier but no standard timezone?
- * then error
+ * daylight savings time modifier but no standard timezone? then
+ * error
*/
if (fmask & DTK_M(DTZMOD))
return DTERR_BAD_FORMAT;
@@ -1634,7 +1629,7 @@ DecodeDateTime(char **field, int *ftype, int nf,
* of mktime(), anyway.
*/
int
-DetermineTimeZoneOffset(struct pg_tm *tm, pg_tz *tzp)
+DetermineTimeZoneOffset(struct pg_tm * tm, pg_tz *tzp)
{
int date,
sec;
@@ -1658,15 +1653,15 @@ DetermineTimeZoneOffset(struct pg_tm *tm, pg_tz *tzp)
/*
* First, generate the pg_time_t value corresponding to the given
- * y/m/d/h/m/s taken as GMT time. If this overflows, punt and decide
- * the timezone is GMT. (We only need to worry about overflow on
- * machines where pg_time_t is 32 bits.)
+ * y/m/d/h/m/s taken as GMT time. If this overflows, punt and decide the
+ * timezone is GMT. (We only need to worry about overflow on machines
+ * where pg_time_t is 32 bits.)
*/
if (!IS_VALID_JULIAN(tm->tm_year, tm->tm_mon, tm->tm_mday))
goto overflow;
date = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) - UNIX_EPOCH_JDATE;
- day = ((pg_time_t) date) *SECS_PER_DAY;
+ day = ((pg_time_t) date) * SECS_PER_DAY;
if (day / SECS_PER_DAY != date)
goto overflow;
sec = tm->tm_sec + (tm->tm_min + tm->tm_hour * MINS_PER_HOUR) * SECS_PER_MINUTE;
@@ -1676,10 +1671,10 @@ DetermineTimeZoneOffset(struct pg_tm *tm, pg_tz *tzp)
goto overflow;
/*
- * Find the DST time boundary just before or following the target time.
- * We assume that all zones have GMT offsets less than 24 hours, and
- * that DST boundaries can't be closer together than 48 hours, so
- * backing up 24 hours and finding the "next" boundary will work.
+ * Find the DST time boundary just before or following the target time. We
+ * assume that all zones have GMT offsets less than 24 hours, and that DST
+ * boundaries can't be closer together than 48 hours, so backing up 24
+ * hours and finding the "next" boundary will work.
*/
prevtime = mytime - SECS_PER_DAY;
if (mytime < 0 && prevtime > 0)
@@ -1689,7 +1684,7 @@ DetermineTimeZoneOffset(struct pg_tm *tm, pg_tz *tzp)
&before_gmtoff, &before_isdst,
&boundary,
&after_gmtoff, &after_isdst,
- tzp);
+ tzp);
if (res < 0)
goto overflow; /* failure? */
@@ -1697,7 +1692,7 @@ DetermineTimeZoneOffset(struct pg_tm *tm, pg_tz *tzp)
{
/* Non-DST zone, life is simple */
tm->tm_isdst = before_isdst;
- return - (int) before_gmtoff;
+ return -(int) before_gmtoff;
}
/*
@@ -1722,24 +1717,25 @@ DetermineTimeZoneOffset(struct pg_tm *tm, pg_tz *tzp)
if (beforetime <= boundary && aftertime < boundary)
{
tm->tm_isdst = before_isdst;
- return - (int) before_gmtoff;
+ return -(int) before_gmtoff;
}
if (beforetime > boundary && aftertime >= boundary)
{
tm->tm_isdst = after_isdst;
- return - (int) after_gmtoff;
+ return -(int) after_gmtoff;
}
+
/*
- * It's an invalid or ambiguous time due to timezone transition.
- * Prefer the standard-time interpretation.
+ * It's an invalid or ambiguous time due to timezone transition. Prefer
+ * the standard-time interpretation.
*/
if (after_isdst == 0)
{
tm->tm_isdst = after_isdst;
- return - (int) after_gmtoff;
+ return -(int) after_gmtoff;
}
tm->tm_isdst = before_isdst;
- return - (int) before_gmtoff;
+ return -(int) before_gmtoff;
overflow:
/* Given date is out of range, so assume UTC */
@@ -1762,7 +1758,7 @@ overflow:
*/
int
DecodeTimeOnly(char **field, int *ftype, int nf,
- int *dtype, struct pg_tm *tm, fsec_t *fsec, int *tzp)
+ int *dtype, struct pg_tm * tm, fsec_t *fsec, int *tzp)
{
int fmask = 0,
tmask,
@@ -1792,8 +1788,8 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
case DTK_DATE:
/*
- * Time zone not allowed? Then should not accept dates or
- * time zones no matter what else!
+ * Time zone not allowed? Then should not accept dates or time
+ * zones no matter what else!
*/
if (tzp == NULL)
return DTERR_BAD_FORMAT;
@@ -1815,15 +1811,13 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
/*
* Starts with a digit but we already have a time
- * field? Then we are in trouble with time
- * already...
+ * field? Then we are in trouble with time already...
*/
if ((fmask & DTK_TIME_M) == DTK_TIME_M)
return DTERR_BAD_FORMAT;
/*
- * Should not get here and fail. Sanity check
- * only...
+ * Should not get here and fail. Sanity check only...
*/
if ((cp = strchr(field[i], '-')) == NULL)
return DTERR_BAD_FORMAT;
@@ -1835,8 +1829,8 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
*cp = '\0';
/*
- * Then read the rest of the field as a
- * concatenated time
+ * Then read the rest of the field as a concatenated
+ * time
*/
dterr = DecodeNumberField(strlen(field[i]), field[i],
(fmask | DTK_DATE_M),
@@ -1879,9 +1873,8 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
return dterr;
/*
- * Already have a time zone? Then maybe this is the
- * second field of a POSIX time: EST+3 (equivalent to
- * PST)
+ * Already have a time zone? Then maybe this is the second
+ * field of a POSIX time: EST+3 (equivalent to PST)
*/
if (i > 0 && (fmask & DTK_M(TZ)) != 0 &&
ftype[i - 1] == DTK_TZ &&
@@ -2025,10 +2018,10 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
tmask |= DTK_TIME_M;
#ifdef HAVE_INT64_TIMESTAMP
dt2time(time * USECS_PER_DAY,
- &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec);
+ &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec);
#else
dt2time(time * SECS_PER_DAY,
- &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec);
+ &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec);
#endif
}
break;
@@ -2036,7 +2029,7 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
case DTK_TIME:
/* previous field was "t" for ISO time */
dterr = DecodeNumberField(strlen(field[i]), field[i],
- (fmask | DTK_DATE_M),
+ (fmask | DTK_DATE_M),
&tmask, tm,
fsec, &is2digits);
if (dterr < 0)
@@ -2080,12 +2073,12 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
else if (flen - strlen(cp) > 2)
{
/*
- * Interpret as a concatenated date or time
- * Set the type field to allow decoding other
- * fields later. Example: 20011223 or 040506
+ * Interpret as a concatenated date or time Set
+ * the type field to allow decoding other fields
+ * later. Example: 20011223 or 040506
*/
dterr = DecodeNumberField(flen, field[i],
- (fmask | DTK_DATE_M),
+ (fmask | DTK_DATE_M),
&tmask, tm,
fsec, &is2digits);
if (dterr < 0)
@@ -2133,8 +2126,8 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
{
case DTK_CURRENT:
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"current\" is no longer supported")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("date/time value \"current\" is no longer supported")));
return DTERR_BAD_FORMAT;
break;
@@ -2162,8 +2155,8 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
case DTZMOD:
/*
- * daylight savings time modifier (solves "MET
- * DST" syntax)
+ * daylight savings time modifier (solves "MET DST"
+ * syntax)
*/
tmask |= DTK_M(DTZ);
tm->tm_isdst = 1;
@@ -2175,8 +2168,8 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
case DTZ:
/*
- * set mask for TZ here _or_ check for DTZ later
- * when getting default timezone
+ * set mask for TZ here _or_ check for DTZ later when
+ * getting default timezone
*/
tmask |= DTK_M(TZ);
tm->tm_isdst = 1;
@@ -2247,14 +2240,14 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
if (tm->tm_hour < 0 || tm->tm_min < 0 || tm->tm_min > 59 ||
tm->tm_sec < 0 || tm->tm_sec > 60 || tm->tm_hour > 24 ||
- /* test for > 24:00:00 */
- (tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0 ||
+ /* test for > 24:00:00 */
+ (tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0 ||
#ifdef HAVE_INT64_TIMESTAMP
- *fsec > INT64CONST(0))) ||
+ *fsec > INT64CONST(0))) ||
*fsec < INT64CONST(0) || *fsec >= USECS_PER_SEC)
return DTERR_FIELD_OVERFLOW;
#else
- *fsec > 0)) ||
+ *fsec > 0)) ||
*fsec < 0 || *fsec >= 1)
return DTERR_FIELD_OVERFLOW;
#endif
@@ -2269,8 +2262,7 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
*tmp = &tt;
/*
- * daylight savings time modifier but no standard timezone? then
- * error
+ * daylight savings time modifier but no standard timezone? then error
*/
if (fmask & DTK_M(DTZMOD))
return DTERR_BAD_FORMAT;
@@ -2300,7 +2292,7 @@ DecodeTimeOnly(char **field, int *ftype, int nf,
* Insist on a complete set of fields.
*/
static int
-DecodeDate(char *str, int fmask, int *tmask, struct pg_tm *tm)
+DecodeDate(char *str, int fmask, int *tmask, struct pg_tm * tm)
{
fsec_t fsec;
int nf = 0;
@@ -2458,7 +2450,7 @@ DecodeDate(char *str, int fmask, int *tmask, struct pg_tm *tm)
* can be used to represent time spans.
*/
static int
-DecodeTime(char *str, int fmask, int *tmask, struct pg_tm *tm, fsec_t *fsec)
+DecodeTime(char *str, int fmask, int *tmask, struct pg_tm * tm, fsec_t *fsec)
{
char *cp;
@@ -2522,7 +2514,7 @@ DecodeTime(char *str, int fmask, int *tmask, struct pg_tm *tm, fsec_t *fsec)
*/
static int
DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
- int *tmask, struct pg_tm *tm, fsec_t *fsec, int *is2digits)
+ int *tmask, struct pg_tm * tm, fsec_t *fsec, int *is2digits)
{
int val;
char *cp;
@@ -2539,8 +2531,8 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
double frac;
/*
- * More than two digits before decimal point? Then could be a date
- * or a run-together time: 2001.360 20011225 040506.789
+ * More than two digits before decimal point? Then could be a date or
+ * a run-together time: 2001.360 20011225 040506.789
*/
if (cp - str > 2)
{
@@ -2581,9 +2573,9 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
case 0:
/*
- * Nothing so far; make a decision about what we think the
- * input is. There used to be lots of heuristics here, but
- * the consensus now is to be paranoid. It *must* be either
+ * Nothing so far; make a decision about what we think the input
+ * is. There used to be lots of heuristics here, but the
+ * consensus now is to be paranoid. It *must* be either
* YYYY-MM-DD (with a more-than-two-digit year field), or the
* field order defined by DateOrder.
*/
@@ -2614,12 +2606,11 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
if (haveTextMonth)
{
/*
- * We are at the first numeric field of a date that
- * included a textual month name. We want to support the
- * variants MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as
- * unambiguous inputs. We will also accept MON-DD-YY or
- * DD-MON-YY in either DMY or MDY modes, as well as
- * YY-MON-DD in YMD mode.
+ * We are at the first numeric field of a date that included a
+ * textual month name. We want to support the variants
+ * MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as unambiguous
+ * inputs. We will also accept MON-DD-YY or DD-MON-YY in
+ * either DMY or MDY modes, as well as YY-MON-DD in YMD mode.
*/
if (flen >= 3 || DateOrder == DATEORDER_YMD)
{
@@ -2693,8 +2684,8 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
}
/*
- * When processing a year field, mark it for adjustment if it's only
- * one or two digits.
+ * When processing a year field, mark it for adjustment if it's only one
+ * or two digits.
*/
if (*tmask == DTK_M(YEAR))
*is2digits = (flen <= 2);
@@ -2712,13 +2703,13 @@ DecodeNumber(int flen, char *str, bool haveTextMonth, int fmask,
*/
static int
DecodeNumberField(int len, char *str, int fmask,
- int *tmask, struct pg_tm *tm, fsec_t *fsec, int *is2digits)
+ int *tmask, struct pg_tm * tm, fsec_t *fsec, int *is2digits)
{
char *cp;
/*
- * Have a decimal point? Then this is a date or something with a
- * seconds field...
+ * Have a decimal point? Then this is a date or something with a seconds
+ * field...
*/
if ((cp = strchr(str, '.')) != NULL)
{
@@ -2970,7 +2961,7 @@ DecodeSpecial(int field, char *lowtoken, int *val)
* preceding an hh:mm:ss field. - thomas 1998-04-30
*/
int
-DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct pg_tm *tm, fsec_t *fsec)
+DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct pg_tm * tm, fsec_t *fsec)
{
int is_before = FALSE;
char *cp;
@@ -3014,9 +3005,9 @@ DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct pg_tm *tm, f
Assert(*field[i] == '-' || *field[i] == '+');
/*
- * A single signed number ends up here, but will be
- * rejected by DecodeTime(). So, work this out to drop
- * through to DTK_NUMBER, which *can* tolerate this.
+ * A single signed number ends up here, but will be rejected
+ * by DecodeTime(). So, work this out to drop through to
+ * DTK_NUMBER, which *can* tolerate this.
*/
cp = field[i] + 1;
while (*cp != '\0' && *cp != ':' && *cp != '.')
@@ -3035,8 +3026,8 @@ DecodeInterval(char **field, int *ftype, int nf, int *dtype, struct pg_tm *tm, f
/*
* Set the next type to be a day, if units are not
- * specified. This handles the case of '1 +02:03'
- * since we are reading right to left.
+ * specified. This handles the case of '1 +02:03' since we
+ * are reading right to left.
*/
type = DTK_DAY;
tmask = DTK_M(TZ);
@@ -3366,7 +3357,7 @@ DateTimeParseError(int dterr, const char *str, const char *datatype)
(errcode(ERRCODE_DATETIME_FIELD_OVERFLOW),
errmsg("date/time field value out of range: \"%s\"",
str),
- errhint("Perhaps you need a different \"datestyle\" setting.")));
+ errhint("Perhaps you need a different \"datestyle\" setting.")));
break;
case DTERR_INTERVAL_OVERFLOW:
ereport(ERROR,
@@ -3376,9 +3367,9 @@ DateTimeParseError(int dterr, const char *str, const char *datatype)
break;
case DTERR_TZDISP_OVERFLOW:
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TIME_ZONE_DISPLACEMENT_VALUE),
- errmsg("time zone displacement out of range: \"%s\"",
- str)));
+ (errcode(ERRCODE_INVALID_TIME_ZONE_DISPLACEMENT_VALUE),
+ errmsg("time zone displacement out of range: \"%s\"",
+ str)));
break;
case DTERR_BAD_FORMAT:
default:
@@ -3424,7 +3415,7 @@ datebsearch(char *key, datetkn *base, unsigned int nel)
* Encode date as local time.
*/
int
-EncodeDateOnly(struct pg_tm *tm, int style, char *str)
+EncodeDateOnly(struct pg_tm * tm, int style, char *str)
{
if (tm->tm_mon < 1 || tm->tm_mon > MONTHS_PER_YEAR)
return -1;
@@ -3438,7 +3429,7 @@ EncodeDateOnly(struct pg_tm *tm, int style, char *str)
tm->tm_year, tm->tm_mon, tm->tm_mday);
else
sprintf(str, "%04d-%02d-%02d %s",
- -(tm->tm_year - 1), tm->tm_mon, tm->tm_mday, "BC");
+ -(tm->tm_year - 1), tm->tm_mon, tm->tm_mday, "BC");
break;
case USE_SQL_DATES:
@@ -3484,7 +3475,7 @@ EncodeDateOnly(struct pg_tm *tm, int style, char *str)
* Encode time fields only.
*/
int
-EncodeTimeOnly(struct pg_tm *tm, fsec_t fsec, int *tzp, int style, char *str)
+EncodeTimeOnly(struct pg_tm * tm, fsec_t fsec, int *tzp, int style, char *str)
{
if (tm->tm_hour < 0 || tm->tm_hour > HOURS_PER_DAY)
return -1;
@@ -3492,8 +3483,8 @@ EncodeTimeOnly(struct pg_tm *tm, fsec_t fsec, int *tzp, int style, char *str)
sprintf(str, "%02d:%02d", tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The fractional field widths
- * here should be equal to the larger of MAX_TIME_PRECISION and
+ * Print fractional seconds if any. The fractional field widths here
+ * should be equal to the larger of MAX_TIME_PRECISION and
* MAX_TIMESTAMP_PRECISION.
*/
if (fsec != 0)
@@ -3534,15 +3525,15 @@ EncodeTimeOnly(struct pg_tm *tm, fsec_t fsec, int *tzp, int style, char *str)
* European - dd/mm/yyyy
*/
int
-EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, char *str)
+EncodeDateTime(struct pg_tm * tm, fsec_t fsec, int *tzp, char **tzn, int style, char *str)
{
int day,
hour,
min;
/*
- * Why are we checking only the month field? Change this to an
- * assert... if (tm->tm_mon < 1 || tm->tm_mon > MONTHS_PER_YEAR) return -1;
+ * Why are we checking only the month field? Change this to an assert...
+ * if (tm->tm_mon < 1 || tm->tm_mon > MONTHS_PER_YEAR) return -1;
*/
Assert(tm->tm_mon >= 1 && tm->tm_mon <= MONTHS_PER_YEAR);
@@ -3556,11 +3547,11 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here
- * should be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here should
+ * be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD,
- * since it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD, since
+ * it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -3579,10 +3570,10 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
sprintf(str + strlen(str), ":%02d", tm->tm_sec);
/*
- * tzp == NULL indicates that we don't want *any* time zone
- * info in the output string. *tzn != NULL indicates that we
- * have alpha time zone info available. tm_isdst != -1
- * indicates that we have a valid time zone translation.
+ * tzp == NULL indicates that we don't want *any* time zone info
+ * in the output string. *tzn != NULL indicates that we have alpha
+ * time zone info available. tm_isdst != -1 indicates that we have
+ * a valid time zone translation.
*/
if (tzp != NULL && tm->tm_isdst >= 0)
{
@@ -3608,11 +3599,11 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here
- * should be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here should
+ * be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD,
- * since it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD, since
+ * it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -3656,11 +3647,11 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here
- * should be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here should
+ * be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD,
- * since it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD, since
+ * it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -3703,7 +3694,7 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
strncpy(str, days[tm->tm_wday], 3);
strcpy(str + 3, " ");
-
+
if (DateOrder == DATEORDER_DMY)
sprintf(str + 4, "%02d %3s", tm->tm_mday, months[tm->tm_mon - 1]);
else
@@ -3712,11 +3703,11 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
sprintf(str + 10, " %02d:%02d", tm->tm_hour, tm->tm_min);
/*
- * Print fractional seconds if any. The field widths here
- * should be at least equal to MAX_TIMESTAMP_PRECISION.
+ * Print fractional seconds if any. The field widths here should
+ * be at least equal to MAX_TIMESTAMP_PRECISION.
*
- * In float mode, don't print fractional seconds before 1 AD,
- * since it's unlikely there's any precision left ...
+ * In float mode, don't print fractional seconds before 1 AD, since
+ * it's unlikely there's any precision left ...
*/
#ifdef HAVE_INT64_TIMESTAMP
if (fsec != 0)
@@ -3735,7 +3726,7 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
sprintf(str + strlen(str), ":%02d", tm->tm_sec);
sprintf(str + strlen(str), " %04d",
- (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1));
+ (tm->tm_year > 0) ? tm->tm_year : -(tm->tm_year - 1));
if (tzp != NULL && tm->tm_isdst >= 0)
{
@@ -3745,10 +3736,9 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
{
/*
* We have a time zone, but no string version. Use the
- * numeric form, but be sure to include a leading
- * space to avoid formatting something which would be
- * rejected by the date/time parser later. - thomas
- * 2001-10-19
+ * numeric form, but be sure to include a leading space to
+ * avoid formatting something which would be rejected by
+ * the date/time parser later. - thomas 2001-10-19
*/
hour = -(*tzp / SECS_PER_HOUR);
min = (abs(*tzp) / MINS_PER_HOUR) % MINS_PER_HOUR;
@@ -3774,7 +3764,7 @@ EncodeDateTime(struct pg_tm *tm, fsec_t fsec, int *tzp, char **tzn, int style, c
* - thomas 1998-04-30
*/
int
-EncodeInterval(struct pg_tm *tm, fsec_t fsec, int style, char *str)
+EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str)
{
int is_before = FALSE;
int is_nonzero = FALSE;
@@ -3782,9 +3772,8 @@ EncodeInterval(struct pg_tm *tm, fsec_t fsec, int style, char *str)
/*
* The sign of year and month are guaranteed to match, since they are
- * stored internally as "month". But we'll need to check for is_before
- * and is_nonzero when determining the signs of hour/minute/seconds
- * fields.
+ * stored internally as "month". But we'll need to check for is_before and
+ * is_nonzero when determining the signs of hour/minute/seconds fields.
*/
switch (style)
{
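
The datetime.c hunks above are almost entirely pgindent comment reflow and whitespace; the encoding logic itself is unchanged. One detail worth noting from the visible lines is how BC years are printed: internally year 0 means 1 BC, year -1 means 2 BC, and so on, which is why EncodeDateOnly and EncodeDateTime emit -(tm->tm_year - 1) for non-positive years. A minimal standalone sketch of that convention (illustrative only, not the backend code):

#include <stdio.h>

/* Mirrors the -(tm_year - 1) trick visible in the diff above:
 * there is no year zero, so internal year 0 displays as 1 BC,
 * internal year -1 as 2 BC, and so on. */
static void print_year(int internal_year)
{
    if (internal_year > 0)
        printf("%04d AD\n", internal_year);
    else
        printf("%04d BC\n", -(internal_year - 1));
}

int main(void)
{
    print_year(2005);   /* 2005 AD */
    print_year(0);      /* 0001 BC */
    print_year(-44);    /* 0045 BC */
    return 0;
}
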
diff --git a/src/backend/utils/adt/datum.c b/src/backend/utils/adt/datum.c
index 03e02278d11..0b229e20593 100644
--- a/src/backend/utils/adt/datum.c
+++ b/src/backend/utils/adt/datum.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/datum.c,v 1.30 2004/12/31 22:01:21 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/datum.c,v 1.31 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -179,11 +179,10 @@ datumIsEqual(Datum value1, Datum value2, bool typByVal, int typLen)
if (typByVal)
{
/*
- * just compare the two datums. NOTE: just comparing "len" bytes
- * will not do the work, because we do not know how these bytes
- * are aligned inside the "Datum". We assume instead that any
- * given datatype is consistent about how it fills extraneous bits
- * in the Datum.
+ * just compare the two datums. NOTE: just comparing "len" bytes will
+ * not do the work, because we do not know how these bytes are aligned
+ * inside the "Datum". We assume instead that any given datatype is
+ * consistent about how it fills extraneous bits in the Datum.
*/
res = (value1 == value2);
}
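
The reindented comment in datumIsEqual() explains why the by-value branch compares whole Datums instead of memcmp()ing "len" bytes: a short value's position inside the word is not portable, and the comparison relies on each datatype filling the unused bits consistently. A hedged sketch of just that branch, with Datum modelled here as a pointer-sized integer (the real typedef lives in postgres.h):

#include <stdint.h>
#include <stdbool.h>

typedef uintptr_t Datum;    /* illustrative stand-in: by-value data travels in one machine word */

/* By-value case from the comment above: compare the whole word.
 * Comparing only 'len' bytes would depend on where the value sits
 * inside the word, which is exactly what the comment warns against. */
static bool byval_datum_equal(Datum a, Datum b)
{
    return a == b;
}

By-reference types take the other branch of the real function and compare the pointed-to bytes instead.
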
diff --git a/src/backend/utils/adt/dbsize.c b/src/backend/utils/adt/dbsize.c
index c8917b145c4..4a0ac3dcfb1 100644
--- a/src/backend/utils/adt/dbsize.c
+++ b/src/backend/utils/adt/dbsize.c
@@ -5,7 +5,7 @@
* Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/dbsize.c,v 1.5 2005/09/29 22:04:36 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/dbsize.c,v 1.6 2005/10/15 02:49:28 momjian Exp $
*
*/
@@ -31,22 +31,22 @@ static int64
db_dir_size(const char *path)
{
int64 dirsize = 0;
- struct dirent *direntry;
- DIR *dirdesc;
- char filename[MAXPGPATH];
+ struct dirent *direntry;
+ DIR *dirdesc;
+ char filename[MAXPGPATH];
dirdesc = AllocateDir(path);
if (!dirdesc)
- return 0;
+ return 0;
while ((direntry = ReadDir(dirdesc, path)) != NULL)
{
- struct stat fst;
+ struct stat fst;
- if (strcmp(direntry->d_name, ".") == 0 ||
+ if (strcmp(direntry->d_name, ".") == 0 ||
strcmp(direntry->d_name, "..") == 0)
- continue;
+ continue;
snprintf(filename, MAXPGPATH, "%s/%s", path, direntry->d_name);
@@ -54,8 +54,8 @@ db_dir_size(const char *path)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not stat \"%s\": %m", filename)));
-
- dirsize += fst.st_size;
+
+ dirsize += fst.st_size;
}
FreeDir(dirdesc);
@@ -69,10 +69,10 @@ static int64
calculate_database_size(Oid dbOid)
{
int64 totalsize;
- DIR *dirdesc;
- struct dirent *direntry;
- char dirpath[MAXPGPATH];
- char pathname[MAXPGPATH];
+ DIR *dirdesc;
+ struct dirent *direntry;
+ char dirpath[MAXPGPATH];
+ char pathname[MAXPGPATH];
/* Shared storage in pg_global is not counted */
@@ -84,16 +84,16 @@ calculate_database_size(Oid dbOid)
snprintf(dirpath, MAXPGPATH, "%s/pg_tblspc", DataDir);
dirdesc = AllocateDir(dirpath);
if (!dirdesc)
- ereport(ERROR,
+ ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open tablespace directory \"%s\": %m",
dirpath)));
while ((direntry = ReadDir(dirdesc, dirpath)) != NULL)
{
- if (strcmp(direntry->d_name, ".") == 0 ||
+ if (strcmp(direntry->d_name, ".") == 0 ||
strcmp(direntry->d_name, "..") == 0)
- continue;
+ continue;
snprintf(pathname, MAXPGPATH, "%s/pg_tblspc/%s/%u",
DataDir, direntry->d_name, dbOid);
@@ -104,7 +104,7 @@ calculate_database_size(Oid dbOid)
/* Complain if we found no trace of the DB at all */
if (!totalsize)
- ereport(ERROR,
+ ereport(ERROR,
(ERRCODE_UNDEFINED_DATABASE,
errmsg("database with OID %u does not exist", dbOid)));
@@ -114,7 +114,7 @@ calculate_database_size(Oid dbOid)
Datum
pg_database_size_oid(PG_FUNCTION_ARGS)
{
- Oid dbOid = PG_GETARG_OID(0);
+ Oid dbOid = PG_GETARG_OID(0);
PG_RETURN_INT64(calculate_database_size(dbOid));
}
@@ -122,8 +122,8 @@ pg_database_size_oid(PG_FUNCTION_ARGS)
Datum
pg_database_size_name(PG_FUNCTION_ARGS)
{
- Name dbName = PG_GETARG_NAME(0);
- Oid dbOid = get_database_oid(NameStr(*dbName));
+ Name dbName = PG_GETARG_NAME(0);
+ Oid dbOid = get_database_oid(NameStr(*dbName));
if (!OidIsValid(dbOid))
ereport(ERROR,
@@ -141,16 +141,16 @@ pg_database_size_name(PG_FUNCTION_ARGS)
static int64
calculate_tablespace_size(Oid tblspcOid)
{
- char tblspcPath[MAXPGPATH];
- char pathname[MAXPGPATH];
- int64 totalsize=0;
- DIR *dirdesc;
- struct dirent *direntry;
+ char tblspcPath[MAXPGPATH];
+ char pathname[MAXPGPATH];
+ int64 totalsize = 0;
+ DIR *dirdesc;
+ struct dirent *direntry;
if (tblspcOid == DEFAULTTABLESPACE_OID)
- snprintf(tblspcPath, MAXPGPATH, "%s/base", DataDir);
+ snprintf(tblspcPath, MAXPGPATH, "%s/base", DataDir);
else if (tblspcOid == GLOBALTABLESPACE_OID)
- snprintf(tblspcPath, MAXPGPATH, "%s/global", DataDir);
+ snprintf(tblspcPath, MAXPGPATH, "%s/global", DataDir);
else
snprintf(tblspcPath, MAXPGPATH, "%s/pg_tblspc/%u", DataDir, tblspcOid);
@@ -164,11 +164,11 @@ calculate_tablespace_size(Oid tblspcOid)
while ((direntry = ReadDir(dirdesc, tblspcPath)) != NULL)
{
- struct stat fst;
+ struct stat fst;
- if (strcmp(direntry->d_name, ".") == 0 ||
+ if (strcmp(direntry->d_name, ".") == 0 ||
strcmp(direntry->d_name, "..") == 0)
- continue;
+ continue;
snprintf(pathname, MAXPGPATH, "%s/%s", tblspcPath, direntry->d_name);
@@ -178,29 +178,29 @@ calculate_tablespace_size(Oid tblspcOid)
errmsg("could not stat \"%s\": %m", pathname)));
if (fst.st_mode & S_IFDIR)
- totalsize += db_dir_size(pathname);
-
- totalsize += fst.st_size;
+ totalsize += db_dir_size(pathname);
+
+ totalsize += fst.st_size;
}
FreeDir(dirdesc);
-
+
return totalsize;
}
Datum
pg_tablespace_size_oid(PG_FUNCTION_ARGS)
{
- Oid tblspcOid = PG_GETARG_OID(0);
-
+ Oid tblspcOid = PG_GETARG_OID(0);
+
PG_RETURN_INT64(calculate_tablespace_size(tblspcOid));
}
Datum
pg_tablespace_size_name(PG_FUNCTION_ARGS)
{
- Name tblspcName = PG_GETARG_NAME(0);
- Oid tblspcOid = get_tablespace_oid(NameStr(*tblspcName));
+ Name tblspcName = PG_GETARG_NAME(0);
+ Oid tblspcOid = get_tablespace_oid(NameStr(*tblspcName));
if (!OidIsValid(tblspcOid))
ereport(ERROR,
@@ -226,22 +226,22 @@ calculate_relation_size(RelFileNode *rfn)
Assert(OidIsValid(rfn->spcNode));
if (rfn->spcNode == DEFAULTTABLESPACE_OID)
- snprintf(dirpath, MAXPGPATH, "%s/base/%u", DataDir, rfn->dbNode);
+ snprintf(dirpath, MAXPGPATH, "%s/base/%u", DataDir, rfn->dbNode);
else if (rfn->spcNode == GLOBALTABLESPACE_OID)
- snprintf(dirpath, MAXPGPATH, "%s/global", DataDir);
+ snprintf(dirpath, MAXPGPATH, "%s/global", DataDir);
else
- snprintf(dirpath, MAXPGPATH, "%s/pg_tblspc/%u/%u",
+ snprintf(dirpath, MAXPGPATH, "%s/pg_tblspc/%u/%u",
DataDir, rfn->spcNode, rfn->dbNode);
- for (segcount = 0; ; segcount++)
+ for (segcount = 0;; segcount++)
{
struct stat fst;
if (segcount == 0)
- snprintf(pathname, MAXPGPATH, "%s/%u",
+ snprintf(pathname, MAXPGPATH, "%s/%u",
dirpath, rfn->relNode);
else
- snprintf(pathname, MAXPGPATH, "%s/%u.%u",
+ snprintf(pathname, MAXPGPATH, "%s/%u.%u",
dirpath, rfn->relNode, segcount);
if (stat(pathname, &fst) < 0)
@@ -262,7 +262,7 @@ calculate_relation_size(RelFileNode *rfn)
Datum
pg_relation_size_oid(PG_FUNCTION_ARGS)
{
- Oid relOid=PG_GETARG_OID(0);
+ Oid relOid = PG_GETARG_OID(0);
Relation rel;
int64 size;
@@ -282,12 +282,12 @@ pg_relation_size_name(PG_FUNCTION_ARGS)
RangeVar *relrv;
Relation rel;
int64 size;
-
- relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
+
+ relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
rel = relation_openrv(relrv, AccessShareLock);
-
+
size = calculate_relation_size(&(rel->rd_node));
-
+
relation_close(rel, AccessShareLock);
PG_RETURN_INT64(size);
@@ -295,9 +295,9 @@ pg_relation_size_name(PG_FUNCTION_ARGS)
/*
- * Compute the on-disk size of files for the relation according to the
- * stat function, optionally including heap data, index data, and/or
- * toast data.
+ * Compute the on-disk size of files for the relation according to the
+ * stat function, optionally including heap data, index data, and/or
+ * toast data.
*/
static int64
calculate_total_relation_size(Oid Relid)
@@ -317,7 +317,7 @@ calculate_total_relation_size(Oid Relid)
if (heapRel->rd_rel->relhasindex)
{
/* recursively include any dependent indexes */
- List *index_oids = RelationGetIndexList(heapRel);
+ List *index_oids = RelationGetIndexList(heapRel);
foreach(cell, index_oids)
{
@@ -344,13 +344,13 @@ calculate_total_relation_size(Oid Relid)
}
/*
- * Compute on-disk size of files for 'relation' including
- * heap data, index data, and toasted data.
+ * Compute on-disk size of files for 'relation' including
+ * heap data, index data, and toasted data.
*/
Datum
pg_total_relation_size_oid(PG_FUNCTION_ARGS)
{
- Oid relid = PG_GETARG_OID(0);
+ Oid relid = PG_GETARG_OID(0);
PG_RETURN_INT64(calculate_total_relation_size(relid));
}
@@ -361,10 +361,10 @@ pg_total_relation_size_name(PG_FUNCTION_ARGS)
text *relname = PG_GETARG_TEXT_P(0);
RangeVar *relrv;
Oid relid;
-
- relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
+
+ relrv = makeRangeVarFromNameList(textToQualifiedNameList(relname));
relid = RangeVarGetRelid(relrv, false);
-
+
PG_RETURN_INT64(calculate_total_relation_size(relid));
}
@@ -374,35 +374,35 @@ pg_total_relation_size_name(PG_FUNCTION_ARGS)
Datum
pg_size_pretty(PG_FUNCTION_ARGS)
{
- int64 size = PG_GETARG_INT64(0);
- char *result = palloc(50 + VARHDRSZ);
- int64 limit = 10 * 1024;
- int64 mult = 1;
+ int64 size = PG_GETARG_INT64(0);
+ char *result = palloc(50 + VARHDRSZ);
+ int64 limit = 10 * 1024;
+ int64 mult = 1;
if (size < limit * mult)
- snprintf(VARDATA(result), 50, INT64_FORMAT " bytes", size);
+ snprintf(VARDATA(result), 50, INT64_FORMAT " bytes", size);
else
{
mult *= 1024;
if (size < limit * mult)
- snprintf(VARDATA(result), 50, INT64_FORMAT " kB",
- (size + mult / 2) / mult);
+ snprintf(VARDATA(result), 50, INT64_FORMAT " kB",
+ (size + mult / 2) / mult);
else
{
mult *= 1024;
if (size < limit * mult)
- snprintf(VARDATA(result), 50, INT64_FORMAT " MB",
+ snprintf(VARDATA(result), 50, INT64_FORMAT " MB",
(size + mult / 2) / mult);
else
{
mult *= 1024;
if (size < limit * mult)
- snprintf(VARDATA(result), 50, INT64_FORMAT " GB",
+ snprintf(VARDATA(result), 50, INT64_FORMAT " GB",
(size + mult / 2) / mult);
else
{
- mult *= 1024;
- snprintf(VARDATA(result), 50, INT64_FORMAT " TB",
+ mult *= 1024;
+ snprintf(VARDATA(result), 50, INT64_FORMAT " TB",
(size + mult / 2) / mult);
}
}
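
The dbsize.c changes are indentation cleanup, but the pg_size_pretty() cascade they reformat is easy to misread in diff form: the unit switches once the value reaches 10240 of the current unit (limit = 10 * 1024), and each step rounds to the nearest unit with (size + mult / 2) / mult. A standalone sketch of the same arithmetic, with simplified buffer handling (the real function builds a text datum):

#include <stdio.h>
#include <stdint.h>

static void size_pretty(int64_t size, char *buf, size_t buflen)
{
    const int64_t limit = 10 * 1024;
    int64_t     mult = 1;
    const char *units[] = {"bytes", "kB", "MB", "GB", "TB"};
    int         u = 0;

    /* climb units while the value is still >= 10240 of the current unit; TB is the cap */
    while (u < 4 && size >= limit * mult)
    {
        mult *= 1024;
        u++;
    }
    if (u == 0)
        snprintf(buf, buflen, "%lld %s", (long long) size, units[u]);
    else
        snprintf(buf, buflen, "%lld %s",
                 (long long) ((size + mult / 2) / mult), units[u]);
}

int main(void)
{
    char buf[64];

    size_pretty(10239, buf, sizeof(buf));       /* "10239 bytes" */
    printf("%s\n", buf);
    size_pretty(10240, buf, sizeof(buf));       /* "10 kB" */
    printf("%s\n", buf);
    size_pretty(123456789, buf, sizeof(buf));   /* "118 MB" */
    printf("%s\n", buf);
    return 0;
}
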
diff --git a/src/backend/utils/adt/encode.c b/src/backend/utils/adt/encode.c
index 659263230ff..1f23a8419ee 100644
--- a/src/backend/utils/adt/encode.c
+++ b/src/backend/utils/adt/encode.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/encode.c,v 1.15 2005/09/24 17:53:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/encode.c,v 1.16 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -175,7 +175,7 @@ hex_decode(const char *src, unsigned len, char *dst)
if (s >= srcend)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid hexadecimal data: odd number of digits")));
+ errmsg("invalid hexadecimal data: odd number of digits")));
v2 = get_hex(*s++);
*p++ = v1 | v2;
@@ -428,8 +428,8 @@ esc_decode(const char *src, unsigned srclen, char *dst)
else
{
/*
- * One backslash, not followed by ### valid octal. Should
- * never get here, since esc_dec_len does same check.
+ * One backslash, not followed by ### valid octal. Should never
+ * get here, since esc_dec_len does same check.
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
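
The encode.c hunk only reflows text, but the "odd number of digits" error it touches comes from the pairwise structure of hex decoding: each output byte needs two input nibbles, so running out of input after the first nibble of a pair is the error case. An illustrative decoder in that spirit, with error handling reduced to a return code (the backend ereport()s, and the real hex_decode also differs in detail):

#include <stddef.h>

static int get_hex_nibble(char c)
{
    if (c >= '0' && c <= '9') return c - '0';
    if (c >= 'a' && c <= 'f') return c - 'a' + 10;
    if (c >= 'A' && c <= 'F') return c - 'A' + 10;
    return -1;                      /* invalid digit */
}

static int hex_decode_sketch(const char *src, size_t len, unsigned char *dst)
{
    const char *s = src, *srcend = src + len;
    unsigned char *p = dst;

    while (s < srcend)
    {
        int v1, v2;

        v1 = get_hex_nibble(*s++);
        if (s >= srcend)
            return -1;              /* odd number of digits */
        v2 = get_hex_nibble(*s++);
        if (v1 < 0 || v2 < 0)
            return -1;              /* invalid hexadecimal digit */
        *p++ = (unsigned char) ((v1 << 4) | v2);
    }
    return (int) (p - dst);         /* bytes produced */
}
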
diff --git a/src/backend/utils/adt/float.c b/src/backend/utils/adt/float.c
index c943ee2c71d..fb37e36624e 100644
--- a/src/backend/utils/adt/float.c
+++ b/src/backend/utils/adt/float.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/float.c,v 1.114 2005/04/06 23:56:07 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/float.c,v 1.115 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -235,11 +235,11 @@ CheckFloat8Val(double val)
if (fabs(val) > FLOAT8_MAX)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("type \"double precision\" value out of range: overflow")));
+ errmsg("type \"double precision\" value out of range: overflow")));
if (val != 0.0 && fabs(val) < FLOAT8_MIN)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("type \"double precision\" value out of range: underflow")));
+ errmsg("type \"double precision\" value out of range: underflow")));
}
/*
@@ -258,15 +258,15 @@ float4in(PG_FUNCTION_ARGS)
char *endptr;
/*
- * endptr points to the first character _after_ the sequence we
- * recognized as a valid floating point number. orig_num points to the
- * original input string.
+ * endptr points to the first character _after_ the sequence we recognized
+ * as a valid floating point number. orig_num points to the original input
+ * string.
*/
orig_num = num;
/*
- * Check for an empty-string input to begin with, to avoid the
- * vagaries of strtod() on different platforms.
+ * Check for an empty-string input to begin with, to avoid the vagaries of
+ * strtod() on different platforms.
*/
if (*num == '\0')
ereport(ERROR,
@@ -285,10 +285,9 @@ float4in(PG_FUNCTION_ARGS)
if (endptr == num || errno != 0)
{
/*
- * C99 requires that strtod() accept NaN and [-]Infinity, but not
- * all platforms support that yet (and some accept them but set
- * ERANGE anyway...) Therefore, we check for these inputs
- * ourselves.
+ * C99 requires that strtod() accept NaN and [-]Infinity, but not all
+ * platforms support that yet (and some accept them but set ERANGE
+ * anyway...) Therefore, we check for these inputs ourselves.
*/
if (pg_strncasecmp(num, "NaN", 3) == 0)
{
@@ -320,9 +319,9 @@ float4in(PG_FUNCTION_ARGS)
else
{
/*
- * Many versions of Solaris have a bug wherein strtod sets endptr
- * to point one byte beyond the end of the string when given "inf"
- * or "infinity".
+ * Many versions of Solaris have a bug wherein strtod sets endptr to
+ * point one byte beyond the end of the string when given "inf" or
+ * "infinity".
*/
if (endptr != num && endptr[-1] == '\0')
endptr--;
@@ -341,8 +340,8 @@ float4in(PG_FUNCTION_ARGS)
orig_num)));
/*
- * if we get here, we have a legal double, still need to check to see
- * if it's a legal float4
+ * if we get here, we have a legal double, still need to check to see if
+ * it's a legal float4
*/
if (!isinf(val))
CheckFloat4Val(val);
@@ -426,21 +425,21 @@ float8in(PG_FUNCTION_ARGS)
char *endptr;
/*
- * endptr points to the first character _after_ the sequence we
- * recognized as a valid floating point number. orig_num points to the
- * original input string.
+ * endptr points to the first character _after_ the sequence we recognized
+ * as a valid floating point number. orig_num points to the original input
+ * string.
*/
orig_num = num;
/*
- * Check for an empty-string input to begin with, to avoid the
- * vagaries of strtod() on different platforms.
+ * Check for an empty-string input to begin with, to avoid the vagaries of
+ * strtod() on different platforms.
*/
if (*num == '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- orig_num)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ orig_num)));
/* skip leading whitespace */
while (*num != '\0' && isspace((unsigned char) *num))
@@ -453,10 +452,9 @@ float8in(PG_FUNCTION_ARGS)
if (endptr == num || errno != 0)
{
/*
- * C99 requires that strtod() accept NaN and [-]Infinity, but not
- * all platforms support that yet (and some accept them but set
- * ERANGE anyway...) Therefore, we check for these inputs
- * ourselves.
+ * C99 requires that strtod() accept NaN and [-]Infinity, but not all
+ * platforms support that yet (and some accept them but set ERANGE
+ * anyway...) Therefore, we check for these inputs ourselves.
*/
if (pg_strncasecmp(num, "NaN", 3) == 0)
{
@@ -476,21 +474,21 @@ float8in(PG_FUNCTION_ARGS)
else if (errno == ERANGE)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("\"%s\" is out of range for type double precision",
- orig_num)));
+ errmsg("\"%s\" is out of range for type double precision",
+ orig_num)));
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- orig_num)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ orig_num)));
}
#ifdef HAVE_BUGGY_SOLARIS_STRTOD
else
{
/*
- * Many versions of Solaris have a bug wherein strtod sets endptr
- * to point one byte beyond the end of the string when given "inf"
- * or "infinity".
+ * Many versions of Solaris have a bug wherein strtod sets endptr to
+ * point one byte beyond the end of the string when given "inf" or
+ * "infinity".
*/
if (endptr != num && endptr[-1] == '\0')
endptr--;
@@ -505,8 +503,8 @@ float8in(PG_FUNCTION_ARGS)
if (*endptr != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- orig_num)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ orig_num)));
if (!isinf(val))
CheckFloat8Val(val);
@@ -860,9 +858,9 @@ static int
float4_cmp_internal(float4 a, float4 b)
{
/*
- * We consider all NANs to be equal and larger than any non-NAN. This
- * is somewhat arbitrary; the important thing is to have a consistent
- * sort order.
+ * We consider all NANs to be equal and larger than any non-NAN. This is
+ * somewhat arbitrary; the important thing is to have a consistent sort
+ * order.
*/
if (isnan(a))
{
@@ -956,9 +954,9 @@ static int
float8_cmp_internal(float8 a, float8 b)
{
/*
- * We consider all NANs to be equal and larger than any non-NAN. This
- * is somewhat arbitrary; the important thing is to have a consistent
- * sort order.
+ * We consider all NANs to be equal and larger than any non-NAN. This is
+ * somewhat arbitrary; the important thing is to have a consistent sort
+ * order.
*/
if (isnan(a))
{
@@ -1465,8 +1463,8 @@ dpow(PG_FUNCTION_ARGS)
float8 result;
/*
- * The SQL spec requires that we emit a particular SQLSTATE error code
- * for certain error conditions.
+ * The SQL spec requires that we emit a particular SQLSTATE error code for
+ * certain error conditions.
*/
if ((arg1 == 0 && arg2 < 0) ||
(arg1 < 0 && floor(arg2) != arg2))
@@ -1475,8 +1473,8 @@ dpow(PG_FUNCTION_ARGS)
errmsg("invalid argument for power function")));
/*
- * We must check both for errno getting set and for a NaN result, in
- * order to deal with the vagaries of different platforms...
+ * We must check both for errno getting set and for a NaN result, in order
+ * to deal with the vagaries of different platforms...
*/
errno = 0;
result = pow(arg1, arg2);
@@ -1504,9 +1502,9 @@ dexp(PG_FUNCTION_ARGS)
float8 result;
/*
- * We must check both for errno getting set and for a NaN result, in
- * order to deal with the vagaries of different platforms. Also, a
- * zero result implies unreported underflow.
+ * We must check both for errno getting set and for a NaN result, in order
+ * to deal with the vagaries of different platforms. Also, a zero result
+ * implies unreported underflow.
*/
errno = 0;
result = exp(arg1);
@@ -1534,8 +1532,8 @@ dlog1(PG_FUNCTION_ARGS)
float8 result;
/*
- * Emit particular SQLSTATE error codes for ln(). This is required by
- * the SQL standard.
+ * Emit particular SQLSTATE error codes for ln(). This is required by the
+ * SQL standard.
*/
if (arg1 == 0.0)
ereport(ERROR,
@@ -1563,9 +1561,9 @@ dlog10(PG_FUNCTION_ARGS)
float8 result;
/*
- * Emit particular SQLSTATE error codes for log(). The SQL spec
- * doesn't define log(), but it does define ln(), so it makes sense to
- * emit the same error code for an analogous error condition.
+ * Emit particular SQLSTATE error codes for log(). The SQL spec doesn't
+ * define log(), but it does define ln(), so it makes sense to emit the
+ * same error code for an analogous error condition.
*/
if (arg1 == 0.0)
ereport(ERROR,
@@ -1914,9 +1912,8 @@ float8_accum(PG_FUNCTION_ARGS)
/*
* If we're invoked by nodeAgg, we can cheat and modify our first
- * parameter in-place to reduce palloc overhead. Otherwise we
- * construct a new array with the updated transition data and
- * return it.
+ * parameter in-place to reduce palloc overhead. Otherwise we construct a
+ * new array with the updated transition data and return it.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
{
@@ -1937,7 +1934,7 @@ float8_accum(PG_FUNCTION_ARGS)
result = construct_array(transdatums, 3,
FLOAT8OID,
- sizeof(float8), false /* float8 byval */ , 'd');
+ sizeof(float8), false /* float8 byval */ , 'd');
PG_RETURN_ARRAYTYPE_P(result);
}
@@ -1968,9 +1965,8 @@ float4_accum(PG_FUNCTION_ARGS)
/*
* If we're invoked by nodeAgg, we can cheat and modify our first
- * parameter in-place to reduce palloc overhead. Otherwise we
- * construct a new array with the updated transition data and
- * return it.
+ * parameter in-place to reduce palloc overhead. Otherwise we construct a
+ * new array with the updated transition data and return it.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
{
@@ -1991,7 +1987,7 @@ float4_accum(PG_FUNCTION_ARGS)
result = construct_array(transdatums, 3,
FLOAT8OID,
- sizeof(float8), false /* float8 byval */ , 'd');
+ sizeof(float8), false /* float8 byval */ , 'd');
PG_RETURN_ARRAYTYPE_P(result);
}
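
The float.c comments being reflowed here describe the comparison rule used throughout the file: every NaN compares equal to every other NaN and greater than any non-NaN, which is arbitrary but gives sorting and index code a consistent total order. A self-contained version of that rule, following the same shape as float8_cmp_internal() (names illustrative):

#include <math.h>

static int float8_cmp_sketch(double a, double b)
{
    if (isnan(a))
    {
        if (isnan(b))
            return 0;       /* NaN = NaN */
        return 1;           /* NaN > any non-NaN */
    }
    if (isnan(b))
        return -1;          /* any non-NaN < NaN */
    if (a > b)
        return 1;
    if (a < b)
        return -1;
    return 0;
}

With plain C comparison operators a NaN would compare false against everything, so qsort- or btree-style code fed raw comparisons could loop or misplace entries; pinning NaNs to the top avoids that.
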
diff --git a/src/backend/utils/adt/format_type.c b/src/backend/utils/adt/format_type.c
index 0280196af9a..adbfb588580 100644
--- a/src/backend/utils/adt/format_type.c
+++ b/src/backend/utils/adt/format_type.c
@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/format_type.c,v 1.40 2005/03/29 00:17:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/format_type.c,v 1.41 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -138,12 +138,12 @@ format_type_internal(Oid type_oid, int32 typemod,
typeform = (Form_pg_type) GETSTRUCT(tuple);
/*
- * Check if it's an array (and not a domain --- we don't want to show
- * the substructure of a domain type). Fixed-length array types such
- * as "name" shouldn't get deconstructed either. As of Postgres 8.1,
- * rather than checking typlen we check the toast property, and don't
- * deconstruct "plain storage" array types --- this is because we don't
- * want to show oidvector as oid[].
+ * Check if it's an array (and not a domain --- we don't want to show the
+ * substructure of a domain type). Fixed-length array types such as
+ * "name" shouldn't get deconstructed either. As of Postgres 8.1, rather
+ * than checking typlen we check the toast property, and don't deconstruct
+ * "plain storage" array types --- this is because we don't want to show
+ * oidvector as oid[].
*/
array_base_type = typeform->typelem;
@@ -171,14 +171,14 @@ format_type_internal(Oid type_oid, int32 typemod,
is_array = false;
/*
- * See if we want to special-case the output for certain built-in
- * types. Note that these special cases should all correspond to
- * special productions in gram.y, to ensure that the type name will be
- * taken as a system type, not a user type of the same name.
+ * See if we want to special-case the output for certain built-in types.
+ * Note that these special cases should all correspond to special
+ * productions in gram.y, to ensure that the type name will be taken as a
+ * system type, not a user type of the same name.
*
* If we do not provide a special-case output here, the type name will be
- * handled the same way as a user type name --- in particular, it will
- * be double-quoted if it matches any lexer keyword. This behavior is
+ * handled the same way as a user type name --- in particular, it will be
+ * double-quoted if it matches any lexer keyword. This behavior is
* essential for some cases, such as types "bit" and "char".
*/
buf = NULL; /* flag for no special case */
@@ -193,8 +193,8 @@ format_type_internal(Oid type_oid, int32 typemod,
{
/*
* bit with typmod -1 is not the same as BIT, which means
- * BIT(1) per SQL spec. Report it as the quoted typename
- * so that parser will not assign a bogus typmod.
+ * BIT(1) per SQL spec. Report it as the quoted typename so
+ * that parser will not assign a bogus typmod.
*/
}
else
@@ -212,9 +212,9 @@ format_type_internal(Oid type_oid, int32 typemod,
else if (typemod_given)
{
/*
- * bpchar with typmod -1 is not the same as CHARACTER,
- * which means CHARACTER(1) per SQL spec. Report it as
- * bpchar so that parser will not assign a bogus typmod.
+ * bpchar with typmod -1 is not the same as CHARACTER, which
+ * means CHARACTER(1) per SQL spec. Report it as bpchar so
+ * that parser will not assign a bogus typmod.
*/
}
else
@@ -382,9 +382,9 @@ format_type_internal(Oid type_oid, int32 typemod,
{
/*
* Default handling: report the name as it appears in the catalog.
- * Here, we must qualify the name if it is not visible in the
- * search path, and we must double-quote it if it's not a standard
- * identifier or if it matches any keyword.
+ * Here, we must qualify the name if it is not visible in the search
+ * path, and we must double-quote it if it's not a standard identifier
+ * or if it matches any keyword.
*/
char *nspname;
char *typname;
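
The format_type.c comments above spell out a subtle three-way choice for types like "bit" and "bpchar": an explicit typmod prints as the SQL keyword with its length, no typmod prints as the bare keyword, but a caller that asked for typmod output and got -1 must see the quoted catalog name, because re-parsing bare BIT or CHARACTER would silently attach an implicit (1). A hedged sketch of that decision for "bit" only; the real format_type_internal() derives everything from the catalogs and handles typmod encoding per type:

#include <stdio.h>

static const char *format_bit_sketch(int typemod, int typemod_given,
                                     char *buf, size_t buflen)
{
    if (typemod_given && typemod >= 0)
    {
        snprintf(buf, buflen, "bit(%d)", typemod);  /* length assumed to sit directly in typmod here */
        return buf;
    }
    if (typemod_given)
        return "\"bit\"";   /* typmod wanted but unknown: quote so the parser keeps typmod -1 */
    return "bit";
}
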
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
index 1e3553816d7..90e940e7b9c 100644
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -1,7 +1,7 @@
/* -----------------------------------------------------------------------
* formatting.c
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.99 2005/08/18 13:43:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.100 2005/10/15 02:49:28 momjian Exp $
*
*
* Portions Copyright (c) 1999-2005, PostgreSQL Global Development Group
@@ -135,9 +135,9 @@ typedef struct
{
const char *name; /* keyword */
int len; /* keyword length */
- int (*action) (int arg, char *inout, /* action for keyword */
- int suf, bool is_to_char, bool is_interval,
- FormatNode *node, void *data);
+ int (*action) (int arg, char *inout, /* action for keyword */
+ int suf, bool is_to_char, bool is_interval,
+ FormatNode *node, void *data);
int id; /* keyword id */
bool isitdigit; /* is expected output/input digit */
} KeyWord;
@@ -252,7 +252,7 @@ static char *numth[] = {"st", "nd", "rd", "th", NULL};
* Flags for DCH version
* ----------
*/
-static bool DCH_global_fx = false;
+static bool DCH_global_fx = false;
/* ----------
@@ -379,7 +379,7 @@ typedef struct
q,
j,
us,
- yysz; /* is it YY or YYYY ? */
+ yysz; /* is it YY or YYYY ? */
} TmFromChar;
#define ZERO_tmfc(_X) memset(_X, 0, sizeof(TmFromChar))
@@ -442,17 +442,17 @@ do { \
errmsg("invalid format specification for an interval value"), \
errhint("Intervals are not tied to specific calendar dates."))); \
} while(0)
-
+
/*****************************************************************************
* KeyWords definition & action
*****************************************************************************/
-static int dch_global(int arg, char *inout, int suf, bool is_to_char,
- bool is_interval, FormatNode *node, void *data);
-static int dch_time(int arg, char *inout, int suf, bool is_to_char,
- bool is_interval, FormatNode *node, void *data);
-static int dch_date(int arg, char *inout, int suf, bool is_to_char,
- bool is_interval, FormatNode *node, void *data);
+static int dch_global(int arg, char *inout, int suf, bool is_to_char,
+ bool is_interval, FormatNode *node, void *data);
+static int dch_time(int arg, char *inout, int suf, bool is_to_char,
+ bool is_interval, FormatNode *node, void *data);
+static int dch_date(int arg, char *inout, int suf, bool is_to_char,
+ bool is_interval, FormatNode *node, void *data);
/* ----------
* Suffixes:
@@ -803,7 +803,7 @@ static const KeyWord NUM_keywords[] = {
* KeyWords index for DATE-TIME version
* ----------
*/
-static const int DCH_index[KeyWord_INDEX_SIZE] = {
+static const int DCH_index[KeyWord_INDEX_SIZE] = {
/*
0 1 2 3 4 5 6 7 8 9
*/
@@ -827,7 +827,7 @@ static const int DCH_index[KeyWord_INDEX_SIZE] = {
* KeyWords index for NUMBER version
* ----------
*/
-static const int NUM_index[KeyWord_INDEX_SIZE] = {
+static const int NUM_index[KeyWord_INDEX_SIZE] = {
/*
0 1 2 3 4 5 6 7 8 9
*/
@@ -871,8 +871,7 @@ typedef struct NUMProc
*number_p, /* pointer to current number position */
*inout, /* in / out buffer */
*inout_p, /* pointer to current inout position */
- *last_relevant, /* last relevant number after decimal
- * point */
+ *last_relevant, /* last relevant number after decimal point */
*L_negative_sign, /* Locale */
*L_positive_sign,
@@ -887,13 +886,13 @@ typedef struct NUMProc
* ----------
*/
static const KeyWord *index_seq_search(char *str, const KeyWord *kw,
- const int *index);
+ const int *index);
static KeySuffix *suff_search(char *str, KeySuffix *suf, int type);
static void NUMDesc_prepare(NUMDesc *num, FormatNode *n);
static void parse_format(FormatNode *node, char *str, const KeyWord *kw,
KeySuffix *suf, const int *index, int ver, NUMDesc *Num);
static char *DCH_processor(FormatNode *node, char *inout, bool is_to_char,
- bool is_interval, void *data);
+ bool is_interval, void *data);
#ifdef DEBUG_TO_FROM_CHAR
static void dump_index(const KeyWord *k, const int *index);
@@ -909,7 +908,7 @@ static char *str_tolower(char *buff);
/* static int is_acdc(char *str, int *len); */
static int seq_search(char *name, char **array, int type, int max, int *len);
static void do_to_timestamp(text *date_txt, text *fmt,
- struct pg_tm *tm, fsec_t *fsec);
+ struct pg_tm * tm, fsec_t *fsec);
static char *fill_str(char *str, int c, int max);
static FormatNode *NUM_cache(int len, NUMDesc *Num, char *pars_str, bool *shouldFree);
static char *int_to_roman(int number);
@@ -1047,7 +1046,7 @@ NUMDesc_prepare(NUMDesc *num, FormatNode *n)
NUM_cache_remove(last_NUMCacheEntry);
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("cannot use \"V\" and decimal point together")));
+ errmsg("cannot use \"V\" and decimal point together")));
}
num->flag |= NUM_F_DECIMAL;
break;
@@ -1152,7 +1151,7 @@ NUMDesc_prepare(NUMDesc *num, FormatNode *n)
NUM_cache_remove(last_NUMCacheEntry);
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("cannot use \"V\" and decimal point together")));
+ errmsg("cannot use \"V\" and decimal point together")));
}
num->flag |= NUM_F_MULTI;
break;
@@ -1324,11 +1323,11 @@ DCH_processor(FormatNode *node, char *inout, bool is_to_char,
if (!is_to_char && *s == '\0')
/*
- * The input string is shorter than format picture, so it's
- * good time to break this loop...
+ * The input string is shorter than format picture, so it's good
+ * time to break this loop...
*
- * Note: this isn't relevant for TO_CHAR mode, beacuse it use
- * 'inout' allocated by format picture length.
+ * Note: this isn't relevant for TO_CHAR mode, beacuse it use 'inout'
+ * allocated by format picture length.
*/
break;
@@ -1393,7 +1392,7 @@ dump_node(FormatNode *node, int max)
{
if (n->type == NODE_TYPE_ACTION)
elog(DEBUG_elog_output, "%d:\t NODE_TYPE_ACTION '%s'\t(%s,%s)",
- a, n->key->name, DUMP_THth(n->suffix), DUMP_FM(n->suffix));
+ a, n->key->name, DUMP_THth(n->suffix), DUMP_FM(n->suffix));
else if (n->type == NODE_TYPE_CHAR)
elog(DEBUG_elog_output, "%d:\t NODE_TYPE_CHAR '%c'", a, n->character);
else if (n->type == NODE_TYPE_END)
@@ -1578,8 +1577,8 @@ seq_search(char *name, char **array, int type, int max, int *len)
#ifdef DEBUG_TO_FROM_CHAR
/*
- * elog(DEBUG_elog_output, "N: %c, P: %c, A: %s (%s)", *n, *p,
- * *a, name);
+ * elog(DEBUG_elog_output, "N: %c, P: %c, A: %s (%s)", *n, *p, *a,
+ * name);
*/
#endif
if (*n != *p)
@@ -1637,7 +1636,7 @@ dump_index(const KeyWord *k, const int *index)
*/
static int
dch_global(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
- FormatNode *node, void *data)
+ FormatNode *node, void *data)
{
if (arg == DCH_FX)
DCH_global_fx = true;
@@ -1704,7 +1703,7 @@ strdigits_len(char *str)
*/
static int
dch_time(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
- FormatNode *node, void *data)
+ FormatNode *node, void *data)
{
char *p_inout = inout;
struct pg_tm *tm = NULL;
@@ -1727,7 +1726,7 @@ dch_time(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
if (is_to_char)
{
strcpy(inout, ((tm->tm_hour > 11
- && tm->tm_hour < HOURS_PER_DAY) ? P_M_STR : A_M_STR));
+ && tm->tm_hour < HOURS_PER_DAY) ? P_M_STR : A_M_STR));
return strlen(p_inout);
}
else
@@ -1747,7 +1746,7 @@ dch_time(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
if (is_to_char)
{
strcpy(inout, ((tm->tm_hour > 11
- && tm->tm_hour < HOURS_PER_DAY) ? PM_STR : AM_STR));
+ && tm->tm_hour < HOURS_PER_DAY) ? PM_STR : AM_STR));
return strlen(p_inout);
}
else
@@ -1767,7 +1766,7 @@ dch_time(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
if (is_to_char)
{
strcpy(inout, ((tm->tm_hour > 11
- && tm->tm_hour < HOURS_PER_DAY) ? p_m_STR : a_m_STR));
+ && tm->tm_hour < HOURS_PER_DAY) ? p_m_STR : a_m_STR));
return strlen(p_inout);
}
else
@@ -1787,7 +1786,7 @@ dch_time(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
if (is_to_char)
{
strcpy(inout, ((tm->tm_hour > 11
- && tm->tm_hour < HOURS_PER_DAY) ? pm_STR : am_STR));
+ && tm->tm_hour < HOURS_PER_DAY) ? pm_STR : am_STR));
return strlen(p_inout);
}
else
@@ -1925,15 +1924,13 @@ dch_time(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
}
/*
- * 25 is 0.25 and 250 is 0.25 too; 025 is 0.025 and not
- * 0.25
+ * 25 is 0.25 and 250 is 0.25 too; 025 is 0.025 and not 0.25
*/
tmfc->ms *= x == 1 ? 100 :
x == 2 ? 10 : 1;
/*
- * elog(DEBUG3, "X: %d, MS: %d, LEN: %d", x, tmfc->ms,
- * len);
+ * elog(DEBUG3, "X: %d, MS: %d, LEN: %d", x, tmfc->ms, len);
*/
return len + SKIP_THth(suf);
}
@@ -1974,8 +1971,7 @@ dch_time(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
x == 5 ? 10 : 1;
/*
- * elog(DEBUG3, "X: %d, US: %d, LEN: %d", x, tmfc->us,
- * len);
+ * elog(DEBUG3, "X: %d, US: %d, LEN: %d", x, tmfc->us, len);
*/
return len + SKIP_THth(suf);
}
@@ -2049,7 +2045,7 @@ do { \
*/
static int
dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
- FormatNode *node, void *data)
+ FormatNode *node, void *data)
{
char buff[DCH_CACHE_SIZE],
workbuff[32],
@@ -2069,8 +2065,8 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
tmfc = (TmFromChar *) data;
/*
- * In the FROM-char is not difference between "January" or "JANUARY"
- * or "january", all is before search convert to "first-upper". This
+ * In the FROM-char is not difference between "January" or "JANUARY" or
+ * "january", all is before search convert to "first-upper". This
* convention is used for MONTH, MON, DAY, DY
*/
if (!is_to_char)
@@ -2193,7 +2189,7 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
return strlen(p_inout);
case DCH_MON:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
if (!tm->tm_mon)
return -1;
strcpy(inout, months[tm->tm_mon - 1]);
@@ -2201,14 +2197,14 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
return strlen(p_inout);
case DCH_Mon:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
if (!tm->tm_mon)
return -1;
strcpy(inout, months[tm->tm_mon - 1]);
return strlen(p_inout);
case DCH_mon:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
if (!tm->tm_mon)
return -1;
strcpy(inout, months[tm->tm_mon - 1]);
@@ -2238,38 +2234,38 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
}
break;
case DCH_DAY:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
strcpy(workbuff, days[tm->tm_wday]);
sprintf(inout, "%*s", S_FM(suf) ? 0 : -9, str_toupper(workbuff));
return strlen(p_inout);
case DCH_Day:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
sprintf(inout, "%*s", S_FM(suf) ? 0 : -9, days[tm->tm_wday]);
return strlen(p_inout);
case DCH_day:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
sprintf(inout, "%*s", S_FM(suf) ? 0 : -9, days[tm->tm_wday]);
*inout = pg_tolower((unsigned char) *inout);
return strlen(p_inout);
case DCH_DY:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
strcpy(inout, days[tm->tm_wday]);
str_toupper(inout);
- return 3; /* truncate */
+ return 3; /* truncate */
case DCH_Dy:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
strcpy(inout, days[tm->tm_wday]);
- return 3; /* truncate */
+ return 3; /* truncate */
case DCH_dy:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
strcpy(inout, days[tm->tm_wday]);
*inout = pg_tolower((unsigned char) *inout);
- return 3; /* truncate */
+ return 3; /* truncate */
case DCH_DDD:
if (is_to_char)
@@ -2316,7 +2312,7 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
}
break;
case DCH_D:
- INVALID_FOR_INTERVAL;
+ INVALID_FOR_INTERVAL;
if (is_to_char)
{
sprintf(inout, "%d", tm->tm_wday + 1);
@@ -2357,7 +2353,7 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
if (is_to_char)
{
sprintf(inout, "%0*d", S_FM(suf) ? 0 : 2,
- date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday));
+ date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday));
if (S_THth(suf))
str_numth(p_inout, inout, S_TH_TYPE(suf));
return strlen(p_inout);
@@ -2447,17 +2443,17 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
arg == DCH_YYYY ?
ADJUST_YEAR(tm->tm_year, is_interval) :
ADJUST_YEAR(date2isoyear(
- tm->tm_year,
- tm->tm_mon,
- tm->tm_mday), is_interval));
+ tm->tm_year,
+ tm->tm_mon,
+ tm->tm_mday), is_interval));
else
sprintf(inout, "%d",
arg == DCH_YYYY ?
ADJUST_YEAR(tm->tm_year, is_interval) :
ADJUST_YEAR(date2isoyear(
- tm->tm_year,
- tm->tm_mon,
- tm->tm_mday), is_interval));
+ tm->tm_year,
+ tm->tm_mon,
+ tm->tm_mday), is_interval));
if (S_THth(suf))
str_numth(p_inout, inout, S_TH_TYPE(suf));
return strlen(p_inout);
@@ -2486,8 +2482,8 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
arg == DCH_YYY ?
ADJUST_YEAR(tm->tm_year, is_interval) :
ADJUST_YEAR(date2isoyear(tm->tm_year,
- tm->tm_mon, tm->tm_mday),
- is_interval));
+ tm->tm_mon, tm->tm_mday),
+ is_interval));
i = strlen(buff);
strcpy(inout, buff + (i - 3));
if (S_THth(suf))
@@ -2518,8 +2514,8 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
arg == DCH_YY ?
ADJUST_YEAR(tm->tm_year, is_interval) :
ADJUST_YEAR(date2isoyear(tm->tm_year,
- tm->tm_mon, tm->tm_mday),
- is_interval));
+ tm->tm_mon, tm->tm_mday),
+ is_interval));
i = strlen(buff);
strcpy(inout, buff + (i - 2));
if (S_THth(suf))
@@ -2531,8 +2527,8 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
sscanf(inout, "%02d", &tmfc->year);
/*
- * 2-digit year: '00' ... '69' = 2000 ... 2069 '70' ...
- * '99' = 1970 ... 1999
+ * 2-digit year: '00' ... '69' = 2000 ... 2069 '70' ... '99'
+ * = 1970 ... 1999
*/
if (tmfc->year < 70)
tmfc->year += 2000;
@@ -2550,8 +2546,8 @@ dch_date(int arg, char *inout, int suf, bool is_to_char, bool is_interval,
arg == DCH_Y ?
ADJUST_YEAR(tm->tm_year, is_interval) :
ADJUST_YEAR(date2isoyear(tm->tm_year,
- tm->tm_mon, tm->tm_mday),
- is_interval));
+ tm->tm_mon, tm->tm_mday),
+ is_interval));
i = strlen(buff);
strcpy(inout, buff + (i - 1));
if (S_THth(suf))
@@ -2751,8 +2747,8 @@ datetime_to_char_body(TmToChar *tmtc, text *fmt, bool is_interval)
result = palloc((fmt_len * DCH_MAX_ITEM_SIZ) + 1);
/*
- * Allocate new memory if format picture is bigger than static cache
- * and not use cache (call parser always)
+ * Allocate new memory if format picture is bigger than static cache and
+ * not use cache (call parser always)
*/
if (fmt_len > DCH_CACHE_SIZE)
{
@@ -2778,8 +2774,8 @@ datetime_to_char_body(TmToChar *tmtc, text *fmt, bool is_interval)
ent = DCH_cache_getnew(fmt_str);
/*
- * Not in the cache, must run parser and save a new
- * format-picture to the cache.
+ * Not in the cache, must run parser and save a new format-picture
+ * to the cache.
*/
parse_format(ent->format, fmt_str, DCH_keywords,
DCH_suff, DCH_index, DCH_TYPE, NULL);
@@ -2802,8 +2798,8 @@ datetime_to_char_body(TmToChar *tmtc, text *fmt, bool is_interval)
pfree(fmt_str);
/*
- * for result is allocated max memory, which current format-picture
- * needs, now it allocate result with real size
+ * for result is allocated max memory, which current format-picture needs,
+ * now it allocate result with real size
*/
if (result && *result)
{
@@ -2965,7 +2961,7 @@ to_date(PG_FUNCTION_ARGS)
*/
static void
do_to_timestamp(text *date_txt, text *fmt,
- struct pg_tm *tm, fsec_t *fsec)
+ struct pg_tm * tm, fsec_t *fsec)
{
FormatNode *format;
TmFromChar tmfc;
@@ -2990,8 +2986,8 @@ do_to_timestamp(text *date_txt, text *fmt,
*(fmt_str + fmt_len) = '\0';
/*
- * Allocate new memory if format picture is bigger than static
- * cache and not use cache (call parser always)
+ * Allocate new memory if format picture is bigger than static cache
+ * and not use cache (call parser always)
*/
if (fmt_len > DCH_CACHE_SIZE)
{
@@ -3059,8 +3055,8 @@ do_to_timestamp(text *date_txt, text *fmt,
DEBUG_TMFC(&tmfc);
/*
- * Convert values that user define for FROM_CHAR
- * (to_date/to_timestamp) to standard 'tm'
+ * Convert values that user define for FROM_CHAR (to_date/to_timestamp) to
+ * standard 'tm'
*/
if (tmfc.ssss)
{
@@ -3125,18 +3121,19 @@ do_to_timestamp(text *date_txt, text *fmt,
if (tmfc.year)
{
- if (tmfc.yysz==2 && tmfc.cc)
+ if (tmfc.yysz == 2 && tmfc.cc)
{
- /* CC and YY defined
- * why -[2000|1900]? See dch_date() DCH_YY code.
+ /*
+ * CC and YY defined why -[2000|1900]? See dch_date() DCH_YY code.
*/
- tm->tm_year = (tmfc.cc-1)*100 + (tmfc.year >= 2000 ? tmfc.year-2000 : tmfc.year-1900);
+ tm->tm_year = (tmfc.cc - 1) * 100 + (tmfc.year >= 2000 ? tmfc.year - 2000 : tmfc.year - 1900);
}
- else if (tmfc.yysz==1 && tmfc.cc)
+ else if (tmfc.yysz == 1 && tmfc.cc)
{
- /* CC and Y defined
+ /*
+ * CC and Y defined
*/
- tm->tm_year = (tmfc.cc-1)*100 + tmfc.year-2000;
+ tm->tm_year = (tmfc.cc - 1) * 100 + tmfc.year - 2000;
}
else
/* set year (and ignore CC if defined) */
@@ -3184,7 +3181,7 @@ do_to_timestamp(text *date_txt, text *fmt,
if (!tm->tm_year)
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("cannot calculate day of year without year information")));
+ errmsg("cannot calculate day of year without year information")));
y = ysum[isleap(tm->tm_year)];
@@ -3369,9 +3366,9 @@ NUM_cache(int len, NUMDesc *Num, char *pars_str, bool *shouldFree)
*(str + len) = '\0';
/*
- * Allocate new memory if format picture is bigger than static cache
- * and not use cache (call parser always). This branches sets
- * shouldFree to true, accordingly.
+ * Allocate new memory if format picture is bigger than static cache and
+ * not use cache (call parser always). This branches sets shouldFree to
+ * true, accordingly.
*/
if (len > NUM_CACHE_SIZE)
{
@@ -3402,8 +3399,8 @@ NUM_cache(int len, NUMDesc *Num, char *pars_str, bool *shouldFree)
ent = NUM_cache_getnew(str);
/*
- * Not in the cache, must run parser and save a new
- * format-picture to the cache.
+ * Not in the cache, must run parser and save a new format-picture
+ * to the cache.
*/
parse_format(ent->format, str, NUM_keywords,
NULL, NUM_index, NUM_TYPE, &ent->Num);
@@ -3591,18 +3588,18 @@ get_last_relevant_decnum(char *num)
static void
NUM_numpart_from_char(NUMProc *Np, int id, int plen)
{
- bool isread = FALSE;
-
+ bool isread = FALSE;
+
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, " --- scan start --- id=%s",
- (id==NUM_0 || id==NUM_9) ? "NUM_0/9" : id==NUM_DEC ? "NUM_DEC" : "???");
+ (id == NUM_0 || id == NUM_9) ? "NUM_0/9" : id == NUM_DEC ? "NUM_DEC" : "???");
#endif
if (*Np->inout_p == ' ')
Np->inout_p++;
#define OVERLOAD_TEST (Np->inout_p >= Np->inout + plen)
-#define AMOUNT_TEST(_s) (plen-(Np->inout_p-Np->inout) >= _s)
+#define AMOUNT_TEST(_s) (plen-(Np->inout_p-Np->inout) >= _s)
if (*Np->inout_p == ' ')
Np->inout_p++;
@@ -3613,13 +3610,13 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
/*
* read sign before number
*/
- if (*Np->number == ' ' && (id == NUM_0 || id == NUM_9 ) &&
- (Np->read_pre + Np->read_post)==0)
+ if (*Np->number == ' ' && (id == NUM_0 || id == NUM_9) &&
+ (Np->read_pre + Np->read_post) == 0)
{
#ifdef DEBUG_TO_FROM_CHAR
- elog(DEBUG_elog_output, "Try read sign (%c), locale positive: %s, negative: %s",
- *Np->inout_p, Np->L_positive_sign, Np->L_negative_sign);
+ elog(DEBUG_elog_output, "Try read sign (%c), locale positive: %s, negative: %s",
+ *Np->inout_p, Np->L_positive_sign, Np->L_negative_sign);
#endif
/*
@@ -3627,20 +3624,21 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
*/
if (IS_LSIGN(Np->Num) && Np->Num->lsign == NUM_LSIGN_PRE)
{
- int x=0;
+ int x = 0;
+
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Try read locale pre-sign (%c)", *Np->inout_p);
#endif
- if ((x = strlen(Np->L_negative_sign)) &&
+ if ((x = strlen(Np->L_negative_sign)) &&
AMOUNT_TEST(x) &&
- strncmp(Np->inout_p, Np->L_negative_sign, x)==0)
+ strncmp(Np->inout_p, Np->L_negative_sign, x) == 0)
{
Np->inout_p += x;
*Np->number = '-';
}
- else if ((x = strlen(Np->L_positive_sign)) &&
- AMOUNT_TEST(x) &&
- strncmp(Np->inout_p, Np->L_positive_sign, x)==0)
+ else if ((x = strlen(Np->L_positive_sign)) &&
+ AMOUNT_TEST(x) &&
+ strncmp(Np->inout_p, Np->L_positive_sign, x) == 0)
{
Np->inout_p += x;
*Np->number = '+';
@@ -3651,6 +3649,7 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Try read simple sign (%c)", *Np->inout_p);
#endif
+
/*
* simple + - < >
*/
@@ -3658,14 +3657,14 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
*Np->inout_p == '<'))
{
- *Np->number = '-'; /* set - */
+ *Np->number = '-'; /* set - */
Np->inout_p++;
}
else if (*Np->inout_p == '+')
{
- *Np->number = '+'; /* set + */
+ *Np->number = '+'; /* set + */
Np->inout_p++;
}
}
@@ -3673,11 +3672,11 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
if (OVERLOAD_TEST)
return;
-
+
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Scan for numbers (%c), current number: '%s'", *Np->inout_p, Np->number);
#endif
-
+
/*
* read digit
*/
@@ -3696,13 +3695,14 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
Np->read_pre++;
isread = TRUE;
-
+
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Read digit (%c)", *Np->inout_p);
#endif
- /*
- * read decimal point
- */
+
+ /*
+ * read decimal point
+ */
}
else if (IS_DECIMAL(Np->Num) && Np->read_dec == FALSE)
{
@@ -3726,7 +3726,7 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
elog(DEBUG_elog_output, "Try read locale point (%c)",
*Np->inout_p);
#endif
- if (x && AMOUNT_TEST(x) && strncmp(Np->inout_p, Np->decimal, x)==0)
+ if (x && AMOUNT_TEST(x) && strncmp(Np->inout_p, Np->decimal, x) == 0)
{
Np->inout_p += x - 1;
*Np->number_p = '.';
@@ -3739,69 +3739,68 @@ NUM_numpart_from_char(NUMProc *Np, int id, int plen)
if (OVERLOAD_TEST)
return;
-
+
/*
* Read sign behind "last" number
*
- * We need sign detection because determine exact position of
- * post-sign is difficult:
+ * We need sign detection because determine exact position of post-sign is
+ * difficult:
*
- * FM9999.9999999S -> 123.001-
- * 9.9S -> .5-
- * FM9.999999MI -> 5.01-
+ * FM9999.9999999S -> 123.001- 9.9S -> .5- FM9.999999MI
+ * -> 5.01-
*/
if (*Np->number == ' ' && Np->read_pre + Np->read_post > 0)
{
/*
- * locale sign (NUM_S) is always anchored behind a last number, if:
- * - locale sign expected
- * - last read char was NUM_0/9 or NUM_DEC
- * - and next char is not digit
- */
- if (IS_LSIGN(Np->Num) && isread &&
- (Np->inout_p+1) <= Np->inout + plen &&
- !isdigit((unsigned char) *(Np->inout_p+1)))
+ * locale sign (NUM_S) is always anchored behind a last number, if: -
+ * locale sign expected - last read char was NUM_0/9 or NUM_DEC - and
+ * next char is not digit
+ */
+ if (IS_LSIGN(Np->Num) && isread &&
+ (Np->inout_p + 1) <= Np->inout + plen &&
+ !isdigit((unsigned char) *(Np->inout_p + 1)))
{
- int x;
- char *tmp = Np->inout_p++;
-
+ int x;
+ char *tmp = Np->inout_p++;
+
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Try read locale post-sign (%c)", *Np->inout_p);
#endif
- if ((x = strlen(Np->L_negative_sign)) &&
+ if ((x = strlen(Np->L_negative_sign)) &&
AMOUNT_TEST(x) &&
- strncmp(Np->inout_p, Np->L_negative_sign, x)==0)
+ strncmp(Np->inout_p, Np->L_negative_sign, x) == 0)
{
- Np->inout_p += x-1; /* -1 .. NUM_processor() do inout_p++ */
+ Np->inout_p += x - 1; /* -1 .. NUM_processor() do inout_p++ */
*Np->number = '-';
}
- else if ((x = strlen(Np->L_positive_sign)) &&
- AMOUNT_TEST(x) &&
- strncmp(Np->inout_p, Np->L_positive_sign, x)==0)
+ else if ((x = strlen(Np->L_positive_sign)) &&
+ AMOUNT_TEST(x) &&
+ strncmp(Np->inout_p, Np->L_positive_sign, x) == 0)
{
- Np->inout_p += x-1; /* -1 .. NUM_processor() do inout_p++ */
+ Np->inout_p += x - 1; /* -1 .. NUM_processor() do inout_p++ */
*Np->number = '+';
}
if (*Np->number == ' ')
/* no sign read */
Np->inout_p = tmp;
}
-
+
/*
* try read non-locale sign, it's happen only if format is not exact
* and we cannot determine sign position of MI/PL/SG, an example:
*
- * FM9.999999MI -> 5.01-
+ * FM9.999999MI -> 5.01-
*
- * if (.... && IS_LSIGN(Np->Num)==FALSE) prevents read wrong formats
- * like to_number('1 -', '9S') where sign is not anchored to last number.
+ * if (.... && IS_LSIGN(Np->Num)==FALSE) prevents read wrong formats like
+ * to_number('1 -', '9S') where sign is not anchored to last number.
*/
- else if (isread==FALSE && IS_LSIGN(Np->Num)==FALSE &&
- (IS_PLUS(Np->Num) || IS_MINUS(Np->Num)))
+ else if (isread == FALSE && IS_LSIGN(Np->Num) == FALSE &&
+ (IS_PLUS(Np->Num) || IS_MINUS(Np->Num)))
{
#ifdef DEBUG_TO_FROM_CHAR
elog(DEBUG_elog_output, "Try read simple post-sign (%c)", *Np->inout_p);
#endif
+
/*
* simple + -
*/
@@ -3848,8 +3847,8 @@ NUM_numpart_to_char(NUMProc *Np, int id)
Np->num_in = FALSE;
/*
- * Write sign if real number will write to output Note:
- * IS_PREDEC_SPACE() handle "9.9" --> " .1"
+ * Write sign if real number will write to output Note: IS_PREDEC_SPACE()
+ * handle "9.9" --> " .1"
*/
if (Np->sign_wrote == FALSE &&
(Np->num_curr >= Np->num_pre || (IS_ZERO(Np->Num) && Np->Num->zero_start == Np->num_curr)) &&
@@ -4032,7 +4031,7 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
Np->inout = inout;
Np->last_relevant = NULL;
Np->read_post = 0;
- Np->read_pre = 0;
+ Np->read_pre = 0;
Np->read_dec = FALSE;
if (Np->Num->zero_start)
@@ -4114,8 +4113,8 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
if (IS_DECIMAL(Np->Num))
Np->last_relevant = get_last_relevant_decnum(
Np->number +
- ((Np->Num->zero_end - Np->num_pre > 0) ?
- Np->Num->zero_end - Np->num_pre : 0));
+ ((Np->Num->zero_end - Np->num_pre > 0) ?
+ Np->Num->zero_end - Np->num_pre : 0));
}
if (Np->sign_wrote == FALSE && Np->num_pre == 0)
@@ -4185,10 +4184,10 @@ NUM_processor(FormatNode *node, NUMDesc *Num, char *inout, char *number,
/*
* Create/reading digit/zero/blank/sing
*
- * 'NUM_S' note:
- * The locale sign is anchored to number and we read/write it
- * when we work with first or last number (NUM_0/NUM_9). This
- * is reason why NUM_S missing in follow switch().
+ * 'NUM_S' note: The locale sign is anchored to number and we
+ * read/write it when we work with first or last number
+ * (NUM_0/NUM_9). This is reason why NUM_S missing in follow
+ * switch().
*/
switch (n->key->id)
{
@@ -4497,7 +4496,7 @@ numeric_to_number(PG_FUNCTION_ARGS)
result = DirectFunctionCall3(numeric_in,
CStringGetDatum(numstr),
ObjectIdGetDatum(InvalidOid),
- Int32GetDatum(((precision << 16) | scale) + VARHDRSZ));
+ Int32GetDatum(((precision << 16) | scale) + VARHDRSZ));
pfree(numstr);
return result;
}
@@ -4536,7 +4535,7 @@ numeric_to_char(PG_FUNCTION_ARGS)
Int32GetDatum(0)));
numstr = orgnum =
int_to_roman(DatumGetInt32(DirectFunctionCall1(numeric_int4,
- NumericGetDatum(x))));
+ NumericGetDatum(x))));
pfree(x);
}
else
@@ -4546,16 +4545,16 @@ numeric_to_char(PG_FUNCTION_ARGS)
if (IS_MULTI(&Num))
{
Numeric a = DatumGetNumeric(DirectFunctionCall1(int4_numeric,
- Int32GetDatum(10)));
+ Int32GetDatum(10)));
Numeric b = DatumGetNumeric(DirectFunctionCall1(int4_numeric,
- Int32GetDatum(Num.multi)));
+ Int32GetDatum(Num.multi)));
x = DatumGetNumeric(DirectFunctionCall2(numeric_power,
NumericGetDatum(a),
NumericGetDatum(b)));
val = DatumGetNumeric(DirectFunctionCall2(numeric_mul,
- NumericGetDatum(value),
- NumericGetDatum(x)));
+ NumericGetDatum(value),
+ NumericGetDatum(x)));
pfree(x);
pfree(a);
pfree(b);
@@ -4639,7 +4638,7 @@ int4_to_char(PG_FUNCTION_ARGS)
else
{
orgnum = DatumGetCString(DirectFunctionCall1(int4out,
- Int32GetDatum(value)));
+ Int32GetDatum(value)));
}
len = strlen(orgnum);
@@ -4711,7 +4710,7 @@ int8_to_char(PG_FUNCTION_ARGS)
{
/* Currently don't support int8 conversion to roman... */
numstr = orgnum = int_to_roman(DatumGetInt32(
- DirectFunctionCall1(int84, Int64GetDatum(value))));
+ DirectFunctionCall1(int84, Int64GetDatum(value))));
}
else
{
@@ -4720,14 +4719,14 @@ int8_to_char(PG_FUNCTION_ARGS)
double multi = pow((double) 10, (double) Num.multi);
value = DatumGetInt64(DirectFunctionCall2(int8mul,
- Int64GetDatum(value),
- DirectFunctionCall1(dtoi8,
- Float8GetDatum(multi))));
+ Int64GetDatum(value),
+ DirectFunctionCall1(dtoi8,
+ Float8GetDatum(multi))));
Num.pre += Num.multi;
}
orgnum = DatumGetCString(DirectFunctionCall1(int8out,
- Int64GetDatum(value)));
+ Int64GetDatum(value)));
len = strlen(orgnum);
if (*orgnum == '-')
diff --git a/src/backend/utils/adt/genfile.c b/src/backend/utils/adt/genfile.c
index cbbf9ca1c37..06ff9afe032 100644
--- a/src/backend/utils/adt/genfile.c
+++ b/src/backend/utils/adt/genfile.c
@@ -5,11 +5,11 @@
*
*
* Copyright (c) 2004-2005, PostgreSQL Global Development Group
- *
+ *
* Author: Andreas Pflug <pgadmin@pse-consulting.de>
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/genfile.c,v 1.6 2005/08/29 19:39:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/genfile.c,v 1.7 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,10 +30,10 @@
#include "utils/memutils.h"
-typedef struct
+typedef struct
{
- char *location;
- DIR *dirdesc;
+ char *location;
+ DIR *dirdesc;
} directory_fctx;
@@ -46,9 +46,9 @@ typedef struct
static char *
check_and_make_absolute(text *arg)
{
- int input_len = VARSIZE(arg) - VARHDRSZ;
- char *filename = palloc(input_len + 1);
-
+ int input_len = VARSIZE(arg) - VARHDRSZ;
+ char *filename = palloc(input_len + 1);
+
memcpy(filename, VARDATA(arg), input_len);
filename[input_len] = '\0';
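The two new lines above are the stock varlena-to-C-string conversion: copy VARSIZE minus the header and append the NUL that a text value does not store. A self-contained sketch of the idiom (the helper name is invented; later releases add a built-in for this):

#include "postgres.h"

static char *
text_to_cstr(text *t)
{
    int         len = VARSIZE(t) - VARHDRSZ;
    char       *s = palloc(len + 1);

    memcpy(s, VARDATA(t), len);
    s[len] = '\0';
    return s;
}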
@@ -58,7 +58,7 @@ check_and_make_absolute(text *arg)
if (path_contains_parent_reference(filename))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("reference to parent directory (\"..\") not allowed"))));
+ (errmsg("reference to parent directory (\"..\") not allowed"))));
if (is_absolute_path(filename))
{
@@ -70,14 +70,15 @@ check_and_make_absolute(text *arg)
path_is_prefix_of_path(Log_directory, filename))
return filename;
- ereport(ERROR,
+ ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("absolute path not allowed"))));
return NULL; /* keep compiler quiet */
}
else
{
- char *absname = palloc(strlen(DataDir) + strlen(filename) + 2);
+ char *absname = palloc(strlen(DataDir) + strlen(filename) + 2);
+
sprintf(absname, "%s/%s", DataDir, filename);
pfree(filename);
return absname;
@@ -94,13 +95,13 @@ pg_read_file(PG_FUNCTION_ARGS)
text *filename_t = PG_GETARG_TEXT_P(0);
int64 seek_offset = PG_GETARG_INT64(1);
int64 bytes_to_read = PG_GETARG_INT64(2);
- char *buf;
+ char *buf;
size_t nbytes;
- FILE *file;
- char *filename;
+ FILE *file;
+ char *filename;
if (!superuser())
- ereport(ERROR,
+ ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to read files"))));
@@ -128,7 +129,7 @@ pg_read_file(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("requested length too large")));
-
+
buf = palloc((Size) bytes_to_read + VARHDRSZ);
nbytes = fread(VARDATA(buf), 1, (size_t) bytes_to_read, file);
@@ -153,7 +154,7 @@ Datum
pg_stat_file(PG_FUNCTION_ARGS)
{
text *filename_t = PG_GETARG_TEXT_P(0);
- char *filename;
+ char *filename;
struct stat fst;
Datum values[6];
bool isnull[6];
@@ -161,7 +162,7 @@ pg_stat_file(PG_FUNCTION_ARGS)
TupleDesc tupdesc;
if (!superuser())
- ereport(ERROR,
+ ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to get file information"))));
@@ -173,8 +174,8 @@ pg_stat_file(PG_FUNCTION_ARGS)
errmsg("could not stat file \"%s\": %m", filename)));
/*
- * This record type had better match the output parameters declared
- * for me in pg_proc.h (actually, in system_views.sql at the moment).
+ * This record type had better match the output parameters declared for me
+ * in pg_proc.h (actually, in system_views.sql at the moment).
*/
tupdesc = CreateTemplateTupleDesc(6, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1,
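For readers who have not built an on-the-fly record type before, a hedged sketch of the idiom follows; the column list is purely illustrative and, as the comment above stresses, must agree with whatever the catalogs declare for the function:

#include "postgres.h"
#include "access/tupdesc.h"
#include "catalog/pg_type.h"
#include "funcapi.h"

/* Build and bless a three-column record descriptor (illustrative columns). */
static TupleDesc
build_stat_tupdesc(void)
{
    TupleDesc   tupdesc = CreateTemplateTupleDesc(3, false);

    TupleDescInitEntry(tupdesc, (AttrNumber) 1, "size", INT8OID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 2, "modification", TIMESTAMPTZOID, -1, 0);
    TupleDescInitEntry(tupdesc, (AttrNumber) 3, "isdir", BOOLOID, -1, 0);

    /* register the anonymous record type so tuples built from it can be returned */
    return BlessTupleDesc(tupdesc);
}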
@@ -220,12 +221,12 @@ pg_stat_file(PG_FUNCTION_ARGS)
Datum
pg_ls_dir(PG_FUNCTION_ARGS)
{
- FuncCallContext *funcctx;
- struct dirent *de;
- directory_fctx *fctx;
+ FuncCallContext *funcctx;
+ struct dirent *de;
+ directory_fctx *fctx;
if (!superuser())
- ereport(ERROR,
+ ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to get directory listings"))));
@@ -242,7 +243,7 @@ pg_ls_dir(PG_FUNCTION_ARGS)
fctx->dirdesc = AllocateDir(fctx->location);
if (!fctx->dirdesc)
- ereport(ERROR,
+ ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open directory \"%s\": %m",
fctx->location)));
@@ -252,16 +253,16 @@ pg_ls_dir(PG_FUNCTION_ARGS)
}
funcctx = SRF_PERCALL_SETUP();
- fctx = (directory_fctx*) funcctx->user_fctx;
+ fctx = (directory_fctx *) funcctx->user_fctx;
while ((de = ReadDir(fctx->dirdesc, fctx->location)) != NULL)
{
int len = strlen(de->d_name);
- text *result;
+ text *result;
if (strcmp(de->d_name, ".") == 0 ||
strcmp(de->d_name, "..") == 0)
- continue;
+ continue;
result = palloc(len + VARHDRSZ);
VARATT_SIZEP(result) = len + VARHDRSZ;
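pg_ls_dir follows the standard set-returning-function protocol: allocate per-query state once in multi_call_memory_ctx, then emit one row per call. A minimal skeleton of that protocol, with an invented function name and trivially small state:

#include "postgres.h"
#include "fmgr.h"
#include "funcapi.h"

PG_FUNCTION_INFO_V1(three_rows);

Datum
three_rows(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;
    int32      *counter;

    if (SRF_IS_FIRSTCALL())
    {
        MemoryContext oldcontext;

        funcctx = SRF_FIRSTCALL_INIT();
        /* switch to memory context appropriate for multiple function calls */
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
        counter = (int32 *) palloc(sizeof(int32));
        *counter = 0;
        funcctx->user_fctx = counter;
        MemoryContextSwitchTo(oldcontext);
    }

    funcctx = SRF_PERCALL_SETUP();
    counter = (int32 *) funcctx->user_fctx;

    if (*counter < 3)
        SRF_RETURN_NEXT(funcctx, Int32GetDatum((*counter)++));

    SRF_RETURN_DONE(funcctx);
}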
diff --git a/src/backend/utils/adt/geo_ops.c b/src/backend/utils/adt/geo_ops.c
index 1786da6dd1c..2f1714a034a 100644
--- a/src/backend/utils/adt/geo_ops.c
+++ b/src/backend/utils/adt/geo_ops.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/geo_ops.c,v 1.90 2005/07/01 19:19:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/geo_ops.c,v 1.91 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -387,7 +387,7 @@ box_in(PG_FUNCTION_ARGS)
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type box: \"%s\"", str)));
+ errmsg("invalid input syntax for type box: \"%s\"", str)));
/* reorder corners if necessary... */
if (box->high.x < box->low.x)
@@ -951,7 +951,7 @@ line_in(PG_FUNCTION_ARGS)
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type line: \"%s\"", str)));
+ errmsg("invalid input syntax for type line: \"%s\"", str)));
line = (LINE *) palloc(sizeof(LINE));
line_construct_pts(line, &lseg.p[0], &lseg.p[1]);
@@ -1292,10 +1292,9 @@ line_interpt_internal(LINE *l1, LINE *l2)
y;
/*
- * NOTE: if the lines are identical then we will find they are
- * parallel and report "no intersection". This is a little weird, but
- * since there's no *unique* intersection, maybe it's appropriate
- * behavior.
+ * NOTE: if the lines are identical then we will find they are parallel
+ * and report "no intersection". This is a little weird, but since
+ * there's no *unique* intersection, maybe it's appropriate behavior.
*/
if (DatumGetBool(DirectFunctionCall2(line_parallel,
LinePGetDatum(l1),
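As a plain-math illustration of why identical lines fall out as "parallel", here is a hedged sketch in the same implicit A*x + B*y + C = 0 representation: when the determinant is (near) zero there is no unique intersection point, whether the lines merely run parallel or actually coincide. The struct name and tolerance are invented for the example.

#include <math.h>
#include <stdbool.h>

typedef struct { double A, B, C; } Line2D;      /* A*x + B*y + C = 0 */

static bool
line_intersection(Line2D l1, Line2D l2, double *x, double *y)
{
    double det = l1.A * l2.B - l2.A * l1.B;

    if (fabs(det) < 1e-10)      /* parallel or identical: no unique point */
        return false;
    *x = (l1.B * l2.C - l2.B * l1.C) / det;
    *y = (l2.A * l1.C - l1.A * l2.C) / det;
    return true;
}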
@@ -1400,7 +1399,7 @@ path_in(PG_FUNCTION_ARGS)
if ((npts = pair_count(str, ',')) <= 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type path: \"%s\"", str)));
+ errmsg("invalid input syntax for type path: \"%s\"", str)));
s = str;
while (isspace((unsigned char) *s))
@@ -1420,10 +1419,10 @@ path_in(PG_FUNCTION_ARGS)
path->npts = npts;
if ((!path_decode(TRUE, npts, s, &isopen, &s, &(path->p[0])))
- && (!((depth == 0) && (*s == '\0'))) && !((depth >= 1) && (*s == RDELIM)))
+ && (!((depth == 0) && (*s == '\0'))) && !((depth >= 1) && (*s == RDELIM)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type path: \"%s\"", str)));
+ errmsg("invalid input syntax for type path: \"%s\"", str)));
path->closed = (!isopen);
@@ -1460,7 +1459,7 @@ path_recv(PG_FUNCTION_ARGS)
if (npts < 0 || npts >= (int32) ((INT_MAX - offsetof(PATH, p[0])) / sizeof(Point)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid number of points in external \"path\" value")));
+ errmsg("invalid number of points in external \"path\" value")));
size = offsetof(PATH, p[0]) +sizeof(path->p[0]) * npts;
path = (PATH *) palloc(size);
@@ -1730,7 +1729,7 @@ path_distance(PG_FUNCTION_ARGS)
tmp = DatumGetFloat8(DirectFunctionCall2(lseg_distance,
LsegPGetDatum(&seg1),
- LsegPGetDatum(&seg2)));
+ LsegPGetDatum(&seg2)));
if (!have_min || tmp < min)
{
min = tmp;
@@ -1801,7 +1800,7 @@ point_in(PG_FUNCTION_ARGS)
if (!pair_decode(str, &x, &y, &s) || (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type point: \"%s\"", str)));
+ errmsg("invalid input syntax for type point: \"%s\"", str)));
point = (Point *) palloc(sizeof(Point));
@@ -1976,7 +1975,7 @@ point_dt(Point *pt1, Point *pt2)
{
#ifdef GEODEBUG
printf("point_dt- segment (%f,%f),(%f,%f) length is %f\n",
- pt1->x, pt1->y, pt2->x, pt2->y, HYPOT(pt1->x - pt2->x, pt1->y - pt2->y));
+ pt1->x, pt1->y, pt2->x, pt2->y, HYPOT(pt1->x - pt2->x, pt1->y - pt2->y));
#endif
return HYPOT(pt1->x - pt2->x, pt1->y - pt2->y);
}
@@ -2029,7 +2028,7 @@ lseg_in(PG_FUNCTION_ARGS)
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type lseg: \"%s\"", str)));
+ errmsg("invalid input syntax for type lseg: \"%s\"", str)));
#ifdef NOT_USED
lseg->m = point_sl(&lseg->p[0], &lseg->p[1]);
@@ -2374,8 +2373,8 @@ lseg_interpt(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
/*
- * If the line intersection point isn't within l1 (or equivalently
- * l2), there is no valid segment intersection point at all.
+ * If the line intersection point isn't within l1 (or equivalently l2),
+ * there is no valid segment intersection point at all.
*/
if (!on_ps_internal(result, l1) ||
!on_ps_internal(result, l2))
@@ -2393,7 +2392,7 @@ lseg_interpt(PG_FUNCTION_ARGS)
result->y = l1->p[0].y;
}
else if ((FPeq(l1->p[1].x, l2->p[0].x) && FPeq(l1->p[1].y, l2->p[0].y)) ||
- (FPeq(l1->p[1].x, l2->p[1].x) && FPeq(l1->p[1].y, l2->p[1].y)))
+ (FPeq(l1->p[1].x, l2->p[1].x) && FPeq(l1->p[1].y, l2->p[1].y)))
{
result->x = l1->p[1].x;
result->y = l1->p[1].y;
@@ -2521,8 +2520,8 @@ dist_ppath(PG_FUNCTION_ARGS)
Assert(path->npts > 1);
/*
- * the distance from a point to a path is the smallest
- * distance from the point to any of its constituent segments.
+ * the distance from a point to a path is the smallest distance
+ * from the point to any of its constituent segments.
*/
for (i = 0; i < path->npts; i++)
{
@@ -2534,8 +2533,7 @@ dist_ppath(PG_FUNCTION_ARGS)
{
if (!path->closed)
continue;
- iprev = path->npts - 1; /* include the closure
- * segment */
+ iprev = path->npts - 1; /* include the closure segment */
}
statlseg_construct(&lseg, &path->p[iprev], &path->p[i]);
@@ -2853,8 +2851,8 @@ close_ps(PG_FUNCTION_ARGS)
}
/*
- * vert. and horiz. cases are down, now check if the closest point is
- * one of the end points or someplace on the lseg.
+ * vert. and horiz. cases are done; now check whether the closest point is
+ * one of the end points or somewhere on the lseg.
*/
invm = -1.0 / point_sl(&(lseg->p[0]), &(lseg->p[1]));
@@ -2862,8 +2860,8 @@ close_ps(PG_FUNCTION_ARGS)
* "band" */
if (pt->y < (tmp->A * pt->x + tmp->C))
{ /* we are below the lower edge */
- result = point_copy(&lseg->p[!yh]); /* below the lseg, take
- * lower end pt */
+ result = point_copy(&lseg->p[!yh]); /* below the lseg, take lower
+ * end pt */
#ifdef GEODEBUG
printf("close_ps below: tmp A %f B %f C %f m %f\n",
tmp->A, tmp->B, tmp->C, tmp->m);
@@ -2874,8 +2872,8 @@ close_ps(PG_FUNCTION_ARGS)
* "band" */
if (pt->y > (tmp->A * pt->x + tmp->C))
 { /* we are above the upper edge */
- result = point_copy(&lseg->p[yh]); /* above the lseg, take
- * higher end pt */
+ result = point_copy(&lseg->p[yh]); /* above the lseg, take higher
+ * end pt */
#ifdef GEODEBUG
printf("close_ps above: tmp A %f B %f C %f m %f\n",
tmp->A, tmp->B, tmp->C, tmp->m);
@@ -2884,8 +2882,8 @@ close_ps(PG_FUNCTION_ARGS)
}
/*
- * at this point the "normal" from point will hit lseg. The closet
- * point will be somewhere on the lseg
+ * at this point the "normal" from the point will hit the lseg. The closest
+ * point will be somewhere on the lseg
*/
tmp = line_construct_pm(pt, invm);
#ifdef GEODEBUG
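close_ps reaches its answer through slopes, intercepts and the two "band" tests above; the same result can be stated more compactly as a projection clamped to the segment, which may help when reading the code. A hedged sketch of that alternative formulation (struct and function are hypothetical, not backend code):

typedef struct { double x, y; } Pt;

/* Closest point of segment a-b to p: project onto the supporting line and
 * clamp the parameter to [0,1], so an endpoint wins when the perpendicular
 * foot falls outside the segment. */
static Pt
closest_point_on_segment(Pt p, Pt a, Pt b)
{
    double dx = b.x - a.x;
    double dy = b.y - a.y;
    double len2 = dx * dx + dy * dy;
    double t;
    Pt r;

    if (len2 == 0.0)
        return a;               /* degenerate segment */
    t = ((p.x - a.x) * dx + (p.y - a.y) * dy) / len2;
    if (t < 0.0)
        t = 0.0;
    else if (t > 1.0)
        t = 1.0;
    r.x = a.x + t * dx;
    r.y = a.y + t * dy;
    return r;
}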
@@ -2927,22 +2925,22 @@ close_lseg(PG_FUNCTION_ARGS)
if ((d = dist_ps_internal(&l2->p[0], l1)) < dist)
{
result = DatumGetPointP(DirectFunctionCall2(close_ps,
- PointPGetDatum(&l2->p[0]),
+ PointPGetDatum(&l2->p[0]),
LsegPGetDatum(l1)));
memcpy(&point, result, sizeof(Point));
result = DatumGetPointP(DirectFunctionCall2(close_ps,
- PointPGetDatum(&point),
+ PointPGetDatum(&point),
LsegPGetDatum(l2)));
}
if ((d = dist_ps_internal(&l2->p[1], l1)) < dist)
{
result = DatumGetPointP(DirectFunctionCall2(close_ps,
- PointPGetDatum(&l2->p[1]),
+ PointPGetDatum(&l2->p[1]),
LsegPGetDatum(l1)));
memcpy(&point, result, sizeof(Point));
result = DatumGetPointP(DirectFunctionCall2(close_ps,
- PointPGetDatum(&point),
+ PointPGetDatum(&point),
LsegPGetDatum(l2)));
}
@@ -3235,11 +3233,11 @@ on_sl(PG_FUNCTION_ARGS)
LINE *line = PG_GETARG_LINE_P(1);
PG_RETURN_BOOL(DatumGetBool(DirectFunctionCall2(on_pl,
- PointPGetDatum(&lseg->p[0]),
- LinePGetDatum(line))) &&
+ PointPGetDatum(&lseg->p[0]),
+ LinePGetDatum(line))) &&
DatumGetBool(DirectFunctionCall2(on_pl,
- PointPGetDatum(&lseg->p[1]),
- LinePGetDatum(line))));
+ PointPGetDatum(&lseg->p[1]),
+ LinePGetDatum(line))));
}
Datum
@@ -3249,10 +3247,10 @@ on_sb(PG_FUNCTION_ARGS)
BOX *box = PG_GETARG_BOX_P(1);
PG_RETURN_BOOL(DatumGetBool(DirectFunctionCall2(on_pb,
- PointPGetDatum(&lseg->p[0]),
+ PointPGetDatum(&lseg->p[0]),
BoxPGetDatum(box))) &&
DatumGetBool(DirectFunctionCall2(on_pb,
- PointPGetDatum(&lseg->p[1]),
+ PointPGetDatum(&lseg->p[1]),
BoxPGetDatum(box))));
}
@@ -3437,7 +3435,7 @@ poly_in(PG_FUNCTION_ARGS)
if ((npts = pair_count(str, ',')) <= 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type polygon: \"%s\"", str)));
+ errmsg("invalid input syntax for type polygon: \"%s\"", str)));
size = offsetof(POLYGON, p[0]) +sizeof(poly->p[0]) * npts;
poly = (POLYGON *) palloc0(size); /* zero any holes */
@@ -3449,7 +3447,7 @@ poly_in(PG_FUNCTION_ARGS)
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type polygon: \"%s\"", str)));
+ errmsg("invalid input syntax for type polygon: \"%s\"", str)));
make_bound_box(poly);
@@ -3489,7 +3487,7 @@ poly_recv(PG_FUNCTION_ARGS)
if (npts < 0 || npts >= (int32) ((INT_MAX - offsetof(POLYGON, p[0])) / sizeof(Point)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid number of points in external \"polygon\" value")));
+ errmsg("invalid number of points in external \"polygon\" value")));
size = offsetof(POLYGON, p[0]) +sizeof(poly->p[0]) * npts;
poly = (POLYGON *) palloc0(size); /* zero any holes */
@@ -3544,8 +3542,7 @@ poly_left(PG_FUNCTION_ARGS)
result = polya->boundbox.high.x < polyb->boundbox.low.x;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
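The PG_FREE_IF_COPY idiom reflowed here repeats through all of the polygon operators below: any detoasted argument copy is freed explicitly so that repeated calls from an rtree index scan do not accumulate leaked memory. A stripped-down sketch of the pattern, using an invented operator that merely compares point counts:

#include "postgres.h"
#include "fmgr.h"
#include "utils/geo_decls.h"

PG_FUNCTION_INFO_V1(poly_same_npts);

Datum
poly_same_npts(PG_FUNCTION_ARGS)
{
    POLYGON    *polya = PG_GETARG_POLYGON_P(0);
    POLYGON    *polyb = PG_GETARG_POLYGON_P(1);
    bool        result = (polya->npts == polyb->npts);

    /* free detoasted copies before returning */
    PG_FREE_IF_COPY(polya, 0);
    PG_FREE_IF_COPY(polyb, 1);
    PG_RETURN_BOOL(result);
}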
@@ -3568,8 +3565,7 @@ poly_overleft(PG_FUNCTION_ARGS)
result = polya->boundbox.high.x <= polyb->boundbox.high.x;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3592,8 +3588,7 @@ poly_right(PG_FUNCTION_ARGS)
result = polya->boundbox.low.x > polyb->boundbox.high.x;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3616,8 +3611,7 @@ poly_overright(PG_FUNCTION_ARGS)
result = polya->boundbox.low.x >= polyb->boundbox.low.x;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3640,8 +3634,7 @@ poly_below(PG_FUNCTION_ARGS)
result = polya->boundbox.high.y < polyb->boundbox.low.y;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3664,8 +3657,7 @@ poly_overbelow(PG_FUNCTION_ARGS)
result = polya->boundbox.high.y <= polyb->boundbox.high.y;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3688,8 +3680,7 @@ poly_above(PG_FUNCTION_ARGS)
result = polya->boundbox.low.y > polyb->boundbox.high.y;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3712,8 +3703,7 @@ poly_overabove(PG_FUNCTION_ARGS)
result = polya->boundbox.low.y >= polyb->boundbox.low.y;
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3742,8 +3732,7 @@ poly_same(PG_FUNCTION_ARGS)
result = plist_same(polya->npts, polya->p, polyb->p);
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3767,8 +3756,7 @@ poly_overlap(PG_FUNCTION_ARGS)
result = box_ov(&polya->boundbox, &polyb->boundbox);
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -3833,8 +3821,7 @@ poly_contain(PG_FUNCTION_ARGS)
}
/*
- * Avoid leaking memory for toasted inputs ... needed for rtree
- * indexes
+ * Avoid leaking memory for toasted inputs ... needed for rtree indexes
*/
PG_FREE_IF_COPY(polya, 0);
PG_FREE_IF_COPY(polyb, 1);
@@ -4169,7 +4156,7 @@ path_mul_pt(PG_FUNCTION_ARGS)
for (i = 0; i < path->npts; i++)
{
p = DatumGetPointP(DirectFunctionCall2(point_mul,
- PointPGetDatum(&path->p[i]),
+ PointPGetDatum(&path->p[i]),
PointPGetDatum(point)));
path->p[i].x = p->x;
path->p[i].y = p->y;
@@ -4189,7 +4176,7 @@ path_div_pt(PG_FUNCTION_ARGS)
for (i = 0; i < path->npts; i++)
{
p = DatumGetPointP(DirectFunctionCall2(point_div,
- PointPGetDatum(&path->p[i]),
+ PointPGetDatum(&path->p[i]),
PointPGetDatum(point)));
path->p[i].x = p->x;
path->p[i].y = p->y;
@@ -4392,7 +4379,7 @@ circle_in(PG_FUNCTION_ARGS)
if (!pair_decode(s, &circle->center.x, &circle->center.y, &s))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type circle: \"%s\"", str)));
+ errmsg("invalid input syntax for type circle: \"%s\"", str)));
if (*s == DELIM)
s++;
@@ -4402,7 +4389,7 @@ circle_in(PG_FUNCTION_ARGS)
if ((!single_decode(s, &circle->radius, &s)) || (circle->radius < 0))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type circle: \"%s\"", str)));
+ errmsg("invalid input syntax for type circle: \"%s\"", str)));
while (depth > 0)
{
@@ -4417,13 +4404,13 @@ circle_in(PG_FUNCTION_ARGS)
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type circle: \"%s\"", str)));
+ errmsg("invalid input syntax for type circle: \"%s\"", str)));
}
if (*s != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type circle: \"%s\"", str)));
+ errmsg("invalid input syntax for type circle: \"%s\"", str)));
PG_RETURN_CIRCLE_P(circle);
}
@@ -4780,7 +4767,7 @@ circle_mul_pt(PG_FUNCTION_ARGS)
result = circle_copy(circle);
p = DatumGetPointP(DirectFunctionCall2(point_mul,
- PointPGetDatum(&circle->center),
+ PointPGetDatum(&circle->center),
PointPGetDatum(point)));
result->center.x = p->x;
result->center.y = p->y;
@@ -4800,7 +4787,7 @@ circle_div_pt(PG_FUNCTION_ARGS)
result = circle_copy(circle);
p = DatumGetPointP(DirectFunctionCall2(point_div,
- PointPGetDatum(&circle->center),
+ PointPGetDatum(&circle->center),
PointPGetDatum(point)));
result->center.x = p->x;
result->center.y = p->y;
@@ -5001,7 +4988,7 @@ circle_poly(PG_FUNCTION_ARGS)
if (FPzero(circle->radius))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert circle with radius zero to polygon")));
+ errmsg("cannot convert circle with radius zero to polygon")));
if (npts < 2)
ereport(ERROR,
diff --git a/src/backend/utils/adt/inet_net_ntop.c b/src/backend/utils/adt/inet_net_ntop.c
index 67a55be5711..abbfcd592ca 100644
--- a/src/backend/utils/adt/inet_net_ntop.c
+++ b/src/backend/utils/adt/inet_net_ntop.c
@@ -14,7 +14,7 @@
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/inet_net_ntop.c,v 1.20 2005/09/24 22:54:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/inet_net_ntop.c,v 1.21 2005/10/15 02:49:28 momjian Exp $
*/
#if defined(LIBC_SCCS) && !defined(lint)
@@ -412,11 +412,11 @@ static char *
inet_net_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
{
/*
- * Note that int32_t and int16_t need only be "at least" large enough
- * to contain a value of the specified size. On some systems, like
- * Crays, there is no such thing as an integer variable with 16 bits.
- * Keep this in mind if you think this function should have been coded
- * to use pointer overlays. All the world's not a VAX.
+ * Note that int32_t and int16_t need only be "at least" large enough to
+ * contain a value of the specified size. On some systems, like Crays,
+ * there is no such thing as an integer variable with 16 bits. Keep this
+ * in mind if you think this function should have been coded to use
+ * pointer overlays. All the world's not a VAX.
*/
char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255/128"];
char *tp;
@@ -435,8 +435,8 @@ inet_net_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
}
/*
- * Preprocess: Copy the input (bytewise) array into a wordwise array.
- * Find the longest run of 0x00's in src[] for :: shorthanding.
+ * Preprocess: Copy the input (bytewise) array into a wordwise array. Find
+ * the longest run of 0x00's in src[] for :: shorthanding.
*/
memset(words, '\0', sizeof words);
for (i = 0; i < NS_IN6ADDRSZ; i++)
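The "::" shorthanding mentioned above only needs the longest run of zero 16-bit groups. A standalone sketch of that scan (the real code additionally refuses to use "::" for a run of length one):

static void
find_longest_zero_run(const unsigned short words[8], int *base, int *len)
{
    int     cur_base = -1;
    int     cur_len = 0;
    int     i;

    *base = -1;
    *len = 0;
    for (i = 0; i < 8; i++)
    {
        if (words[i] == 0)
        {
            if (cur_base < 0)
            {
                cur_base = i;
                cur_len = 1;
            }
            else
                cur_len++;
            if (cur_len > *len)
            {
                *base = cur_base;
                *len = cur_len;
            }
        }
        else
            cur_base = -1;
    }
}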
@@ -491,8 +491,8 @@ inet_net_ntop_ipv6(const u_char *src, int bits, char *dst, size_t size)
*tp++ = ':';
/* Is this address an encapsulated IPv4? */
if (i == 6 && best.base == 0 && (best.len == 6 ||
- (best.len == 7 && words[7] != 0x0001) ||
- (best.len == 5 && words[5] == 0xffff)))
+ (best.len == 7 && words[7] != 0x0001) ||
+ (best.len == 5 && words[5] == 0xffff)))
{
int n;
diff --git a/src/backend/utils/adt/inet_net_pton.c b/src/backend/utils/adt/inet_net_pton.c
index a6911740cd5..e9239e317eb 100644
--- a/src/backend/utils/adt/inet_net_pton.c
+++ b/src/backend/utils/adt/inet_net_pton.c
@@ -14,7 +14,7 @@
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/inet_net_pton.c,v 1.20 2005/02/01 00:59:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/inet_net_pton.c,v 1.21 2005/10/15 02:49:28 momjian Exp $
*/
#if defined(LIBC_SCCS) && !defined(lint)
@@ -207,7 +207,8 @@ inet_cidr_pton_ipv4(const char *src, u_char *dst, size_t size)
bits = 24;
else if (*odst >= 128) /* Class B */
bits = 16;
- else /* Class A */
+ else
+ /* Class A */
bits = 8;
/* If imputed mask is narrower than specified octets, widen. */
if (bits < ((dst - odst) * 8))
diff --git a/src/backend/utils/adt/int.c b/src/backend/utils/adt/int.c
index d35af1c913a..e41e584ffea 100644
--- a/src/backend/utils/adt/int.c
+++ b/src/backend/utils/adt/int.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/int.c,v 1.67 2005/07/10 21:36:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/int.c,v 1.68 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -120,7 +120,7 @@ int2send(PG_FUNCTION_ARGS)
int2vector *
buildint2vector(const int2 *int2s, int n)
{
- int2vector *result;
+ int2vector *result;
result = (int2vector *) palloc0(Int2VectorSize(n));
@@ -128,8 +128,8 @@ buildint2vector(const int2 *int2s, int n)
memcpy(result->values, int2s, n * sizeof(int2));
/*
- * Attach standard array header. For historical reasons, we set the
- * index lower bound to 0 not 1.
+ * Attach standard array header. For historical reasons, we set the index
+ * lower bound to 0 not 1.
*/
result->size = Int2VectorSize(n);
result->ndim = 1;
@@ -212,7 +212,7 @@ Datum
int2vectorrecv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
- int2vector *result;
+ int2vector *result;
result = (int2vector *)
DatumGetPointer(DirectFunctionCall3(array_recv,
@@ -686,10 +686,11 @@ int4pl(PG_FUNCTION_ARGS)
int32 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
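Pulled out of its ereport() context, the addition check above is small enough to state as a standalone helper. A hedged sketch, which like the backend assumes wraparound behaviour for signed overflow rather than strict ISO C semantics:

#include <stdbool.h>
#include <stdint.h>

#define SAMESIGN(a,b)   (((a) < 0) == ((b) < 0))

/* true if a + b overflowed; *sum receives the (possibly wrapped) result */
static bool
add32_overflows(int32_t a, int32_t b, int32_t *sum)
{
    *sum = a + b;
    return SAMESIGN(a, b) && !SAMESIGN(*sum, a);
}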
@@ -706,10 +707,11 @@ int4mi(PG_FUNCTION_ARGS)
int32 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -726,21 +728,22 @@ int4mul(PG_FUNCTION_ARGS)
int32 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg2 gives
- * arg1 again. There are two cases where this fails: arg2 = 0 (which
- * cannot overflow) and arg1 = INT_MIN, arg2 = -1 (where the division
- * itself will overflow and thus incorrectly match).
+ * Overflow check. We basically check to see if result / arg2 gives arg1
+ * again. There are two cases where this fails: arg2 = 0 (which cannot
+ * overflow) and arg1 = INT_MIN, arg2 = -1 (where the division itself will
+ * overflow and thus incorrectly match).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int16 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int16
+ * range; if so, no overflow is possible.
*/
if (!(arg1 >= (int32) SHRT_MIN && arg1 <= (int32) SHRT_MAX &&
arg2 >= (int32) SHRT_MIN && arg2 <= (int32) SHRT_MAX) &&
arg2 != 0 &&
- (result/arg2 != arg1 || (arg2 == -1 && arg1 < 0 && result < 0)))
+ (result / arg2 != arg1 || (arg2 == -1 && arg1 < 0 && result < 0)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("integer out of range")));
@@ -760,10 +763,11 @@ int4div(PG_FUNCTION_ARGS)
errmsg("division by zero")));
result = arg1 / arg2;
+
/*
- * Overflow check. The only possible overflow case is for
- * arg1 = INT_MIN, arg2 = -1, where the correct result is -INT_MIN,
- * which can't be represented on a two's-complement machine.
+ * Overflow check. The only possible overflow case is for arg1 = INT_MIN,
+ * arg2 = -1, where the correct result is -INT_MIN, which can't be
+ * represented on a two's-complement machine.
*/
if (arg2 == -1 && arg1 < 0 && result < 0)
ereport(ERROR,
@@ -819,10 +823,11 @@ int2pl(PG_FUNCTION_ARGS)
int16 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -839,10 +844,11 @@ int2mi(PG_FUNCTION_ARGS)
int16 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -859,11 +865,11 @@ int2mul(PG_FUNCTION_ARGS)
int32 result32;
/*
- * The most practical way to detect overflow is to do the arithmetic
- * in int32 (so that the result can't overflow) and then do a range
- * check.
+ * The most practical way to detect overflow is to do the arithmetic in
+ * int32 (so that the result can't overflow) and then do a range check.
*/
- result32 = (int32) arg1 * (int32) arg2;
+ result32 = (int32) arg1 *(int32) arg2;
+
if (result32 < SHRT_MIN || result32 > SHRT_MAX)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
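For int16 the cheaper trick described here suffices: widen to int32, where the product cannot overflow, then range-check. A hedged standalone version:

#include <stdbool.h>
#include <stdint.h>
#include <limits.h>

static bool
mul16_overflows(int16_t a, int16_t b, int16_t *prod)
{
    int32_t     wide = (int32_t) a * (int32_t) b;

    if (wide < SHRT_MIN || wide > SHRT_MAX)
        return true;            /* out of int16 range */
    *prod = (int16_t) wide;
    return false;
}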
@@ -885,10 +891,11 @@ int2div(PG_FUNCTION_ARGS)
errmsg("division by zero")));
result = arg1 / arg2;
+
/*
- * Overflow check. The only possible overflow case is for
- * arg1 = SHRT_MIN, arg2 = -1, where the correct result is -SHRT_MIN,
- * which can't be represented on a two's-complement machine.
+ * Overflow check. The only possible overflow case is for arg1 =
+ * SHRT_MIN, arg2 = -1, where the correct result is -SHRT_MIN, which can't
+ * be represented on a two's-complement machine.
*/
if (arg2 == -1 && arg1 < 0 && result < 0)
ereport(ERROR,
@@ -905,10 +912,11 @@ int24pl(PG_FUNCTION_ARGS)
int32 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -925,10 +933,11 @@ int24mi(PG_FUNCTION_ARGS)
int32 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -945,18 +954,19 @@ int24mul(PG_FUNCTION_ARGS)
int32 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg2 gives
- * arg1 again. There is one case where this fails: arg2 = 0 (which
- * cannot overflow).
+ * Overflow check. We basically check to see if result / arg2 gives arg1
+ * again. There is one case where this fails: arg2 = 0 (which cannot
+ * overflow).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int16 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int16
+ * range; if so, no overflow is possible.
*/
if (!(arg2 >= (int32) SHRT_MIN && arg2 <= (int32) SHRT_MAX) &&
- result/arg2 != arg1)
+ result / arg2 != arg1)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("integer out of range")));
@@ -985,10 +995,11 @@ int42pl(PG_FUNCTION_ARGS)
int32 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -1005,10 +1016,11 @@ int42mi(PG_FUNCTION_ARGS)
int32 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -1025,18 +1037,19 @@ int42mul(PG_FUNCTION_ARGS)
int32 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg1 gives
- * arg2 again. There is one case where this fails: arg1 = 0 (which
- * cannot overflow).
+ * Overflow check. We basically check to see if result / arg1 gives arg2
+ * again. There is one case where this fails: arg1 = 0 (which cannot
+ * overflow).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int16 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int16
+ * range; if so, no overflow is possible.
*/
if (!(arg1 >= (int32) SHRT_MIN && arg1 <= (int32) SHRT_MAX) &&
- result/arg1 != arg2)
+ result / arg1 != arg2)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("integer out of range")));
@@ -1056,10 +1069,11 @@ int42div(PG_FUNCTION_ARGS)
errmsg("division by zero")));
result = arg1 / arg2;
+
/*
- * Overflow check. The only possible overflow case is for
- * arg1 = INT_MIN, arg2 = -1, where the correct result is -INT_MIN,
- * which can't be represented on a two's-complement machine.
+ * Overflow check. The only possible overflow case is for arg1 = INT_MIN,
+ * arg2 = -1, where the correct result is -INT_MIN, which can't be
+ * represented on a two's-complement machine.
*/
if (arg2 == -1 && arg1 < 0 && result < 0)
ereport(ERROR,
@@ -1352,8 +1366,7 @@ generate_series_step_int4(PG_FUNCTION_ARGS)
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -1376,8 +1389,7 @@ generate_series_step_int4(PG_FUNCTION_ARGS)
funcctx = SRF_PERCALL_SETUP();
/*
- * get the saved state and use current as the result for this
- * iteration
+ * get the saved state and use current as the result for this iteration
*/
fctx = funcctx->user_fctx;
result = fctx->current;
diff --git a/src/backend/utils/adt/int8.c b/src/backend/utils/adt/int8.c
index c5c3d30d03d..6418da312e0 100644
--- a/src/backend/utils/adt/int8.c
+++ b/src/backend/utils/adt/int8.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/int8.c,v 1.58 2005/03/12 20:25:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/int8.c,v 1.59 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -59,8 +59,8 @@ scanint8(const char *str, bool errorOK, int64 *result)
int sign = 1;
/*
- * Do our own scan, rather than relying on sscanf which might be
- * broken for long long.
+ * Do our own scan, rather than relying on sscanf which might be broken
+ * for long long.
*/
/* skip leading spaces */
@@ -74,8 +74,7 @@ scanint8(const char *str, bool errorOK, int64 *result)
/*
* Do an explicit check for INT64_MIN. Ugly though this is, it's
- * cleaner than trying to get the loop below to handle it
- * portably.
+ * cleaner than trying to get the loop below to handle it portably.
*/
#ifndef INT64_IS_BUSTED
if (strncmp(ptr, "9223372036854775808", 19) == 0)
@@ -115,8 +114,8 @@ scanint8(const char *str, bool errorOK, int64 *result)
else
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("value \"%s\" is out of range for type bigint",
- str)));
+ errmsg("value \"%s\" is out of range for type bigint",
+ str)));
}
tmp = newtmp;
}
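A simplified sketch of the hand-rolled scan described above, including the INT64_MIN special case; it drops scanint8's errorOK reporting and trailing-whitespace tolerance, and like the original it detects digit overflow by dividing the accumulated value back:

#include <ctype.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

static bool
parse_int64(const char *str, int64_t *result)
{
    const char *ptr = str;
    int64_t     tmp = 0;
    int         sign = 1;

    while (isspace((unsigned char) *ptr))
        ptr++;
    if (*ptr == '-')
    {
        ptr++;
        /* INT64_MIN has no positive counterpart, so special-case it */
        if (strcmp(ptr, "9223372036854775808") == 0)
        {
            *result = INT64_MIN;
            return true;
        }
        sign = -1;
    }
    else if (*ptr == '+')
        ptr++;

    if (!isdigit((unsigned char) *ptr))
        return false;
    while (isdigit((unsigned char) *ptr))
    {
        int64_t newtmp = tmp * 10 + (*ptr++ - '0');

        if ((newtmp / 10) != tmp)       /* overflow? */
            return false;
        tmp = newtmp;
    }
    if (*ptr != '\0')
        return false;           /* trailing junk */
    *result = tmp * sign;
    return true;
}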
@@ -524,10 +523,11 @@ int8pl(PG_FUNCTION_ARGS)
int64 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -544,10 +544,11 @@ int8mi(PG_FUNCTION_ARGS)
int64 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -564,21 +565,22 @@ int8mul(PG_FUNCTION_ARGS)
int64 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg2 gives
- * arg1 again. There are two cases where this fails: arg2 = 0 (which
- * cannot overflow) and arg1 = INT64_MIN, arg2 = -1 (where the division
- * itself will overflow and thus incorrectly match).
+ * Overflow check. We basically check to see if result / arg2 gives arg1
+ * again. There are two cases where this fails: arg2 = 0 (which cannot
+ * overflow) and arg1 = INT64_MIN, arg2 = -1 (where the division itself
+ * will overflow and thus incorrectly match).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int32 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int32
+ * range; if so, no overflow is possible.
*/
if (!(arg1 == (int64) ((int32) arg1) &&
arg2 == (int64) ((int32) arg2)) &&
arg2 != 0 &&
- (result/arg2 != arg1 || (arg2 == -1 && arg1 < 0 && result < 0)))
+ (result / arg2 != arg1 || (arg2 == -1 && arg1 < 0 && result < 0)))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
@@ -598,10 +600,11 @@ int8div(PG_FUNCTION_ARGS)
errmsg("division by zero")));
result = arg1 / arg2;
+
/*
- * Overflow check. The only possible overflow case is for
- * arg1 = INT64_MIN, arg2 = -1, where the correct result is -INT64_MIN,
- * which can't be represented on a two's-complement machine.
+ * Overflow check. The only possible overflow case is for arg1 =
+ * INT64_MIN, arg2 = -1, where the correct result is -INT64_MIN, which
+ * can't be represented on a two's-complement machine.
*/
if (arg2 == -1 && arg1 < 0 && result < 0)
ereport(ERROR,
@@ -653,9 +656,9 @@ int8inc(PG_FUNCTION_ARGS)
if (fcinfo->context && IsA(fcinfo->context, AggState))
{
/*
- * Special case to avoid palloc overhead for COUNT(): when called
- * from nodeAgg, we know that the argument is modifiable local
- * storage, so just update it in-place.
+ * Special case to avoid palloc overhead for COUNT(): when called from
+ * nodeAgg, we know that the argument is modifiable local storage, so
+ * just update it in-place.
*
* Note: this assumes int8 is a pass-by-ref type; if we ever support
* pass-by-val int8, this should be ifdef'd out when int8 is
@@ -723,10 +726,11 @@ int84pl(PG_FUNCTION_ARGS)
int64 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -743,10 +747,11 @@ int84mi(PG_FUNCTION_ARGS)
int64 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -763,18 +768,19 @@ int84mul(PG_FUNCTION_ARGS)
int64 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg1 gives
- * arg2 again. There is one case where this fails: arg1 = 0 (which
- * cannot overflow).
+ * Overflow check. We basically check to see if result / arg1 gives arg2
+ * again. There is one case where this fails: arg1 = 0 (which cannot
+ * overflow).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int32 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int32
+ * range; if so, no overflow is possible.
*/
if (arg1 != (int64) ((int32) arg1) &&
- result/arg1 != arg2)
+ result / arg1 != arg2)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
@@ -794,10 +800,11 @@ int84div(PG_FUNCTION_ARGS)
errmsg("division by zero")));
result = arg1 / arg2;
+
/*
- * Overflow check. The only possible overflow case is for
- * arg1 = INT64_MIN, arg2 = -1, where the correct result is -INT64_MIN,
- * which can't be represented on a two's-complement machine.
+ * Overflow check. The only possible overflow case is for arg1 =
+ * INT64_MIN, arg2 = -1, where the correct result is -INT64_MIN, which
+ * can't be represented on a two's-complement machine.
*/
if (arg2 == -1 && arg1 < 0 && result < 0)
ereport(ERROR,
@@ -814,10 +821,11 @@ int48pl(PG_FUNCTION_ARGS)
int64 result;
result = arg1 + arg2;
+
/*
- * Overflow check. If the inputs are of different signs then their sum
- * cannot overflow. If the inputs are of the same sign, their sum
- * had better be that sign too.
+ * Overflow check. If the inputs are of different signs then their sum
+ * cannot overflow. If the inputs are of the same sign, their sum had
+ * better be that sign too.
*/
if (SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -834,10 +842,11 @@ int48mi(PG_FUNCTION_ARGS)
int64 result;
result = arg1 - arg2;
+
/*
- * Overflow check. If the inputs are of the same sign then their
- * difference cannot overflow. If they are of different signs then
- * the result should be of the same sign as the first input.
+ * Overflow check. If the inputs are of the same sign then their
+ * difference cannot overflow. If they are of different signs then the
+ * result should be of the same sign as the first input.
*/
if (!SAMESIGN(arg1, arg2) && !SAMESIGN(result, arg1))
ereport(ERROR,
@@ -854,18 +863,19 @@ int48mul(PG_FUNCTION_ARGS)
int64 result;
result = arg1 * arg2;
+
/*
- * Overflow check. We basically check to see if result / arg2 gives
- * arg1 again. There is one case where this fails: arg2 = 0 (which
- * cannot overflow).
+ * Overflow check. We basically check to see if result / arg2 gives arg1
+ * again. There is one case where this fails: arg2 = 0 (which cannot
+ * overflow).
*
* Since the division is likely much more expensive than the actual
- * multiplication, we'd like to skip it where possible. The best
- * bang for the buck seems to be to check whether both inputs are in
- * the int32 range; if so, no overflow is possible.
+ * multiplication, we'd like to skip it where possible. The best bang for
+ * the buck seems to be to check whether both inputs are in the int32
+ * range; if so, no overflow is possible.
*/
if (arg2 != (int64) ((int32) arg2) &&
- result/arg2 != arg1)
+ result / arg2 != arg1)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
errmsg("bigint out of range")));
@@ -1027,9 +1037,9 @@ dtoi8(PG_FUNCTION_ARGS)
arg = rint(arg);
/*
- * Does it fit in an int64? Avoid assuming that we have handy
- * constants defined for the range boundaries, instead test for
- * overflow by reverse-conversion.
+ * Does it fit in an int64? Avoid assuming that we have handy constants
+ * defined for the range boundaries, instead test for overflow by
+ * reverse-conversion.
*/
result = (int64) arg;
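The reverse-conversion test reads more clearly in isolation. A hedged sketch; note that, strictly speaking, converting an out-of-range double to int64 is not well defined by ISO C, and the backend simply relies on the platform behaving sanely here:

#include <math.h>
#include <stdbool.h>
#include <stdint.h>

/* Does the double fit in an int64 after rounding?  If so, store it. */
static bool
double_fits_int64(double arg, int64_t *result)
{
    double      rounded = rint(arg);
    int64_t     candidate = (int64_t) rounded;

    if ((double) candidate != rounded)
        return false;           /* round trip failed: out of range */
    *result = candidate;
    return true;
}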
@@ -1066,9 +1076,9 @@ ftoi8(PG_FUNCTION_ARGS)
darg = rint(arg);
/*
- * Does it fit in an int64? Avoid assuming that we have handy
- * constants defined for the range boundaries, instead test for
- * overflow by reverse-conversion.
+ * Does it fit in an int64? Avoid assuming that we have handy constants
+ * defined for the range boundaries, instead test for overflow by
+ * reverse-conversion.
*/
result = (int64) darg;
@@ -1183,8 +1193,7 @@ generate_series_step_int8(PG_FUNCTION_ARGS)
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -1207,8 +1216,7 @@ generate_series_step_int8(PG_FUNCTION_ARGS)
funcctx = SRF_PERCALL_SETUP();
/*
- * get the saved state and use current as the result for this
- * iteration
+ * get the saved state and use current as the result for this iteration
*/
fctx = funcctx->user_fctx;
result = fctx->current;
diff --git a/src/backend/utils/adt/like.c b/src/backend/utils/adt/like.c
index 1e84474c2ae..4bf2cd33872 100644
--- a/src/backend/utils/adt/like.c
+++ b/src/backend/utils/adt/like.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/like.c,v 1.61 2005/09/24 17:53:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/like.c,v 1.62 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -28,13 +28,13 @@
#define LIKE_ABORT (-1)
-static int MatchText(char *t, int tlen, char *p, int plen);
-static int MatchTextIC(char *t, int tlen, char *p, int plen);
-static int MatchBytea(char *t, int tlen, char *p, int plen);
+static int MatchText(char *t, int tlen, char *p, int plen);
+static int MatchTextIC(char *t, int tlen, char *p, int plen);
+static int MatchBytea(char *t, int tlen, char *p, int plen);
static text *do_like_escape(text *, text *);
-static int MBMatchText(char *t, int tlen, char *p, int plen);
-static int MBMatchTextIC(char *t, int tlen, char *p, int plen);
+static int MBMatchText(char *t, int tlen, char *p, int plen);
+static int MBMatchTextIC(char *t, int tlen, char *p, int plen);
static text *MB_do_like_escape(text *, text *);
/*--------------------
@@ -48,7 +48,7 @@ wchareq(char *p1, char *p2)
int p1_len;
/* Optimization: quickly compare the first byte. */
- if(*p1 != *p2)
+ if (*p1 != *p2)
return (0);
p1_len = pg_mblen(p1);
@@ -80,15 +80,15 @@ iwchareq(char *p1, char *p2)
int l;
/*
- * short cut. if *p1 and *p2 is lower than CHARMAX, then we could
- * assume they are ASCII
+ * short cut: if *p1 and *p2 are both lower than CHARMAX, we can assume
+ * they are ASCII
*/
if ((unsigned char) *p1 < CHARMAX && (unsigned char) *p2 < CHARMAX)
return (tolower((unsigned char) *p1) == tolower((unsigned char) *p2));
/*
- * if one of them is an ASCII while the other is not, then they must
- * be different characters
+ * if one of them is ASCII while the other is not, then they must be
+ * different characters
*/
else if ((unsigned char) *p1 < CHARMAX || (unsigned char) *p2 < CHARMAX)
return (0);
@@ -452,7 +452,7 @@ like_escape_bytea(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
errmsg("invalid escape string"),
- errhint("Escape string must be empty or one character.")));
+ errhint("Escape string must be empty or one character.")));
e = VARDATA(esc);
@@ -466,9 +466,9 @@ like_escape_bytea(PG_FUNCTION_ARGS)
}
/*
- * Otherwise, convert occurrences of the specified escape
- * character to '\', and double occurrences of '\' --- unless they
- * immediately follow an escape character!
+ * Otherwise, convert occurrences of the specified escape character to
+ * '\', and double occurrences of '\' --- unless they immediately
+ * follow an escape character!
*/
afterescape = false;
while (plen > 0)
@@ -530,8 +530,8 @@ MatchBytea(char *t, int tlen, char *p, int plen)
return LIKE_TRUE;
/*
- * Otherwise, scan for a text position at which we can match
- * the rest of the pattern.
+ * Otherwise, scan for a text position at which we can match the
+ * rest of the pattern.
*/
while (tlen > 0)
{
@@ -551,16 +551,16 @@ MatchBytea(char *t, int tlen, char *p, int plen)
}
/*
- * End of text with no match, so no point in trying later
- * places to start matching this pattern.
+ * End of text with no match, so no point in trying later places
+ * to start matching this pattern.
*/
return LIKE_ABORT;
}
else if ((*p != '_') && !BYTEA_CHAREQ(t, p))
{
/*
- * Not the single-character wildcard and no explicit match?
- * Then time to quit...
+ * Not the single-character wildcard and no explicit match? Then
+ * time to quit...
*/
return LIKE_FALSE;
}
@@ -580,8 +580,8 @@ MatchBytea(char *t, int tlen, char *p, int plen)
return LIKE_TRUE;
/*
- * End of text with no match, so no point in trying later places to
- * start matching this pattern.
+ * End of text with no match, so no point in trying later places to start
+ * matching this pattern.
*/
return LIKE_ABORT;
} /* MatchBytea() */
diff --git a/src/backend/utils/adt/like_match.c b/src/backend/utils/adt/like_match.c
index 94ad7997610..dc78e89f951 100644
--- a/src/backend/utils/adt/like_match.c
+++ b/src/backend/utils/adt/like_match.c
@@ -19,7 +19,7 @@
* Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/like_match.c,v 1.11 2005/09/24 17:53:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/like_match.c,v 1.12 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -97,8 +97,8 @@ MatchText(char *t, int tlen, char *p, int plen)
return LIKE_TRUE;
/*
- * Otherwise, scan for a text position at which we can match
- * the rest of the pattern.
+ * Otherwise, scan for a text position at which we can match the
+ * rest of the pattern.
*/
while (tlen > 0)
{
@@ -118,16 +118,16 @@ MatchText(char *t, int tlen, char *p, int plen)
}
/*
- * End of text with no match, so no point in trying later
- * places to start matching this pattern.
+ * End of text with no match, so no point in trying later places
+ * to start matching this pattern.
*/
return LIKE_ABORT;
}
else if ((*p != '_') && !CHAREQ(t, p))
{
/*
- * Not the single-character wildcard and no explicit match?
- * Then time to quit...
+ * Not the single-character wildcard and no explicit match? Then
+ * time to quit...
*/
return LIKE_FALSE;
}
@@ -147,8 +147,8 @@ MatchText(char *t, int tlen, char *p, int plen)
return LIKE_TRUE;
/*
- * End of text with no match, so no point in trying later places to
- * start matching this pattern.
+ * End of text with no match, so no point in trying later places to start
+ * matching this pattern.
*/
return LIKE_ABORT;
} /* MatchText() */
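The control flow of MatchText is easier to see without the multibyte and length bookkeeping. Below is a toy, ASCII-only matcher for '%' and '_' (no escape handling; names invented) that uses the same strategy: on '%', try every later starting position, and give up once the text is exhausted. The toy version collapses the LIKE_FALSE / LIKE_ABORT distinction into a single false result.

#include <stdbool.h>

static bool
like_match(const char *t, const char *p)
{
    for (; *p; t++, p++)
    {
        if (*p == '%')
        {
            /* collapse consecutive wildcards; '_' still consumes one char */
            while (*p == '%' || *p == '_')
            {
                if (*p == '_' && *t++ == '\0')
                    return false;
                p++;
            }
            if (*p == '\0')
                return true;    /* trailing '%' matches the rest */
            for (; *t; t++)
                if (like_match(t, p))
                    return true;
            return false;       /* no later starting position works */
        }
        if (*t == '\0')
            return false;       /* text ran out before the pattern */
        if (*p != '_' && *p != *t)
            return false;       /* no wildcard and no literal match */
    }
    return *t == '\0';          /* both exhausted: match */
}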
@@ -183,8 +183,8 @@ MatchTextIC(char *t, int tlen, char *p, int plen)
return LIKE_TRUE;
/*
- * Otherwise, scan for a text position at which we can match
- * the rest of the pattern.
+ * Otherwise, scan for a text position at which we can match the
+ * rest of the pattern.
*/
while (tlen > 0)
{
@@ -204,16 +204,16 @@ MatchTextIC(char *t, int tlen, char *p, int plen)
}
/*
- * End of text with no match, so no point in trying later
- * places to start matching this pattern.
+ * End of text with no match, so no point in trying later places
+ * to start matching this pattern.
*/
return LIKE_ABORT;
}
else if ((*p != '_') && !ICHAREQ(t, p))
{
/*
- * Not the single-character wildcard and no explicit match?
- * Then time to quit...
+ * Not the single-character wildcard and no explicit match? Then
+ * time to quit...
*/
return LIKE_FALSE;
}
@@ -233,8 +233,8 @@ MatchTextIC(char *t, int tlen, char *p, int plen)
return LIKE_TRUE;
/*
- * End of text with no match, so no point in trying later places to
- * start matching this pattern.
+ * End of text with no match, so no point in trying later places to start
+ * matching this pattern.
*/
return LIKE_ABORT;
} /* MatchTextIC() */
@@ -289,7 +289,7 @@ do_like_escape(text *pat, text *esc)
ereport(ERROR,
(errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
errmsg("invalid escape string"),
- errhint("Escape string must be empty or one character.")));
+ errhint("Escape string must be empty or one character.")));
e = VARDATA(esc);
@@ -303,9 +303,9 @@ do_like_escape(text *pat, text *esc)
}
/*
- * Otherwise, convert occurrences of the specified escape
- * character to '\', and double occurrences of '\' --- unless they
- * immediately follow an escape character!
+ * Otherwise, convert occurrences of the specified escape character to
+ * '\', and double occurrences of '\' --- unless they immediately
+ * follow an escape character!
*/
afterescape = false;
while (plen > 0)
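The conversion loop that follows implements exactly the rule stated in this comment: the user's escape character becomes '\', and literal backslashes are doubled unless they immediately follow an escape. A standalone sketch of that rewriting (the caller must supply a buffer of at least twice the pattern length plus one):

#include <stdbool.h>

static void
rewrite_escape(const char *pat, char esc, char *out)
{
    bool        afterescape = false;

    while (*pat)
    {
        if (*pat == esc && !afterescape)
        {
            *out++ = '\\';
            afterescape = true;
        }
        else if (*pat == '\\' && !afterescape)
        {
            *out++ = '\\';
            *out++ = '\\';
        }
        else
        {
            *out++ = *pat;
            afterescape = false;
        }
        pat++;
    }
    *out = '\0';
}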
diff --git a/src/backend/utils/adt/lockfuncs.c b/src/backend/utils/adt/lockfuncs.c
index 0bdf918e475..bf7ee788c42 100644
--- a/src/backend/utils/adt/lockfuncs.c
+++ b/src/backend/utils/adt/lockfuncs.c
@@ -6,7 +6,7 @@
* Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/lockfuncs.c,v 1.19 2005/06/18 19:33:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/lockfuncs.c,v 1.20 2005/10/15 02:49:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -21,7 +21,7 @@
/* This must match enum LockTagType! */
-static const char * const LockTagTypeNames[] = {
+static const char *const LockTagTypeNames[] = {
"relation",
"extend",
"page",
@@ -57,8 +57,7 @@ pg_lock_status(PG_FUNCTION_ARGS)
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
@@ -95,8 +94,8 @@ pg_lock_status(PG_FUNCTION_ARGS)
funcctx->tuple_desc = BlessTupleDesc(tupdesc);
/*
- * Collect all the locking information that we will format and
- * send out as a result set.
+ * Collect all the locking information that we will format and send
+ * out as a result set.
*/
mystatus = (PG_Lock_Status *) palloc(sizeof(PG_Lock_Status));
funcctx->user_fctx = (void *) mystatus;
@@ -130,9 +129,9 @@ pg_lock_status(PG_FUNCTION_ARGS)
proc = &(lockData->procs[mystatus->currIdx]);
/*
- * Look to see if there are any held lock modes in this PROCLOCK.
- * If so, report, and destructively modify lockData so we don't
- * report again.
+ * Look to see if there are any held lock modes in this PROCLOCK. If
+ * so, report, and destructively modify lockData so we don't report
+ * again.
*/
granted = false;
if (proclock->holdMask)
@@ -160,16 +159,16 @@ pg_lock_status(PG_FUNCTION_ARGS)
mode = proc->waitLockMode;
/*
- * We are now done with this PROCLOCK, so advance pointer
- * to continue with next one on next call.
+ * We are now done with this PROCLOCK, so advance pointer to
+ * continue with next one on next call.
*/
mystatus->currIdx++;
}
else
{
/*
- * Okay, we've displayed all the locks associated with
- * this PROCLOCK, proceed to the next one.
+ * Okay, we've displayed all the locks associated with this
+ * PROCLOCK, proceed to the next one.
*/
mystatus->currIdx++;
continue;
@@ -191,7 +190,7 @@ pg_lock_status(PG_FUNCTION_ARGS)
locktypename = tnbuf;
}
values[0] = DirectFunctionCall1(textin,
- CStringGetDatum(locktypename));
+ CStringGetDatum(locktypename));
switch (lock->tag.locktag_type)
@@ -257,7 +256,7 @@ pg_lock_status(PG_FUNCTION_ARGS)
else
nulls[10] = 'n';
values[11] = DirectFunctionCall1(textin,
- CStringGetDatum(GetLockmodeName(mode)));
+ CStringGetDatum(GetLockmodeName(mode)));
values[12] = BoolGetDatum(granted);
tuple = heap_formtuple(funcctx->tuple_desc, values, nulls);
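
pg_lock_status above is a textbook multi-call set-returning function: one-time state is allocated in funcctx->multi_call_memory_ctx, and each later call emits one row until SRF_RETURN_DONE. Stripped of the lock-specific parts, the protocol looks roughly like this (hypothetical count_up function; tuple building, module magic, and error handling omitted):

#include "postgres.h"
#include "fmgr.h"
#include "funcapi.h"

PG_FUNCTION_INFO_V1(count_up);

/* Returns the integers 1..n, one per call, using the SRF multi-call protocol. */
Datum
count_up(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;

    if (SRF_IS_FIRSTCALL())
    {
        MemoryContext oldcontext;

        funcctx = SRF_FIRSTCALL_INIT();
        /* switch to memory context appropriate for multiple function calls */
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
        funcctx->max_calls = PG_GETARG_UINT32(0);
        MemoryContextSwitchTo(oldcontext);
    }

    funcctx = SRF_PERCALL_SETUP();

    if (funcctx->call_cntr < funcctx->max_calls)
        SRF_RETURN_NEXT(funcctx, Int32GetDatum((int32) funcctx->call_cntr + 1));
    else
        SRF_RETURN_DONE(funcctx);
}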
diff --git a/src/backend/utils/adt/mac.c b/src/backend/utils/adt/mac.c
index 4d62c6e0250..c974b633ca1 100644
--- a/src/backend/utils/adt/mac.c
+++ b/src/backend/utils/adt/mac.c
@@ -1,7 +1,7 @@
/*
* PostgreSQL type definitions for MAC addresses.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/mac.c,v 1.34 2004/08/29 05:06:49 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/mac.c,v 1.35 2005/10/15 02:49:28 momjian Exp $
*/
#include "postgres.h"
@@ -62,14 +62,14 @@ macaddr_in(PG_FUNCTION_ARGS)
if (count != 6)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type macaddr: \"%s\"", str)));
+ errmsg("invalid input syntax for type macaddr: \"%s\"", str)));
if ((a < 0) || (a > 255) || (b < 0) || (b > 255) ||
(c < 0) || (c > 255) || (d < 0) || (d > 255) ||
(e < 0) || (e > 255) || (f < 0) || (f > 255))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("invalid octet value in \"macaddr\" value: \"%s\"", str)));
+ errmsg("invalid octet value in \"macaddr\" value: \"%s\"", str)));
result = (macaddr *) palloc(sizeof(macaddr));
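
macaddr_in above keeps two distinct failure modes — six fields must parse, and each must fit in an octet — which is why the reindented ereport calls carry different SQLSTATEs. The same validation in plain C, for the colon-separated form only (hypothetical parse_mac helper):

#include <stdio.h>
#include <stdbool.h>

/* Parse "xx:xx:xx:xx:xx:xx"; returns false on syntax or range errors. */
static bool
parse_mac(const char *str, unsigned char out[6])
{
    int         a, b, c, d, e, f;

    if (sscanf(str, "%x:%x:%x:%x:%x:%x", &a, &b, &c, &d, &e, &f) != 6)
        return false;               /* invalid input syntax */
    if (a < 0 || a > 255 || b < 0 || b > 255 || c < 0 || c > 255 ||
        d < 0 || d > 255 || e < 0 || e > 255 || f < 0 || f > 255)
        return false;               /* octet value out of range */
    out[0] = a; out[1] = b; out[2] = c;
    out[3] = d; out[4] = e; out[5] = f;
    return true;
}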
diff --git a/src/backend/utils/adt/misc.c b/src/backend/utils/adt/misc.c
index 88f776df062..14bb593c2c2 100644
--- a/src/backend/utils/adt/misc.c
+++ b/src/backend/utils/adt/misc.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/misc.c,v 1.48 2005/09/16 05:35:40 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/misc.c,v 1.49 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -79,16 +79,16 @@ pg_signal_backend(int pid, int sig)
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be superuser to signal other server processes"))));
+ (errmsg("must be superuser to signal other server processes"))));
if (!IsBackendPid(pid))
{
/*
- * This is just a warning so a loop-through-resultset will not
- * abort if one backend terminated on it's own during the run
+ * This is just a warning so a loop-through-resultset will not abort
+ * if one backend terminated on it's own during the run
*/
ereport(WARNING,
- (errmsg("PID %d is not a PostgreSQL server process", pid)));
+ (errmsg("PID %d is not a PostgreSQL server process", pid)));
return false;
}
@@ -111,7 +111,7 @@ pg_cancel_backend(PG_FUNCTION_ARGS)
Datum
pg_reload_conf(PG_FUNCTION_ARGS)
{
- if (!superuser())
+ if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to signal the postmaster"))));
@@ -133,7 +133,7 @@ pg_reload_conf(PG_FUNCTION_ARGS)
Datum
pg_rotate_logfile(PG_FUNCTION_ARGS)
{
- if (!superuser())
+ if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to rotate log files"))));
@@ -141,7 +141,7 @@ pg_rotate_logfile(PG_FUNCTION_ARGS)
if (!Redirect_stderr)
{
ereport(WARNING,
- (errmsg("rotation not possible because log redirection not active")));
+ (errmsg("rotation not possible because log redirection not active")));
PG_RETURN_BOOL(false);
}
@@ -186,8 +186,7 @@ pg_tablespace_databases(PG_FUNCTION_ARGS)
fctx = palloc(sizeof(ts_db_fctx));
/*
- * size = tablespace dirname length + dir sep
- * char + oid + terminator
+ * size = tablespace dirname length + dir sep char + oid + terminator
*/
fctx->location = (char *) palloc(10 + 10 + 1);
if (tablespaceOid == GLOBALTABLESPACE_OID)
@@ -214,7 +213,7 @@ pg_tablespace_databases(PG_FUNCTION_ARGS)
errmsg("could not open directory \"%s\": %m",
fctx->location)));
ereport(WARNING,
- (errmsg("%u is not a tablespace OID", tablespaceOid)));
+ (errmsg("%u is not a tablespace OID", tablespaceOid)));
}
}
funcctx->user_fctx = fctx;
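
The rewrapped comment documents the palloc(10 + 10 + 1) sizing: a tablespace directory name plus its separator (for instance "pg_tblspc/") is 10 bytes, an unsigned 32-bit OID prints as at most 10 decimal digits (4294967295), and one byte remains for the terminator. As a rough illustration only (hypothetical helper; the exact path strings are the backend's business):

#include <stdio.h>

/* "pg_tblspc/" is 10 bytes, a uint32 OID needs at most 10 digits, plus '\0'. */
static void
tablespace_path(unsigned int tablespace_oid, char path[10 + 10 + 1])
{
    snprintf(path, 10 + 10 + 1, "pg_tblspc/%u", tablespace_oid);
}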
diff --git a/src/backend/utils/adt/nabstime.c b/src/backend/utils/adt/nabstime.c
index 148ee0abb1c..40e7522b879 100644
--- a/src/backend/utils/adt/nabstime.c
+++ b/src/backend/utils/adt/nabstime.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/nabstime.c,v 1.144 2005/10/14 11:47:57 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/nabstime.c,v 1.145 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -77,11 +77,11 @@
* Function prototypes -- internal to this file only
*/
-static AbsoluteTime tm2abstime(struct pg_tm *tm, int tz);
-static void reltime2tm(RelativeTime time, struct pg_tm *tm);
+static AbsoluteTime tm2abstime(struct pg_tm * tm, int tz);
+static void reltime2tm(RelativeTime time, struct pg_tm * tm);
static void parsetinterval(char *i_string,
- AbsoluteTime *i_start,
- AbsoluteTime *i_end);
+ AbsoluteTime *i_start,
+ AbsoluteTime *i_end);
/*
@@ -100,21 +100,21 @@ GetCurrentAbsoluteTime(void)
void
-abstime2tm(AbsoluteTime _time, int *tzp, struct pg_tm *tm, char **tzn)
+abstime2tm(AbsoluteTime _time, int *tzp, struct pg_tm * tm, char **tzn)
{
pg_time_t time = (pg_time_t) _time;
struct pg_tm *tx;
/*
- * If HasCTZSet is true then we have a brute force time zone
- * specified. Go ahead and rotate to the local time zone since we will
- * later bypass any calls which adjust the tm fields.
+ * If HasCTZSet is true then we have a brute force time zone specified. Go
+ * ahead and rotate to the local time zone since we will later bypass any
+ * calls which adjust the tm fields.
*/
if (HasCTZSet && (tzp != NULL))
time -= CTimeZone;
if (!HasCTZSet && tzp != NULL)
- tx = pg_localtime(&time,global_timezone);
+ tx = pg_localtime(&time, global_timezone);
else
tx = pg_gmtime(&time);
@@ -156,8 +156,8 @@ abstime2tm(AbsoluteTime _time, int *tzp, struct pg_tm *tm, char **tzn)
{
/*
* Copy no more than MAXTZLEN bytes of timezone to tzn, in
- * case it contains an error message, which doesn't fit in
- * the buffer
+ * case it contains an error message, which doesn't fit in the
+ * buffer
*/
StrNCpy(*tzn, tm->tm_zone, MAXTZLEN + 1);
if (strlen(tm->tm_zone) > MAXTZLEN)
@@ -178,7 +178,7 @@ abstime2tm(AbsoluteTime _time, int *tzp, struct pg_tm *tm, char **tzn)
* Note that tm has full year (not 1900-based) and 1-based month.
*/
static AbsoluteTime
-tm2abstime(struct pg_tm *tm, int tz)
+tm2abstime(struct pg_tm * tm, int tz)
{
int day;
AbsoluteTime sec;
@@ -188,7 +188,7 @@ tm2abstime(struct pg_tm *tm, int tz)
tm->tm_mon < 1 || tm->tm_mon > 12 ||
tm->tm_mday < 1 || tm->tm_mday > 31 ||
tm->tm_hour < 0 ||
- tm->tm_hour > 24 || /* test for > 24:00:00 */
+ tm->tm_hour > 24 || /* test for > 24:00:00 */
(tm->tm_hour == 24 && (tm->tm_min > 0 || tm->tm_sec > 0)) ||
tm->tm_min < 0 || tm->tm_min > 59 ||
tm->tm_sec < 0 || tm->tm_sec > 60)
@@ -204,11 +204,11 @@ tm2abstime(struct pg_tm *tm, int tz)
sec = tm->tm_sec + tz + (tm->tm_min + (day * HOURS_PER_DAY + tm->tm_hour) * MINS_PER_HOUR) * SECS_PER_MINUTE;
/*
- * check for overflow. We need a little slop here because the H/M/S plus
+ * check for overflow. We need a little slop here because the H/M/S plus
* TZ offset could add up to more than 1 day.
*/
- if ((day >= MAX_DAYNUM-10 && sec < 0) ||
- (day <= MIN_DAYNUM+10 && sec > 0))
+ if ((day >= MAX_DAYNUM - 10 && sec < 0) ||
+ (day <= MIN_DAYNUM + 10 && sec > 0))
return INVALID_ABSTIME;
/* check for reserved values (e.g. "current" on edge of usual range */
@@ -254,8 +254,8 @@ abstimein(PG_FUNCTION_ARGS)
case DTK_EPOCH:
/*
- * Don't bother retaining this as a reserved value, but
- * instead just set to the actual epoch time (1970-01-01)
+ * Don't bother retaining this as a reserved value, but instead
+ * just set to the actual epoch time (1970-01-01)
*/
result = 0;
break;
@@ -370,9 +370,9 @@ static int
abstime_cmp_internal(AbsoluteTime a, AbsoluteTime b)
{
/*
- * We consider all INVALIDs to be equal and larger than any
- * non-INVALID. This is somewhat arbitrary; the important thing is to
- * have a consistent sort order.
+ * We consider all INVALIDs to be equal and larger than any non-INVALID.
+ * This is somewhat arbitrary; the important thing is to have a consistent
+ * sort order.
*/
if (a == INVALID_ABSTIME)
{
@@ -463,7 +463,7 @@ btabstimecmp(PG_FUNCTION_ARGS)
Datum
timestamp_abstime(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
AbsoluteTime result;
fsec_t fsec;
int tz;
@@ -509,7 +509,7 @@ abstime_timestamp(PG_FUNCTION_ARGS)
case INVALID_ABSTIME:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert abstime \"invalid\" to timestamp")));
+ errmsg("cannot convert abstime \"invalid\" to timestamp")));
TIMESTAMP_NOBEGIN(result);
break;
@@ -582,7 +582,7 @@ abstime_timestamptz(PG_FUNCTION_ARGS)
case INVALID_ABSTIME:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert abstime \"invalid\" to timestamp")));
+ errmsg("cannot convert abstime \"invalid\" to timestamp")));
TIMESTAMP_NOBEGIN(result);
break;
@@ -703,7 +703,7 @@ reltimesend(PG_FUNCTION_ARGS)
static void
-reltime2tm(RelativeTime time, struct pg_tm *tm)
+reltime2tm(RelativeTime time, struct pg_tm * tm)
{
double dtime = time;
@@ -764,12 +764,12 @@ tintervalout(PG_FUNCTION_ARGS)
else
{
p = DatumGetCString(DirectFunctionCall1(abstimeout,
- AbsoluteTimeGetDatum(tinterval->data[0])));
+ AbsoluteTimeGetDatum(tinterval->data[0])));
strcat(i_str, p);
pfree(p);
strcat(i_str, "\" \"");
p = DatumGetCString(DirectFunctionCall1(abstimeout,
- AbsoluteTimeGetDatum(tinterval->data[1])));
+ AbsoluteTimeGetDatum(tinterval->data[1])));
strcat(i_str, p);
pfree(p);
}
@@ -788,16 +788,16 @@ tintervalrecv(PG_FUNCTION_ARGS)
tinterval = (TimeInterval) palloc(sizeof(TimeIntervalData));
- tinterval ->status = pq_getmsgint(buf, sizeof(tinterval->status));
+ tinterval->status = pq_getmsgint(buf, sizeof(tinterval->status));
if (!(tinterval->status == T_INTERVAL_INVAL ||
tinterval->status == T_INTERVAL_VALID))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid status in external \"tinterval\" value")));
+ errmsg("invalid status in external \"tinterval\" value")));
- tinterval ->data[0] = pq_getmsgint(buf, sizeof(tinterval->data[0]));
- tinterval ->data[1] = pq_getmsgint(buf, sizeof(tinterval->data[1]));
+ tinterval->data[0] = pq_getmsgint(buf, sizeof(tinterval->data[0]));
+ tinterval->data[1] = pq_getmsgint(buf, sizeof(tinterval->data[1]));
PG_RETURN_TIMEINTERVAL(tinterval);
}
@@ -844,11 +844,11 @@ interval_reltime(PG_FUNCTION_ARGS)
#ifdef HAVE_INT64_TIMESTAMP
span = ((INT64CONST(365250000) * year + INT64CONST(30000000) * month +
- INT64CONST(1000000) * day) * INT64CONST(86400)) +
- interval->time;
+ INT64CONST(1000000) * day) * INT64CONST(86400)) +
+ interval->time;
span /= USECS_PER_SEC;
#else
- span = (DAYS_PER_YEAR * year + (double)DAYS_PER_MONTH * month + day) * SECS_PER_DAY + interval->time;
+ span = (DAYS_PER_YEAR * year + (double) DAYS_PER_MONTH * month + day) * SECS_PER_DAY + interval->time;
#endif
if (span < INT_MIN || span > INT_MAX)
@@ -876,7 +876,7 @@ reltime_interval(PG_FUNCTION_ARGS)
case INVALID_RELTIME:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert reltime \"invalid\" to interval")));
+ errmsg("cannot convert reltime \"invalid\" to interval")));
result->time = 0;
result->day = 0;
result->month = 0;
@@ -954,7 +954,7 @@ timepl(PG_FUNCTION_ARGS)
if (AbsoluteTimeIsReal(t1) &&
RelativeTimeIsValid(t2) &&
((t2 > 0 && t1 < NOEND_ABSTIME - t2) ||
- (t2 <= 0 && t1 > NOSTART_ABSTIME - t2))) /* prevent overflow */
+ (t2 <= 0 && t1 > NOSTART_ABSTIME - t2))) /* prevent overflow */
PG_RETURN_ABSOLUTETIME(t1 + t2);
PG_RETURN_ABSOLUTETIME(INVALID_ABSTIME);
@@ -973,7 +973,7 @@ timemi(PG_FUNCTION_ARGS)
if (AbsoluteTimeIsReal(t1) &&
RelativeTimeIsValid(t2) &&
((t2 > 0 && t1 > NOSTART_ABSTIME + t2) ||
- (t2 <= 0 && t1 < NOEND_ABSTIME + t2))) /* prevent overflow */
+ (t2 <= 0 && t1 < NOEND_ABSTIME + t2))) /* prevent overflow */
PG_RETURN_ABSOLUTETIME(t1 - t2);
PG_RETURN_ABSOLUTETIME(INVALID_ABSTIME);
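
Both timepl and timemi above guard the arithmetic with a /* prevent overflow */ pre-check: the addition or subtraction is only performed once the operand is known to stay inside the sentinel range. The same idiom for plain int, checked without ever performing the risky operation (hypothetical helper):

#include <limits.h>
#include <stdbool.h>

/* Would a + b overflow a plain int?  Decided before doing the addition. */
static bool
add_overflows(int a, int b)
{
    if (b > 0)
        return a > INT_MAX - b;
    return a < INT_MIN - b;
}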
@@ -993,10 +993,10 @@ intinterval(PG_FUNCTION_ARGS)
{
if (DatumGetBool(DirectFunctionCall2(abstimege,
AbsoluteTimeGetDatum(t),
- AbsoluteTimeGetDatum(tinterval->data[0]))) &&
+ AbsoluteTimeGetDatum(tinterval->data[0]))) &&
DatumGetBool(DirectFunctionCall2(abstimele,
AbsoluteTimeGetDatum(t),
- AbsoluteTimeGetDatum(tinterval->data[1]))))
+ AbsoluteTimeGetDatum(tinterval->data[1]))))
PG_RETURN_BOOL(true);
}
PG_RETURN_BOOL(false);
@@ -1046,9 +1046,9 @@ static int
reltime_cmp_internal(RelativeTime a, RelativeTime b)
{
/*
- * We consider all INVALIDs to be equal and larger than any
- * non-INVALID. This is somewhat arbitrary; the important thing is to
- * have a consistent sort order.
+ * We consider all INVALIDs to be equal and larger than any non-INVALID.
+ * This is somewhat arbitrary; the important thing is to have a consistent
+ * sort order.
*/
if (a == INVALID_RELTIME)
{
@@ -1147,11 +1147,11 @@ tintervalsame(PG_FUNCTION_ARGS)
PG_RETURN_BOOL(false);
if (DatumGetBool(DirectFunctionCall2(abstimeeq,
- AbsoluteTimeGetDatum(i1->data[0]),
- AbsoluteTimeGetDatum(i2->data[0]))) &&
+ AbsoluteTimeGetDatum(i1->data[0]),
+ AbsoluteTimeGetDatum(i2->data[0]))) &&
DatumGetBool(DirectFunctionCall2(abstimeeq,
- AbsoluteTimeGetDatum(i1->data[1]),
- AbsoluteTimeGetDatum(i2->data[1]))))
+ AbsoluteTimeGetDatum(i1->data[1]),
+ AbsoluteTimeGetDatum(i2->data[1]))))
PG_RETURN_BOOL(true);
PG_RETURN_BOOL(false);
}
@@ -1172,16 +1172,16 @@ tinterval_cmp_internal(TimeInterval a, TimeInterval b)
AbsoluteTime b_len;
/*
- * We consider all INVALIDs to be equal and larger than any
- * non-INVALID. This is somewhat arbitrary; the important thing is to
- * have a consistent sort order.
+ * We consider all INVALIDs to be equal and larger than any non-INVALID.
+ * This is somewhat arbitrary; the important thing is to have a consistent
+ * sort order.
*/
a_invalid = a->status == T_INTERVAL_INVAL ||
- a->data[0] == INVALID_ABSTIME ||
- a->data[1] == INVALID_ABSTIME;
+ a->data[0] == INVALID_ABSTIME ||
+ a->data[1] == INVALID_ABSTIME;
b_invalid = b->status == T_INTERVAL_INVAL ||
- b->data[0] == INVALID_ABSTIME ||
- b->data[1] == INVALID_ABSTIME;
+ b->data[0] == INVALID_ABSTIME ||
+ b->data[1] == INVALID_ABSTIME;
if (a_invalid)
{
@@ -1293,7 +1293,7 @@ tintervalleneq(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt == t);
}
@@ -1307,7 +1307,7 @@ tintervallenne(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt != t);
}
@@ -1321,7 +1321,7 @@ tintervallenlt(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt < t);
}
@@ -1335,7 +1335,7 @@ tintervallengt(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt > t);
}
@@ -1349,7 +1349,7 @@ tintervallenle(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt <= t);
}
@@ -1363,7 +1363,7 @@ tintervallenge(PG_FUNCTION_ARGS)
if (i->status == T_INTERVAL_INVAL || t == INVALID_RELTIME)
PG_RETURN_BOOL(false);
rt = DatumGetRelativeTime(DirectFunctionCall1(tintervalrel,
- TimeIntervalGetDatum(i)));
+ TimeIntervalGetDatum(i)));
PG_RETURN_BOOL(rt != INVALID_RELTIME && rt >= t);
}
@@ -1379,11 +1379,11 @@ tintervalct(PG_FUNCTION_ARGS)
if (i1->status == T_INTERVAL_INVAL || i2->status == T_INTERVAL_INVAL)
PG_RETURN_BOOL(false);
if (DatumGetBool(DirectFunctionCall2(abstimele,
- AbsoluteTimeGetDatum(i1->data[0]),
- AbsoluteTimeGetDatum(i2->data[0]))) &&
+ AbsoluteTimeGetDatum(i1->data[0]),
+ AbsoluteTimeGetDatum(i2->data[0]))) &&
DatumGetBool(DirectFunctionCall2(abstimege,
- AbsoluteTimeGetDatum(i1->data[1]),
- AbsoluteTimeGetDatum(i2->data[1]))))
+ AbsoluteTimeGetDatum(i1->data[1]),
+ AbsoluteTimeGetDatum(i2->data[1]))))
PG_RETURN_BOOL(true);
PG_RETURN_BOOL(false);
}
@@ -1400,11 +1400,11 @@ tintervalov(PG_FUNCTION_ARGS)
if (i1->status == T_INTERVAL_INVAL || i2->status == T_INTERVAL_INVAL)
PG_RETURN_BOOL(false);
if (DatumGetBool(DirectFunctionCall2(abstimelt,
- AbsoluteTimeGetDatum(i1->data[1]),
- AbsoluteTimeGetDatum(i2->data[0]))) ||
+ AbsoluteTimeGetDatum(i1->data[1]),
+ AbsoluteTimeGetDatum(i2->data[0]))) ||
DatumGetBool(DirectFunctionCall2(abstimegt,
- AbsoluteTimeGetDatum(i1->data[0]),
- AbsoluteTimeGetDatum(i2->data[1]))))
+ AbsoluteTimeGetDatum(i1->data[0]),
+ AbsoluteTimeGetDatum(i2->data[1]))))
PG_RETURN_BOOL(false);
PG_RETURN_BOOL(true);
}
@@ -1492,8 +1492,7 @@ parsetinterval(char *i_string,
goto bogus; /* syntax error */
p++;
if (strncmp(INVALID_INTERVAL_STR, p, strlen(INVALID_INTERVAL_STR)) == 0)
- goto bogus; /* undefined range, handled like a syntax
- * err. */
+ goto bogus; /* undefined range, handled like a syntax err. */
/* search for the end of the first date and change it to a \0 */
p1 = p;
while ((c = *p1) != '\0')
@@ -1507,7 +1506,7 @@ parsetinterval(char *i_string,
*p1 = '\0';
/* get the first date */
*i_start = DatumGetAbsoluteTime(DirectFunctionCall1(abstimein,
- CStringGetDatum(p)));
+ CStringGetDatum(p)));
/* undo change to \0 */
*p1 = c;
p = ++p1;
@@ -1537,7 +1536,7 @@ parsetinterval(char *i_string,
*p1 = '\0';
/* get the second date */
*i_end = DatumGetAbsoluteTime(DirectFunctionCall1(abstimein,
- CStringGetDatum(p)));
+ CStringGetDatum(p)));
/* undo change to \0 */
*p1 = c;
p = ++p1;
@@ -1566,7 +1565,7 @@ bogus:
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
errmsg("invalid input syntax for type tinterval: \"%s\"",
i_string)));
- *i_start = *i_end = INVALID_ABSTIME; /* keep compiler quiet */
+ *i_start = *i_end = INVALID_ABSTIME; /* keep compiler quiet */
}
@@ -1595,7 +1594,7 @@ timeofday(PG_FUNCTION_ARGS)
gettimeofday(&tp, &tpz);
tt = (pg_time_t) tp.tv_sec;
pg_strftime(templ, sizeof(templ), "%a %b %d %H:%M:%S.%%06d %Y %Z",
- pg_localtime(&tt,global_timezone));
+ pg_localtime(&tt, global_timezone));
snprintf(buf, sizeof(buf), templ, tp.tv_usec);
len = VARHDRSZ + strlen(buf);
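
timeofday() above formats in two passes: pg_strftime expands everything except the microseconds, leaving a literal "%06d" behind thanks to the escaped "%%06d", and snprintf then splices tv_usec into that intermediate template. The same trick with the standard C library (illustrative sketch):

#include <stdio.h>
#include <sys/time.h>
#include <time.h>

/* Format the current time with microseconds, strftime-then-snprintf style. */
static void
format_now(char *buf, size_t buflen)
{
    struct timeval tp;
    char        templ[128];
    time_t      tt;

    gettimeofday(&tp, NULL);
    tt = tp.tv_sec;
    /* "%%06d" survives strftime as a literal "%06d" conversion for snprintf */
    strftime(templ, sizeof(templ), "%a %b %d %H:%M:%S.%%06d %Y %Z",
             localtime(&tt));
    snprintf(buf, buflen, templ, (int) tp.tv_usec);
}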
diff --git a/src/backend/utils/adt/name.c b/src/backend/utils/adt/name.c
index 1200ad9b34c..0a52dcfec66 100644
--- a/src/backend/utils/adt/name.c
+++ b/src/backend/utils/adt/name.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/name.c,v 1.55 2004/12/31 22:01:22 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/name.c,v 1.56 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -258,8 +258,8 @@ namecpy(Name n1, Name n2)
int
namecat(Name n1, Name n2)
{
- return namestrcat(n1, NameStr(*n2)); /* n2 can't be any longer
- * than n1 */
+ return namestrcat(n1, NameStr(*n2)); /* n2 can't be any longer than
+ * n1 */
}
#endif
diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c
index dc83d7028c5..17403c5f33c 100644
--- a/src/backend/utils/adt/network.c
+++ b/src/backend/utils/adt/network.c
@@ -1,7 +1,7 @@
/*
* PostgreSQL type definitions for the INET and CIDR types.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/network.c,v 1.54 2004/10/08 01:10:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/network.c,v 1.55 2005/10/15 02:49:29 momjian Exp $
*
* Jon Postel RIP 16 Oct 1998
*/
@@ -74,9 +74,9 @@ network_in(char *src, int type)
dst = (inet *) palloc0(VARHDRSZ + sizeof(inet_struct));
/*
- * First, check to see if this is an IPv6 or IPv4 address. IPv6
- * addresses will have a : somewhere in them (several, in fact) so if
- * there is one present, assume it's V6, otherwise assume it's V4.
+ * First, check to see if this is an IPv6 or IPv4 address. IPv6 addresses
+ * will have a : somewhere in them (several, in fact) so if there is one
+ * present, assume it's V6, otherwise assume it's V4.
*/
if (strchr(src, ':') != NULL)
@@ -94,8 +94,7 @@ network_in(char *src, int type)
type ? "cidr" : "inet", src)));
/*
- * Error check: CIDR values must not have any bits set beyond the
- * masklen.
+ * Error check: CIDR values must not have any bits set beyond the masklen.
*/
if (type)
{
@@ -195,7 +194,7 @@ inet_recv(PG_FUNCTION_ARGS)
ip_family(addr) != PGSQL_AF_INET6)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid address family in external \"inet\" value")));
+ errmsg("invalid address family in external \"inet\" value")));
bits = pq_getmsgbyte(buf);
if (bits < 0 || bits > ip_maxbits(addr))
ereport(ERROR,
@@ -221,8 +220,7 @@ inet_recv(PG_FUNCTION_ARGS)
addrptr[i] = pq_getmsgbyte(buf);
/*
- * Error check: CIDR values must not have any bits set beyond the
- * masklen.
+ * Error check: CIDR values must not have any bits set beyond the masklen.
*/
if (ip_type(addr))
{
@@ -457,7 +455,7 @@ network_sub(PG_FUNCTION_ARGS)
if (ip_family(a1) == ip_family(a2))
{
PG_RETURN_BOOL(ip_bits(a1) > ip_bits(a2)
- && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a2)) == 0);
+ && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a2)) == 0);
}
PG_RETURN_BOOL(false);
@@ -472,7 +470,7 @@ network_subeq(PG_FUNCTION_ARGS)
if (ip_family(a1) == ip_family(a2))
{
PG_RETURN_BOOL(ip_bits(a1) >= ip_bits(a2)
- && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a2)) == 0);
+ && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a2)) == 0);
}
PG_RETURN_BOOL(false);
@@ -487,7 +485,7 @@ network_sup(PG_FUNCTION_ARGS)
if (ip_family(a1) == ip_family(a2))
{
PG_RETURN_BOOL(ip_bits(a1) < ip_bits(a2)
- && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a1)) == 0);
+ && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a1)) == 0);
}
PG_RETURN_BOOL(false);
@@ -502,7 +500,7 @@ network_supeq(PG_FUNCTION_ARGS)
if (ip_family(a1) == ip_family(a2))
{
PG_RETURN_BOOL(ip_bits(a1) <= ip_bits(a2)
- && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a1)) == 0);
+ && bitncmp(ip_addr(a1), ip_addr(a2), ip_bits(a1)) == 0);
}
PG_RETURN_BOOL(false);
@@ -870,8 +868,8 @@ convert_network_to_scalar(Datum value, Oid typid)
}
/*
- * Can't get here unless someone tries to use scalarltsel/scalargtsel
- * on an operator with one network and one non-network operand.
+ * Can't get here unless someone tries to use scalarltsel/scalargtsel on
+ * an operator with one network and one non-network operand.
*/
elog(ERROR, "unsupported type: %u", typid);
return 0;
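
network_sub and its siblings above test containment by comparing masklens and then only the first ip_bits(...) bits of the two addresses (bitncmp(...) == 0). A standalone sketch of such a prefix comparison over raw address bytes (hypothetical prefix_cmp; the backend's own bitncmp handles both IPv4 and IPv6 this way):

#include <string.h>

/* Compare the first n bits of two address byte strings (0 when equal). */
static int
prefix_cmp(const unsigned char *l, const unsigned char *r, int n)
{
    int         bytes = n / 8;
    int         bits = n % 8;
    int         cmp = memcmp(l, r, bytes);

    if (cmp != 0 || bits == 0)
        return cmp;

    /* compare the leading 'bits' bits of the next byte */
    {
        unsigned char mask = (unsigned char) (0xFF << (8 - bits));

        return (int) (l[bytes] & mask) - (int) (r[bytes] & mask);
    }
}

With that helper, "a1 is a strict subnet of a2" is bits1 > bits2 together with prefix_cmp(addr1, addr2, bits2) == 0, mirroring the expressions above.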
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index 4aa631ee577..a8becf990d1 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -14,7 +14,7 @@
* Copyright (c) 1998-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.85 2005/07/10 21:13:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.86 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -131,8 +131,7 @@ typedef struct NumericVar
{
int ndigits; /* # of digits in digits[] - can be 0! */
int weight; /* weight of first digit */
- int sign; /* NUMERIC_POS, NUMERIC_NEG, or
- * NUMERIC_NAN */
+ int sign; /* NUMERIC_POS, NUMERIC_NEG, or NUMERIC_NAN */
int dscale; /* display scale */
NumericDigit *buf; /* start of palloc'd space for digits[] */
NumericDigit *digits; /* base-NBASE digits */
@@ -157,10 +156,8 @@ static NumericVar const_two =
#if DEC_DIGITS == 4
static NumericDigit const_zero_point_five_data[1] = {5000};
-
#elif DEC_DIGITS == 2
static NumericDigit const_zero_point_five_data[1] = {50};
-
#elif DEC_DIGITS == 1
static NumericDigit const_zero_point_five_data[1] = {5};
#endif
@@ -169,10 +166,8 @@ static NumericVar const_zero_point_five =
#if DEC_DIGITS == 4
static NumericDigit const_zero_point_nine_data[1] = {9000};
-
#elif DEC_DIGITS == 2
static NumericDigit const_zero_point_nine_data[1] = {90};
-
#elif DEC_DIGITS == 1
static NumericDigit const_zero_point_nine_data[1] = {9};
#endif
@@ -183,12 +178,10 @@ static NumericVar const_zero_point_nine =
static NumericDigit const_zero_point_01_data[1] = {100};
static NumericVar const_zero_point_01 =
{1, -1, NUMERIC_POS, 2, NULL, const_zero_point_01_data};
-
#elif DEC_DIGITS == 2
static NumericDigit const_zero_point_01_data[1] = {1};
static NumericVar const_zero_point_01 =
{1, -1, NUMERIC_POS, 2, NULL, const_zero_point_01_data};
-
#elif DEC_DIGITS == 1
static NumericDigit const_zero_point_01_data[1] = {1};
static NumericVar const_zero_point_01 =
@@ -197,10 +190,8 @@ static NumericVar const_zero_point_01 =
#if DEC_DIGITS == 4
static NumericDigit const_one_point_one_data[2] = {1, 1000};
-
#elif DEC_DIGITS == 2
static NumericDigit const_one_point_one_data[2] = {1, 10};
-
#elif DEC_DIGITS == 1
static NumericDigit const_one_point_one_data[2] = {1, 1};
#endif
@@ -223,7 +214,6 @@ static const int round_powers[4] = {0, 1000, 100, 10};
#ifdef NUMERIC_DEBUG
static void dump_numeric(const char *str, Numeric num);
static void dump_var(const char *str, NumericVar *var);
-
#else
#define dump_numeric(s,n)
#define dump_var(s,v)
@@ -322,8 +312,8 @@ numeric_in(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Use set_var_from_str() to parse the input string and return it in
- * the packed DB storage format
+ * Use set_var_from_str() to parse the input string and return it in the
+ * packed DB storage format
*/
init_var(&value);
set_var_from_str(str, &value);
@@ -358,10 +348,10 @@ numeric_out(PG_FUNCTION_ARGS)
/*
* Get the number in the variable format.
*
- * Even if we didn't need to change format, we'd still need to copy the
- * value to have a modifiable copy for rounding. set_var_from_num()
- * also guarantees there is extra digit space in case we produce a
- * carry out from rounding.
+ * Even if we didn't need to change format, we'd still need to copy the value
+ * to have a modifiable copy for rounding. set_var_from_num() also
+ * guarantees there is extra digit space in case we produce a carry out
+ * from rounding.
*/
init_var(&x);
set_var_from_num(num, &x);
@@ -383,6 +373,7 @@ Datum
numeric_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -419,7 +410,7 @@ numeric_recv(PG_FUNCTION_ARGS)
if (d < 0 || d >= NBASE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid digit in external \"numeric\" value")));
+ errmsg("invalid digit in external \"numeric\" value")));
value.digits[i] = d;
}
@@ -468,7 +459,7 @@ numeric_send(PG_FUNCTION_ARGS)
* scale of the attribute have to be applied on the value.
*/
Datum
-numeric (PG_FUNCTION_ARGS)
+numeric(PG_FUNCTION_ARGS)
{
Numeric num = PG_GETARG_NUMERIC(0);
int32 typmod = PG_GETARG_INT32(1);
@@ -487,8 +478,8 @@ numeric (PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * If the value isn't a valid type modifier, simply return a copy of
- * the input value
+ * If the value isn't a valid type modifier, simply return a copy of the
+ * input value
*/
if (typmod < (int32) (VARHDRSZ))
{
@@ -507,9 +498,8 @@ numeric (PG_FUNCTION_ARGS)
/*
* If the number is certainly in bounds and due to the target scale no
- * rounding could be necessary, just make a copy of the input and
- * modify its scale fields. (Note we assume the existing dscale is
- * honest...)
+ * rounding could be necessary, just make a copy of the input and modify
+ * its scale fields. (Note we assume the existing dscale is honest...)
*/
ddigits = (num->n_weight + 1) * DEC_DIGITS;
if (ddigits <= maxdigits && scale >= NUMERIC_DSCALE(num))
@@ -587,9 +577,9 @@ numeric_uminus(PG_FUNCTION_ARGS)
memcpy(res, num, num->varlen);
/*
- * The packed format is known to be totally zero digit trimmed always.
- * So we can identify a ZERO by the fact that there are no digits at
- * all. Do nothing to a zero.
+ * The packed format is known to be totally zero digit trimmed always. So
+ * we can identify a ZERO by the fact that there are no digits at all. Do
+ * nothing to a zero.
*/
if (num->varlen != NUMERIC_HDRSZ)
{
@@ -638,17 +628,16 @@ numeric_sign(PG_FUNCTION_ARGS)
init_var(&result);
/*
- * The packed format is known to be totally zero digit trimmed always.
- * So we can identify a ZERO by the fact that there are no digits at
- * all.
+ * The packed format is known to be totally zero digit trimmed always. So
+ * we can identify a ZERO by the fact that there are no digits at all.
*/
if (num->varlen == NUMERIC_HDRSZ)
set_var_from_var(&const_zero, &result);
else
{
/*
- * And if there are some, we return a copy of ONE with the sign of
- * our argument
+ * And if there are some, we return a copy of ONE with the sign of our
+ * argument
*/
set_var_from_var(&const_one, &result);
result.sign = NUMERIC_SIGN(num);
@@ -837,8 +826,8 @@ width_bucket_numeric(PG_FUNCTION_ARGS)
if (count <= 0)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
- errmsg("count must be greater than zero")));
+ (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
+ errmsg("count must be greater than zero")));
init_var(&result_var);
init_var(&count_var);
@@ -850,8 +839,8 @@ width_bucket_numeric(PG_FUNCTION_ARGS)
{
case 0:
ereport(ERROR,
- (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
- errmsg("lower bound cannot equal upper bound")));
+ (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
+ errmsg("lower bound cannot equal upper bound")));
/* bound1 < bound2 */
case -1:
@@ -1055,9 +1044,9 @@ cmp_numerics(Numeric num1, Numeric num2)
int result;
/*
- * We consider all NANs to be equal and larger than any non-NAN. This
- * is somewhat arbitrary; the important thing is to have a consistent
- * sort order.
+ * We consider all NANs to be equal and larger than any non-NAN. This is
+ * somewhat arbitrary; the important thing is to have a consistent sort
+ * order.
*/
if (NUMERIC_IS_NAN(num1))
{
@@ -1208,10 +1197,10 @@ numeric_mul(PG_FUNCTION_ARGS)
/*
* Unpack the values, let mul_var() compute the result and return it.
- * Unlike add_var() and sub_var(), mul_var() will round its result. In
- * the case of numeric_mul(), which is invoked for the * operator on
- * numerics, we request exact representation for the product (rscale =
- * sum(dscale of arg1, dscale of arg2)).
+ * Unlike add_var() and sub_var(), mul_var() will round its result. In the
+ * case of numeric_mul(), which is invoked for the * operator on numerics,
+ * we request exact representation for the product (rscale = sum(dscale of
+ * arg1, dscale of arg2)).
*/
init_var(&arg1);
init_var(&arg2);
@@ -1368,8 +1357,8 @@ numeric_smaller(PG_FUNCTION_ARGS)
Numeric num2 = PG_GETARG_NUMERIC(1);
/*
- * Use cmp_numerics so that this will agree with the comparison
- * operators, particularly as regards comparisons involving NaN.
+ * Use cmp_numerics so that this will agree with the comparison operators,
+ * particularly as regards comparisons involving NaN.
*/
if (cmp_numerics(num1, num2) < 0)
PG_RETURN_NUMERIC(num1);
@@ -1390,8 +1379,8 @@ numeric_larger(PG_FUNCTION_ARGS)
Numeric num2 = PG_GETARG_NUMERIC(1);
/*
- * Use cmp_numerics so that this will agree with the comparison
- * operators, particularly as regards comparisons involving NaN.
+ * Use cmp_numerics so that this will agree with the comparison operators,
+ * particularly as regards comparisons involving NaN.
*/
if (cmp_numerics(num1, num2) > 0)
PG_RETURN_NUMERIC(num1);
@@ -1469,9 +1458,9 @@ numeric_sqrt(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a
- * scale to give at least NUMERIC_MIN_SIG_DIGITS significant digits;
- * but in any case not less than the input's dscale.
+ * Unpack the argument and determine the result scale. We choose a scale
+ * to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
+ * case not less than the input's dscale.
*/
init_var(&arg);
init_var(&result);
@@ -1522,9 +1511,9 @@ numeric_exp(PG_FUNCTION_ARGS)
PG_RETURN_NUMERIC(make_result(&const_nan));
/*
- * Unpack the argument and determine the result scale. We choose a
- * scale to give at least NUMERIC_MIN_SIG_DIGITS significant digits;
- * but in any case not less than the input's dscale.
+ * Unpack the argument and determine the result scale. We choose a scale
+ * to give at least NUMERIC_MIN_SIG_DIGITS significant digits; but in any
+ * case not less than the input's dscale.
*/
init_var(&arg);
init_var(&result);
@@ -1535,8 +1524,8 @@ numeric_exp(PG_FUNCTION_ARGS)
val = numericvar_to_double_no_overflow(&arg);
/*
- * log10(result) = num * log10(e), so this is approximately the
- * decimal weight of the result:
+ * log10(result) = num * log10(e), so this is approximately the decimal
+ * weight of the result:
*/
val *= 0.434294481903252;
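
The comment above is how numeric_exp sizes its working precision: since log10(exp(x)) = x * log10(e), multiplying the argument by 0.434294481903252 predicts the decimal weight of the result before it is computed. Numerically (illustrative only):

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double      x = 100.0;
    double      approx_weight = x * 0.434294481903252;     /* x * log10(e) */

    /* exp(100) ~= 2.69e43, so about 43 digits before the decimal point */
    printf("estimated digits before point: %.1f (log10(exp(x)) = %.1f)\n",
           approx_weight, log10(exp(x)));
    return 0;
}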
@@ -1646,8 +1635,8 @@ numeric_log(PG_FUNCTION_ARGS)
set_var_from_num(num2, &arg2);
/*
- * Call log_var() to compute and return the result; note it handles
- * scale selection itself.
+ * Call log_var() to compute and return the result; note it handles scale
+ * selection itself.
*/
log_var(&arg1, &arg2, &result);
@@ -1698,8 +1687,8 @@ numeric_power(PG_FUNCTION_ARGS)
trunc_var(&arg2_trunc, 0);
/*
- * Return special SQLSTATE error codes for a few conditions mandated
- * by the standard.
+ * Return special SQLSTATE error codes for a few conditions mandated by
+ * the standard.
*/
if ((cmp_var(&arg1, &const_zero) == 0 &&
cmp_var(&arg2, &const_zero) < 0) ||
@@ -2093,8 +2082,8 @@ do_numeric_accum(ArrayType *transarray, Numeric newval)
NumericGetDatum(newval));
sumX2 = DirectFunctionCall2(numeric_add, sumX2,
DirectFunctionCall2(numeric_mul,
- NumericGetDatum(newval),
- NumericGetDatum(newval)));
+ NumericGetDatum(newval),
+ NumericGetDatum(newval)));
transdatums[0] = N;
transdatums[1] = sumX;
@@ -2252,7 +2241,7 @@ numeric_variance(PG_FUNCTION_ARGS)
{
mul_var(&vN, &vNminus1, &vNminus1, 0); /* N * (N - 1) */
rscale = select_div_scale(&vsumX2, &vNminus1);
- div_var(&vsumX2, &vNminus1, &vsumX, rscale, true); /* variance */
+ div_var(&vsumX2, &vNminus1, &vsumX, rscale, true); /* variance */
res = make_result(&vsumX);
}
@@ -2328,7 +2317,7 @@ numeric_stddev(PG_FUNCTION_ARGS)
{
mul_var(&vN, &vNminus1, &vNminus1, 0); /* N * (N - 1) */
rscale = select_div_scale(&vsumX2, &vNminus1);
- div_var(&vsumX2, &vNminus1, &vsumX, rscale, true); /* variance */
+ div_var(&vsumX2, &vNminus1, &vsumX, rscale, true); /* variance */
sqrt_var(&vsumX, &vsumX, rscale); /* stddev */
res = make_result(&vsumX);
@@ -2377,12 +2366,12 @@ int2_sum(PG_FUNCTION_ARGS)
/*
* If we're invoked by nodeAgg, we can cheat and modify out first
- * parameter in-place to avoid palloc overhead. If not, we need to
- * return the new value of the transition variable.
+ * parameter in-place to avoid palloc overhead. If not, we need to return
+ * the new value of the transition variable.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
{
- int64 *oldsum = (int64 *) PG_GETARG_POINTER(0);
+ int64 *oldsum = (int64 *) PG_GETARG_POINTER(0);
/* Leave the running sum unchanged in the new input is null */
if (!PG_ARGISNULL(1))
@@ -2422,12 +2411,12 @@ int4_sum(PG_FUNCTION_ARGS)
/*
* If we're invoked by nodeAgg, we can cheat and modify out first
- * parameter in-place to avoid palloc overhead. If not, we need to
- * return the new value of the transition variable.
+ * parameter in-place to avoid palloc overhead. If not, we need to return
+ * the new value of the transition variable.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
{
- int64 *oldsum = (int64 *) PG_GETARG_POINTER(0);
+ int64 *oldsum = (int64 *) PG_GETARG_POINTER(0);
/* Leave the running sum unchanged in the new input is null */
if (!PG_ARGISNULL(1))
@@ -2467,9 +2456,9 @@ int8_sum(PG_FUNCTION_ARGS)
}
/*
- * Note that we cannot special-case the nodeAgg case here, as we
- * do for int2_sum and int4_sum: numeric is of variable size, so
- * we cannot modify our first parameter in-place.
+ * Note that we cannot special-case the nodeAgg case here, as we do for
+ * int2_sum and int4_sum: numeric is of variable size, so we cannot modify
+ * our first parameter in-place.
*/
oldsum = PG_GETARG_NUMERIC(0);
@@ -2514,8 +2503,8 @@ int2_avg_accum(PG_FUNCTION_ARGS)
/*
* If we're invoked by nodeAgg, we can cheat and modify our first
- * parameter in-place to reduce palloc overhead. Otherwise we need
- * to make a copy of it before scribbling on it.
+ * parameter in-place to reduce palloc overhead. Otherwise we need to make
+ * a copy of it before scribbling on it.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
transarray = PG_GETARG_ARRAYTYPE_P(0);
@@ -2541,8 +2530,8 @@ int4_avg_accum(PG_FUNCTION_ARGS)
/*
* If we're invoked by nodeAgg, we can cheat and modify our first
- * parameter in-place to reduce palloc overhead. Otherwise we need
- * to make a copy of it before scribbling on it.
+ * parameter in-place to reduce palloc overhead. Otherwise we need to make
+ * a copy of it before scribbling on it.
*/
if (fcinfo->context && IsA(fcinfo->context, AggState))
transarray = PG_GETARG_ARRAYTYPE_P(0);
@@ -2743,8 +2732,8 @@ set_var_from_str(const char *str, NumericVar *dest)
NumericDigit *digits;
/*
- * We first parse the string to extract decimal digits and determine
- * the correct decimal weight. Then convert to NBASE representation.
+ * We first parse the string to extract decimal digits and determine the
+ * correct decimal weight. Then convert to NBASE representation.
*/
/* skip leading spaces */
@@ -2777,7 +2766,7 @@ set_var_from_str(const char *str, NumericVar *dest)
if (!isdigit((unsigned char) *cp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"", str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"", str)));
decdigits = (unsigned char *) palloc(strlen(cp) + DEC_DIGITS * 2);
@@ -2800,8 +2789,8 @@ set_var_from_str(const char *str, NumericVar *dest)
if (have_dp)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"",
- str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"",
+ str)));
have_dp = TRUE;
cp++;
}
@@ -2824,15 +2813,15 @@ set_var_from_str(const char *str, NumericVar *dest)
if (endptr == cp)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"",
- str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"",
+ str)));
cp = endptr;
if (exponent > NUMERIC_MAX_PRECISION ||
exponent < -NUMERIC_MAX_PRECISION)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"",
- str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"",
+ str)));
dweight += (int) exponent;
dscale -= (int) exponent;
if (dscale < 0)
@@ -2845,16 +2834,16 @@ set_var_from_str(const char *str, NumericVar *dest)
if (!isspace((unsigned char) *cp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"",
- str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"",
+ str)));
cp++;
}
/*
- * Okay, convert pure-decimal representation to base NBASE. First we
- * need to determine the converted weight and ndigits. offset is the
- * number of decimal zeroes to insert before the first given digit to
- * have a correctly aligned first NBASE digit.
+ * Okay, convert pure-decimal representation to base NBASE. First we need
+ * to determine the converted weight and ndigits. offset is the number of
+ * decimal zeroes to insert before the first given digit to have a
+ * correctly aligned first NBASE digit.
*/
if (dweight >= 0)
weight = (dweight + 1 + DEC_DIGITS - 1) / DEC_DIGITS - 1;
@@ -2969,10 +2958,10 @@ get_str_from_var(NumericVar *var, int dscale)
/*
* Allocate space for the result.
*
- * i is set to to # of decimal digits before decimal point. dscale is the
- * # of decimal digits we will print after decimal point. We may
- * generate as many as DEC_DIGITS-1 excess digits at the end, and in
- * addition we need room for sign, decimal point, null terminator.
+ * i is set to to # of decimal digits before decimal point. dscale is the #
+ * of decimal digits we will print after decimal point. We may generate as
+ * many as DEC_DIGITS-1 excess digits at the end, and in addition we need
+ * room for sign, decimal point, null terminator.
*/
i = (var->weight + 1) * DEC_DIGITS;
if (i <= 0)
@@ -3037,9 +3026,9 @@ get_str_from_var(NumericVar *var, int dscale)
}
/*
- * If requested, output a decimal point and all the digits that follow
- * it. We initially put out a multiple of DEC_DIGITS digits, then
- * truncate if needed.
+ * If requested, output a decimal point and all the digits that follow it.
+ * We initially put out a multiple of DEC_DIGITS digits, then truncate if
+ * needed.
*/
if (dscale > 0)
{
@@ -3179,10 +3168,10 @@ apply_typmod(NumericVar *var, int32 typmod)
/*
* Check for overflow - note we can't do this before rounding, because
- * rounding could raise the weight. Also note that the var's weight
- * could be inflated by leading zeroes, which will be stripped before
- * storage but perhaps might not have been yet. In any case, we must
- * recognize a true zero, whose weight doesn't mean anything.
+ * rounding could raise the weight. Also note that the var's weight could
+ * be inflated by leading zeroes, which will be stripped before storage
+ * but perhaps might not have been yet. In any case, we must recognize a
+ * true zero, whose weight doesn't mean anything.
*/
ddigits = (var->weight + 1) * DEC_DIGITS;
if (ddigits > maxdigits)
@@ -3254,9 +3243,8 @@ numericvar_to_int8(NumericVar *var, int64 *result)
}
/*
- * For input like 10000000000, we must treat stripped digits as real.
- * So the loop assumes there are weight+1 digits before the decimal
- * point.
+ * For input like 10000000000, we must treat stripped digits as real. So
+ * the loop assumes there are weight+1 digits before the decimal point.
*/
weight = var->weight;
Assert(weight >= 0 && ndigits <= weight + 1);
@@ -3274,10 +3262,10 @@ numericvar_to_int8(NumericVar *var, int64 *result)
/*
* The overflow check is a bit tricky because we want to accept
- * INT64_MIN, which will overflow the positive accumulator. We
- * can detect this case easily though because INT64_MIN is the
- * only nonzero value for which -val == val (on a two's complement
- * machine, anyway).
+ * INT64_MIN, which will overflow the positive accumulator. We can
+ * detect this case easily though because INT64_MIN is the only
+ * nonzero value for which -val == val (on a two's complement machine,
+ * anyway).
*/
if ((val / NBASE) != oldval) /* possible overflow? */
{
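
The reflowed comment describes the INT64_MIN corner case in numericvar_to_int8: digits accumulate into a positive int64, and the one legal input that overflows the accumulator is INT64_MIN, recognizable because it is the only nonzero value equal to its own negation. Condensed to a predicate (illustrative; strictly speaking, negating INT64_MIN is undefined in ISO C, and the code relies on two's-complement wraparound):

#include <stdbool.h>
#include <stdint.h>

/* On two's-complement targets, INT64_MIN is the only nonzero v with -v == v. */
static bool
is_int64_min(int64_t v)
{
    return v != 0 && -v == v;
}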
@@ -3355,8 +3343,8 @@ numeric_to_double_no_overflow(Numeric num)
/* shouldn't happen ... */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- tmp)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ tmp)));
}
pfree(tmp);
@@ -3381,8 +3369,8 @@ numericvar_to_double_no_overflow(NumericVar *var)
/* shouldn't happen ... */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- tmp)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ tmp)));
}
pfree(tmp);
@@ -3454,8 +3442,7 @@ add_var(NumericVar *var1, NumericVar *var2, NumericVar *result)
else
{
/*
- * var1 is positive, var2 is negative Must compare absolute
- * values
+ * var1 is positive, var2 is negative Must compare absolute values
*/
switch (cmp_abs(var1, var2))
{
@@ -3715,10 +3702,9 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* Determine number of result digits to compute. If the exact result
- * would have more than rscale fractional digits, truncate the
- * computation with MUL_GUARD_DIGITS guard digits. We do that by
- * pretending that one or both inputs have fewer digits than they
- * really do.
+ * would have more than rscale fractional digits, truncate the computation
+ * with MUL_GUARD_DIGITS guard digits. We do that by pretending that one
+ * or both inputs have fewer digits than they really do.
*/
res_ndigits = var1ndigits + var2ndigits + 1;
maxdigits = res_weight + 1 + (rscale * DEC_DIGITS) + MUL_GUARD_DIGITS;
@@ -3752,12 +3738,12 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* We do the arithmetic in an array "dig[]" of signed int's. Since
- * INT_MAX is noticeably larger than NBASE*NBASE, this gives us
- * headroom to avoid normalizing carries immediately.
+ * INT_MAX is noticeably larger than NBASE*NBASE, this gives us headroom
+ * to avoid normalizing carries immediately.
*
* maxdig tracks the maximum possible value of any dig[] entry; when this
- * threatens to exceed INT_MAX, we take the time to propagate carries.
- * To avoid overflow in maxdig itself, it actually represents the max
+ * threatens to exceed INT_MAX, we take the time to propagate carries. To
+ * avoid overflow in maxdig itself, it actually represents the max
* possible value divided by NBASE-1.
*/
dig = (int *) palloc0(res_ndigits * sizeof(int));
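
The comment above justifies accumulating partial products in plain int: each product is at most (NBASE-1)^2, which is far below INT_MAX, so a couple dozen of them fit before carries must be propagated, and maxdig tracks the worst case divided by NBASE-1 so the tracker itself cannot overflow. The slack works out as follows (illustrative):

#include <limits.h>
#include <stdio.h>

#define NBASE 10000

int
main(void)
{
    /* Each partial product is at most (NBASE-1)^2 = 99980001, and INT_MAX is
     * 2147483647, so roughly 21 such products can pile up in one int entry
     * before carries must be propagated. */
    printf("products per carry pass: %ld\n",
           (long) (INT_MAX / ((NBASE - 1L) * (NBASE - 1L))));
    return 0;
}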
@@ -3801,9 +3787,9 @@ mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
}
/*
- * Now we do a final carry propagation pass to normalize the result,
- * which we combine with storing the result digits into the output.
- * Note that this is still done at full precision w/guard digits.
+ * Now we do a final carry propagation pass to normalize the result, which
+ * we combine with storing the result digits into the output. Note that
+ * this is still done at full precision w/guard digits.
*/
alloc_var(result, res_ndigits);
res_digits = result->digits;
@@ -3909,24 +3895,24 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/*
* We do the arithmetic in an array "div[]" of signed int's. Since
- * INT_MAX is noticeably larger than NBASE*NBASE, this gives us
- * headroom to avoid normalizing carries immediately.
+ * INT_MAX is noticeably larger than NBASE*NBASE, this gives us headroom
+ * to avoid normalizing carries immediately.
*
- * We start with div[] containing one zero digit followed by the
- * dividend's digits (plus appended zeroes to reach the desired
- * precision including guard digits). Each step of the main loop
- * computes an (approximate) quotient digit and stores it into div[],
- * removing one position of dividend space. A final pass of carry
- * propagation takes care of any mistaken quotient digits.
+ * We start with div[] containing one zero digit followed by the dividend's
+ * digits (plus appended zeroes to reach the desired precision including
+ * guard digits). Each step of the main loop computes an (approximate)
+ * quotient digit and stores it into div[], removing one position of
+ * dividend space. A final pass of carry propagation takes care of any
+ * mistaken quotient digits.
*/
div = (int *) palloc0((div_ndigits + 1) * sizeof(int));
for (i = 0; i < var1ndigits; i++)
div[i + 1] = var1digits[i];
/*
- * We estimate each quotient digit using floating-point arithmetic,
- * taking the first four digits of the (current) dividend and divisor.
- * This must be float to avoid overflow.
+ * We estimate each quotient digit using floating-point arithmetic, taking
+ * the first four digits of the (current) dividend and divisor. This must
+ * be float to avoid overflow.
*/
fdivisor = (double) var2digits[0];
for (i = 1; i < 4; i++)
@@ -3938,10 +3924,10 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
fdivisorinverse = 1.0 / fdivisor;
/*
- * maxdiv tracks the maximum possible absolute value of any div[]
- * entry; when this threatens to exceed INT_MAX, we take the time to
- * propagate carries. To avoid overflow in maxdiv itself, it actually
- * represents the max possible abs. value divided by NBASE-1.
+ * maxdiv tracks the maximum possible absolute value of any div[] entry;
+ * when this threatens to exceed INT_MAX, we take the time to propagate
+ * carries. To avoid overflow in maxdiv itself, it actually represents
+ * the max possible abs. value divided by NBASE-1.
*/
maxdiv = 1;
@@ -3992,8 +3978,8 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
div[qi] = newdig;
/*
- * All the div[] digits except possibly div[qi] are now in
- * the range 0..NBASE-1.
+ * All the div[] digits except possibly div[qi] are now in the
+ * range 0..NBASE-1.
*/
maxdiv = Abs(newdig) / (NBASE - 1);
maxdiv = Max(maxdiv, 1);
@@ -4012,8 +3998,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
/* Compute the (approximate) quotient digit */
fquotient = fdividend * fdivisorinverse;
qdigit = (fquotient >= 0.0) ? ((int) fquotient) :
- (((int) fquotient) - 1); /* truncate towards
- * -infinity */
+ (((int) fquotient) - 1); /* truncate towards -infinity */
maxdiv += Abs(qdigit);
}
@@ -4028,10 +4013,10 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
}
/*
- * The dividend digit we are about to replace might still be
- * nonzero. Fold it into the next digit position. We don't need
- * to worry about overflow here since this should nearly cancel
- * with the subtraction of the divisor.
+ * The dividend digit we are about to replace might still be nonzero.
+ * Fold it into the next digit position. We don't need to worry about
+ * overflow here since this should nearly cancel with the subtraction
+ * of the divisor.
*/
div[qi + 1] += div[qi] * NBASE;
@@ -4050,9 +4035,9 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
div[qi] = qdigit;
/*
- * Now we do a final carry propagation pass to normalize the result,
- * which we combine with storing the result digits into the output.
- * Note that this is still done at full precision w/guard digits.
+ * Now we do a final carry propagation pass to normalize the result, which
+ * we combine with storing the result digits into the output. Note that
+ * this is still done at full precision w/guard digits.
*/
alloc_var(result, div_ndigits + 1);
res_digits = result->digits;
@@ -4089,7 +4074,7 @@ div_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
round_var(result, rscale);
else
trunc_var(result, rscale);
-
+
/* Strip leading and trailing zeroes */
strip_var(result);
}
@@ -4112,8 +4097,8 @@ select_div_scale(NumericVar *var1, NumericVar *var2)
int rscale;
/*
- * The result scale of a division isn't specified in any SQL standard.
- * For PostgreSQL we select a result scale that will give at least
+ * The result scale of a division isn't specified in any SQL standard. For
+ * PostgreSQL we select a result scale that will give at least
* NUMERIC_MIN_SIG_DIGITS significant digits, so that numeric gives a
* result no less accurate than float8; but use a scale not less than
* either input's display scale.
@@ -4274,8 +4259,8 @@ sqrt_var(NumericVar *arg, NumericVar *result, int rscale)
}
/*
- * SQL2003 defines sqrt() in terms of power, so we need to emit the
- * right SQLSTATE error code if the operand is negative.
+ * SQL2003 defines sqrt() in terms of power, so we need to emit the right
+ * SQLSTATE error code if the operand is negative.
*/
if (stat < 0)
ereport(ERROR,
@@ -4445,9 +4430,8 @@ exp_var_internal(NumericVar *arg, NumericVar *result, int rscale)
*
* exp(x) = 1 + x + x^2/2! + x^3/3! + ...
*
- * Given the limited range of x, this should converge reasonably quickly.
- * We run the series until the terms fall below the local_rscale
- * limit.
+ * Given the limited range of x, this should converge reasonably quickly. We
+ * run the series until the terms fall below the local_rscale limit.
*/
add_var(&const_one, &x, result);
set_var_from_var(&x, &xpow);
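
exp_var_internal above sums 1 + x + x^2/2! + x^3/3! + ... until the terms fall below the working scale, counting on the earlier range reduction to keep x small. The same summation in double precision (illustrative; the real code runs on NumericVars at a chosen rscale):

#include <math.h>

/* Taylor-series exp, adequate for smallish |x| (the caller range-reduces). */
static double
exp_series(double x)
{
    double      sum = 1.0;
    double      term = 1.0;
    int         n = 1;

    while (fabs(term) > 1e-17 * sum)
    {
        term *= x / n;
        n++;
        sum += term;
    }
    return sum;
}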
@@ -4535,11 +4519,11 @@ ln_var(NumericVar *arg, NumericVar *result, int rscale)
*
* z + z^3/3 + z^5/5 + ...
*
- * where z = (x-1)/(x+1) is in the range (approximately) -0.053 .. 0.048
- * due to the above range-reduction of x.
+ * where z = (x-1)/(x+1) is in the range (approximately) -0.053 .. 0.048 due
+ * to the above range-reduction of x.
*
- * The convergence of this is not as fast as one would like, but is
- * tolerable given that z is small.
+ * The convergence of this is not as fast as one would like, but is tolerable
+ * given that z is small.
*/
sub_var(&x, &const_one, result);
add_var(&x, &const_one, &elem);
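
ln_var above evaluates 2*(z + z^3/3 + z^5/5 + ...) with z = (x-1)/(x+1), after range-reducing x toward 1 so that z is tiny and the slowish series still converges acceptably. In double precision the same series is (illustrative; assumes x is positive and already close to 1):

#include <math.h>

/* atanh-series ln(x); accurate only when x is already near 1. */
static double
ln_series(double x)
{
    double      z = (x - 1.0) / (x + 1.0);
    double      zz = z * z;
    double      zpow = z;       /* holds z^(2k+1) */
    double      sum = z;
    int         n;

    for (n = 3;; n += 2)
    {
        double      term;

        zpow *= zz;
        term = zpow / n;
        if (fabs(term) <= 1e-17 * fabs(sum))
            break;
        sum += term;
    }
    return 2.0 * sum;
}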
@@ -4711,8 +4695,7 @@ power_var(NumericVar *base, NumericVar *exp, NumericVar *result)
val = numericvar_to_double_no_overflow(&ln_num);
/*
- * log10(result) = num * log10(e), so this is approximately the
- * weight:
+ * log10(result) = num * log10(e), so this is approximately the weight:
*/
val *= 0.434294481903252;
@@ -4772,8 +4755,7 @@ power_var_int(NumericVar *base, int exp, NumericVar *result, int rscale)
/*
* The general case repeatedly multiplies base according to the bit
- * pattern of exp. We do the multiplications with some extra
- * precision.
+ * pattern of exp. We do the multiplications with some extra precision.
*/
neg = (exp < 0);
exp = Abs(exp);
@@ -4866,8 +4848,8 @@ cmp_abs(NumericVar *var1, NumericVar *var2)
}
/*
- * At this point, we've run out of digits on one side or the other; so
- * any remaining nonzero digits imply that side is larger
+ * At this point, we've run out of digits on one side or the other; so any
+ * remaining nonzero digits imply that side is larger
*/
while (i1 < var1->ndigits)
{
@@ -5071,8 +5053,8 @@ round_var(NumericVar *var, int rscale)
di = (var->weight + 1) * DEC_DIGITS + rscale;
/*
- * If di = 0, the value loses all digits, but could round up to 1 if
- * its first extra digit is >= 5. If di < 0 the result must be 0.
+ * If di = 0, the value loses all digits, but could round up to 1 if its
+ * first extra digit is >= 5. If di < 0 the result must be 0.
*/
if (di < 0)
{
diff --git a/src/backend/utils/adt/numutils.c b/src/backend/utils/adt/numutils.c
index fb7fd94b8c8..ffa225277e1 100644
--- a/src/backend/utils/adt/numutils.c
+++ b/src/backend/utils/adt/numutils.c
@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/numutils.c,v 1.68 2005/01/09 21:03:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/numutils.c,v 1.69 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,8 +63,8 @@ pg_atoi(char *s, int size, int c)
char *badp;
/*
- * Some versions of strtol treat the empty string as an error, but
- * some seem not to. Make an explicit test to be sure we catch it.
+ * Some versions of strtol treat the empty string as an error, but some
+ * seem not to. Make an explicit test to be sure we catch it.
*/
if (s == NULL)
elog(ERROR, "NULL pointer");
@@ -85,8 +85,8 @@ pg_atoi(char *s, int size, int c)
s)));
/*
- * Skip any trailing whitespace; if anything but whitespace remains
- * before the terminating character, bail out
+ * Skip any trailing whitespace; if anything but whitespace remains before
+ * the terminating character, bail out
*/
while (*badp && *badp != c && isspace((unsigned char) *badp))
badp++;
@@ -108,19 +108,19 @@ pg_atoi(char *s, int size, int c)
)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("value \"%s\" is out of range for type integer", s)));
+ errmsg("value \"%s\" is out of range for type integer", s)));
break;
case sizeof(int16):
if (errno == ERANGE || l < SHRT_MIN || l > SHRT_MAX)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("value \"%s\" is out of range for type smallint", s)));
+ errmsg("value \"%s\" is out of range for type smallint", s)));
break;
case sizeof(int8):
if (errno == ERANGE || l < SCHAR_MIN || l > SCHAR_MAX)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("value \"%s\" is out of range for 8-bit integer", s)));
+ errmsg("value \"%s\" is out of range for 8-bit integer", s)));
break;
default:
elog(ERROR, "unsupported result size: %d", size);
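
Taken together, the pg_atoi comments describe a defensive strtol pattern: reject empty strings explicitly, skip trailing whitespace, and range-check per target width. A standalone sketch of that pattern, returning false rather than calling ereport (a simplification of this sketch):

#include <ctype.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stdlib.h>

/* Illustration only: parse an int32, reporting failure as "false". */
static bool
parse_int32_strict(const char *s, long *out)
{
    char   *end;
    long    val;

    if (s == NULL || *s == '\0')
        return false;           /* some strtol()s accept "", so check ourselves */

    errno = 0;
    val = strtol(s, &end, 10);
    if (end == s)
        return false;           /* no digits consumed: bad syntax */
    if (errno == ERANGE || val < INT_MIN || val > INT_MAX)
        return false;           /* out of range for a 32-bit integer */

    /* skip trailing whitespace; anything else left over is an error */
    while (*end && isspace((unsigned char) *end))
        end++;
    if (*end != '\0')
        return false;

    *out = val;
    return true;
}
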
diff --git a/src/backend/utils/adt/oid.c b/src/backend/utils/adt/oid.c
index e9a2c741be2..62db042bbde 100644
--- a/src/backend/utils/adt/oid.c
+++ b/src/backend/utils/adt/oid.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.63 2005/07/10 21:36:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.64 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -47,9 +47,9 @@ oidin_subr(const char *funcname, const char *s, char **endloc)
cvt = strtoul(s, &endptr, 10);
/*
- * strtoul() normally only sets ERANGE. On some systems it also may
- * set EINVAL, which simply means it couldn't parse the input string.
- * This is handled by the second "if" consistent across platforms.
+ * strtoul() normally only sets ERANGE. On some systems it also may set
+ * EINVAL, which simply means it couldn't parse the input string. This is
+ * handled by the second "if", so the behavior is consistent across platforms.

*/
if (errno && errno != ERANGE && errno != EINVAL)
ereport(ERROR,
@@ -88,16 +88,16 @@ oidin_subr(const char *funcname, const char *s, char **endloc)
result = (Oid) cvt;
/*
- * Cope with possibility that unsigned long is wider than Oid, in
- * which case strtoul will not raise an error for some values that are
- * out of the range of Oid.
+ * Cope with possibility that unsigned long is wider than Oid, in which
+ * case strtoul will not raise an error for some values that are out of
+ * the range of Oid.
*
- * For backwards compatibility, we want to accept inputs that are given
- * with a minus sign, so allow the input value if it matches after
- * either signed or unsigned extension to long.
+ * For backwards compatibility, we want to accept inputs that are given with
+ * a minus sign, so allow the input value if it matches after either
+ * signed or unsigned extension to long.
*
- * To ensure consistent results on 32-bit and 64-bit platforms, make sure
- * the error message is the same as if strtoul() had returned ERANGE.
+ * To ensure consistent results on 32-bit and 64-bit platforms, make sure the
+ * error message is the same as if strtoul() had returned ERANGE.
*/
#if OID_MAX != ULONG_MAX
if (cvt != (unsigned long) result &&
@@ -171,8 +171,8 @@ buildoidvector(const Oid *oids, int n)
memcpy(result->values, oids, n * sizeof(Oid));
/*
- * Attach standard array header. For historical reasons, we set the
- * index lower bound to 0 not 1.
+ * Attach standard array header. For historical reasons, we set the index
+ * lower bound to 0 not 1.
*/
result->size = OidVectorSize(n);
result->ndim = 1;
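
The overflow guard described in oidin_subr's comments can be sketched as follows; Oid is assumed to be uint32_t here, and error reporting is reduced to a boolean:

#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Illustration only: errors become "false" instead of ereport() calls. */
static bool
parse_oid_sketch(const char *s, uint32_t *out)
{
    char           *end;
    unsigned long   cvt;
    uint32_t        result;

    errno = 0;
    cvt = strtoul(s, &end, 10);
    if (errno != 0 && errno != ERANGE && errno != EINVAL)
        return false;           /* unexpected system error */
    if (end == s || *end != '\0')
        return false;           /* nothing parsed, or trailing junk */
    if (errno == ERANGE)
        return false;           /* out of range even for unsigned long */

    result = (uint32_t) cvt;
#if ULONG_MAX > 0xFFFFFFFFUL
    /*
     * unsigned long is wider than Oid: accept the value only if it matches
     * after either unsigned or signed extension, so "-1" still maps to
     * 4294967295 for backwards compatibility.
     */
    if (cvt != (unsigned long) result &&
        cvt != (unsigned long) (int32_t) result)
        return false;
#endif
    *out = result;
    return true;
}
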
diff --git a/src/backend/utils/adt/oracle_compat.c b/src/backend/utils/adt/oracle_compat.c
index 5dd9a44ccf8..a1ddc00a782 100644
--- a/src/backend/utils/adt/oracle_compat.c
+++ b/src/backend/utils/adt/oracle_compat.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/oracle_compat.c,v 1.61 2005/08/24 17:50:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/oracle_compat.c,v 1.62 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -87,12 +87,12 @@ texttowcs(const text *txt)
if (ncodes == (size_t) -1)
{
/*
- * Invalid multibyte character encountered. We try to give a
- * useful error message by letting pg_verifymbstr check the
- * string. But it's possible that the string is OK to us, and not
- * OK to mbstowcs --- this suggests that the LC_CTYPE locale is
- * different from the database encoding. Give a generic error
- * message if verifymbstr can't find anything wrong.
+ * Invalid multibyte character encountered. We try to give a useful
+ * error message by letting pg_verifymbstr check the string. But it's
+ * possible that the string is OK to us, and not OK to mbstowcs ---
+ * this suggests that the LC_CTYPE locale is different from the
+ * database encoding. Give a generic error message if verifymbstr
+ * can't find anything wrong.
*/
pg_verifymbstr(workstr, nbytes, false);
ereport(ERROR,
@@ -164,11 +164,11 @@ win32_utf8_texttowcs(const text *txt)
{
int nbytes = VARSIZE(txt) - VARHDRSZ;
wchar_t *result;
- int r;
+ int r;
/* Overflow paranoia */
if (nbytes < 0 ||
- nbytes > (int) (INT_MAX / sizeof(wchar_t)) -1)
+ nbytes > (int) (INT_MAX / sizeof(wchar_t)) - 1)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory")));
@@ -206,9 +206,9 @@ win32_utf8_texttowcs(const text *txt)
static text *
win32_utf8_wcstotext(const wchar_t *str)
{
- text *result;
- int nbytes;
- int r;
+ text *result;
+ int nbytes;
+ int r;
nbytes = WideCharToMultiByte(CP_UTF8, 0, str, -1, NULL, 0, NULL, NULL);
if (nbytes == 0) /* shouldn't happen */
@@ -217,7 +217,7 @@ win32_utf8_wcstotext(const wchar_t *str)
errmsg("UTF16 to UTF8 translation failed: %lu",
GetLastError())));
- result = palloc(nbytes+VARHDRSZ);
+ result = palloc(nbytes + VARHDRSZ);
r = WideCharToMultiByte(CP_UTF8, 0, str, -1, VARDATA(result), nbytes,
NULL, NULL);
@@ -227,7 +227,7 @@ win32_utf8_wcstotext(const wchar_t *str)
errmsg("UTF16 to UTF8 translation failed: %lu",
GetLastError())));
- VARATT_SIZEP(result) = nbytes + VARHDRSZ - 1; /* -1 to ignore null */
+ VARATT_SIZEP(result) = nbytes + VARHDRSZ - 1; /* -1 to ignore null */
return result;
}
@@ -256,8 +256,7 @@ win32_wcstotext(const wchar_t *str, int ncodes)
#define texttowcs win32_texttowcs
#define wcstotext win32_wcstotext
-
-#endif /* WIN32 */
+#endif /* WIN32 */
/********************************************************************
@@ -278,10 +277,11 @@ Datum
lower(PG_FUNCTION_ARGS)
{
#ifdef USE_WIDE_UPPER_LOWER
+
/*
- * Use wide char code only when max encoding length > 1 and ctype != C.
- * Some operating systems fail with multi-byte encodings and a C locale.
- * Also, for a C locale there is no need to process as multibyte.
+ * Use wide char code only when max encoding length > 1 and ctype != C.
+ * Some operating systems fail with multi-byte encodings and a C locale.
+ * Also, for a C locale there is no need to process as multibyte.
*/
if (pg_database_encoding_max_length() > 1 && !lc_ctype_is_c())
{
@@ -309,8 +309,7 @@ lower(PG_FUNCTION_ARGS)
int m;
/*
- * Since we copied the string, we can scribble directly on the
- * value
+ * Since we copied the string, we can scribble directly on the value
*/
ptr = VARDATA(string);
m = VARSIZE(string) - VARHDRSZ;
@@ -344,10 +343,11 @@ Datum
upper(PG_FUNCTION_ARGS)
{
#ifdef USE_WIDE_UPPER_LOWER
+
/*
- * Use wide char code only when max encoding length > 1 and ctype != C.
- * Some operating systems fail with multi-byte encodings and a C locale.
- * Also, for a C locale there is no need to process as multibyte.
+ * Use wide char code only when max encoding length > 1 and ctype != C.
+ * Some operating systems fail with multi-byte encodings and a C locale.
+ * Also, for a C locale there is no need to process as multibyte.
*/
if (pg_database_encoding_max_length() > 1 && !lc_ctype_is_c())
{
@@ -375,8 +375,7 @@ upper(PG_FUNCTION_ARGS)
int m;
/*
- * Since we copied the string, we can scribble directly on the
- * value
+ * Since we copied the string, we can scribble directly on the value
*/
ptr = VARDATA(string);
m = VARSIZE(string) - VARHDRSZ;
@@ -413,10 +412,11 @@ Datum
initcap(PG_FUNCTION_ARGS)
{
#ifdef USE_WIDE_UPPER_LOWER
+
/*
- * Use wide char code only when max encoding length > 1 and ctype != C.
- * Some operating systems fail with multi-byte encodings and a C locale.
- * Also, for a C locale there is no need to process as multibyte.
+ * Use wide char code only when max encoding length > 1 and ctype != C.
+ * Some operating systems fail with multi-byte encodings and a C locale.
+ * Also, for a C locale there is no need to process as multibyte.
*/
if (pg_database_encoding_max_length() > 1 && !lc_ctype_is_c())
{
@@ -452,8 +452,7 @@ initcap(PG_FUNCTION_ARGS)
int m;
/*
- * Since we copied the string, we can scribble directly on the
- * value
+ * Since we copied the string, we can scribble directly on the value
*/
ptr = VARDATA(string);
m = VARSIZE(string) - VARHDRSZ;
@@ -732,8 +731,8 @@ dotrim(const char *string, int stringlen,
{
/*
* In the multibyte-encoding case, build arrays of pointers to
- * character starts, so that we can avoid inefficient checks
- * in the inner loops.
+ * character starts, so that we can avoid inefficient checks in
+ * the inner loops.
*/
const char **stringchars;
const char **setchars;
@@ -828,8 +827,7 @@ dotrim(const char *string, int stringlen,
else
{
/*
- * In the single-byte-encoding case, we don't need such
- * overhead.
+ * In the single-byte-encoding case, we don't need such overhead.
*/
if (doltrim)
{
@@ -1152,9 +1150,9 @@ translate(PG_FUNCTION_ARGS)
VARATT_SIZEP(result) = retlen + VARHDRSZ;
/*
- * There may be some wasted space in the result if deletions occurred,
- * but it's not worth reallocating it; the function result probably
- * won't live long anyway.
+ * There may be some wasted space in the result if deletions occurred, but
+ * it's not worth reallocating it; the function result probably won't live
+ * long anyway.
*/
PG_RETURN_TEXT_P(result);
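
The wide-character path these comments outline boils down to "convert to wchar_t, case-convert, convert back". A sketch using only standard C (mbstowcs/towlower/wcstombs) rather than the texttowcs/wcstotext helpers, with error handling reduced to returning NULL:

#include <stdlib.h>
#include <wchar.h>
#include <wctype.h>

/*
 * Illustration only: lower-case a NUL-terminated multibyte string via wide
 * characters.  Returns a malloc'd string, or NULL if the bytes cannot be
 * converted under the current LC_CTYPE setting.
 */
static char *
lower_via_wchars(const char *s)
{
    size_t      ncodes = mbstowcs(NULL, s, 0);
    wchar_t    *ws;
    char       *result = NULL;
    size_t      nbytes;
    size_t      i;

    if (ncodes == (size_t) -1)
        return NULL;            /* invalid multibyte sequence for this locale */

    ws = malloc((ncodes + 1) * sizeof(wchar_t));
    if (ws == NULL)
        return NULL;
    mbstowcs(ws, s, ncodes + 1);

    for (i = 0; i < ncodes; i++)
        ws[i] = towlower(ws[i]);

    nbytes = wcstombs(NULL, ws, 0);
    if (nbytes != (size_t) -1 && (result = malloc(nbytes + 1)) != NULL)
        wcstombs(result, ws, nbytes + 1);
    free(ws);
    return result;
}
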
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 7c9c774d91b..303fec745ab 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -4,7 +4,7 @@
*
* Portions Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/pg_locale.c,v 1.31 2005/03/16 00:02:49 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pg_locale.c,v 1.32 2005/10/15 02:49:29 momjian Exp $
*
*-----------------------------------------------------------------------
*/
@@ -124,9 +124,9 @@ const char *
locale_messages_assign(const char *value, bool doit, GucSource source)
{
#ifndef WIN32
+
/*
- * LC_MESSAGES category does not exist everywhere, but accept it
- * anyway
+ * LC_MESSAGES category does not exist everywhere, but accept it anyway
*/
#ifdef LC_MESSAGES
if (doit)
@@ -138,16 +138,15 @@ locale_messages_assign(const char *value, bool doit, GucSource source)
value = locale_xxx_assign(LC_MESSAGES, value, false, source);
#endif /* LC_MESSAGES */
return value;
-
-#else /* WIN32 */
+#else /* WIN32 */
/*
* Win32 does not have working setlocale() for LC_MESSAGES. We can only
- * use environment variables to change it (per gettext FAQ). This
- * means we can't actually check the supplied value, so always assume
- * it's good. Also, ignore attempts to set to "", which really means
- * "keep using the old value". (Actually it means "use the environment
- * value", but we are too lazy to try to implement that exactly.)
+ * use environment variables to change it (per gettext FAQ). This means
+ * we can't actually check the supplied value, so always assume it's good.
+ * Also, ignore attempts to set to "", which really means "keep using the
+ * old value". (Actually it means "use the environment value", but we are
+ * too lazy to try to implement that exactly.)
*/
if (doit && value[0])
{
@@ -160,12 +159,12 @@ locale_messages_assign(const char *value, bool doit, GucSource source)
if (!SetEnvironmentVariable("LC_MESSAGES", value))
return NULL;
- snprintf(env, sizeof(env)-1, "LC_MESSAGES=%s", value);
+ snprintf(env, sizeof(env) - 1, "LC_MESSAGES=%s", value);
if (_putenv(env))
return NULL;
}
return value;
-#endif /* WIN32 */
+#endif /* WIN32 */
}
@@ -289,8 +288,8 @@ PGLC_localeconv(void)
extlconv = localeconv();
/*
- * Must copy all values since restoring internal settings may
- * overwrite localeconv()'s results.
+ * Must copy all values since restoring internal settings may overwrite
+ * localeconv()'s results.
*/
CurrentLocaleConv = *extlconv;
CurrentLocaleConv.currency_symbol = strdup(extlconv->currency_symbol);
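
A minimal sketch of the copy-before-restore pattern the PGLC_localeconv comment warns about, using an invented helper name and only a few of the lconv fields:

#include <locale.h>
#include <stdlib.h>
#include <string.h>

/* Illustration only: copy localeconv() results before restoring the locale. */
static struct lconv
snapshot_monetary_lconv(const char *monetary_locale)
{
    char           *saved = strdup(setlocale(LC_MONETARY, NULL));
    struct lconv   *ext;
    struct lconv    copy;

    setlocale(LC_MONETARY, monetary_locale);
    ext = localeconv();

    copy = *ext;                                            /* struct copy first ... */
    copy.currency_symbol = strdup(ext->currency_symbol);    /* ... then the strings, */
    copy.mon_decimal_point = strdup(ext->mon_decimal_point);    /* which restoring may */
    copy.mon_thousands_sep = strdup(ext->mon_thousands_sep);    /* otherwise clobber */

    setlocale(LC_MONETARY, saved);                           /* restore; the copy survives */
    free(saved);
    return copy;
}
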
diff --git a/src/backend/utils/adt/pg_lzcompress.c b/src/backend/utils/adt/pg_lzcompress.c
index d7c34b6a929..48d93d0602c 100644
--- a/src/backend/utils/adt/pg_lzcompress.c
+++ b/src/backend/utils/adt/pg_lzcompress.c
@@ -1,7 +1,7 @@
/* ----------
* pg_lzcompress.c -
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/pg_lzcompress.c,v 1.19 2005/05/25 21:40:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pg_lzcompress.c,v 1.20 2005/10/15 02:49:29 momjian Exp $
*
* This is an implementation of LZ compression for PostgreSQL.
* It uses a simple history table and generates 2-3 byte tags
@@ -219,11 +219,11 @@ static PGLZ_Strategy strategy_default_data = {
6144, /* Data chunks greater equal 6K force
* compression */
/* except compressed result is greater uncompressed data */
- 20, /* Compression rates below 20% mean
- * fallback to uncompressed */
+ 20, /* Compression rates below 20% mean fallback
+ * to uncompressed */
/* storage except compression is forced by previous parameter */
- 128, /* Stop history lookup if a match of 128
- * bytes is found */
+ 128, /* Stop history lookup if a match of 128 bytes
+ * is found */
10 /* Lower good match size by 10% at every
* lookup loop iteration. */
};
@@ -233,10 +233,9 @@ PGLZ_Strategy *PGLZ_strategy_default = &strategy_default_data;
static PGLZ_Strategy strategy_always_data = {
0, /* Chunks of any size are compressed */
0, /* */
- 0, /* We want to save at least one single
- * byte */
- 128, /* Stop history lookup if a match of 128
- * bytes is found */
+ 0, /* We want to save at least one single byte */
+ 128, /* Stop history lookup if a match of 128 bytes
+ * is found */
6 /* Look harder for a good match. */
};
PGLZ_Strategy *PGLZ_strategy_always = &strategy_always_data;
@@ -246,8 +245,7 @@ static PGLZ_Strategy strategy_never_data = {
0, /* */
0, /* */
0, /* */
- 0, /* Zero indicates "store uncompressed
- * always" */
+ 0, /* Zero indicates "store uncompressed always" */
0 /* */
};
PGLZ_Strategy *PGLZ_strategy_never = &strategy_never_data;
@@ -395,8 +393,7 @@ pglz_find_match(PGLZ_HistEntry **hstart, char *input, char *end,
int32 off = 0;
/*
- * Traverse the linked history list until a good enough match is
- * found.
+ * Traverse the linked history list until a good enough match is found.
*/
hent = hstart[pglz_hist_idx(input, end)];
while (hent)
@@ -414,12 +411,12 @@ pglz_find_match(PGLZ_HistEntry **hstart, char *input, char *end,
break;
/*
- * Determine length of match. A better match must be larger than
- * the best so far. And if we already have a match of 16 or more
- * bytes, it's worth the call overhead to use memcmp() to check if
- * this match is equal for the same size. After that we must
- * fallback to character by character comparison to know the exact
- * position where the diff occurred.
+ * Determine length of match. A better match must be larger than the
+ * best so far. And if we already have a match of 16 or more bytes,
+ * it's worth the call overhead to use memcmp() to check if this match
+ * is equal for the same size. After that we must fall back to
+ * character-by-character comparison to know the exact position where
+ * the diff occurred.
*/
thislen = 0;
if (len >= 16)
@@ -462,8 +459,8 @@ pglz_find_match(PGLZ_HistEntry **hstart, char *input, char *end,
hent = hent->next;
/*
- * Be happy with lesser good matches the more entries we visited.
- * But no point in doing calculation if we're at end of list.
+ * Accept less good matches the more entries we have visited. But there's
+ * no point in doing the calculation if we're at the end of the list.
*/
if (hent)
{
@@ -565,10 +562,10 @@ pglz_compress(char *source, int32 slen, PGLZ_Header *dest, PGLZ_Strategy *strate
memset((void *) hist_start, 0, sizeof(hist_start));
/*
- * Compute the maximum result size allowed by the strategy. If the
- * input size exceeds force_input_size, the max result size is the
- * input size itself. Otherwise, it is the input size minus the
- * minimum wanted compression rate.
+ * Compute the maximum result size allowed by the strategy. If the input
+ * size exceeds force_input_size, the max result size is the input size
+ * itself. Otherwise, it is the input size minus the minimum wanted
+ * compression rate.
*/
if (slen >= strategy->force_input_size)
result_max = slen;
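
The result_max rule reads directly as arithmetic; a tiny sketch, with the strategy fields passed in as plain ints (names invented for illustration):

/* Illustration only: min_comp_rate is a percentage, e.g. 20 for "save 20%". */
static int
compute_result_max(int slen, int force_input_size, int min_comp_rate)
{
    if (slen >= force_input_size)
        return slen;            /* large input: accept any result no bigger than it */
    return slen - (slen * min_comp_rate) / 100;     /* must save at least this much */
}
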
@@ -588,8 +585,8 @@ pglz_compress(char *source, int32 slen, PGLZ_Header *dest, PGLZ_Strategy *strate
while (dp < dend)
{
/*
- * If we already exceeded the maximum result size, set no
- * compression flag and stop this. But don't check too often.
+ * If we already exceeded the maximum result size, set no compression
+ * flag and stop this. But don't check too often.
*/
if (bp - bstart >= result_max)
{
@@ -632,9 +629,9 @@ pglz_compress(char *source, int32 slen, PGLZ_Header *dest, PGLZ_Strategy *strate
}
/*
- * If we are still in compressing mode, write out the last control
- * byte and determine if the compression gained the rate requested by
- * the strategy.
+ * If we are still in compressing mode, write out the last control byte
+ * and determine if the compression gained the rate requested by the
+ * strategy.
*/
if (do_compress)
{
@@ -647,8 +644,8 @@ pglz_compress(char *source, int32 slen, PGLZ_Header *dest, PGLZ_Strategy *strate
/*
* Done - if we successfully compressed and matched the strategy's
- * constraints, return the compressed result. Otherwise copy the
- * original source over it and return the original length.
+ * constraints, return the compressed result. Otherwise copy the original
+ * source over it and return the original length.
*/
if (do_compress)
{
@@ -704,9 +701,9 @@ pglz_decompress(PGLZ_Header *source, char *dest)
/*
* Otherwise it contains the match length minus 3 and the
* upper 4 bits of the offset. The next following byte
- * contains the lower 8 bits of the offset. If the length
- * is coded as 18, another extension tag byte tells how
- * much longer the match really was (0-255).
+ * contains the lower 8 bits of the offset. If the length is
+ * coded as 18, another extension tag byte tells how much
+ * longer the match really was (0-255).
*/
len = (dp[0] & 0x0f) + 3;
off = ((dp[0] & 0xf0) << 4) | dp[1];
@@ -715,10 +712,10 @@ pglz_decompress(PGLZ_Header *source, char *dest)
len += *dp++;
/*
- * Now we copy the bytes specified by the tag from OUTPUT
- * to OUTPUT. It is dangerous and platform dependent to
- * use memcpy() here, because the copied areas could
- * overlap extremely!
+ * Now we copy the bytes specified by the tag from OUTPUT to
+ * OUTPUT. It is dangerous and platform dependent to use
+ * memcpy() here, because the copied areas could overlap
+ * extremely!
*/
while (len--)
{
@@ -729,8 +726,8 @@ pglz_decompress(PGLZ_Header *source, char *dest)
else
{
/*
- * An unset control bit means LITERAL BYTE. So we just
- * copy one from INPUT to OUTPUT.
+ * An unset control bit means LITERAL BYTE. So we just copy
+ * one from INPUT to OUTPUT.
*/
*bp++ = *dp++;
}
@@ -764,8 +761,8 @@ pglz_get_next_decomp_char_from_lzdata(PGLZ_DecompState *dstate)
if (dstate->tocopy > 0)
{
/*
- * Copy one byte from output to output until we did it for the
- * length specified by the last tag. Return that byte.
+ * Copy one byte from output to output until we have done it for the length
+ * specified by the last tag. Return that byte.
*/
dstate->tocopy--;
return (*(dstate->cp_out++) = *(dstate->cp_copy++));
@@ -774,21 +771,20 @@ pglz_get_next_decomp_char_from_lzdata(PGLZ_DecompState *dstate)
if (dstate->ctrl_count == 0)
{
/*
- * Get the next control byte if we need to, but check for EOF
- * before.
+ * Get the next control byte if we need to, but check for EOF before.
*/
if (dstate->cp_in == dstate->cp_end)
return EOF;
/*
* This decompression method saves time only, if we stop near the
- * beginning of the data (maybe because we're called by a
- * comparison function and a difference occurs early). Otherwise,
- * all the checks, needed here, cause too much overhead.
+ * beginning of the data (maybe because we're called by a comparison
+ * function and a difference occurs early). Otherwise, all the checks
+ * needed here cause too much overhead.
*
- * Thus we decompress the entire rest at once into the temporary
- * buffer and change the decomp state to return the prepared data
- * from the buffer by the more simple calls to
+ * Thus we decompress the entire rest at once into the temporary buffer
+ * and change the decomp state to return the prepared data from the
+ * buffer by the simpler calls to
* pglz_get_next_decomp_char_from_plain().
*/
if (dstate->cp_out - dstate->temp_buf >= 256)
@@ -856,8 +852,8 @@ pglz_get_next_decomp_char_from_lzdata(PGLZ_DecompState *dstate)
if (dstate->ctrl & 0x01)
{
/*
- * Bit is set, so tag is following. Setup copy information and do
- * the copy for the first byte as above.
+ * Bit is set, so a tag follows. Set up copy information and do the
+ * copy for the first byte as above.
*/
int off;
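
The tag format spelled out in the decompression comments can be decoded in a few lines. This sketch mirrors that description (two tag bytes, an optional extension byte, and a deliberately byte-wise copy because the regions may overlap); the function and parameter names are invented:

/*
 * Illustration only: decode one match tag and copy it.  dp points at the tag
 * bytes, bp at the current output position; both are advanced.
 */
static void
copy_one_match(unsigned char **dp_p, unsigned char **bp_p)
{
    unsigned char  *dp = *dp_p;
    unsigned char  *bp = *bp_p;
    int             len = (dp[0] & 0x0f) + 3;              /* low nibble: length - 3 */
    int             off = ((dp[0] & 0xf0) << 4) | dp[1];   /* 12-bit offset */

    dp += 2;
    if (len == 18)
        len += *dp++;           /* extension byte: how much longer (0-255) */

    while (len--)
    {
        *bp = bp[-off];         /* copy from earlier output, byte by byte */
        bp++;
    }
    *dp_p = dp;
    *bp_p = bp;
}
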
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index b1bd11c9c20..8c10bf387d4 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/pgstatfuncs.c,v 1.24 2005/06/29 22:51:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pgstatfuncs.c,v 1.25 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -354,8 +354,8 @@ pg_stat_get_backend_activity_start(PG_FUNCTION_ARGS)
result = beentry->activity_start_timestamp;
/*
- * No time recorded for start of current query -- this is the case if
- * the user hasn't enabled query-level stats collection.
+ * No time recorded for start of current query -- this is the case if the
+ * user hasn't enabled query-level stats collection.
*/
if (result == 0)
PG_RETURN_NULL();
@@ -366,7 +366,7 @@ pg_stat_get_backend_activity_start(PG_FUNCTION_ARGS)
Datum
pg_stat_get_backend_start(PG_FUNCTION_ARGS)
{
- int32 beid = PG_GETARG_INT32(0);
+ int32 beid = PG_GETARG_INT32(0);
TimestampTz result;
PgStat_StatBeEntry *beentry;
@@ -389,7 +389,7 @@ Datum
pg_stat_get_backend_client_addr(PG_FUNCTION_ARGS)
{
PgStat_StatBeEntry *beentry;
- int32 beid;
+ int32 beid;
char remote_host[NI_MAXHOST];
int ret;
@@ -432,7 +432,7 @@ Datum
pg_stat_get_backend_client_port(PG_FUNCTION_ARGS)
{
PgStat_StatBeEntry *beentry;
- int32 beid;
+ int32 beid;
char remote_port[NI_MAXSERV];
int ret;
diff --git a/src/backend/utils/adt/quote.c b/src/backend/utils/adt/quote.c
index 808ae6142ed..98a8ae765ee 100644
--- a/src/backend/utils/adt/quote.c
+++ b/src/backend/utils/adt/quote.c
@@ -7,7 +7,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/quote.c,v 1.16 2005/07/02 17:01:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/quote.c,v 1.17 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,13 +65,13 @@ quote_literal(PG_FUNCTION_ARGS)
cp1 = VARDATA(t);
cp2 = VARDATA(result);
- for(; len-- > 0; cp1++)
+ for (; len-- > 0; cp1++)
if (*cp1 == '\\')
{
*cp2++ = ESCAPE_STRING_SYNTAX;
break;
}
-
+
len = VARSIZE(t) - VARHDRSZ;
cp1 = VARDATA(t);
*cp2++ = '\'';
diff --git a/src/backend/utils/adt/regexp.c b/src/backend/utils/adt/regexp.c
index 0aba560aa9c..a872762c3c2 100644
--- a/src/backend/utils/adt/regexp.c
+++ b/src/backend/utils/adt/regexp.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/regexp.c,v 1.58 2005/09/24 17:53:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/regexp.c,v 1.59 2005/10/15 02:49:29 momjian Exp $
*
* Alistair Crooks added the code for the regex caching
* agc - cached the regular expressions used - there's a good chance
@@ -85,8 +85,8 @@ static cached_re_str re_array[MAX_CACHED_RES]; /* cached re's */
*
* Returns regex_t
*
- * text_re --- the pattern, expressed as an *untoasted* TEXT object
- * cflags --- compile options for the pattern
+ * text_re --- the pattern, expressed as an *untoasted* TEXT object
+ * cflags --- compile options for the pattern
*
* Pattern is given in the database encoding. We internally convert to
* array of pg_wchar which is what Spencer's regex package wants.
@@ -104,8 +104,8 @@ RE_compile_and_cache(text *text_re, int cflags)
/*
* Look for a match among previously compiled REs. Since the data
- * structure is self-organizing with most-used entries at the front,
- * our search strategy can just be to scan from the front.
+ * structure is self-organizing with most-used entries at the front, our
+ * search strategy can just be to scan from the front.
*/
for (i = 0; i < num_res; i++)
{
@@ -171,8 +171,8 @@ RE_compile_and_cache(text *text_re, int cflags)
re_temp.cre_flags = cflags;
/*
- * Okay, we have a valid new item in re_temp; insert it into the
- * storage array. Discard last entry if needed.
+ * Okay, we have a valid new item in re_temp; insert it into the storage
+ * array. Discard last entry if needed.
*/
if (num_res >= MAX_CACHED_RES)
{
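
A toy version of the self-organizing cache these comments describe: scan from the front, move hits to the front, insert new entries at the front and drop the last one when full. All names and sizes are illustrative (MAX_CACHED mirrors MAX_CACHED_RES, which is 32):

#include <string.h>

#define MAX_CACHED 32

typedef struct
{
    char        key[64];        /* stand-in for pattern + flags */
    void       *compiled;       /* stand-in for the compiled regex */
} cache_entry;

static cache_entry cache[MAX_CACHED];
static int  cache_used = 0;

/* Illustration only: on a hit, shift the entry to the front of the array. */
static void *
cache_lookup(const char *key)
{
    int         i;

    for (i = 0; i < cache_used; i++)
    {
        if (strcmp(cache[i].key, key) == 0)
        {
            cache_entry hit = cache[i];

            memmove(&cache[1], &cache[0], i * sizeof(cache_entry));
            cache[0] = hit;
            return cache[0].compiled;
        }
    }
    return NULL;
}

/* Insert a new entry at the front, discarding the last one if full. */
static void
cache_insert(const char *key, void *compiled)
{
    int         keep = (cache_used < MAX_CACHED) ? cache_used : MAX_CACHED - 1;

    memmove(&cache[1], &cache[0], keep * sizeof(cache_entry));
    strncpy(cache[0].key, key, sizeof(cache[0].key) - 1);
    cache[0].key[sizeof(cache[0].key) - 1] = '\0';
    cache[0].compiled = compiled;
    if (cache_used < MAX_CACHED)
        cache_used++;
}
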
@@ -213,7 +213,7 @@ RE_compile_and_execute(text *text_re, char *dat, int dat_len,
size_t data_len;
int regexec_result;
regex_t re;
- char errMsg[100];
+ char errMsg[100];
/* Convert data string to wide characters */
data = (pg_wchar *) palloc((dat_len + 1) * sizeof(pg_wchar));
@@ -405,10 +405,10 @@ textregexsubstr(PG_FUNCTION_ARGS)
regmatch_t pmatch[2];
/*
- * We pass two regmatch_t structs to get info about the overall match
- * and the match for the first parenthesized subexpression (if any).
- * If there is a parenthesized subexpression, we return what it
- * matched; else return what the whole regexp matched.
+ * We pass two regmatch_t structs to get info about the overall match and
+ * the match for the first parenthesized subexpression (if any). If there
+ * is a parenthesized subexpression, we return what it matched; else
+ * return what the whole regexp matched.
*/
match = RE_compile_and_execute(p,
VARDATA(s),
@@ -432,9 +432,9 @@ textregexsubstr(PG_FUNCTION_ARGS)
}
return DirectFunctionCall3(text_substr,
- PointerGetDatum(s),
- Int32GetDatum(so + 1),
- Int32GetDatum(eo - so));
+ PointerGetDatum(s),
+ Int32GetDatum(so + 1),
+ Int32GetDatum(eo - so));
}
PG_RETURN_NULL();
@@ -442,7 +442,7 @@ textregexsubstr(PG_FUNCTION_ARGS)
/*
* textregexreplace_noopt()
- * Return a replace string matched by a regular expression.
+ * Return a replace string matched by a regular expression.
* This function is a version that doesn't specify the option of
* textregexreplace. This is case sensitive, replace the first
* instance only.
@@ -458,15 +458,15 @@ textregexreplace_noopt(PG_FUNCTION_ARGS)
re = RE_compile_and_cache(p, regex_flavor);
return DirectFunctionCall4(replace_text_regexp,
- PointerGetDatum(s),
- PointerGetDatum(&re),
- PointerGetDatum(r),
- BoolGetDatum(false));
+ PointerGetDatum(s),
+ PointerGetDatum(&re),
+ PointerGetDatum(r),
+ BoolGetDatum(false));
}
/*
* textregexreplace()
- * Return a replace string matched by a regular expression.
+ * Return a replace string matched by a regular expression.
*/
Datum
textregexreplace(PG_FUNCTION_ARGS)
@@ -478,7 +478,7 @@ textregexreplace(PG_FUNCTION_ARGS)
char *opt_p = VARDATA(opt);
int opt_len = (VARSIZE(opt) - VARHDRSZ);
int i;
- bool global = false;
+ bool global = false;
bool ignorecase = false;
regex_t re;
@@ -492,12 +492,13 @@ textregexreplace(PG_FUNCTION_ARGS)
break;
case 'g':
global = true;
+
break;
default:
ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid option of regexp_replace: %c",
- opt_p[i])));
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("invalid option of regexp_replace: %c",
+ opt_p[i])));
break;
}
}
@@ -508,10 +509,10 @@ textregexreplace(PG_FUNCTION_ARGS)
re = RE_compile_and_cache(p, regex_flavor);
return DirectFunctionCall4(replace_text_regexp,
- PointerGetDatum(s),
- PointerGetDatum(&re),
- PointerGetDatum(r),
- BoolGetDatum(global));
+ PointerGetDatum(s),
+ PointerGetDatum(&re),
+ PointerGetDatum(r),
+ BoolGetDatum(global));
}
/* similar_escape()
@@ -555,7 +556,7 @@ similar_escape(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_INVALID_ESCAPE_SEQUENCE),
errmsg("invalid escape string"),
- errhint("Escape string must be empty or one character.")));
+ errhint("Escape string must be empty or one character.")));
}
/* We need room for ^, $, and up to 2 output bytes per input byte */
@@ -566,7 +567,7 @@ similar_escape(PG_FUNCTION_ARGS)
while (plen > 0)
{
- char pchar = *p;
+ char pchar = *p;
if (afterescape)
{
diff --git a/src/backend/utils/adt/regproc.c b/src/backend/utils/adt/regproc.c
index 3a52c8756d1..9a626c2f766 100644
--- a/src/backend/utils/adt/regproc.c
+++ b/src/backend/utils/adt/regproc.c
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/regproc.c,v 1.95 2005/10/02 23:50:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/regproc.c,v 1.96 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -71,17 +71,17 @@ regprocin(PG_FUNCTION_ARGS)
strspn(pro_name_or_oid, "0123456789") == strlen(pro_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(pro_name_or_oid)));
+ CStringGetDatum(pro_name_or_oid)));
PG_RETURN_OID(result);
}
/* Else it's a name, possibly schema-qualified */
/*
- * In bootstrap mode we assume the given name is not schema-qualified,
- * and just search pg_proc for a unique match. This is needed for
- * initializing other system catalogs (pg_namespace may not exist yet,
- * and certainly there are no schemas other than pg_catalog).
+ * In bootstrap mode we assume the given name is not schema-qualified, and
+ * just search pg_proc for a unique match. This is needed for
+ * initializing other system catalogs (pg_namespace may not exist yet, and
+ * certainly there are no schemas other than pg_catalog).
*/
if (IsBootstrapProcessingMode())
{
@@ -113,7 +113,7 @@ regprocin(PG_FUNCTION_ARGS)
if (matches == 0)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function \"%s\" does not exist", pro_name_or_oid)));
+ errmsg("function \"%s\" does not exist", pro_name_or_oid)));
else if (matches > 1)
ereport(ERROR,
@@ -125,8 +125,8 @@ regprocin(PG_FUNCTION_ARGS)
}
/*
- * Normal case: parse the name into components and see if it matches
- * any pg_proc entries in the current search path.
+ * Normal case: parse the name into components and see if it matches any
+ * pg_proc entries in the current search path.
*/
names = stringToQualifiedNameList(pro_name_or_oid, "regprocin");
clist = FuncnameGetCandidates(names, -1);
@@ -134,7 +134,7 @@ regprocin(PG_FUNCTION_ARGS)
if (clist == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function \"%s\" does not exist", pro_name_or_oid)));
+ errmsg("function \"%s\" does not exist", pro_name_or_oid)));
else if (clist->next != NULL)
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_FUNCTION),
@@ -172,9 +172,9 @@ regprocout(PG_FUNCTION_ARGS)
char *proname = NameStr(procform->proname);
/*
- * In bootstrap mode, skip the fancy namespace stuff and just
- * return the proc name. (This path is only needed for debugging
- * output anyway.)
+ * In bootstrap mode, skip the fancy namespace stuff and just return
+ * the proc name. (This path is only needed for debugging output
+ * anyway.)
*/
if (IsBootstrapProcessingMode())
result = pstrdup(proname);
@@ -258,15 +258,15 @@ regprocedurein(PG_FUNCTION_ARGS)
strspn(pro_name_or_oid, "0123456789") == strlen(pro_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(pro_name_or_oid)));
+ CStringGetDatum(pro_name_or_oid)));
PG_RETURN_OID(result);
}
/*
- * Else it's a name and arguments. Parse the name and arguments, look
- * up potential matches in the current namespace search list, and scan
- * to see which one exactly matches the given argument types. (There
- * will not be more than one match.)
+ * Else it's a name and arguments. Parse the name and arguments, look up
+ * potential matches in the current namespace search list, and scan to see
+ * which one exactly matches the given argument types. (There will not be
+ * more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
* datatype cannot be used for any system column that needs to receive
@@ -286,7 +286,7 @@ regprocedurein(PG_FUNCTION_ARGS)
if (clist == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function \"%s\" does not exist", pro_name_or_oid)));
+ errmsg("function \"%s\" does not exist", pro_name_or_oid)));
result = clist->oid;
@@ -323,8 +323,8 @@ format_procedure(Oid procedure_oid)
initStringInfo(&buf);
/*
- * Would this proc be found (given the right args) by
- * regprocedurein? If not, we need to qualify it.
+ * Would this proc be found (given the right args) by regprocedurein?
+ * If not, we need to qualify it.
*/
if (FunctionIsVisible(procedure_oid))
nspname = NULL;
@@ -421,17 +421,17 @@ regoperin(PG_FUNCTION_ARGS)
strspn(opr_name_or_oid, "0123456789") == strlen(opr_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(opr_name_or_oid)));
+ CStringGetDatum(opr_name_or_oid)));
PG_RETURN_OID(result);
}
/* Else it's a name, possibly schema-qualified */
/*
- * In bootstrap mode we assume the given name is not schema-qualified,
- * and just search pg_operator for a unique match. This is needed for
- * initializing other system catalogs (pg_namespace may not exist yet,
- * and certainly there are no schemas other than pg_catalog).
+ * In bootstrap mode we assume the given name is not schema-qualified, and
+ * just search pg_operator for a unique match. This is needed for
+ * initializing other system catalogs (pg_namespace may not exist yet, and
+ * certainly there are no schemas other than pg_catalog).
*/
if (IsBootstrapProcessingMode())
{
@@ -463,7 +463,7 @@ regoperin(PG_FUNCTION_ARGS)
if (matches == 0)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("operator does not exist: %s", opr_name_or_oid)));
+ errmsg("operator does not exist: %s", opr_name_or_oid)));
else if (matches > 1)
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_FUNCTION),
@@ -474,8 +474,8 @@ regoperin(PG_FUNCTION_ARGS)
}
/*
- * Normal case: parse the name into components and see if it matches
- * any pg_operator entries in the current search path.
+ * Normal case: parse the name into components and see if it matches any
+ * pg_operator entries in the current search path.
*/
names = stringToQualifiedNameList(opr_name_or_oid, "regoperin");
clist = OpernameGetCandidates(names, '\0');
@@ -521,9 +521,9 @@ regoperout(PG_FUNCTION_ARGS)
char *oprname = NameStr(operform->oprname);
/*
- * In bootstrap mode, skip the fancy namespace stuff and just
- * return the oper name. (This path is only needed for debugging
- * output anyway.)
+ * In bootstrap mode, skip the fancy namespace stuff and just return
+ * the oper name. (This path is only needed for debugging output
+ * anyway.)
*/
if (IsBootstrapProcessingMode())
result = pstrdup(oprname);
@@ -556,8 +556,7 @@ regoperout(PG_FUNCTION_ARGS)
else
{
/*
- * If OID doesn't match any pg_operator entry, return it
- * numerically
+ * If OID doesn't match any pg_operator entry, return it numerically
*/
result = (char *) palloc(NAMEDATALEN);
snprintf(result, NAMEDATALEN, "%u", oprid);
@@ -616,15 +615,15 @@ regoperatorin(PG_FUNCTION_ARGS)
strspn(opr_name_or_oid, "0123456789") == strlen(opr_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(opr_name_or_oid)));
+ CStringGetDatum(opr_name_or_oid)));
PG_RETURN_OID(result);
}
/*
- * Else it's a name and arguments. Parse the name and arguments, look
- * up potential matches in the current namespace search list, and scan
- * to see which one exactly matches the given argument types. (There
- * will not be more than one match.)
+ * Else it's a name and arguments. Parse the name and arguments, look up
+ * potential matches in the current namespace search list, and scan to see
+ * which one exactly matches the given argument types. (There will not be
+ * more than one match.)
*
* XXX at present, this code will not work in bootstrap mode, hence this
* datatype cannot be used for any system column that needs to receive
@@ -696,8 +695,8 @@ format_operator(Oid operator_oid)
initStringInfo(&buf);
/*
- * Would this oper be found (given the right args) by
- * regoperatorin? If not, we need to qualify it.
+ * Would this oper be found (given the right args) by regoperatorin?
+ * If not, we need to qualify it.
*/
if (!OperatorIsVisible(operator_oid))
{
@@ -727,8 +726,7 @@ format_operator(Oid operator_oid)
else
{
/*
- * If OID doesn't match any pg_operator entry, return it
- * numerically
+ * If OID doesn't match any pg_operator entry, return it numerically
*/
result = (char *) palloc(NAMEDATALEN);
snprintf(result, NAMEDATALEN, "%u", operator_oid);
@@ -797,20 +795,20 @@ regclassin(PG_FUNCTION_ARGS)
/* Numeric OID? */
if (class_name_or_oid[0] >= '0' &&
class_name_or_oid[0] <= '9' &&
- strspn(class_name_or_oid, "0123456789") == strlen(class_name_or_oid))
+ strspn(class_name_or_oid, "0123456789") == strlen(class_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(class_name_or_oid)));
+ CStringGetDatum(class_name_or_oid)));
PG_RETURN_OID(result);
}
/* Else it's a name, possibly schema-qualified */
/*
- * In bootstrap mode we assume the given name is not schema-qualified,
- * and just search pg_class for a match. This is needed for
- * initializing other system catalogs (pg_namespace may not exist yet,
- * and certainly there are no schemas other than pg_catalog).
+ * In bootstrap mode we assume the given name is not schema-qualified, and
+ * just search pg_class for a match. This is needed for initializing
+ * other system catalogs (pg_namespace may not exist yet, and certainly
+ * there are no schemas other than pg_catalog).
*/
if (IsBootstrapProcessingMode())
{
@@ -833,7 +831,7 @@ regclassin(PG_FUNCTION_ARGS)
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("relation \"%s\" does not exist", class_name_or_oid)));
+ errmsg("relation \"%s\" does not exist", class_name_or_oid)));
/* We assume there can be only one match */
@@ -844,8 +842,8 @@ regclassin(PG_FUNCTION_ARGS)
}
/*
- * Normal case: parse the name into components and see if it matches
- * any pg_class entries in the current search path.
+ * Normal case: parse the name into components and see if it matches any
+ * pg_class entries in the current search path.
*/
names = stringToQualifiedNameList(class_name_or_oid, "regclassin");
@@ -880,9 +878,9 @@ regclassout(PG_FUNCTION_ARGS)
char *classname = NameStr(classform->relname);
/*
- * In bootstrap mode, skip the fancy namespace stuff and just
- * return the class name. (This path is only needed for debugging
- * output anyway.)
+ * In bootstrap mode, skip the fancy namespace stuff and just return
+ * the class name. (This path is only needed for debugging output
+ * anyway.)
*/
if (IsBootstrapProcessingMode())
result = pstrdup(classname);
@@ -891,8 +889,7 @@ regclassout(PG_FUNCTION_ARGS)
char *nspname;
/*
- * Would this class be found by regclassin? If not, qualify
- * it.
+ * Would this class be found by regclassin? If not, qualify it.
*/
if (RelationIsVisible(classid))
nspname = NULL;
@@ -966,17 +963,17 @@ regtypein(PG_FUNCTION_ARGS)
strspn(typ_name_or_oid, "0123456789") == strlen(typ_name_or_oid))
{
result = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(typ_name_or_oid)));
+ CStringGetDatum(typ_name_or_oid)));
PG_RETURN_OID(result);
}
/* Else it's a type name, possibly schema-qualified or decorated */
/*
- * In bootstrap mode we assume the given name is not schema-qualified,
- * and just search pg_type for a match. This is needed for
- * initializing other system catalogs (pg_namespace may not exist yet,
- * and certainly there are no schemas other than pg_catalog).
+ * In bootstrap mode we assume the given name is not schema-qualified, and
+ * just search pg_type for a match. This is needed for initializing other
+ * system catalogs (pg_namespace may not exist yet, and certainly there
+ * are no schemas other than pg_catalog).
*/
if (IsBootstrapProcessingMode())
{
@@ -999,7 +996,7 @@ regtypein(PG_FUNCTION_ARGS)
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("type \"%s\" does not exist", typ_name_or_oid)));
+ errmsg("type \"%s\" does not exist", typ_name_or_oid)));
/* We assume there can be only one match */
@@ -1010,8 +1007,8 @@ regtypein(PG_FUNCTION_ARGS)
}
/*
- * Normal case: invoke the full parser to deal with special cases such
- * as array syntax.
+ * Normal case: invoke the full parser to deal with special cases such as
+ * array syntax.
*/
parseTypeString(typ_name_or_oid, &result, &typmod);
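
All of the reg*in functions share the input convention described above: an all-digit string is taken as a raw OID, anything else goes through name lookup. A hedged sketch of just that test, with Oid assumed to be uint32_t:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Illustration only: returns true and sets *oid when the input is numeric. */
static bool
try_parse_as_oid(const char *s, uint32_t *oid)
{
    if (s[0] >= '0' && s[0] <= '9' &&
        strspn(s, "0123456789") == strlen(s))
    {
        *oid = (uint32_t) strtoul(s, NULL, 10);
        return true;
    }
    return false;               /* not numeric: treat as a (qualified) name */
}
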
@@ -1043,9 +1040,9 @@ regtypeout(PG_FUNCTION_ARGS)
Form_pg_type typeform = (Form_pg_type) GETSTRUCT(typetup);
/*
- * In bootstrap mode, skip the fancy namespace stuff and just
- * return the type name. (This path is only needed for debugging
- * output anyway.)
+ * In bootstrap mode, skip the fancy namespace stuff and just return
+ * the type name. (This path is only needed for debugging output
+ * anyway.)
*/
if (IsBootstrapProcessingMode())
{
diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c
index 8de31643a68..c49b17be10d 100644
--- a/src/backend/utils/adt/ri_triggers.c
+++ b/src/backend/utils/adt/ri_triggers.c
@@ -17,7 +17,7 @@
*
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.80 2005/06/28 05:09:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.81 2005/10/15 02:49:29 momjian Exp $
*
* ----------
*/
@@ -194,8 +194,7 @@ RI_FKey_check(PG_FUNCTION_ARGS)
int match_type;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_check", RI_TRIGTYPE_INUP);
@@ -203,8 +202,7 @@ RI_FKey_check(PG_FUNCTION_ARGS)
tgargs = trigdata->tg_trigger->tgargs;
/*
- * Get the relation descriptors of the FK and PK tables and the new
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the new tuple.
*
* pk_rel is opened in RowShareLock mode since that's what our eventual
* SELECT FOR SHARE will get on it.
@@ -225,9 +223,9 @@ RI_FKey_check(PG_FUNCTION_ARGS)
}
/*
- * We should not even consider checking the row if it is no longer
- * valid since it was either deleted (doesn't matter) or updated (in
- * which case it'll be checked with its final values).
+ * We should not even consider checking the row if it is no longer valid
+ * since it was either deleted (doesn't matter) or updated (in which case
+ * it'll be checked with its final values).
*/
Assert(new_row_buf != InvalidBuffer);
if (!HeapTupleSatisfiesItself(new_row->t_data, new_row_buf))
@@ -311,8 +309,8 @@ RI_FKey_check(PG_FUNCTION_ARGS)
case RI_KEYS_ALL_NULL:
/*
- * No check - if NULLs are allowed at all is already checked
- * by NOT NULL constraint.
+ * No check - whether NULLs are allowed at all is already checked by
+ * the NOT NULL constraint.
*
* This is true for MATCH FULL, MATCH PARTIAL, and MATCH
* <unspecified>
@@ -323,21 +321,21 @@ RI_FKey_check(PG_FUNCTION_ARGS)
case RI_KEYS_SOME_NULL:
/*
- * This is the only case that differs between the three kinds
- * of MATCH.
+ * This is the only case that differs between the three kinds of
+ * MATCH.
*/
switch (match_type)
{
case RI_MATCH_TYPE_FULL:
/*
- * Not allowed - MATCH FULL says either all or none of
- * the attributes can be NULLs
+ * Not allowed - MATCH FULL says either all or none of the
+ * attributes can be NULLs
*/
ereport(ERROR,
(errcode(ERRCODE_FOREIGN_KEY_VIOLATION),
errmsg("insert or update on table \"%s\" violates foreign key constraint \"%s\"",
- RelationGetRelationName(trigdata->tg_relation),
+ RelationGetRelationName(trigdata->tg_relation),
tgargs[RI_CONSTRAINT_NAME_ARGNO]),
errdetail("MATCH FULL does not allow mixing of null and nonnull key values.")));
heap_close(pk_rel, RowShareLock);
@@ -346,8 +344,8 @@ RI_FKey_check(PG_FUNCTION_ARGS)
case RI_MATCH_TYPE_UNSPECIFIED:
/*
- * MATCH <unspecified> - if ANY column is null, we
- * have a match.
+ * MATCH <unspecified> - if ANY column is null, we have a
+ * match.
*/
heap_close(pk_rel, RowShareLock);
return PointerGetDatum(NULL);
@@ -355,14 +353,14 @@ RI_FKey_check(PG_FUNCTION_ARGS)
case RI_MATCH_TYPE_PARTIAL:
/*
- * MATCH PARTIAL - all non-null columns must match.
- * (not implemented, can be done by modifying the
- * query below to only include non-null columns, or by
- * writing a special version here)
+ * MATCH PARTIAL - all non-null columns must match. (not
+ * implemented, can be done by modifying the query below
+ * to only include non-null columns, or by writing a
+ * special version here)
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("MATCH PARTIAL not yet implemented")));
+ errmsg("MATCH PARTIAL not yet implemented")));
heap_close(pk_rel, RowShareLock);
return PointerGetDatum(NULL);
}
@@ -370,8 +368,8 @@ RI_FKey_check(PG_FUNCTION_ARGS)
case RI_KEYS_NONE_NULL:
/*
- * Have a full qualified key - continue below for all three
- * kinds of MATCH.
+ * Have a fully qualified key - continue below for all three kinds
+ * of MATCH.
*/
break;
}
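
The three-way classification these comments walk through can be summarized as a small dispatch; the enum names below are invented for illustration, and the "error" outcome stands in for the ereport calls:

typedef enum {KEYS_ALL_NULL, KEYS_SOME_NULL, KEYS_NONE_NULL} key_class;
typedef enum {MATCH_FULL, MATCH_PARTIAL, MATCH_UNSPECIFIED} match_kind;
typedef enum {CHECK_SKIP, CHECK_RUN_QUERY, CHECK_ERROR} check_action;

/* Illustration only: which action the comments above prescribe. */
static check_action
classify_fk_check(key_class keys, match_kind match)
{
    switch (keys)
    {
        case KEYS_ALL_NULL:
            return CHECK_SKIP;          /* NOT NULL constraints police this case */
        case KEYS_SOME_NULL:
            if (match == MATCH_FULL)
                return CHECK_ERROR;     /* mixing null and nonnull keys is a violation */
            if (match == MATCH_UNSPECIFIED)
                return CHECK_SKIP;      /* any null column means a match */
            return CHECK_ERROR;         /* MATCH PARTIAL: not implemented */
        case KEYS_NONE_NULL:
        default:
            return CHECK_RUN_QUERY;     /* fully qualified key: do the SELECT */
    }
}
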
@@ -385,7 +383,7 @@ RI_FKey_check(PG_FUNCTION_ARGS)
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char pkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
@@ -406,12 +404,12 @@ RI_FKey_check(PG_FUNCTION_ARGS)
for (i = 0; i < qkey.nkeypairs; i++)
{
quoteOneName(attname,
- tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_PK_IDX]);
+ tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_PK_IDX]);
snprintf(querystr + strlen(querystr), sizeof(querystr) - strlen(querystr), " %s %s = $%d",
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(fk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_FK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_FK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
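
The loop above assembles a parameterized lookup query of the form "SELECT 1 FROM ONLY <pktable> x ... col = $n ... FOR SHARE OF x", with a separator that flips to AND after the first key column. A simplified sketch of that string construction, without the quoting helpers and with generic names:

#include <stdio.h>
#include <string.h>

/* Illustration only: build the kind of lookup query assembled above. */
static void
build_fk_check_query(char *buf, size_t buflen,
                     const char *pktable, const char **pkcols, int ncols)
{
    const char *sep = "WHERE";
    int         i;

    snprintf(buf, buflen, "SELECT 1 FROM ONLY %s x", pktable);
    for (i = 0; i < ncols; i++)
    {
        snprintf(buf + strlen(buf), buflen - strlen(buf),
                 " %s %s = $%d", sep, pkcols[i], i + 1);
        sep = "AND";            /* only the first column gets WHERE */
    }
    snprintf(buf + strlen(buf), buflen - strlen(buf), " FOR SHARE OF x");
}
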
@@ -493,16 +491,15 @@ ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
case RI_KEYS_ALL_NULL:
/*
- * No check - nothing could have been referencing this row
- * anyway.
+ * No check - nothing could have been referencing this row anyway.
*/
return true;
case RI_KEYS_SOME_NULL:
/*
- * This is the only case that differs between the three kinds
- * of MATCH.
+ * This is the only case that differs between the three kinds of
+ * MATCH.
*/
switch (match_type)
{
@@ -510,30 +507,30 @@ ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
case RI_MATCH_TYPE_UNSPECIFIED:
/*
- * MATCH <unspecified>/FULL - if ANY column is null,
- * we can't be matching to this row already.
+ * MATCH <unspecified>/FULL - if ANY column is null, we
+ * can't be matching to this row already.
*/
return true;
case RI_MATCH_TYPE_PARTIAL:
/*
- * MATCH PARTIAL - all non-null columns must match.
- * (not implemented, can be done by modifying the
- * query below to only include non-null columns, or by
- * writing a special version here)
+ * MATCH PARTIAL - all non-null columns must match. (not
+ * implemented, can be done by modifying the query below
+ * to only include non-null columns, or by writing a
+ * special version here)
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("MATCH PARTIAL not yet implemented")));
+ errmsg("MATCH PARTIAL not yet implemented")));
break;
}
case RI_KEYS_NONE_NULL:
/*
- * Have a full qualified key - continue below for all three
- * kinds of MATCH.
+ * Have a fully qualified key - continue below for all three kinds
+ * of MATCH.
*/
break;
}
@@ -547,7 +544,7 @@ ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char pkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
@@ -568,12 +565,12 @@ ri_Check_Pk_Match(Relation pk_rel, Relation fk_rel,
for (i = 0; i < qkey.nkeypairs; i++)
{
quoteOneName(attname,
- tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_PK_IDX]);
+ tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_PK_IDX]);
snprintf(querystr + strlen(querystr), sizeof(querystr) - strlen(querystr), " %s %s = $%d",
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
@@ -621,8 +618,7 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
int match_type;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_noaction_del", RI_TRIGTYPE_DELETE);
@@ -636,8 +632,7 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
* fk_rel is opened in RowShareLock mode since that's what our eventual
* SELECT FOR SHARE will get on it.
@@ -699,13 +694,13 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
elog(ERROR, "SPI_connect failed");
/*
- * Fetch or prepare a saved plan for the restrict delete
- * lookup if foreign references exist
+ * Fetch or prepare a saved plan for the restrict delete lookup if
+ * foreign references exist
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
@@ -731,7 +726,7 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
@@ -741,8 +736,7 @@ RI_FKey_noaction_del(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to check for existing
- * references.
+ * We have a plan now. Run it to check for existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -800,8 +794,7 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
int match_type;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_noaction_upd", RI_TRIGTYPE_UPDATE);
@@ -815,8 +808,8 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the new
- * and old tuple.
+ * Get the relation descriptors of the FK and PK tables and the new and
+ * old tuple.
*
* fk_rel is opened in RowShareLock mode since that's what our eventual
* SELECT FOR SHARE will get on it.
@@ -879,8 +872,8 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
match_type, tgnargs, tgargs))
{
/*
- * There's either another row, or no row could match this
- * one. In either case, we don't need to do the check.
+ * There's either another row, or no row could match this one.
+ * In either case, we don't need to do the check.
*/
heap_close(fk_rel, RowShareLock);
return PointerGetDatum(NULL);
@@ -890,13 +883,13 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
elog(ERROR, "SPI_connect failed");
/*
- * Fetch or prepare a saved plan for the noaction update
- * lookup if foreign references exist
+ * Fetch or prepare a saved plan for the noaction update lookup if
+ * foreign references exist
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
@@ -922,7 +915,7 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
@@ -932,8 +925,7 @@ RI_FKey_noaction_upd(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to check for existing
- * references.
+ * We have a plan now. Run it to check for existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -987,8 +979,7 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
int i;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_cascade_del", RI_TRIGTYPE_DELETE);
@@ -1002,11 +993,10 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual DELETE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * DELETE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -1057,7 +1047,7 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
@@ -1083,7 +1073,7 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
/* Prepare and save the plan */
@@ -1092,9 +1082,8 @@ RI_FKey_cascade_del(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Build up the arguments from the key
- * values in the deleted PK tuple and delete the referencing
- * rows
+ * We have a plan now. Build up the arguments from the key values
+ * in the deleted PK tuple and delete the referencing rows
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -1150,8 +1139,7 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
int j;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_cascade_upd", RI_TRIGTYPE_UPDATE);
@@ -1165,11 +1153,11 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the new
- * and old tuple.
+ * Get the relation descriptors of the FK and PK tables and the new and
+ * old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -1232,7 +1220,7 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
char qualstr[(MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
@@ -1266,7 +1254,7 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
querysep = ",";
qualsep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
queryoids[j] = queryoids[i];
}
strcat(querystr, qualstr);
@@ -1277,8 +1265,7 @@ RI_FKey_cascade_upd(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to update the existing
- * references.
+ * We have a plan now. Run it to update the existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -1339,8 +1326,7 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
int i;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_restrict_del", RI_TRIGTYPE_DELETE);
@@ -1354,8 +1340,7 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
* fk_rel is opened in RowShareLock mode since that's what our eventual
* SELECT FOR SHARE will get on it.
@@ -1404,13 +1389,13 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
elog(ERROR, "SPI_connect failed");
/*
- * Fetch or prepare a saved plan for the restrict delete
- * lookup if foreign references exist
+ * Fetch or prepare a saved plan for the restrict delete lookup if
+ * foreign references exist
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
@@ -1436,7 +1421,7 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
@@ -1446,8 +1431,7 @@ RI_FKey_restrict_del(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to check for existing
- * references.
+ * We have a plan now. Run it to check for existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -1509,8 +1493,7 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
int i;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_restrict_upd", RI_TRIGTYPE_UPDATE);
@@ -1524,8 +1507,8 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the new
- * and old tuple.
+ * Get the relation descriptors of the FK and PK tables and the new and
+ * old tuple.
*
* fk_rel is opened in RowShareLock mode since that's what our eventual
* SELECT FOR SHARE will get on it.
@@ -1585,13 +1568,13 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
elog(ERROR, "SPI_connect failed");
/*
- * Fetch or prepare a saved plan for the restrict update
- * lookup if foreign references exist
+ * Fetch or prepare a saved plan for the restrict update lookup if
+ * foreign references exist
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
const char *querysep;
@@ -1617,7 +1600,7 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
querysep, attname, i + 1);
querysep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, " FOR SHARE OF x");
@@ -1627,8 +1610,7 @@ RI_FKey_restrict_upd(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to check for existing
- * references.
+ * We have a plan now. Run it to check for existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -1682,8 +1664,7 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
int i;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_setnull_del", RI_TRIGTYPE_DELETE);
@@ -1697,11 +1678,10 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -1747,13 +1727,12 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
elog(ERROR, "SPI_connect failed");
/*
- * Fetch or prepare a saved plan for the set null delete
- * operation
+ * Fetch or prepare a saved plan for the set null delete operation
*/
if ((qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
char qualstr[(MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
@@ -1787,7 +1766,7 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
querysep = ",";
qualsep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, qualstr);
@@ -1797,8 +1776,7 @@ RI_FKey_setnull_del(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to check for existing
- * references.
+ * We have a plan now. Run it to check for existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -1855,8 +1833,7 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
bool use_cached_query;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_setnull_upd", RI_TRIGTYPE_UPDATE);
@@ -1870,11 +1847,10 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -1932,17 +1908,16 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
elog(ERROR, "SPI_connect failed");
/*
- * "MATCH <unspecified>" only changes columns corresponding to
- * the referenced columns that have changed in pk_rel. This
- * means the "SET attrn=NULL [, attrn=NULL]" string will be
- * change as well. In this case, we need to build a temporary
- * plan rather than use our cached plan, unless the update
- * happens to change all columns in the key. Fortunately, for
- * the most common case of a single-column foreign key, this
- * will be true.
+ * "MATCH <unspecified>" only changes columns corresponding to the
+ * referenced columns that have changed in pk_rel. This means the
+ * "SET attrn=NULL [, attrn=NULL]" string will be change as well.
+ * In this case, we need to build a temporary plan rather than use
+ * our cached plan, unless the update happens to change all
+ * columns in the key. Fortunately, for the most common case of a
+ * single-column foreign key, this will be true.
*
- * In case you're wondering, the inequality check works because
- * we know that the old key value has no NULLs (see above).
+ * In case you're wondering, the inequality check works because we
+ * know that the old key value has no NULLs (see above).
*/
use_cached_query = match_type == RI_MATCH_TYPE_FULL ||
@@ -1950,14 +1925,14 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
&qkey, RI_KEYPAIR_PK_IDX);
/*
- * Fetch or prepare a saved plan for the set null update
- * operation if possible, or build a temporary plan if not.
+ * Fetch or prepare a saved plan for the set null update operation
+ * if possible, or build a temporary plan if not.
*/
if (!use_cached_query ||
(qplan = ri_FetchPreparedPlan(&qkey)) == NULL)
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
char qualstr[(MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
@@ -1986,8 +1961,8 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_FK_IDX]);
/*
- * MATCH <unspecified> - only change columns
- * corresponding to changed columns in pk_rel's key
+ * MATCH <unspecified> - only change columns corresponding
+ * to changed columns in pk_rel's key
*/
if (match_type == RI_MATCH_TYPE_FULL ||
!ri_OneKeyEqual(pk_rel, i, old_row, new_row, &qkey,
@@ -2001,7 +1976,7 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
qualsep, attname, i + 1);
qualsep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, qualstr);
@@ -2015,8 +1990,7 @@ RI_FKey_setnull_upd(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to update the existing
- * references.
+ * We have a plan now. Run it to update the existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -2069,8 +2043,7 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
void *qplan;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_setdefault_del", RI_TRIGTYPE_DELETE);
@@ -2084,11 +2057,10 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -2135,12 +2107,12 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
/*
* Prepare a plan for the set default delete operation.
- * Unfortunately we need to do it on every invocation because
- * the default value could potentially change between calls.
+ * Unfortunately we need to do it on every invocation because the
+ * default value could potentially change between calls.
*/
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
char qualstr[(MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
@@ -2175,7 +2147,7 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
querysep = ",";
qualsep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, qualstr);
@@ -2185,8 +2157,7 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to update the existing
- * references.
+ * We have a plan now. Run it to update the existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -2201,12 +2172,12 @@ RI_FKey_setdefault_del(PG_FUNCTION_ARGS)
heap_close(fk_rel, RowExclusiveLock);
/*
- * In the case we delete the row who's key is equal to the
- * default values AND a referencing row in the foreign key
- * table exists, we would just have updated it to the same
- * values. We need to do another lookup now and in case a
- * reference exists, abort the operation. That is already
- * implemented in the NO ACTION trigger.
+ * In the case we delete the row whose key is equal to the default
+ * values AND a referencing row in the foreign key table exists,
+ * we would just have updated it to the same values. We need to do
+ * another lookup now and in case a reference exists, abort the
+ * operation. That is already implemented in the NO ACTION
+ * trigger.
*/
RI_FKey_noaction_del(fcinfo);
@@ -2251,8 +2222,7 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
int match_type;
/*
- * Check that this is a valid trigger call on the right time and
- * event.
+ * Check that this is a valid trigger call on the right time and event.
*/
ri_CheckTrigger(fcinfo, "RI_FKey_setdefault_upd", RI_TRIGTYPE_UPDATE);
@@ -2266,11 +2236,10 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
return PointerGetDatum(NULL);
/*
- * Get the relation descriptors of the FK and PK tables and the old
- * tuple.
+ * Get the relation descriptors of the FK and PK tables and the old tuple.
*
- * fk_rel is opened in RowExclusiveLock mode since that's what our
- * eventual UPDATE will get on it.
+ * fk_rel is opened in RowExclusiveLock mode since that's what our eventual
+ * UPDATE will get on it.
*/
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, RowExclusiveLock);
pk_rel = trigdata->tg_relation;
@@ -2330,12 +2299,12 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
/*
* Prepare a plan for the set default update operation.
- * Unfortunately we need to do it on every invocation because
- * the default value could potentially change between calls.
+ * Unfortunately we need to do it on every invocation because the
+ * default value could potentially change between calls.
*/
{
char querystr[MAX_QUOTED_REL_NAME_LEN + 100 +
- (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
+ (MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS * 2];
char qualstr[(MAX_QUOTED_NAME_LEN + 32) * RI_MAX_NUMKEYS];
char fkrelname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
@@ -2365,12 +2334,12 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
tgargs[RI_FIRST_ATTNAME_ARGNO + i * 2 + RI_KEYPAIR_FK_IDX]);
/*
- * MATCH <unspecified> - only change columns
- * corresponding to changed columns in pk_rel's key
+ * MATCH <unspecified> - only change columns corresponding
+ * to changed columns in pk_rel's key
*/
if (match_type == RI_MATCH_TYPE_FULL ||
!ri_OneKeyEqual(pk_rel, i, old_row,
- new_row, &qkey, RI_KEYPAIR_PK_IDX))
+ new_row, &qkey, RI_KEYPAIR_PK_IDX))
{
snprintf(querystr + strlen(querystr), sizeof(querystr) - strlen(querystr), "%s %s = DEFAULT",
querysep, attname);
@@ -2380,7 +2349,7 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
qualsep, attname, i + 1);
qualsep = "AND";
queryoids[i] = SPI_gettypeid(pk_rel->rd_att,
- qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
+ qkey.keypair[i][RI_KEYPAIR_PK_IDX]);
}
strcat(querystr, qualstr);
@@ -2390,8 +2359,7 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
}
/*
- * We have a plan now. Run it to update the existing
- * references.
+ * We have a plan now. Run it to update the existing references.
*/
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
@@ -2407,11 +2375,11 @@ RI_FKey_setdefault_upd(PG_FUNCTION_ARGS)
/*
* In the case we updated the row whose key was equal to the
- * default values AND a referencing row in the foreign key
- * table exists, we would just have updated it to the same
- * values. We need to do another lookup now and in case a
- * reference exists, abort the operation. That is already
- * implemented in the NO ACTION trigger.
+ * default values AND a referencing row in the foreign key table
+ * exists, we would just have updated it to the same values. We
+ * need to do another lookup now and in case a reference exists,
+ * abort the operation. That is already implemented in the NO
+ * ACTION trigger.
*/
RI_FKey_noaction_upd(fcinfo);
@@ -2474,11 +2442,11 @@ RI_FKey_keyequal_upd_pk(Trigger *trigger, Relation pk_rel,
if (!OidIsValid(trigger->tgconstrrelid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("no target table given for trigger \"%s\" on table \"%s\"",
- trigger->tgname,
- RelationGetRelationName(pk_rel)),
- errhint("Remove this referential integrity trigger and its mates, "
- "then do ALTER TABLE ADD CONSTRAINT.")));
+ errmsg("no target table given for trigger \"%s\" on table \"%s\"",
+ trigger->tgname,
+ RelationGetRelationName(pk_rel)),
+ errhint("Remove this referential integrity trigger and its mates, "
+ "then do ALTER TABLE ADD CONSTRAINT.")));
fk_rel = heap_open(trigger->tgconstrrelid, AccessShareLock);
@@ -2496,7 +2464,7 @@ RI_FKey_keyequal_upd_pk(Trigger *trigger, Relation pk_rel,
return ri_KeysEqual(pk_rel, old_row, new_row, &qkey,
RI_KEYPAIR_PK_IDX);
- /* Handle MATCH PARTIAL set null delete. */
+ /* Handle MATCH PARTIAL set null delete. */
case RI_MATCH_TYPE_PARTIAL:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -2548,11 +2516,11 @@ RI_FKey_keyequal_upd_fk(Trigger *trigger, Relation fk_rel,
if (!OidIsValid(trigger->tgconstrrelid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("no target table given for trigger \"%s\" on table \"%s\"",
- trigger->tgname,
- RelationGetRelationName(fk_rel)),
- errhint("Remove this referential integrity trigger and its mates, "
- "then do ALTER TABLE ADD CONSTRAINT.")));
+ errmsg("no target table given for trigger \"%s\" on table \"%s\"",
+ trigger->tgname,
+ RelationGetRelationName(fk_rel)),
+ errhint("Remove this referential integrity trigger and its mates, "
+ "then do ALTER TABLE ADD CONSTRAINT.")));
pk_rel = heap_open(trigger->tgconstrrelid, AccessShareLock);
@@ -2570,7 +2538,7 @@ RI_FKey_keyequal_upd_fk(Trigger *trigger, Relation fk_rel,
return ri_KeysEqual(fk_rel, old_row, new_row, &qkey,
RI_KEYPAIR_FK_IDX);
- /* Handle MATCH PARTIAL set null delete. */
+ /* Handle MATCH PARTIAL set null delete. */
case RI_MATCH_TYPE_PARTIAL:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -2603,7 +2571,7 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
{
const char *constrname = fkconstraint->constr_name;
char querystr[MAX_QUOTED_REL_NAME_LEN * 2 + 250 +
- (MAX_QUOTED_NAME_LEN + 32) * ((RI_MAX_NUMKEYS * 4) + 1)];
+ (MAX_QUOTED_NAME_LEN + 32) * ((RI_MAX_NUMKEYS * 4) + 1)];
char pkrelname[MAX_QUOTED_REL_NAME_LEN];
char relname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
@@ -2617,9 +2585,9 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
void *qplan;
/*
- * Check to make sure current user has enough permissions to do the
- * test query. (If not, caller can fall back to the trigger method,
- * which works because it changes user IDs on the fly.)
+ * Check to make sure current user has enough permissions to do the test
+ * query. (If not, caller can fall back to the trigger method, which
+ * works because it changes user IDs on the fly.)
*
* XXX are there any other show-stopper conditions to check?
*/
@@ -2669,8 +2637,8 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
}
/*
- * It's sufficient to test any one pk attribute for null to detect a
- * join failure.
+ * It's sufficient to test any one pk attribute for null to detect a join
+ * failure.
*/
quoteOneName(attname, strVal(linitial(fkconstraint->pk_attrs)));
snprintf(querystr + strlen(querystr), sizeof(querystr) - strlen(querystr),
@@ -2706,13 +2674,12 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
")");
/*
- * Temporarily increase work_mem so that the check query can be
- * executed more efficiently. It seems okay to do this because the
- * query is simple enough to not use a multiple of work_mem, and one
- * typically would not have many large foreign-key validations
- * happening concurrently. So this seems to meet the criteria for
- * being considered a "maintenance" operation, and accordingly we use
- * maintenance_work_mem.
+ * Temporarily increase work_mem so that the check query can be executed
+ * more efficiently. It seems okay to do this because the query is simple
+ * enough to not use a multiple of work_mem, and one typically would not
+ * have many large foreign-key validations happening concurrently. So
+ * this seems to meet the criteria for being considered a "maintenance"
+ * operation, and accordingly we use maintenance_work_mem.
*
* We do the equivalent of "SET LOCAL work_mem" so that transaction abort
* will restore the old value if we lose control due to an error.
@@ -2736,8 +2703,8 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
elog(ERROR, "SPI_prepare returned %d for %s", SPI_result, querystr);
/*
- * Run the plan. For safety we force a current snapshot to be used.
- * (In serializable mode, this arguably violates serializability, but we
+ * Run the plan. For safety we force a current snapshot to be used. (In
+ * serializable mode, this arguably violates serializability, but we
* really haven't got much choice.) We need at most one tuple returned,
* so pass limit = 1.
*/
@@ -2762,8 +2729,8 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
/*
* If it's MATCH FULL, and there are any nulls in the FK keys,
- * complain about that rather than the lack of a match. MATCH
- * FULL disallows partially-null FK rows.
+ * complain about that rather than the lack of a match. MATCH FULL
+ * disallows partially-null FK rows.
*/
if (fkconstraint->fk_matchtype == FKCONSTR_MATCH_FULL)
{
@@ -2785,8 +2752,8 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
}
/*
- * Although we didn't cache the query, we need to set up a fake
- * query key to pass to ri_ReportViolation.
+ * Although we didn't cache the query, we need to set up a fake query
+ * key to pass to ri_ReportViolation.
*/
MemSet(&qkey, 0, sizeof(qkey));
qkey.constr_queryno = RI_PLAN_CHECK_LOOKUPPK;
@@ -2804,8 +2771,8 @@ RI_Initial_Check(FkConstraint *fkconstraint, Relation rel, Relation pkrel)
elog(ERROR, "SPI_finish failed");
/*
- * Restore work_mem for the remainder of the current transaction. This
- * is another SET LOCAL, so it won't affect the session value, nor any
+ * Restore work_mem for the remainder of the current transaction. This is
+ * another SET LOCAL, so it won't affect the session value, nor any
* tentative value if there is one.
*/
snprintf(workmembuf, sizeof(workmembuf), "%d", old_work_mem);
@@ -2917,8 +2884,8 @@ ri_BuildQueryKeyFull(RI_QueryKey *key, Oid constr_id, int32 constr_queryno,
key->nkeypairs = (argc - RI_FIRST_ATTNAME_ARGNO) / 2;
/*
- * Lookup the attribute numbers of the arguments to the trigger call
- * and fill in the keypairs.
+ * Lookup the attribute numbers of the arguments to the trigger call and
+ * fill in the keypairs.
*/
for (i = 0, j = RI_FIRST_ATTNAME_ARGNO; j < argc; i++, j += 2)
{
@@ -2965,35 +2932,35 @@ ri_CheckTrigger(FunctionCallInfo fcinfo, const char *funcname, int tgkind)
!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" must be fired AFTER ROW", funcname)));
+ errmsg("function \"%s\" must be fired AFTER ROW", funcname)));
switch (tgkind)
{
case RI_TRIGTYPE_INSERT:
if (!TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
ereport(ERROR,
- (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" must be fired for INSERT", funcname)));
+ (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
+ errmsg("function \"%s\" must be fired for INSERT", funcname)));
break;
case RI_TRIGTYPE_UPDATE:
if (!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
ereport(ERROR,
- (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" must be fired for UPDATE", funcname)));
+ (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
+ errmsg("function \"%s\" must be fired for UPDATE", funcname)));
break;
case RI_TRIGTYPE_INUP:
if (!TRIGGER_FIRED_BY_INSERT(trigdata->tg_event) &&
!TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
ereport(ERROR,
- (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" must be fired for INSERT or UPDATE",
- funcname)));
+ (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
+ errmsg("function \"%s\" must be fired for INSERT or UPDATE",
+ funcname)));
break;
case RI_TRIGTYPE_DELETE:
if (!TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
ereport(ERROR,
- (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" must be fired for DELETE", funcname)));
+ (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
+ errmsg("function \"%s\" must be fired for DELETE", funcname)));
break;
}
@@ -3010,15 +2977,15 @@ ri_CheckTrigger(FunctionCallInfo fcinfo, const char *funcname, int tgkind)
funcname)));
/*
- * Check that tgconstrrelid is known. We need to check here because
- * of ancient pg_dump bug; see notes in CreateTrigger().
+ * Check that tgconstrrelid is known. We need to check here because of an
+ * ancient pg_dump bug; see notes in CreateTrigger().
*/
if (!OidIsValid(trigdata->tg_trigger->tgconstrrelid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("no target table given for trigger \"%s\" on table \"%s\"",
- trigdata->tg_trigger->tgname,
- RelationGetRelationName(trigdata->tg_relation)),
+ errmsg("no target table given for trigger \"%s\" on table \"%s\"",
+ trigdata->tg_trigger->tgname,
+ RelationGetRelationName(trigdata->tg_relation)),
errhint("Remove this referential integrity trigger and its mates, then do ALTER TABLE ADD CONSTRAINT.")));
}
@@ -3105,10 +3072,10 @@ ri_PerformCheck(RI_QueryKey *qkey, void *qplan,
query_rel = fk_rel;
/*
- * The values for the query are taken from the table on which the
- * trigger is called - it is normally the other one with respect to
- * query_rel. An exception is ri_Check_Pk_Match(), which uses the PK
- * table for both (the case when constrname == NULL)
+ * The values for the query are taken from the table on which the trigger
+ * is called - it is normally the other one with respect to query_rel. An
+ * exception is ri_Check_Pk_Match(), which uses the PK table for both (the
+ * case when constrname == NULL)
*/
if (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK && constrname != NULL)
{
@@ -3128,7 +3095,7 @@ ri_PerformCheck(RI_QueryKey *qkey, void *qplan,
vals, nulls);
if (old_tuple)
ri_ExtractValues(qkey, key_idx, source_rel, old_tuple,
- vals + qkey->nkeypairs, nulls + qkey->nkeypairs);
+ vals + qkey->nkeypairs, nulls + qkey->nkeypairs);
}
else
{
@@ -3138,17 +3105,16 @@ ri_PerformCheck(RI_QueryKey *qkey, void *qplan,
/*
* In READ COMMITTED mode, we just need to use an up-to-date regular
- * snapshot, and we will see all rows that could be interesting.
- * But in SERIALIZABLE mode, we can't change the transaction snapshot.
- * If the caller passes detectNewRows == false then it's okay to do the
- * query with the transaction snapshot; otherwise we use a current
- * snapshot, and tell the executor to error out if it finds any rows under
- * the current snapshot that wouldn't be visible per the transaction
- * snapshot.
+ * snapshot, and we will see all rows that could be interesting. But in
+ * SERIALIZABLE mode, we can't change the transaction snapshot. If the
+ * caller passes detectNewRows == false then it's okay to do the query
+ * with the transaction snapshot; otherwise we use a current snapshot, and
+ * tell the executor to error out if it finds any rows under the current
+ * snapshot that wouldn't be visible per the transaction snapshot.
*/
if (IsXactIsoLevelSerializable && detectNewRows)
{
- CommandCounterIncrement(); /* be sure all my own work is visible */
+ CommandCounterIncrement(); /* be sure all my own work is visible */
test_snapshot = CopySnapshot(GetLatestSnapshot());
crosscheck_snapshot = CopySnapshot(GetTransactionSnapshot());
}
@@ -3161,9 +3127,9 @@ ri_PerformCheck(RI_QueryKey *qkey, void *qplan,
/*
* If this is a select query (e.g., for a 'no action' or 'restrict'
- * trigger), we only need to see if there is a single row in the
- * table, matching the key. Otherwise, limit = 0 - because we want
- * the query to affect ALL the matching rows.
+ * trigger), we only need to see if there is a single row in the table,
+ * matching the key. Otherwise, limit = 0 - because we want the query to
+ * affect ALL the matching rows.
*/
limit = (expect_OK == SPI_OK_SELECT) ? 1 : 0;
@@ -3193,7 +3159,7 @@ ri_PerformCheck(RI_QueryKey *qkey, void *qplan,
/* XXX wouldn't it be clearer to do this part at the caller? */
if (constrname && expect_OK == SPI_OK_SELECT &&
- (SPI_processed == 0) == (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK))
+ (SPI_processed == 0) == (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK))
ri_ReportViolation(qkey, constrname,
pk_rel, fk_rel,
new_tuple ? new_tuple : old_tuple,
@@ -3257,8 +3223,8 @@ ri_ReportViolation(RI_QueryKey *qkey, const char *constrname,
errhint("This is most likely due to a rule having rewritten the query.")));
/*
- * Determine which relation to complain about. If tupdesc wasn't
- * passed by caller, assume the violator tuple came from there.
+ * Determine which relation to complain about. If tupdesc wasn't passed
+ * by caller, assume the violator tuple came from there.
*/
onfk = (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK);
if (onfk)
@@ -3276,8 +3242,8 @@ ri_ReportViolation(RI_QueryKey *qkey, const char *constrname,
/*
* Special case - if there are no keys at all, this is a 'no column'
- * constraint - no need to try to extract the values, and the message
- * in this case looks different.
+ * constraint - no need to try to extract the values, and the message in
+ * this case looks different.
*/
if (qkey->nkeypairs == 0)
{
@@ -3302,8 +3268,8 @@ ri_ReportViolation(RI_QueryKey *qkey, const char *constrname,
val = "null";
/*
- * Go to "..." if name or value doesn't fit in buffer. We reserve
- * 5 bytes to ensure we can add comma, "...", null.
+ * Go to "..." if name or value doesn't fit in buffer. We reserve 5
+ * bytes to ensure we can add comma, "...", null.
*/
if (strlen(name) >= (key_names + BUFLENGTH - 5) - name_ptr ||
strlen(val) >= (key_values + BUFLENGTH - 5) - val_ptr)
@@ -3322,18 +3288,18 @@ ri_ReportViolation(RI_QueryKey *qkey, const char *constrname,
(errcode(ERRCODE_FOREIGN_KEY_VIOLATION),
errmsg("insert or update on table \"%s\" violates foreign key constraint \"%s\"",
RelationGetRelationName(fk_rel), constrname),
- errdetail("Key (%s)=(%s) is not present in table \"%s\".",
- key_names, key_values,
- RelationGetRelationName(pk_rel))));
+ errdetail("Key (%s)=(%s) is not present in table \"%s\".",
+ key_names, key_values,
+ RelationGetRelationName(pk_rel))));
else
ereport(ERROR,
(errcode(ERRCODE_FOREIGN_KEY_VIOLATION),
errmsg("update or delete on \"%s\" violates foreign key constraint \"%s\" on \"%s\"",
RelationGetRelationName(pk_rel),
constrname, RelationGetRelationName(fk_rel)),
- errdetail("Key (%s)=(%s) is still referenced from table \"%s\".",
- key_names, key_values,
- RelationGetRelationName(fk_rel))));
+ errdetail("Key (%s)=(%s) is still referenced from table \"%s\".",
+ key_names, key_values,
+ RelationGetRelationName(fk_rel))));
}
/* ----------
@@ -3373,8 +3339,8 @@ ri_BuildQueryKeyPkCheck(RI_QueryKey *key, Oid constr_id, int32 constr_queryno,
key->nkeypairs = (argc - RI_FIRST_ATTNAME_ARGNO) / 2;
/*
- * Lookup the attribute numbers of the arguments to the trigger call
- * and fill in the keypairs.
+ * Lookup the attribute numbers of the arguments to the trigger call and
+ * fill in the keypairs.
*/
for (i = 0, j = RI_FIRST_ATTNAME_ARGNO + RI_KEYPAIR_PK_IDX; j < argc; i++, j += 2)
{
@@ -3542,8 +3508,8 @@ ri_KeysEqual(Relation rel, HeapTuple oldtup, HeapTuple newtup,
return false;
/*
- * Get the attribute's type OID and call the '=' operator to
- * compare the values.
+ * Get the attribute's type OID and call the '=' operator to compare
+ * the values.
*/
typeid = SPI_gettypeid(rel->rd_att, key->keypair[i][pairidx]);
if (!ri_AttributesEqual(typeid, oldvalue, newvalue))
@@ -3591,8 +3557,8 @@ ri_AllKeysUnequal(Relation rel, HeapTuple oldtup, HeapTuple newtup,
continue;
/*
- * Get the attributes type OID and call the '=' operator to
- * compare the values.
+ * Get the attribute's type OID and call the '=' operator to compare
+ * the values.
*/
typeid = SPI_gettypeid(rel->rd_att, key->keypair[i][pairidx]);
if (!ri_AttributesEqual(typeid, oldvalue, newvalue))
@@ -3639,8 +3605,8 @@ ri_OneKeyEqual(Relation rel, int column, HeapTuple oldtup, HeapTuple newtup,
return false;
/*
- * Get the attributes type OID and call the '=' operator to compare
- * the values.
+ * Get the attribute's type OID and call the '=' operator to compare the
+ * values.
*/
typeid = SPI_gettypeid(rel->rd_att, key->keypair[column][pairidx]);
if (!ri_AttributesEqual(typeid, oldvalue, newvalue))
@@ -3672,8 +3638,8 @@ ri_AttributesEqual(Oid typeid, Datum oldvalue, Datum newvalue)
if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(typeid))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(typeid))));
/*
* Call the type specific '=' function
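The last ri_triggers.c hunk ends inside ri_AttributesEqual(), which resolves
the type's default '=' operator through the type cache before comparing the
old and new key values. As a standalone illustration only (not part of this
patch; the helper name datums_equal is made up), the lookup-and-call pattern
boils down to:

    #include "postgres.h"
    #include "fmgr.h"
    #include "utils/typcache.h"

    /* Hypothetical helper: compare two datums of the same type using '='. */
    static bool
    datums_equal(Oid typeid, Datum oldvalue, Datum newvalue)
    {
        TypeCacheEntry *typentry;

        /* Find (and cache) the type's default equality operator. */
        typentry = lookup_type_cache(typeid, TYPECACHE_EQ_OPR_FINFO);
        if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
            elog(ERROR, "could not identify an equality operator for type %u",
                 typeid);

        /* Invoke the '=' function on the two values and return its result. */
        return DatumGetBool(FunctionCall2(&typentry->eq_opr_finfo,
                                          oldvalue, newvalue));
    }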
diff --git a/src/backend/utils/adt/rowtypes.c b/src/backend/utils/adt/rowtypes.c
index 07a5cf54eea..1a12185b048 100644
--- a/src/backend/utils/adt/rowtypes.c
+++ b/src/backend/utils/adt/rowtypes.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/rowtypes.c,v 1.12 2005/07/10 21:13:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/rowtypes.c,v 1.13 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -54,6 +54,7 @@ record_in(PG_FUNCTION_ARGS)
{
char *string = PG_GETARG_CSTRING(0);
Oid tupType = PG_GETARG_OID(1);
+
#ifdef NOT_USED
int32 typmod = PG_GETARG_INT32(2);
#endif
@@ -72,14 +73,14 @@ record_in(PG_FUNCTION_ARGS)
/*
* Use the passed type unless it's RECORD; we can't support input of
- * anonymous types, mainly because there's no good way to figure out
- * which anonymous type is wanted. Note that for RECORD, what we'll
- * probably actually get is RECORD's typelem, ie, zero.
+ * anonymous types, mainly because there's no good way to figure out which
+ * anonymous type is wanted. Note that for RECORD, what we'll probably
+ * actually get is RECORD's typelem, ie, zero.
*/
if (tupType == InvalidOid || tupType == RECORDOID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("input of anonymous composite types is not implemented")));
+ errmsg("input of anonymous composite types is not implemented")));
tupTypmod = -1; /* for all non-anonymous types */
tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
ncolumns = tupdesc->natts;
@@ -153,7 +154,7 @@ record_in(PG_FUNCTION_ARGS)
/* *ptr must be ')' */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed record literal: \"%s\"", string),
+ errmsg("malformed record literal: \"%s\"", string),
errdetail("Too few columns.")));
}
@@ -184,10 +185,10 @@ record_in(PG_FUNCTION_ARGS)
{
if (*ptr == '\0')
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed record literal: \"%s\"",
- string),
- errdetail("Unexpected end of input.")));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed record literal: \"%s\"",
+ string),
+ errdetail("Unexpected end of input.")));
appendStringInfoChar(&buf, *ptr++);
}
else if (ch == '\"')
@@ -221,8 +222,8 @@ record_in(PG_FUNCTION_ARGS)
values[i] = FunctionCall3(&column_info->proc,
CStringGetDatum(buf.data),
- ObjectIdGetDatum(column_info->typioparam),
- Int32GetDatum(tupdesc->attrs[i]->atttypmod));
+ ObjectIdGetDatum(column_info->typioparam),
+ Int32GetDatum(tupdesc->attrs[i]->atttypmod));
nulls[i] = ' ';
}
@@ -249,9 +250,9 @@ record_in(PG_FUNCTION_ARGS)
tuple = heap_formtuple(tupdesc, values, nulls);
/*
- * We cannot return tuple->t_data because heap_formtuple allocates it
- * as part of a larger chunk, and our caller may expect to be able to
- * pfree our result. So must copy the info into a new palloc chunk.
+ * We cannot return tuple->t_data because heap_formtuple allocates it as
+ * part of a larger chunk, and our caller may expect to be able to pfree
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
@@ -420,6 +421,7 @@ record_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
Oid tupType = PG_GETARG_OID(1);
+
#ifdef NOT_USED
int32 typmod = PG_GETARG_INT32(2);
#endif
@@ -437,14 +439,14 @@ record_recv(PG_FUNCTION_ARGS)
/*
* Use the passed type unless it's RECORD; we can't support input of
- * anonymous types, mainly because there's no good way to figure out
- * which anonymous type is wanted. Note that for RECORD, what we'll
- * probably actually get is RECORD's typelem, ie, zero.
+ * anonymous types, mainly because there's no good way to figure out which
+ * anonymous type is wanted. Note that for RECORD, what we'll probably
+ * actually get is RECORD's typelem, ie, zero.
*/
if (tupType == InvalidOid || tupType == RECORDOID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("input of anonymous composite types is not implemented")));
+ errmsg("input of anonymous composite types is not implemented")));
tupTypmod = -1; /* for all non-anonymous types */
tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
ncolumns = tupdesc->natts;
@@ -537,10 +539,9 @@ record_recv(PG_FUNCTION_ARGS)
{
/*
* Rather than copying data around, we just set up a phony
- * StringInfo pointing to the correct portion of the input
- * buffer. We assume we can scribble on the input buffer so as
- * to maintain the convention that StringInfos have a trailing
- * null.
+ * StringInfo pointing to the correct portion of the input buffer.
+ * We assume we can scribble on the input buffer so as to maintain
+ * the convention that StringInfos have a trailing null.
*/
StringInfoData item_buf;
char csave;
@@ -568,16 +569,16 @@ record_recv(PG_FUNCTION_ARGS)
values[i] = FunctionCall3(&column_info->proc,
PointerGetDatum(&item_buf),
- ObjectIdGetDatum(column_info->typioparam),
- Int32GetDatum(tupdesc->attrs[i]->atttypmod));
+ ObjectIdGetDatum(column_info->typioparam),
+ Int32GetDatum(tupdesc->attrs[i]->atttypmod));
nulls[i] = ' ';
/* Trouble if it didn't eat the whole buffer */
if (item_buf.cursor != itemlen)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("improper binary format in record column %d",
- i + 1)));
+ errmsg("improper binary format in record column %d",
+ i + 1)));
buf->data[buf->cursor] = csave;
}
@@ -586,9 +587,9 @@ record_recv(PG_FUNCTION_ARGS)
tuple = heap_formtuple(tupdesc, values, nulls);
/*
- * We cannot return tuple->t_data because heap_formtuple allocates it
- * as part of a larger chunk, and our caller may expect to be able to
- * pfree our result. So must copy the info into a new palloc chunk.
+ * We cannot return tuple->t_data because heap_formtuple allocates it as
+ * part of a larger chunk, and our caller may expect to be able to pfree
+ * our result. So must copy the info into a new palloc chunk.
*/
result = (HeapTupleHeader) palloc(tuple->t_len);
memcpy(result, tuple->t_data, tuple->t_len);
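Both record_in() and record_recv() hunks above finish with the same pattern:
the tuple built by heap_formtuple() has its t_data embedded in a larger
allocation, so the composite result is copied into its own palloc chunk
before being returned. A compressed sketch of that tail sequence (for
orientation only; the cleanup shown is typical and not a verbatim copy of
the patch):

    HeapTupleHeader result;

    /* t_data lives inside heap_formtuple's chunk, so copy it out first */
    result = (HeapTupleHeader) palloc(tuple->t_len);
    memcpy(result, tuple->t_data, tuple->t_len);
    heap_freetuple(tuple);      /* the intermediate tuple is no longer needed */

    PG_RETURN_HEAPTUPLEHEADER(result);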
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index 1a226bd49c3..04e8eb55161 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -3,7 +3,7 @@
* back to source text
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.206 2005/10/06 19:51:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.207 2005/10/15 02:49:29 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
@@ -201,11 +201,11 @@ static void get_agg_expr(Aggref *aggref, deparse_context *context);
static void get_const_expr(Const *constval, deparse_context *context);
static void get_sublink_expr(SubLink *sublink, deparse_context *context);
static void get_from_clause(Query *query, const char *prefix,
- deparse_context *context);
+ deparse_context *context);
static void get_from_clause_item(Node *jtnode, Query *query,
deparse_context *context);
static void get_from_clause_alias(Alias *alias, RangeTblEntry *rte,
- deparse_context *context);
+ deparse_context *context);
static void get_from_clause_coldeflist(List *coldeflist,
deparse_context *context);
static void get_opclass_name(Oid opclass, Oid actual_datatype,
@@ -486,8 +486,8 @@ pg_get_triggerdef(PG_FUNCTION_ARGS)
trigrec = (Form_pg_trigger) GETSTRUCT(ht_trig);
/*
- * Start the trigger definition. Note that the trigger's name should
- * never be schema-qualified, but the trigger rel's name may be.
+ * Start the trigger definition. Note that the trigger's name should never
+ * be schema-qualified, but the trigger rel's name may be.
*/
initStringInfo(&buf);
@@ -527,7 +527,7 @@ pg_get_triggerdef(PG_FUNCTION_ARGS)
{
if (trigrec->tgconstrrelid != InvalidOid)
appendStringInfo(&buf, "FROM %s ",
- generate_relation_name(trigrec->tgconstrrelid));
+ generate_relation_name(trigrec->tgconstrrelid));
if (!trigrec->tgdeferrable)
appendStringInfo(&buf, "NOT ");
appendStringInfo(&buf, "DEFERRABLE INITIALLY ");
@@ -688,9 +688,9 @@ pg_get_indexdef_worker(Oid indexrelid, int colno, int prettyFlags)
amrec = (Form_pg_am) GETSTRUCT(ht_am);
/*
- * Get the index expressions, if any. (NOTE: we do not use the
- * relcache versions of the expressions and predicate, because we want
- * to display non-const-folded expressions.)
+ * Get the index expressions, if any. (NOTE: we do not use the relcache
+ * versions of the expressions and predicate, because we want to display
+ * non-const-folded expressions.)
*/
if (!heap_attisnull(ht_idx, Anum_pg_index_indexprs))
{
@@ -714,8 +714,8 @@ pg_get_indexdef_worker(Oid indexrelid, int colno, int prettyFlags)
context = deparse_context_for(get_rel_name(indrelid), indrelid);
/*
- * Start the index definition. Note that the index's name should
- * never be schema-qualified, but the indexed rel's name may be.
+ * Start the index definition. Note that the index's name should never be
+ * schema-qualified, but the indexed rel's name may be.
*/
initStringInfo(&buf);
@@ -764,7 +764,7 @@ pg_get_indexdef_worker(Oid indexrelid, int colno, int prettyFlags)
{
/* Need parens if it's not a bare function call */
if (indexkey && IsA(indexkey, FuncExpr) &&
- ((FuncExpr *) indexkey)->funcformat == COERCE_EXPLICIT_CALL)
+ ((FuncExpr *) indexkey)->funcformat == COERCE_EXPLICIT_CALL)
appendStringInfoString(&buf, str);
else
appendStringInfo(&buf, "(%s)", str);
@@ -831,7 +831,7 @@ pg_get_constraintdef(PG_FUNCTION_ARGS)
Oid constraintId = PG_GETARG_OID(0);
PG_RETURN_TEXT_P(string_to_text(pg_get_constraintdef_worker(constraintId,
- false, 0)));
+ false, 0)));
}
Datum
@@ -843,7 +843,7 @@ pg_get_constraintdef_ext(PG_FUNCTION_ARGS)
prettyFlags = pretty ? PRETTYFLAG_PAREN | PRETTYFLAG_INDENT : 0;
PG_RETURN_TEXT_P(string_to_text(pg_get_constraintdef_worker(constraintId,
- false, prettyFlags)));
+ false, prettyFlags)));
}
/* Internal version that returns a palloc'd C string */
@@ -865,8 +865,8 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
Form_pg_constraint conForm;
/*
- * Fetch the pg_constraint row. There's no syscache for pg_constraint
- * so we must do it the hard way.
+ * Fetch the pg_constraint row. There's no syscache for pg_constraint so
+ * we must do it the hard way.
*/
conDesc = heap_open(ConstraintRelationId, AccessShareLock);
@@ -914,7 +914,7 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
/* add foreign relation name */
appendStringInfo(&buf, ") REFERENCES %s(",
- generate_relation_name(conForm->confrelid));
+ generate_relation_name(conForm->confrelid));
/* Fetch and build referenced-column list */
val = heap_getattr(tup, Anum_pg_constraint_confkey,
@@ -1067,15 +1067,13 @@ pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
prettyFlags, 0);
/*
- * Now emit the constraint definition. There are cases
- * where the constraint expression will be fully
- * parenthesized and we don't need the outer parens ...
- * but there are other cases where we do need 'em. Be
- * conservative for now.
+ * Now emit the constraint definition. There are cases where
+ * the constraint expression will be fully parenthesized and
+ * we don't need the outer parens ... but there are other
+ * cases where we do need 'em. Be conservative for now.
*
* Note that simply checking for leading '(' and trailing ')'
- * would NOT be good enough, consider "(x > 0) AND (y >
- * 0)".
+ * would NOT be good enough, consider "(x > 0) AND (y > 0)".
*/
appendStringInfo(&buf, "CHECK (%s)", consrc);
@@ -1259,7 +1257,7 @@ pg_get_serial_sequence(PG_FUNCTION_ARGS)
/* Get the number of the column */
column = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(columnname)));
+ PointerGetDatum(columnname)));
attnum = get_attnum(tableOid, column);
if (attnum == InvalidAttrNumber)
@@ -1292,8 +1290,8 @@ pg_get_serial_sequence(PG_FUNCTION_ARGS)
Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
/*
- * We assume any internal dependency of a relation on a column
- * must be what we are looking for.
+ * We assume any internal dependency of a relation on a column must be
+ * what we are looking for.
*/
if (deprec->classid == RelationRelationId &&
deprec->objsubid == 0 &&
@@ -1510,7 +1508,7 @@ deparse_context_for_subplan(const char *name, List *tlist,
if (var->varnoold > 0 && var->varnoold <= rtablelength)
{
RangeTblEntry *varrte = rt_fetch(var->varnoold, rtable);
- AttrNumber varattnum = var->varoattno;
+ AttrNumber varattnum = var->varoattno;
/* need this test in case it's referencing a resjunk col */
if (varattnum <= list_length(varrte->eref->colnames))
@@ -1637,8 +1635,8 @@ make_ruledef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc,
appendStringInfo(buf, " TO %s", generate_relation_name(ev_class));
if (ev_attr > 0)
appendStringInfo(buf, ".%s",
- quote_identifier(get_relid_attribute_name(ev_class,
- ev_attr)));
+ quote_identifier(get_relid_attribute_name(ev_class,
+ ev_attr)));
/* If the rule has an event qualification, add it */
if (ev_qual == NULL)
@@ -1658,15 +1656,15 @@ make_ruledef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc,
/*
* We need to make a context for recognizing any Vars in the qual
- * (which can only be references to OLD and NEW). Use the rtable
- * of the first query in the action list for this purpose.
+ * (which can only be references to OLD and NEW). Use the rtable of
+ * the first query in the action list for this purpose.
*/
query = (Query *) linitial(actions);
/*
* If the action is INSERT...SELECT, OLD/NEW have been pushed down
- * into the SELECT, and that's what we need to look at. (Ugly
- * kluge ... try to fix this when we redesign querytrees.)
+ * into the SELECT, and that's what we need to look at. (Ugly kluge
+ * ... try to fix this when we redesign querytrees.)
*/
query = getInsertSelectQuery(query, NULL);
@@ -1809,9 +1807,9 @@ get_query_def(Query *query, StringInfo buf, List *parentnamespace,
/*
* Before we begin to examine the query, acquire locks on referenced
- * relations, and fix up deleted columns in JOIN RTEs. This ensures
- * consistent results. Note we assume it's OK to scribble on the
- * passed querytree!
+ * relations, and fix up deleted columns in JOIN RTEs. This ensures
+ * consistent results. Note we assume it's OK to scribble on the passed
+ * querytree!
*/
AcquireRewriteLocks(query);
@@ -1874,9 +1872,9 @@ get_select_query_def(Query *query, deparse_context *context,
ListCell *l;
/*
- * If the Query node has a setOperations tree, then it's the top level
- * of a UNION/INTERSECT/EXCEPT query; only the ORDER BY and LIMIT
- * fields are interesting in the top query itself.
+ * If the Query node has a setOperations tree, then it's the top level of
+ * a UNION/INTERSECT/EXCEPT query; only the ORDER BY and LIMIT fields are
+ * interesting in the top query itself.
*/
if (query->setOperations)
{
@@ -1909,7 +1907,7 @@ get_select_query_def(Query *query, deparse_context *context,
sortcoltype = exprType(sortexpr);
/* See whether operator is default < or > for datatype */
typentry = lookup_type_cache(sortcoltype,
- TYPECACHE_LT_OPR | TYPECACHE_GT_OPR);
+ TYPECACHE_LT_OPR | TYPECACHE_GT_OPR);
if (srt->sortop == typentry->lt_opr)
/* ASC is default, so emit nothing */ ;
else if (srt->sortop == typentry->gt_opr)
@@ -2025,10 +2023,10 @@ get_basic_select_query(Query *query, deparse_context *context,
get_rule_expr((Node *) tle->expr, context, true);
/*
- * Figure out what the result column should be called. In the
- * context of a view, use the view's tuple descriptor (so as to
- * pick up the effects of any column RENAME that's been done on
- * the view). Otherwise, just use what we can find in the TLE.
+ * Figure out what the result column should be called. In the context
+ * of a view, use the view's tuple descriptor (so as to pick up the
+ * effects of any column RENAME that's been done on the view).
+ * Otherwise, just use what we can find in the TLE.
*/
if (resultDesc && colno <= resultDesc->natts)
colname = NameStr(resultDesc->attrs[colno - 1]->attname);
@@ -2130,10 +2128,10 @@ get_setop_query(Node *setOp, Query *query, deparse_context *context,
SetOperationStmt *op = (SetOperationStmt *) setOp;
/*
- * We force parens whenever nesting two SetOperationStmts. There
- * are some cases in which parens are needed around a leaf query
- * too, but those are more easily handled at the next level down
- * (see code above).
+ * We force parens whenever nesting two SetOperationStmts. There are
+ * some cases in which parens are needed around a leaf query too, but
+ * those are more easily handled at the next level down (see code
+ * above).
*/
need_paren = !IsA(op->larg, RangeTblRef);
@@ -2231,8 +2229,8 @@ get_insert_query_def(Query *query, deparse_context *context)
List *strippedexprs;
/*
- * If it's an INSERT ... SELECT there will be a single subquery RTE
- * for the SELECT.
+ * If it's an INSERT ... SELECT there will be a single subquery RTE for
+ * the SELECT.
*/
foreach(l, query->rtable)
{
@@ -2279,13 +2277,12 @@ get_insert_query_def(Query *query, deparse_context *context)
* tle->resname, since resname will fail to track RENAME.
*/
appendStringInfoString(buf,
- quote_identifier(get_relid_attribute_name(rte->relid,
- tle->resno)));
+ quote_identifier(get_relid_attribute_name(rte->relid,
+ tle->resno)));
/*
- * Print any indirection needed (subfields or subscripts), and
- * strip off the top-level nodes representing the indirection
- * assignments.
+ * Print any indirection needed (subfields or subscripts), and strip
+ * off the top-level nodes representing the indirection assignments.
*/
strippedexprs = lappend(strippedexprs,
processIndirection((Node *) tle->expr,
@@ -2351,13 +2348,12 @@ get_update_query_def(Query *query, deparse_context *context)
* tle->resname, since resname will fail to track RENAME.
*/
appendStringInfoString(buf,
- quote_identifier(get_relid_attribute_name(rte->relid,
- tle->resno)));
+ quote_identifier(get_relid_attribute_name(rte->relid,
+ tle->resno)));
/*
- * Print any indirection needed (subfields or subscripts), and
- * strip off the top-level nodes representing the indirection
- * assignments.
+ * Print any indirection needed (subfields or subscripts), and strip
+ * off the top-level nodes representing the indirection assignments.
*/
expr = processIndirection((Node *) tle->expr, context);
@@ -2432,8 +2428,8 @@ get_utility_query_def(Query *query, deparse_context *context)
appendContextKeyword(context, "",
0, PRETTYINDENT_STD, 1);
appendStringInfo(buf, "NOTIFY %s",
- quote_qualified_identifier(stmt->relation->schemaname,
- stmt->relation->relname));
+ quote_qualified_identifier(stmt->relation->schemaname,
+ stmt->relation->relname));
}
else
{
@@ -2517,10 +2513,9 @@ get_names_for_var(Var *var, int levelsup, deparse_context *context,
if (rte->rtekind == RTE_RELATION)
{
/*
- * It's possible that use of the bare refname would find
- * another more-closely-nested RTE, or be ambiguous, in which
- * case we need to specify the schemaname to avoid these
- * errors.
+ * It's possible that use of the bare refname would find another
+ * more-closely-nested RTE, or be ambiguous, in which case we need
+ * to specify the schemaname to avoid these errors.
*/
if (find_rte_by_refname(rte->eref->aliasname, context) != rte)
*schemaname =
@@ -2530,20 +2525,20 @@ get_names_for_var(Var *var, int levelsup, deparse_context *context,
{
/*
* If it's an unnamed join, look at the expansion of the alias
- * variable. If it's a simple reference to one of the input
- * vars then recursively find the name of that var, instead.
- * (This allows correct decompiling of cases where there are
- * identically named columns on both sides of the join.)
- * When it's not a simple reference, we have to just return
- * the unqualified variable name (this can only happen with
- * columns that were merged by USING or NATURAL clauses).
+ * variable. If it's a simple reference to one of the input vars
+ * then recursively find the name of that var, instead. (This
+ * allows correct decompiling of cases where there are identically
+ * named columns on both sides of the join.) When it's not a
+ * simple reference, we have to just return the unqualified
+ * variable name (this can only happen with columns that were
+ * merged by USING or NATURAL clauses).
*/
if (var->varattno > 0)
{
- Var *aliasvar;
+ Var *aliasvar;
aliasvar = (Var *) list_nth(rte->joinaliasvars,
- var->varattno-1);
+ var->varattno - 1);
if (IsA(aliasvar, Var))
{
get_names_for_var(aliasvar,
@@ -2568,9 +2563,9 @@ get_names_for_var(Var *var, int levelsup, deparse_context *context,
* Get the name of a field of a Var of type RECORD.
*
* Since no actual table or view column is allowed to have type RECORD, such
- * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
+ * a Var must refer to a JOIN or FUNCTION RTE or to a subquery output. We
* drill down to find the ultimate defining expression and attempt to infer
- * the field name from it. We ereport if we can't determine the name.
+ * the field name from it. We ereport if we can't determine the name.
*
* levelsup is an extra offset to interpret the Var's varlevelsup correctly.
*
@@ -2609,6 +2604,7 @@ get_name_for_var_field(Var *var, int fieldno,
{
case RTE_RELATION:
case RTE_SPECIAL:
+
/*
* This case should not occur: a column of a table shouldn't have
* type RECORD. Fall through and fail (most likely) at the
@@ -2629,7 +2625,7 @@ get_name_for_var_field(Var *var, int fieldno,
{
/*
* Recurse into the sub-select to see what its Var refers
- * to. We have to build an additional level of namespace
+ * to. We have to build an additional level of namespace
* to keep in step with varlevelsup in the subselect.
*/
deparse_namespace mydpns;
@@ -2662,18 +2658,19 @@ get_name_for_var_field(Var *var, int fieldno,
/* else fall through to inspect the expression */
break;
case RTE_FUNCTION:
+
/*
- * We couldn't get here unless a function is declared with one
- * of its result columns as RECORD, which is not allowed.
+ * We couldn't get here unless a function is declared with one of
+ * its result columns as RECORD, which is not allowed.
*/
break;
}
/*
* We now have an expression we can't expand any more, so see if
- * get_expr_result_type() can do anything with it. If not, pass
- * to lookup_rowtype_tupdesc() which will probably fail, but will
- * give an appropriate error message while failing.
+ * get_expr_result_type() can do anything with it. If not, pass to
+ * lookup_rowtype_tupdesc() which will probably fail, but will give an
+ * appropriate error message while failing.
*/
if (get_expr_result_type(expr, NULL, &tupleDesc) != TYPEFUNC_COMPOSITE)
tupleDesc = lookup_rowtype_tupdesc(exprType(expr), exprTypmod(expr));
@@ -2866,8 +2863,8 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags)
return false;
/*
- * Operators are same priority --- can skip parens
- * only if we have (a - b) - c, not a - (b - c).
+ * Operators are same priority --- can skip parens only if
+ * we have (a - b) - c, not a - (b - c).
*/
if (node == (Node *) linitial(((OpExpr *) parentNode)->args))
return true;
@@ -2897,11 +2894,11 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags)
case T_BoolExpr: /* lower precedence */
case T_ArrayRef: /* other separators */
case T_ArrayExpr: /* other separators */
- case T_RowExpr: /* other separators */
+ case T_RowExpr: /* other separators */
case T_CoalesceExpr: /* own parentheses */
case T_MinMaxExpr: /* own parentheses */
case T_NullIfExpr: /* other separators */
- case T_Aggref: /* own parentheses */
+ case T_Aggref: /* own parentheses */
case T_CaseExpr: /* other separators */
return true;
default:
@@ -2945,11 +2942,11 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags)
}
case T_ArrayRef: /* other separators */
case T_ArrayExpr: /* other separators */
- case T_RowExpr: /* other separators */
+ case T_RowExpr: /* other separators */
case T_CoalesceExpr: /* own parentheses */
case T_MinMaxExpr: /* own parentheses */
case T_NullIfExpr: /* other separators */
- case T_Aggref: /* own parentheses */
+ case T_Aggref: /* own parentheses */
case T_CaseExpr: /* other separators */
return true;
default:
@@ -3055,10 +3052,10 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* Each level of get_rule_expr must emit an indivisible term
- * (parenthesized if necessary) to ensure result is reparsed into the
- * same expression tree. The only exception is that when the input
- * is a List, we emit the component items comma-separated with no
- * surrounding decoration; this is convenient for most callers.
+ * (parenthesized if necessary) to ensure result is reparsed into the same
+ * expression tree. The only exception is that when the input is a List,
+ * we emit the component items comma-separated with no surrounding
+ * decoration; this is convenient for most callers.
*
* There might be some work left here to support additional node types.
*/
@@ -3129,8 +3126,8 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* Parenthesize the argument unless it's a simple Var or a
- * FieldSelect. (In particular, if it's another ArrayRef,
- * we *must* parenthesize to avoid confusion.)
+ * FieldSelect. (In particular, if it's another ArrayRef, we
+ * *must* parenthesize to avoid confusion.)
*/
need_parens = !IsA(aref->refexpr, Var) &&
!IsA(aref->refexpr, FieldSelect);
@@ -3188,7 +3185,7 @@ get_rule_expr(Node *node, deparse_context *context,
appendStringInfo(buf, " %s %s (",
generate_operator_name(expr->opno,
exprType(arg1),
- get_element_type(exprType(arg2))),
+ get_element_type(exprType(arg2))),
expr->useOr ? "ANY" : "ALL");
get_rule_expr_paren(arg2, context, true, node);
appendStringInfoChar(buf, ')');
@@ -3261,9 +3258,8 @@ get_rule_expr(Node *node, deparse_context *context,
case T_SubPlan:
{
/*
- * We cannot see an already-planned subplan in rule
- * deparsing, only while EXPLAINing a query plan. For now,
- * just punt.
+ * We cannot see an already-planned subplan in rule deparsing,
+ * only while EXPLAINing a query plan. For now, just punt.
*/
if (((SubPlan *) node)->useHashTable)
appendStringInfo(buf, "(hashed subplan)");
@@ -3282,12 +3278,11 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* Parenthesize the argument unless it's an ArrayRef or
- * another FieldSelect. Note in particular that it would
- * be WRONG to not parenthesize a Var argument; simplicity
- * is not the issue here, having the right number of names
- * is.
+ * another FieldSelect. Note in particular that it would be
+ * WRONG to not parenthesize a Var argument; simplicity is not
+ * the issue here, having the right number of names is.
*/
- need_parens = !IsA(arg, ArrayRef) && !IsA(arg, FieldSelect);
+ need_parens = !IsA(arg, ArrayRef) &&!IsA(arg, FieldSelect);
if (need_parens)
appendStringInfoChar(buf, '(');
get_rule_expr(arg, context, true);
@@ -3296,8 +3291,8 @@ get_rule_expr(Node *node, deparse_context *context,
/*
* If it's a Var of type RECORD, we have to find what the Var
- * refers to; otherwise we can use get_expr_result_type.
- * If that fails, we try lookup_rowtype_tupdesc, which will
+ * refers to; otherwise we can use get_expr_result_type. If
+ * that fails, we try lookup_rowtype_tupdesc, which will
* probably fail too, but will ereport an acceptable message.
*/
if (IsA(arg, Var) &&
@@ -3324,8 +3319,8 @@ get_rule_expr(Node *node, deparse_context *context,
case T_FieldStore:
/*
- * We shouldn't see FieldStore here; it should have been
- * stripped off by processIndirection().
+ * We shouldn't see FieldStore here; it should have been stripped
+ * off by processIndirection().
*/
elog(ERROR, "unexpected FieldStore");
break;
@@ -3349,8 +3344,8 @@ get_rule_expr(Node *node, deparse_context *context,
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
appendStringInfo(buf, "::%s",
- format_type_with_typemod(relabel->resulttype,
- relabel->resulttypmod));
+ format_type_with_typemod(relabel->resulttype,
+ relabel->resulttypmod));
}
}
break;
@@ -3374,7 +3369,7 @@ get_rule_expr(Node *node, deparse_context *context,
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
appendStringInfo(buf, "::%s",
- format_type_with_typemod(convert->resulttype, -1));
+ format_type_with_typemod(convert->resulttype, -1));
}
}
break;
@@ -3444,9 +3439,9 @@ get_rule_expr(Node *node, deparse_context *context,
char *sep;
/*
- * If it's a named type and not RECORD, we may have to
- * skip dropped columns and/or claim there are NULLs for
- * added columns.
+ * If it's a named type and not RECORD, we may have to skip
+ * dropped columns and/or claim there are NULLs for added
+ * columns.
*/
if (rowexpr->row_typeid != RECORDOID)
{
@@ -3455,8 +3450,8 @@ get_rule_expr(Node *node, deparse_context *context,
}
/*
- * SQL99 allows "ROW" to be omitted when there is more
- * than one column, but for simplicity we always print it.
+ * SQL99 allows "ROW" to be omitted when there is more than
+ * one column, but for simplicity we always print it.
*/
appendStringInfo(buf, "ROW(");
sep = "";
@@ -3490,7 +3485,7 @@ get_rule_expr(Node *node, deparse_context *context,
appendStringInfo(buf, ")");
if (rowexpr->row_format == COERCE_EXPLICIT_CAST)
appendStringInfo(buf, "::%s",
- format_type_with_typemod(rowexpr->row_typeid, -1));
+ format_type_with_typemod(rowexpr->row_typeid, -1));
}
break;
@@ -3611,8 +3606,8 @@ get_rule_expr(Node *node, deparse_context *context,
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
appendStringInfo(buf, "::%s",
- format_type_with_typemod(ctest->resulttype,
- ctest->resulttypmod));
+ format_type_with_typemod(ctest->resulttype,
+ ctest->resulttypmod));
}
}
break;
@@ -3724,9 +3719,8 @@ get_func_expr(FuncExpr *expr, deparse_context *context,
ListCell *l;
/*
- * If the function call came from an implicit coercion, then just show
- * the first argument --- unless caller wants to see implicit
- * coercions.
+ * If the function call came from an implicit coercion, then just show the
+ * first argument --- unless caller wants to see implicit coercions.
*/
if (expr->funcformat == COERCE_IMPLICIT_CAST && !showimplicit)
{
@@ -3755,14 +3749,14 @@ get_func_expr(FuncExpr *expr, deparse_context *context,
if (!PRETTY_PAREN(context))
appendStringInfoChar(buf, ')');
appendStringInfo(buf, "::%s",
- format_type_with_typemod(rettype, coercedTypmod));
+ format_type_with_typemod(rettype, coercedTypmod));
return;
}
/*
- * Normal function: display as proname(args). First we need to
- * extract the argument datatypes.
+ * Normal function: display as proname(args). First we need to extract
+ * the argument datatypes.
*/
nargs = 0;
foreach(l, expr->args)
@@ -3791,7 +3785,7 @@ get_agg_expr(Aggref *aggref, deparse_context *context)
Oid argtype = exprType((Node *) aggref->target);
appendStringInfo(buf, "%s(%s",
- generate_function_name(aggref->aggfnoid, 1, &argtype),
+ generate_function_name(aggref->aggfnoid, 1, &argtype),
aggref->aggdistinct ? "DISTINCT " : "");
if (aggref->aggstar)
appendStringInfo(buf, "*");
@@ -3821,11 +3815,11 @@ get_const_expr(Const *constval, deparse_context *context)
if (constval->constisnull)
{
/*
- * Always label the type of a NULL constant to prevent
- * misdecisions about type when reparsing.
+ * Always label the type of a NULL constant to prevent misdecisions
+ * about type when reparsing.
*/
appendStringInfo(buf, "NULL::%s",
- format_type_with_typemod(constval->consttype, -1));
+ format_type_with_typemod(constval->consttype, -1));
return;
}
@@ -3846,14 +3840,13 @@ get_const_expr(Const *constval, deparse_context *context)
case NUMERICOID:
{
/*
- * These types are printed without quotes unless they
- * contain values that aren't accepted by the scanner
- * unquoted (e.g., 'NaN'). Note that strtod() and friends
- * might accept NaN, so we can't use that to test.
+ * These types are printed without quotes unless they contain
+ * values that aren't accepted by the scanner unquoted (e.g.,
+ * 'NaN'). Note that strtod() and friends might accept NaN,
+ * so we can't use that to test.
*
- * In reality we only need to defend against infinity and
- * NaN, so we need not get too crazy about pattern
- * matching here.
+ * In reality we only need to defend against infinity and NaN, so
+ * we need not get too crazy about pattern matching here.
*/
if (strspn(extval, "0123456789+-eE.") == strlen(extval))
{
@@ -3879,13 +3872,14 @@ get_const_expr(Const *constval, deparse_context *context)
break;
default:
+
/*
* We must quote any funny characters in the constant's
* representation. XXX Any MULTIBYTE considerations here?
*/
for (valptr = extval; *valptr; valptr++)
if (*valptr == '\\' ||
- (unsigned char)*valptr < (unsigned char)' ')
+ (unsigned char) *valptr < (unsigned char) ' ')
{
appendStringInfoChar(buf, ESCAPE_STRING_SYNTAX);
break;
@@ -3901,7 +3895,7 @@ get_const_expr(Const *constval, deparse_context *context)
appendStringInfoChar(buf, ch);
appendStringInfoChar(buf, ch);
}
- else if ((unsigned char)ch < (unsigned char)' ')
+ else if ((unsigned char) ch < (unsigned char) ' ')
appendStringInfo(buf, "\\%03o", (int) ch);
else
appendStringInfoChar(buf, ch);
@@ -3913,9 +3907,9 @@ get_const_expr(Const *constval, deparse_context *context)
pfree(extval);
/*
- * Append ::typename unless the constant will be implicitly typed as
- * the right type when it is read in. XXX this code has to be kept in
- * sync with the behavior of the parser, especially make_const.
+ * Append ::typename unless the constant will be implicitly typed as the
+ * right type when it is read in. XXX this code has to be kept in sync
+ * with the behavior of the parser, especially make_const.
*/
switch (constval->consttype)
{
@@ -3935,7 +3929,7 @@ get_const_expr(Const *constval, deparse_context *context)
}
if (needlabel)
appendStringInfo(buf, "::%s",
- format_type_with_typemod(constval->consttype, -1));
+ format_type_with_typemod(constval->consttype, -1));
}
@@ -3969,10 +3963,10 @@ get_sublink_expr(SubLink *sublink, deparse_context *context)
need_paren = true;
/*
- * XXX we regurgitate the originally given operator name, with or
- * without schema qualification. This is not necessarily 100% right
- * but it's the best we can do, since the operators actually used
- * might not all be in the same schema.
+ * XXX we regurgitate the originally given operator name, with or without
+ * schema qualification. This is not necessarily 100% right but it's the
+ * best we can do, since the operators actually used might not all be in
+ * the same schema.
*/
switch (sublink->subLinkType)
{
@@ -4044,11 +4038,11 @@ get_from_clause(Query *query, const char *prefix, deparse_context *context)
ListCell *l;
/*
- * We use the query's jointree as a guide to what to print. However,
- * we must ignore auto-added RTEs that are marked not inFromCl. (These
- * can only appear at the top level of the jointree, so it's
- * sufficient to check here.) This check also ensures we ignore
- * the rule pseudo-RTEs for NEW and OLD.
+ * We use the query's jointree as a guide to what to print. However, we
+ * must ignore auto-added RTEs that are marked not inFromCl. (These can
+ * only appear at the top level of the jointree, so it's sufficient to
+ * check here.) This check also ensures we ignore the rule pseudo-RTEs
+ * for NEW and OLD.
*/
foreach(l, query->jointree->fromlist)
{
@@ -4124,10 +4118,10 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
strcmp(rte->eref->aliasname, get_rel_name(rte->relid)) != 0)
{
/*
- * Apparently the rel has been renamed since the rule was
- * made. Emit a fake alias clause so that variable references
- * will still work. This is not a 100% solution but should
- * work in most reasonable situations.
+ * Apparently the rel has been renamed since the rule was made.
+ * Emit a fake alias clause so that variable references will still
+ * work. This is not a 100% solution but should work in most
+ * reasonable situations.
*/
appendStringInfo(buf, " %s",
quote_identifier(rte->eref->aliasname));
@@ -4136,10 +4130,9 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
else if (rte->rtekind == RTE_FUNCTION)
{
/*
- * For a function RTE, always give an alias.
- * This covers possible renaming of the function and/or
- * instability of the FigureColname rules for things that
- * aren't simple functions.
+ * For a function RTE, always give an alias. This covers possible
+ * renaming of the function and/or instability of the
+ * FigureColname rules for things that aren't simple functions.
*/
appendStringInfo(buf, " %s",
quote_identifier(rte->eref->aliasname));
@@ -4175,7 +4168,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
need_paren_on_right = PRETTY_PAREN(context) &&
!IsA(j->rarg, RangeTblRef) &&
- !(IsA(j->rarg, JoinExpr) && ((JoinExpr*) j->rarg)->alias != NULL);
+ !(IsA(j->rarg, JoinExpr) &&((JoinExpr *) j->rarg)->alias != NULL);
if (!PRETTY_PAREN(context) || j->alias != NULL)
appendStringInfoChar(buf, '(');
@@ -4278,7 +4271,7 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context)
if (col != list_head(j->using))
appendStringInfo(buf, ", ");
appendStringInfoString(buf,
- quote_identifier(strVal(lfirst(col))));
+ quote_identifier(strVal(lfirst(col))));
}
appendStringInfoChar(buf, ')');
}
@@ -4415,8 +4408,7 @@ get_opclass_name(Oid opclass, Oid actual_datatype,
opcrec = (Form_pg_opclass) GETSTRUCT(ht_opc);
/*
- * Special case for ARRAY_OPS: pretend it is default for any array
- * type
+ * Special case for ARRAY_OPS: pretend it is default for any array type
*/
if (OidIsValid(actual_datatype))
{
@@ -4476,19 +4468,19 @@ processIndirection(Node *node, deparse_context *context)
format_type_be(fstore->resulttype));
/*
- * Get the field name. Note we assume here that there's only
- * one field being assigned to. This is okay in stored rules
- * but could be wrong in executable target lists. Presently
- * no problem since explain.c doesn't print plan targetlists,
- * but someday may have to think of something ...
+ * Get the field name. Note we assume here that there's only one
+ * field being assigned to. This is okay in stored rules but
+ * could be wrong in executable target lists. Presently no
+ * problem since explain.c doesn't print plan targetlists, but
+ * someday may have to think of something ...
*/
fieldname = get_relid_attribute_name(typrelid,
- linitial_int(fstore->fieldnums));
+ linitial_int(fstore->fieldnums));
appendStringInfo(buf, ".%s", quote_identifier(fieldname));
/*
- * We ignore arg since it should be an uninteresting reference
- * to the target column or subcolumn.
+ * We ignore arg since it should be an uninteresting reference to
+ * the target column or subcolumn.
*/
node = (Node *) linitial(fstore->newvals);
}
@@ -4501,8 +4493,8 @@ processIndirection(Node *node, deparse_context *context)
printSubscripts(aref, context);
/*
- * We ignore refexpr since it should be an uninteresting
- * reference to the target column or subcolumn.
+ * We ignore refexpr since it should be an uninteresting reference
+ * to the target column or subcolumn.
*/
node = (Node *) aref->refassgnexpr;
}
@@ -4545,10 +4537,9 @@ const char *
quote_identifier(const char *ident)
{
/*
- * Can avoid quoting if ident starts with a lowercase letter or
- * underscore and contains only lowercase letters, digits, and
- * underscores, *and* is not any SQL keyword. Otherwise, supply
- * quotes.
+ * Can avoid quoting if ident starts with a lowercase letter or underscore
+ * and contains only lowercase letters, digits, and underscores, *and* is
+ * not any SQL keyword. Otherwise, supply quotes.
*/
int nquotes = 0;
bool safe;
@@ -4557,8 +4548,8 @@ quote_identifier(const char *ident)
char *optr;
/*
- * would like to use <ctype.h> macros here, but they might yield
- * unwanted locale-specific results...
+ * would like to use <ctype.h> macros here, but they might yield unwanted
+ * locale-specific results...
*/
safe = ((ident[0] >= 'a' && ident[0] <= 'z') || ident[0] == '_');
@@ -4583,13 +4574,13 @@ quote_identifier(const char *ident)
if (safe)
{
/*
- * Check for keyword. This test is overly strong, since many of
- * the "keywords" known to the parser are usable as column names,
- * but the parser doesn't provide any easy way to test for whether
- * an identifier is safe or not... so be safe not sorry.
+ * Check for keyword. This test is overly strong, since many of the
+ * "keywords" known to the parser are usable as column names, but the
+ * parser doesn't provide any easy way to test for whether an
+ * identifier is safe or not... so be safe not sorry.
*
- * Note: ScanKeywordLookup() does case-insensitive comparison, but
- * that's fine, since we already know we have all-lower-case.
+ * Note: ScanKeywordLookup() does case-insensitive comparison, but that's
+ * fine, since we already know we have all-lower-case.
*/
if (ScanKeywordLookup(ident) != NULL)
safe = false;
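The quoting rule spelled out in the comments above can be captured in a few lines. A minimal standalone sketch, not the backend function itself: is_sql_keyword() is a hypothetical stand-in for ScanKeywordLookup(), and the character tests deliberately avoid <ctype.h> for the locale reason noted above.

#include <stdbool.h>

/* hypothetical stand-in for ScanKeywordLookup(); assumed to know the keyword list */
extern bool is_sql_keyword(const char *ident);

/*
 * True if "ident" could be printed without double quotes under the rule
 * described above: it starts with a lowercase ASCII letter or underscore,
 * contains only lowercase ASCII letters, digits, and underscores, and is
 * not a keyword.
 */
static bool
identifier_needs_no_quotes(const char *ident)
{
	if (!((ident[0] >= 'a' && ident[0] <= 'z') || ident[0] == '_'))
		return false;

	for (const char *p = ident; *p; p++)
	{
		char	ch = *p;

		if (!((ch >= 'a' && ch <= 'z') ||
			  (ch >= '0' && ch <= '9') ||
			  ch == '_'))
			return false;
	}

	return !is_sql_keyword(ident);
}

Under this rule user_id stays bare, while UserId, 2fa, and select would all come out double-quoted.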
@@ -4702,8 +4693,8 @@ generate_function_name(Oid funcid, int nargs, Oid *argtypes)
/*
* The idea here is to schema-qualify only if the parser would fail to
- * resolve the correct function given the unqualified func name with
- * the specified argtypes.
+ * resolve the correct function given the unqualified func name with the
+ * specified argtypes.
*/
p_result = func_get_detail(list_make1(makeString(proname)),
NIL, nargs, argtypes,
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index ccc8d0f4483..95980ca1e03 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -15,7 +15,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.190 2005/10/11 17:27:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.191 2005/10/15 02:49:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -197,8 +197,8 @@ eqsel(PG_FUNCTION_ARGS)
double selec;
/*
- * If expression is not variable = something or something = variable,
- * then punt and return a default estimate.
+ * If expression is not variable = something or something = variable, then
+ * punt and return a default estimate.
*/
if (!get_restriction_variable(root, args, varRelid,
&vardata, &other, &varonleft))
@@ -229,11 +229,11 @@ eqsel(PG_FUNCTION_ARGS)
int i;
/*
- * Is the constant "=" to any of the column's most common
- * values? (Although the given operator may not really be
- * "=", we will assume that seeing whether it returns TRUE is
- * an appropriate test. If you don't like this, maybe you
- * shouldn't be using eqsel for your operator...)
+ * Is the constant "=" to any of the column's most common values?
+ * (Although the given operator may not really be "=", we will
+ * assume that seeing whether it returns TRUE is an appropriate
+ * test. If you don't like this, maybe you shouldn't be using
+ * eqsel for your operator...)
*/
if (get_attstatsslot(vardata.statsTuple,
vardata.atttype, vardata.atttypmod,
@@ -271,18 +271,18 @@ eqsel(PG_FUNCTION_ARGS)
if (match)
{
/*
- * Constant is "=" to this common value. We know
- * selectivity exactly (or as exactly as VACUUM could
- * calculate it, anyway).
+ * Constant is "=" to this common value. We know selectivity
+ * exactly (or as exactly as VACUUM could calculate it,
+ * anyway).
*/
selec = numbers[i];
}
else
{
/*
- * Comparison is against a constant that is neither NULL
- * nor any of the common values. Its selectivity cannot
- * be more than this:
+ * Comparison is against a constant that is neither NULL nor
+ * any of the common values. Its selectivity cannot be more
+ * than this:
*/
double sumcommon = 0.0;
double otherdistinct;
@@ -293,10 +293,10 @@ eqsel(PG_FUNCTION_ARGS)
CLAMP_PROBABILITY(selec);
/*
- * and in fact it's probably a good deal less. We
- * approximate that all the not-common values share this
- * remaining fraction equally, so we divide by the number
- * of other distinct values.
+ * and in fact it's probably a good deal less. We approximate
+ * that all the not-common values share this remaining
+ * fraction equally, so we divide by the number of other
+ * distinct values.
*/
otherdistinct = get_variable_numdistinct(&vardata)
- nnumbers;
@@ -304,8 +304,8 @@ eqsel(PG_FUNCTION_ARGS)
selec /= otherdistinct;
/*
- * Another cross-check: selectivity shouldn't be estimated
- * as more than the least common "most common value".
+ * Another cross-check: selectivity shouldn't be estimated as
+ * more than the least common "most common value".
*/
if (nnumbers > 0 && selec > numbers[nnumbers - 1])
selec = numbers[nnumbers - 1];
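The bound described in the comments above can be written out on its own. A minimal sketch with made-up figures, not the body of eqsel(): mcv_freqs[] plays the role of numbers[] (most-common-value frequencies in decreasing order) and ndistinct the role of get_variable_numdistinct()'s result.

/*
 * Upper bound on the selectivity of "var = const" when const is neither
 * NULL nor one of the most common values: spread the non-null, non-MCV
 * fraction evenly over the remaining distinct values, and never exceed the
 * least common MCV's frequency.
 */
static double
eqsel_non_mcv_bound(double nullfrac, const double *mcv_freqs, int nmcv,
					double ndistinct)
{
	double	sumcommon = 0.0;
	double	selec;
	int		i;

	for (i = 0; i < nmcv; i++)
		sumcommon += mcv_freqs[i];

	/* fraction of rows that are neither NULL nor an MCV */
	selec = 1.0 - sumcommon - nullfrac;
	if (selec < 0.0)
		selec = 0.0;

	/* assume the remaining distinct values share that fraction equally */
	if (ndistinct > nmcv)
		selec /= (ndistinct - nmcv);

	/* cross-check: no other value should look more common than the least common MCV */
	if (nmcv > 0 && selec > mcv_freqs[nmcv - 1])
		selec = mcv_freqs[nmcv - 1];

	return selec;
}

For example, with a null fraction of 0, three MCVs at 0.20, 0.10, and 0.05, and 100 distinct values, the remaining 0.65 is split across 97 other values, giving roughly 0.0067, comfortably below the 0.05 ceiling.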
@@ -319,14 +319,14 @@ eqsel(PG_FUNCTION_ARGS)
double ndistinct;
/*
- * Search is for a value that we do not know a priori, but we
- * will assume it is not NULL. Estimate the selectivity as
- * non-null fraction divided by number of distinct values, so
- * that we get a result averaged over all possible values
- * whether common or uncommon. (Essentially, we are assuming
- * that the not-yet-known comparison value is equally likely
- * to be any of the possible values, regardless of their
- * frequency in the table. Is that a good idea?)
+ * Search is for a value that we do not know a priori, but we will
+ * assume it is not NULL. Estimate the selectivity as non-null
+ * fraction divided by number of distinct values, so that we get a
+ * result averaged over all possible values whether common or
+ * uncommon. (Essentially, we are assuming that the not-yet-known
+ * comparison value is equally likely to be any of the possible
+ * values, regardless of their frequency in the table. Is that a
+ * good idea?)
*/
selec = 1.0 - stats->stanullfrac;
ndistinct = get_variable_numdistinct(&vardata);
@@ -334,8 +334,8 @@ eqsel(PG_FUNCTION_ARGS)
selec /= ndistinct;
/*
- * Cross-check: selectivity should never be estimated as more
- * than the most common value's.
+ * Cross-check: selectivity should never be estimated as more than
+ * the most common value's.
*/
if (get_attstatsslot(vardata.statsTuple,
vardata.atttype, vardata.atttypmod,
@@ -352,10 +352,10 @@ eqsel(PG_FUNCTION_ARGS)
else
{
/*
- * No VACUUM ANALYZE stats available, so make a guess using
- * estimated number of distinct values and assuming they are
- * equally common. (The guess is unlikely to be very good, but we
- * do know a few special cases.)
+ * No VACUUM ANALYZE stats available, so make a guess using estimated
+ * number of distinct values and assuming they are equally common.
+ * (The guess is unlikely to be very good, but we do know a few
+ * special cases.)
*/
selec = 1.0 / get_variable_numdistinct(&vardata);
}
@@ -386,17 +386,17 @@ neqsel(PG_FUNCTION_ARGS)
float8 result;
/*
- * We want 1 - eqsel() where the equality operator is the one
- * associated with this != operator, that is, its negator.
+ * We want 1 - eqsel() where the equality operator is the one associated
+ * with this != operator, that is, its negator.
*/
eqop = get_negator(operator);
if (eqop)
{
result = DatumGetFloat8(DirectFunctionCall4(eqsel,
PointerGetDatum(root),
- ObjectIdGetDatum(eqop),
+ ObjectIdGetDatum(eqop),
PointerGetDatum(args),
- Int32GetDatum(varRelid)));
+ Int32GetDatum(varRelid)));
}
else
{
@@ -447,9 +447,9 @@ scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
/*
* If we have most-common-values info, add up the fractions of the MCV
- * entries that satisfy MCV OP CONST. These fractions contribute
- * directly to the result selectivity. Also add up the total fraction
- * represented by MCV entries.
+ * entries that satisfy MCV OP CONST. These fractions contribute directly
+ * to the result selectivity. Also add up the total fraction represented
+ * by MCV entries.
*/
mcv_selec = 0.0;
sumcommon = 0.0;
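The summation the comment above describes amounts to a single pass over the MCV arrays. A sketch only, with a caller-supplied predicate standing in for invoking the operator's function on each most-common value:

#include <stdbool.h>

/*
 * Sum the MCV fractions that satisfy "value OP const", plus the total MCV
 * fraction.  satisfies_op(i) is a hypothetical stand-in for applying the
 * operator to the i'th most-common value.
 */
static void
sum_mcv_fractions(const double *numbers, int nnumbers,
				  bool (*satisfies_op) (int i),
				  double *mcv_selec, double *sumcommon)
{
	*mcv_selec = 0.0;
	*sumcommon = 0.0;
	for (int i = 0; i < nnumbers; i++)
	{
		if (satisfies_op(i))
			*mcv_selec += numbers[i];	/* contributes directly to the result */
		*sumcommon += numbers[i];		/* total fraction covered by the MCV list */
	}
}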
@@ -473,17 +473,17 @@ scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
}
/*
- * If there is a histogram, determine which bin the constant falls in,
- * and compute the resulting contribution to selectivity.
+ * If there is a histogram, determine which bin the constant falls in, and
+ * compute the resulting contribution to selectivity.
*
* Someday, VACUUM might store more than one histogram per rel/att,
- * corresponding to more than one possible sort ordering defined for
- * the column type. However, to make that work we will need to figure
- * out which staop to search for --- it's not necessarily the one we
- * have at hand! (For example, we might have a '<=' operator rather
- * than the '<' operator that will appear in staop.) For now, assume
- * that whatever appears in pg_statistic is sorted the same way our
- * operator sorts, or the reverse way if isgt is TRUE.
+ * corresponding to more than one possible sort ordering defined for the
+ * column type. However, to make that work we will need to figure out
+ * which staop to search for --- it's not necessarily the one we have at
+ * hand! (For example, we might have a '<=' operator rather than the '<'
+ * operator that will appear in staop.) For now, assume that whatever
+ * appears in pg_statistic is sorted the same way our operator sorts, or
+ * the reverse way if isgt is TRUE.
*/
hist_selec = 0.0;
@@ -511,10 +511,9 @@ scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
else
{
/*
- * Scan to find proper location. This could be made
- * faster by using a binary-search method, but it's
- * probably not worth the trouble for typical histogram
- * sizes.
+ * Scan to find proper location. This could be made faster by
+ * using a binary-search method, but it's probably not worth
+ * the trouble for typical histogram sizes.
*/
for (i = 1; i < nvalues; i++)
{
@@ -542,8 +541,8 @@ scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
* We have values[i-1] < constant < values[i].
*
* Convert the constant and the two nearest bin boundary
- * values to a uniform comparison scale, and do a
- * linear interpolation within this bin.
+ * values to a uniform comparison scale, and do a linear
+ * interpolation within this bin.
*/
if (convert_to_scalar(constval, consttype, &val,
values[i - 1], values[i],
@@ -564,10 +563,10 @@ scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
binfrac = (val - low) / (high - low);
/*
- * Watch out for the possibility that we got a
- * NaN or Infinity from the division. This
- * can happen despite the previous checks, if
- * for example "low" is -Infinity.
+ * Watch out for the possibility that we got a NaN
+ * or Infinity from the division. This can happen
+ * despite the previous checks, if for example
+ * "low" is -Infinity.
*/
if (isnan(binfrac) ||
binfrac < 0.0 || binfrac > 1.0)
@@ -577,22 +576,20 @@ scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
else
{
/*
- * Ideally we'd produce an error here, on the
- * grounds that the given operator shouldn't have
- * scalarXXsel registered as its selectivity func
- * unless we can deal with its operand types. But
- * currently, all manner of stuff is invoking
- * scalarXXsel, so give a default estimate until
- * that can be fixed.
+ * Ideally we'd produce an error here, on the grounds
+ * that the given operator shouldn't have scalarXXsel
+ * registered as its selectivity func unless we can
+ * deal with its operand types. But currently, all
+ * manner of stuff is invoking scalarXXsel, so give a
+ * default estimate until that can be fixed.
*/
binfrac = 0.5;
}
/*
- * Now, compute the overall selectivity across the
- * values represented by the histogram. We have i-1
- * full bins and binfrac partial bin below the
- * constant.
+ * Now, compute the overall selectivity across the values
+ * represented by the histogram. We have i-1 full bins
+ * and binfrac partial bin below the constant.
*/
histfrac = (double) (i - 1) + binfrac;
histfrac /= (double) (nvalues - 1);
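To make the interpolation concrete with made-up numbers: suppose the histogram has nvalues = 11 boundaries (10 bins), the constant falls between values[3] and values[4] (so i = 4), and after conversion to the comparison scale binfrac works out to 0.25. Then histfrac = (4 - 1 + 0.25) / (11 - 1) = 0.325, i.e. about 32.5% of the histogram-covered population is estimated to fall below the constant; for a greater-than operator the code uses 1 - histfrac instead.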
@@ -608,9 +605,9 @@ scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
hist_selec = isgt ? (1.0 - histfrac) : histfrac;
/*
- * The histogram boundaries are only approximate to begin
- * with, and may well be out of date anyway. Therefore, don't
- * believe extremely small or large selectivity estimates.
+ * The histogram boundaries are only approximate to begin with,
+ * and may well be out of date anyway. Therefore, don't believe
+ * extremely small or large selectivity estimates.
*/
if (hist_selec < 0.0001)
hist_selec = 0.0001;
@@ -623,8 +620,8 @@ scalarineqsel(PlannerInfo *root, Oid operator, bool isgt,
/*
* Now merge the results from the MCV and histogram calculations,
- * realizing that the histogram covers only the non-null values that
- * are not listed in MCV.
+ * realizing that the histogram covers only the non-null values that are
+ * not listed in MCV.
*/
selec = 1.0 - stats->stanullfrac - sumcommon;
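The merge the comment above begins is not fully visible in this hunk. One plausible way to finish it, assuming the histogram estimate applies only to the non-null, non-MCV remainder, is sketched below; the clamping and fallback details of the real function are omitted.

/*
 * Combine the MCV and histogram contributions: the MCV part is exact, the
 * histogram part is scaled by the fraction of rows it actually describes.
 * Sketch only.
 */
static double
merge_mcv_and_histogram(double nullfrac, double sumcommon,
						double mcv_selec, double hist_selec)
{
	/* fraction of rows the histogram covers: non-null values not in the MCV list */
	double	otherfrac = 1.0 - nullfrac - sumcommon;
	double	selec;

	if (otherfrac < 0.0)
		otherfrac = 0.0;

	selec = mcv_selec + hist_selec * otherfrac;

	/* keep the result inside [0,1] */
	if (selec < 0.0)
		selec = 0.0;
	else if (selec > 1.0)
		selec = 1.0;

	return selec;
}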
@@ -666,16 +663,15 @@ scalarltsel(PG_FUNCTION_ARGS)
double selec;
/*
- * If expression is not variable op something or something op
- * variable, then punt and return a default estimate.
+ * If expression is not variable op something or something op variable,
+ * then punt and return a default estimate.
*/
if (!get_restriction_variable(root, args, varRelid,
&vardata, &other, &varonleft))
PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
/*
- * Can't do anything useful if the something is not a constant,
- * either.
+ * Can't do anything useful if the something is not a constant, either.
*/
if (!IsA(other, Const))
{
@@ -684,8 +680,8 @@ scalarltsel(PG_FUNCTION_ARGS)
}
/*
- * If the constant is NULL, assume operator is strict and return zero,
- * ie, operator will never return TRUE.
+ * If the constant is NULL, assume operator is strict and return zero, ie,
+ * operator will never return TRUE.
*/
if (((Const *) other)->constisnull)
{
@@ -742,16 +738,15 @@ scalargtsel(PG_FUNCTION_ARGS)
double selec;
/*
- * If expression is not variable op something or something op
- * variable, then punt and return a default estimate.
+ * If expression is not variable op something or something op variable,
+ * then punt and return a default estimate.
*/
if (!get_restriction_variable(root, args, varRelid,
&vardata, &other, &varonleft))
PG_RETURN_FLOAT8(DEFAULT_INEQ_SEL);
/*
- * Can't do anything useful if the something is not a constant,
- * either.
+ * Can't do anything useful if the something is not a constant, either.
*/
if (!IsA(other, Const))
{
@@ -760,8 +755,8 @@ scalargtsel(PG_FUNCTION_ARGS)
}
/*
- * If the constant is NULL, assume operator is strict and return zero,
- * ie, operator will never return TRUE.
+ * If the constant is NULL, assume operator is strict and return zero, ie,
+ * operator will never return TRUE.
*/
if (((Const *) other)->constisnull)
{
@@ -841,8 +836,8 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype)
variable = (Node *) linitial(args);
/*
- * If the constant is NULL, assume operator is strict and return zero,
- * ie, operator will never return TRUE.
+ * If the constant is NULL, assume operator is strict and return zero, ie,
+ * operator will never return TRUE.
*/
if (((Const *) other)->constisnull)
{
@@ -853,10 +848,10 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype)
consttype = ((Const *) other)->consttype;
/*
- * The right-hand const is type text or bytea for all supported
- * operators. We do not expect to see binary-compatible types here,
- * since const-folding should have relabeled the const to exactly
- * match the operator's declared type.
+ * The right-hand const is type text or bytea for all supported operators.
+ * We do not expect to see binary-compatible types here, since
+ * const-folding should have relabeled the const to exactly match the
+ * operator's declared type.
*/
if (consttype != TEXTOID && consttype != BYTEAOID)
{
@@ -865,15 +860,15 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype)
}
/*
- * Similarly, the exposed type of the left-hand side should be one
- * of those we know. (Do not look at vardata.atttype, which might be
- * something binary-compatible but different.) We can use it to choose
+ * Similarly, the exposed type of the left-hand side should be one of
+ * those we know. (Do not look at vardata.atttype, which might be
+ * something binary-compatible but different.) We can use it to choose
* the index opclass from which we must draw the comparison operators.
*
* NOTE: It would be more correct to use the PATTERN opclasses than the
- * simple ones, but at the moment ANALYZE will not generate statistics
- * for the PATTERN operators. But our results are so approximate
- * anyway that it probably hardly matters.
+ * simple ones, but at the moment ANALYZE will not generate statistics for
+ * the PATTERN operators. But our results are so approximate anyway that
+ * it probably hardly matters.
*/
vartype = vardata.vartype;
@@ -904,8 +899,8 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype)
pstatus = pattern_fixed_prefix(patt, ptype, &prefix, &rest);
/*
- * If necessary, coerce the prefix constant to the right type. (The
- * "rest" constant need not be changed.)
+ * If necessary, coerce the prefix constant to the right type. (The "rest"
+ * constant need not be changed.)
*/
if (prefix && prefix->consttype != vartype)
{
@@ -915,11 +910,11 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype)
{
case TEXTOID:
prefixstr = DatumGetCString(DirectFunctionCall1(textout,
- prefix->constvalue));
+ prefix->constvalue));
break;
case BYTEAOID:
prefixstr = DatumGetCString(DirectFunctionCall1(byteaout,
- prefix->constvalue));
+ prefix->constvalue));
break;
default:
elog(ERROR, "unrecognized consttype: %u",
@@ -945,16 +940,15 @@ patternsel(PG_FUNCTION_ARGS, Pattern_Type ptype)
eqargs = list_make2(variable, prefix);
result = DatumGetFloat8(DirectFunctionCall4(eqsel,
PointerGetDatum(root),
- ObjectIdGetDatum(eqopr),
- PointerGetDatum(eqargs),
- Int32GetDatum(varRelid)));
+ ObjectIdGetDatum(eqopr),
+ PointerGetDatum(eqargs),
+ Int32GetDatum(varRelid)));
}
else
{
/*
* Not exact-match pattern. We estimate selectivity of the fixed
- * prefix and remainder of pattern separately, then combine the
- * two.
+ * prefix and remainder of pattern separately, then combine the two.
*/
Selectivity prefixsel;
Selectivity restsel;
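One simple way to combine the two pieces mentioned above, assuming the prefix condition and the remainder of the pattern behave independently, is plain multiplication with a floor. A sketch only, with a made-up floor value, not the code that actually follows in this function:

/*
 * Combine the selectivity of the fixed prefix with that of the pattern
 * remainder under an independence assumption.
 */
static double
combine_pattern_selectivity(double prefixsel, double restsel)
{
	double	selec = prefixsel * restsel;

	/* arbitrary illustrative floor so the estimate never reaches zero */
	if (selec < 0.0001)
		selec = 0.0001;
	return selec;
}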
@@ -1113,8 +1107,8 @@ booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
freq_true = 1.0 - numbers[0] - freq_null;
/*
- * Next derive frequency for false. Then use these as
- * appropriate to derive frequency for each case.
+ * Next derive frequency for false. Then use these as appropriate
+ * to derive frequency for each case.
*/
freq_false = 1.0 - freq_true - freq_null;
@@ -1157,10 +1151,9 @@ booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
else
{
/*
- * No most-common-value info available. Still have null
- * fraction information, so use it for IS [NOT] UNKNOWN.
- * Otherwise adjust for null fraction and assume an even split
- * for boolean tests.
+ * No most-common-value info available. Still have null fraction
+ * information, so use it for IS [NOT] UNKNOWN. Otherwise adjust
+ * for null fraction and assume an even split for boolean tests.
*/
switch (booltesttype)
{
@@ -1174,8 +1167,8 @@ booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
case IS_NOT_UNKNOWN:
/*
- * Select not unknown (not null) values. Calculate
- * from freq_null.
+ * Select not unknown (not null) values. Calculate from
+ * freq_null.
*/
selec = 1.0 - freq_null;
break;
@@ -1198,8 +1191,8 @@ booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
/*
* If we can't get variable statistics for the argument, perhaps
* clause_selectivity can do something with it. We ignore the
- * possibility of a NULL value when using clause_selectivity, and
- * just assume the value is either TRUE or FALSE.
+ * possibility of a NULL value when using clause_selectivity, and just
+ * assume the value is either TRUE or FALSE.
*/
switch (booltesttype)
{
@@ -1217,7 +1210,7 @@ booltestsel(PlannerInfo *root, BoolTestType booltesttype, Node *arg,
case IS_FALSE:
case IS_NOT_TRUE:
selec = 1.0 - (double) clause_selectivity(root, arg,
- varRelid, jointype);
+ varRelid, jointype);
break;
default:
elog(ERROR, "unrecognized booltesttype: %d",
@@ -1366,17 +1359,16 @@ eqjoinsel(PG_FUNCTION_ARGS)
if (have_mcvs1 && have_mcvs2)
{
/*
- * We have most-common-value lists for both relations. Run
- * through the lists to see which MCVs actually join to each other
- * with the given operator. This allows us to determine the exact
- * join selectivity for the portion of the relations represented
- * by the MCV lists. We still have to estimate for the remaining
- * population, but in a skewed distribution this gives us a big
- * leg up in accuracy. For motivation see the analysis in Y.
- * Ioannidis and S. Christodoulakis, "On the propagation of errors
- * in the size of join results", Technical Report 1018, Computer
- * Science Dept., University of Wisconsin, Madison, March 1991
- * (available from ftp.cs.wisc.edu).
+ * We have most-common-value lists for both relations. Run through
+ * the lists to see which MCVs actually join to each other with the
+ * given operator. This allows us to determine the exact join
+ * selectivity for the portion of the relations represented by the MCV
+ * lists. We still have to estimate for the remaining population, but
+ * in a skewed distribution this gives us a big leg up in accuracy.
+ * For motivation see the analysis in Y. Ioannidis and S.
+ * Christodoulakis, "On the propagation of errors in the size of join
+ * results", Technical Report 1018, Computer Science Dept., University
+ * of Wisconsin, Madison, March 1991 (available from ftp.cs.wisc.edu).
*/
FmgrInfo eqproc;
bool *hasmatch1;
@@ -1400,20 +1392,20 @@ eqjoinsel(PG_FUNCTION_ARGS)
hasmatch2 = (bool *) palloc0(nvalues2 * sizeof(bool));
/*
- * If we are doing any variant of JOIN_IN, pretend all the values
- * of the righthand relation are unique (ie, act as if it's been
+ * If we are doing any variant of JOIN_IN, pretend all the values of
+ * the righthand relation are unique (ie, act as if it's been
* DISTINCT'd).
*
- * NOTE: it might seem that we should unique-ify the lefthand input
- * when considering JOIN_REVERSE_IN. But this is not so, because
- * the join clause we've been handed has not been commuted from
- * the way the parser originally wrote it. We know that the
- * unique side of the IN clause is *always* on the right.
+ * NOTE: it might seem that we should unique-ify the lefthand input when
+ * considering JOIN_REVERSE_IN. But this is not so, because the join
+ * clause we've been handed has not been commuted from the way the
+ * parser originally wrote it. We know that the unique side of the IN
+ * clause is *always* on the right.
*
* NOTE: it would be dangerous to try to be smart about JOIN_LEFT or
* JOIN_RIGHT here, because we do not have enough information to
- * determine which var is really on which side of the join.
- * Perhaps someday we should pass in more information.
+ * determine which var is really on which side of the join. Perhaps
+ * someday we should pass in more information.
*/
if (jointype == JOIN_IN ||
jointype == JOIN_REVERSE_IN ||
@@ -1428,10 +1420,10 @@ eqjoinsel(PG_FUNCTION_ARGS)
}
/*
- * Note we assume that each MCV will match at most one member of
- * the other MCV list. If the operator isn't really equality,
- * there could be multiple matches --- but we don't look for them,
- * both for speed and because the math wouldn't add up...
+ * Note we assume that each MCV will match at most one member of the
+ * other MCV list. If the operator isn't really equality, there could
+ * be multiple matches --- but we don't look for them, both for speed
+ * and because the math wouldn't add up...
*/
matchprodfreq = 0.0;
nmatches = 0;
@@ -1480,8 +1472,8 @@ eqjoinsel(PG_FUNCTION_ARGS)
pfree(hasmatch2);
/*
- * Compute total frequency of non-null values that are not in the
- * MCV lists.
+ * Compute total frequency of non-null values that are not in the MCV
+ * lists.
*/
otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
@@ -1491,10 +1483,10 @@ eqjoinsel(PG_FUNCTION_ARGS)
/*
* We can estimate the total selectivity from the point of view of
* relation 1 as: the known selectivity for matched MCVs, plus
- * unmatched MCVs that are assumed to match against random members
- * of relation 2's non-MCV population, plus non-MCV values that
- * are assumed to match against random members of relation 2's
- * unmatched MCVs plus non-MCV values.
+ * unmatched MCVs that are assumed to match against random members of
+ * relation 2's non-MCV population, plus non-MCV values that are
+ * assumed to match against random members of relation 2's unmatched
+ * MCVs plus non-MCV values.
*/
totalsel1 = matchprodfreq;
if (nd2 > nvalues2)
@@ -1512,9 +1504,9 @@ eqjoinsel(PG_FUNCTION_ARGS)
/*
* Use the smaller of the two estimates. This can be justified in
- * essentially the same terms as given below for the no-stats
- * case: to a first approximation, we are estimating from the
- * point of view of the relation with smaller nd.
+ * essentially the same terms as given below for the no-stats case: to
+ * a first approximation, we are estimating from the point of view of
+ * the relation with smaller nd.
*/
selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
}
@@ -1522,24 +1514,23 @@ eqjoinsel(PG_FUNCTION_ARGS)
{
/*
* We do not have MCV lists for both sides. Estimate the join
- * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2).
- * This is plausible if we assume that the join operator is strict
- * and the non-null values are about equally distributed: a given
- * non-null tuple of rel1 will join to either zero or
- * N2*(1-nullfrac2)/nd2 rows of rel2, so total join rows are at
- * most N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join
- * selectivity of not more than (1-nullfrac1)*(1-nullfrac2)/nd2.
- * By the same logic it is not more than
- * (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression with MIN()
- * is an upper bound. Using the MIN() means we estimate from the
- * point of view of the relation with smaller nd (since the larger
- * nd is determining the MIN). It is reasonable to assume that
- * most tuples in this rel will have join partners, so the bound
- * is probably reasonably tight and should be taken as-is.
+ * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2). This
+ * is plausible if we assume that the join operator is strict and the
+ * non-null values are about equally distributed: a given non-null
+ * tuple of rel1 will join to either zero or N2*(1-nullfrac2)/nd2 rows
+ * of rel2, so total join rows are at most
+ * N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join selectivity of
+ * not more than (1-nullfrac1)*(1-nullfrac2)/nd2. By the same logic it
+ * is not more than (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression
+ * with MIN() is an upper bound. Using the MIN() means we estimate
+ * from the point of view of the relation with smaller nd (since the
+ * larger nd is determining the MIN). It is reasonable to assume that
+ * most tuples in this rel will have join partners, so the bound is
+ * probably reasonably tight and should be taken as-is.
*
* XXX Can we be smarter if we have an MCV list for just one side? It
- * seems that if we assume equal distribution for the other side,
- * we end up with the same answer anyway.
+ * seems that if we assume equal distribution for the other side, we
+ * end up with the same answer anyway.
*/
double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
double nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
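The comment above gives the no-stats estimate in closed form: MIN(1/nd1, 1/nd2) * (1 - nullfrac1) * (1 - nullfrac2). Written out as arithmetic, a sketch of the formula only, assuming both ndistinct values are at least 1:

/*
 * Join selectivity when MCV lists are not available for both sides.
 * MIN(1/nd1, 1/nd2) is the same as dividing by the larger ndistinct.
 */
static double
eqjoinsel_no_mcv_estimate(double nd1, double nd2,
						  double nullfrac1, double nullfrac2)
{
	double	selec = (1.0 - nullfrac1) * (1.0 - nullfrac2);

	if (nd1 > nd2)
		selec /= nd1;
	else
		selec /= nd2;

	return selec;
}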
@@ -1588,9 +1579,9 @@ neqjoinsel(PG_FUNCTION_ARGS)
{
result = DatumGetFloat8(DirectFunctionCall4(eqjoinsel,
PointerGetDatum(root),
- ObjectIdGetDatum(eqop),
+ ObjectIdGetDatum(eqop),
PointerGetDatum(args),
- Int16GetDatum(jointype)));
+ Int16GetDatum(jointype)));
}
else
{
@@ -1812,10 +1803,10 @@ mergejoinscansel(PlannerInfo *root, Node *clause,
*rightscan = selec;
/*
- * Only one of the two fractions can really be less than 1.0; believe
- * the smaller estimate and reset the other one to exactly 1.0. If we
- * get exactly equal estimates (as can easily happen with self-joins),
- * believe neither.
+ * Only one of the two fractions can really be less than 1.0; believe the
+ * smaller estimate and reset the other one to exactly 1.0. If we get
+ * exactly equal estimates (as can easily happen with self-joins), believe
+ * neither.
*/
if (*leftscan > *rightscan)
*leftscan = 1.0;
@@ -1837,9 +1828,9 @@ fail:
*/
typedef struct
{
- Node *var; /* might be an expression, not just a Var */
- RelOptInfo *rel; /* relation it belongs to */
- double ndistinct; /* # distinct values */
+ Node *var; /* might be an expression, not just a Var */
+ RelOptInfo *rel; /* relation it belongs to */
+ double ndistinct; /* # distinct values */
} GroupVarInfo;
static List *
@@ -1999,9 +1990,9 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
/*
* If we find any variable-free GROUP BY item, then either it is a
- * constant (and we can ignore it) or it contains a volatile
- * function; in the latter case we punt and assume that each input
- * row will yield a distinct group.
+ * constant (and we can ignore it) or it contains a volatile function;
+ * in the latter case we punt and assume that each input row will
+ * yield a distinct group.
*/
if (varshere == NIL)
{
@@ -2031,9 +2022,9 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
* Steps 3/4: group Vars by relation and estimate total numdistinct.
*
* For each iteration of the outer loop, we process the frontmost Var in
- * varinfos, plus all other Vars in the same relation. We remove
- * these Vars from the newvarinfos list for the next iteration. This
- * is the easiest way to group Vars of same rel together.
+ * varinfos, plus all other Vars in the same relation. We remove these
+ * Vars from the newvarinfos list for the next iteration. This is the
+ * easiest way to group Vars of same rel together.
*/
numdistinct = 1.0;
@@ -2075,11 +2066,11 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows)
if (rel->tuples > 0)
{
/*
- * Clamp to size of rel, or size of rel / 10 if multiple Vars.
- * The fudge factor is because the Vars are probably correlated
- * but we don't know by how much. We should never clamp to less
- * than the largest ndistinct value for any of the Vars, though,
- * since there will surely be at least that many groups.
+ * Clamp to size of rel, or size of rel / 10 if multiple Vars. The
+ * fudge factor is because the Vars are probably correlated but we
+ * don't know by how much. We should never clamp to less than the
+ * largest ndistinct value for any of the Vars, though, since
+ * there will surely be at least that many groups.
*/
double clamp = rel->tuples;
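The clamping rule described above, written out as a small sketch; nvars and maxndistinct are illustrative parameter names for the number of grouping Vars on this relation and the largest per-Var ndistinct, not identifiers from this file.

/*
 * Clamp a per-relation group-count estimate: no more than the relation
 * size (or a tenth of it when several Vars are involved, since they are
 * probably correlated), but never below the largest single-Var ndistinct.
 */
static double
clamp_group_estimate(double reldistinct, double reltuples,
					 int nvars, double maxndistinct)
{
	double	clamp = reltuples;

	if (nvars > 1)
	{
		clamp *= 0.1;				/* correlation fudge factor */
		if (clamp < maxndistinct)
			clamp = maxndistinct;	/* at least this many groups must exist */
	}
	if (reldistinct > clamp)
		reldistinct = clamp;

	return reldistinct;
}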
@@ -2179,8 +2170,8 @@ estimate_hash_bucketsize(PlannerInfo *root, Node *hashkey, double nbuckets)
else
{
/*
- * Believe a default ndistinct only if it came from stats.
- * Otherwise punt and return 0.1, per comments above.
+ * Believe a default ndistinct only if it came from stats. Otherwise
+ * punt and return 0.1, per comments above.
*/
if (ndistinct == DEFAULT_NUM_DISTINCT)
{
@@ -2195,21 +2186,20 @@ estimate_hash_bucketsize(PlannerInfo *root, Node *hashkey, double nbuckets)
avgfreq = (1.0 - stanullfrac) / ndistinct;
/*
- * Adjust ndistinct to account for restriction clauses. Observe we
- * are assuming that the data distribution is affected uniformly by
- * the restriction clauses!
+ * Adjust ndistinct to account for restriction clauses. Observe we are
+ * assuming that the data distribution is affected uniformly by the
+ * restriction clauses!
*
- * XXX Possibly better way, but much more expensive: multiply by
- * selectivity of rel's restriction clauses that mention the target
- * Var.
+ * XXX Possibly better way, but much more expensive: multiply by selectivity
+ * of rel's restriction clauses that mention the target Var.
*/
if (vardata.rel)
ndistinct *= vardata.rel->rows / vardata.rel->tuples;
/*
- * Initial estimate of bucketsize fraction is 1/nbuckets as long as
- * the number of buckets is less than the expected number of distinct
- * values; otherwise it is 1/ndistinct.
+ * Initial estimate of bucketsize fraction is 1/nbuckets as long as the
+ * number of buckets is less than the expected number of distinct values;
+ * otherwise it is 1/ndistinct.
*/
if (ndistinct > nbuckets)
estfract = 1.0 / nbuckets;
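With illustrative numbers: 1024 buckets and an adjusted ndistinct of 5000 give an initial estimate of 1/1024, about 0.001, since every bucket is expected to hold several distinct values; with only 200 distinct values the estimate would instead be 1/200 = 0.005, because at most 200 buckets can be occupied at all.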
@@ -2239,16 +2229,15 @@ estimate_hash_bucketsize(PlannerInfo *root, Node *hashkey, double nbuckets)
}
/*
- * Adjust estimated bucketsize upward to account for skewed
- * distribution.
+ * Adjust estimated bucketsize upward to account for skewed distribution.
*/
if (avgfreq > 0.0 && mcvfreq > avgfreq)
estfract *= mcvfreq / avgfreq;
/*
* Clamp bucketsize to sane range (the above adjustment could easily
- * produce an out-of-range result). We set the lower bound a little
- * above zero, since zero isn't a very sane result.
+ * produce an out-of-range result). We set the lower bound a little above
+ * zero, since zero isn't a very sane result.
*/
if (estfract < 1.0e-6)
estfract = 1.0e-6;
@@ -2303,18 +2292,18 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
double *scaledlobound, double *scaledhibound)
{
/*
- * Both the valuetypid and the boundstypid should exactly match
- * the declared input type(s) of the operator we are invoked for,
- * so we just error out if either is not recognized.
+ * Both the valuetypid and the boundstypid should exactly match the
+ * declared input type(s) of the operator we are invoked for, so we just
+ * error out if either is not recognized.
*
- * XXX The histogram we are interpolating between points of could belong
- * to a column that's only binary-compatible with the declared type.
- * In essence we are assuming that the semantics of binary-compatible
- * types are enough alike that we can use a histogram generated with one
- * type's operators to estimate selectivity for the other's. This is
- * outright wrong in some cases --- in particular signed versus unsigned
+ * XXX The histogram we are interpolating between points of could belong to a
+ * column that's only binary-compatible with the declared type. In essence
+ * we are assuming that the semantics of binary-compatible types are
+ * enough alike that we can use a histogram generated with one type's
+ * operators to estimate selectivity for the other's. This is outright
+ * wrong in some cases --- in particular signed versus unsigned
* interpretation could trip us up. But it's useful enough in the
- * majority of cases that we do it anyway. Should think about more
+ * majority of cases that we do it anyway. Should think about more
* rigorous ways to do it.
*/
switch (valuetypid)
@@ -2350,9 +2339,9 @@ convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
case TEXTOID:
case NAMEOID:
{
- char *valstr = convert_string_datum(value, valuetypid);
- char *lostr = convert_string_datum(lobound, boundstypid);
- char *histr = convert_string_datum(hibound, boundstypid);
+ char *valstr = convert_string_datum(value, valuetypid);
+ char *lostr = convert_string_datum(lobound, boundstypid);
+ char *histr = convert_string_datum(hibound, boundstypid);
convert_string_to_scalar(valstr, scaledvalue,
lostr, scaledlobound,
@@ -2444,8 +2433,8 @@ convert_numeric_to_scalar(Datum value, Oid typid)
}
/*
- * Can't get here unless someone tries to use scalarltsel/scalargtsel
- * on an operator with one numeric and one non-numeric operand.
+ * Can't get here unless someone tries to use scalarltsel/scalargtsel on
+ * an operator with one numeric and one non-numeric operand.
*/
elog(ERROR, "unsupported type: %u", typid);
return 0;
@@ -2563,8 +2552,7 @@ convert_one_string_to_scalar(char *value, int rangelo, int rangehi)
return 0.0; /* empty string has scalar value 0 */
/*
- * Since base is at least 10, need not consider more than about 20
- * chars
+ * Since base is at least 10, need not consider more than about 20 chars
*/
if (slen > 20)
slen = 20;
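The 20-character cutoff is safe on precision grounds: the character at position k contributes at most base^(-k) to the scalar value, so with a base of at least 10 anything past the 20th character shifts the result by less than 10^-20, far below the roughly 2.2e-16 relative precision of an IEEE double, and therefore cannot change the outcome.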
@@ -2628,8 +2616,8 @@ convert_string_datum(Datum value, Oid typid)
default:
/*
- * Can't get here unless someone tries to use scalarltsel on
- * an operator with one string and one non-string operand.
+ * Can't get here unless someone tries to use scalarltsel on an
+ * operator with one string and one non-string operand.
*/
elog(ERROR, "unsupported type: %u", typid);
return NULL;
@@ -2642,16 +2630,16 @@ convert_string_datum(Datum value, Oid typid)
size_t xfrmlen2;
/*
- * Note: originally we guessed at a suitable output buffer size,
- * and only needed to call strxfrm twice if our guess was too
- * small. However, it seems that some versions of Solaris have
- * buggy strxfrm that can write past the specified buffer length
- * in that scenario. So, do it the dumb way for portability.
+ * Note: originally we guessed at a suitable output buffer size, and
+ * only needed to call strxfrm twice if our guess was too small.
+ * However, it seems that some versions of Solaris have buggy strxfrm
+ * that can write past the specified buffer length in that scenario.
+ * So, do it the dumb way for portability.
*
- * Yet other systems (e.g., glibc) sometimes return a smaller value
- * from the second call than the first; thus the Assert must be <=
- * not == as you'd expect. Can't any of these people program
- * their way out of a paper bag?
+ * Yet other systems (e.g., glibc) sometimes return a smaller value from
+ * the second call than the first; thus the Assert must be <= not ==
+ * as you'd expect. Can't any of these people program their way out
+ * of a paper bag?
*/
xfrmlen = strxfrm(NULL, val, 0);
xfrmstr = (char *) palloc(xfrmlen + 1);
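The "dumb way" referred to above is the standard portable two-call strxfrm() idiom: the first call, with a null destination and zero length, only reports the space needed. A minimal standalone sketch using malloc() rather than the backend's palloc():

#include <stdlib.h>
#include <string.h>

/*
 * Transform a string with strxfrm() using the two-call pattern.  Returns a
 * malloc'd buffer the caller must free, or NULL on allocation failure.
 */
static char *
xfrm_copy(const char *val)
{
	size_t	xfrmlen = strxfrm(NULL, val, 0);	/* ask for the required length */
	char   *xfrmstr = malloc(xfrmlen + 1);

	if (xfrmstr == NULL)
		return NULL;
	(void) strxfrm(xfrmstr, val, xfrmlen + 1);	/* now do the real transform */
	return xfrmstr;
}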
@@ -2780,16 +2768,16 @@ convert_timevalue_to_scalar(Datum value, Oid typid)
Interval *interval = DatumGetIntervalP(value);
/*
- * Convert the month part of Interval to days using
- * assumed average month length of 365.25/12.0 days. Not
- * too accurate, but plenty good enough for our purposes.
+ * Convert the month part of Interval to days using assumed
+ * average month length of 365.25/12.0 days. Not too
+ * accurate, but plenty good enough for our purposes.
*/
#ifdef HAVE_INT64_TIMESTAMP
- return interval->time + interval->day * (double)USECS_PER_DAY +
- interval->month * ((DAYS_PER_YEAR / (double)MONTHS_PER_YEAR) * USECS_PER_DAY);
+ return interval->time + interval->day * (double) USECS_PER_DAY +
+ interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * USECS_PER_DAY);
#else
return interval->time + interval->day * SECS_PER_DAY +
- interval->month * ((DAYS_PER_YEAR / (double)MONTHS_PER_YEAR) * (double)SECS_PER_DAY);
+ interval->month * ((DAYS_PER_YEAR / (double) MONTHS_PER_YEAR) * (double) SECS_PER_DAY);
#endif
}
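Concretely, the assumed month length is 365.25 / 12 = 30.4375 days, so an interval of, say, 2 months contributes about 60.875 days' worth of microseconds (or of seconds in the non-integer-timestamp build).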
case RELTIMEOID:
@@ -2827,8 +2815,8 @@ convert_timevalue_to_scalar(Datum value, Oid typid)
}
/*
- * Can't get here unless someone tries to use scalarltsel/scalargtsel
- * on an operator with one timevalue and one non-timevalue operand.
+ * Can't get here unless someone tries to use scalarltsel/scalargtsel on
+ * an operator with one timevalue and one non-timevalue operand.
*/
elog(ERROR, "unsupported type: %u", typid);
return 0;
@@ -2875,8 +2863,8 @@ get_restriction_variable(PlannerInfo *root, List *args, int varRelid,
right = (Node *) lsecond(args);
/*
- * Examine both sides. Note that when varRelid is nonzero, Vars of
- * other relations will be treated as pseudoconstants.
+ * Examine both sides. Note that when varRelid is nonzero, Vars of other
+ * relations will be treated as pseudoconstants.
*/
examine_variable(root, left, varRelid, vardata);
examine_variable(root, right, varRelid, &rdata);
@@ -2995,18 +2983,18 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
{
vardata->statsTuple = SearchSysCache(STATRELATT,
ObjectIdGetDatum(relid),
- Int16GetDatum(var->varattno),
+ Int16GetDatum(var->varattno),
0, 0);
}
else
{
/*
- * XXX This means the Var comes from a JOIN or sub-SELECT.
- * Later add code to dig down into the join etc and see if we
- * can trace the variable to something with stats. (But
- * beware of sub-SELECTs with DISTINCT/GROUP BY/etc. Perhaps
- * there are no cases where this would really be useful,
- * because we'd have flattened the subselect if it is??)
+ * XXX This means the Var comes from a JOIN or sub-SELECT. Later
+ * add code to dig down into the join etc and see if we can trace
+ * the variable to something with stats. (But beware of
+ * sub-SELECTs with DISTINCT/GROUP BY/etc. Perhaps there are no
+ * cases where this would really be useful, because we'd have
+ * flattened the subselect if it is??)
*/
}
@@ -3031,9 +3019,9 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
if (varRelid == 0 || bms_is_member(varRelid, varnos))
{
onerel = find_base_rel(root,
- (varRelid ? varRelid : bms_singleton_member(varnos)));
+ (varRelid ? varRelid : bms_singleton_member(varnos)));
vardata->rel = onerel;
- node = basenode; /* strip any relabeling */
+ node = basenode; /* strip any relabeling */
}
/* else treat it as a constant */
break;
@@ -3042,13 +3030,13 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
{
/* treat it as a variable of a join relation */
vardata->rel = find_join_rel(root, varnos);
- node = basenode; /* strip any relabeling */
+ node = basenode; /* strip any relabeling */
}
else if (bms_is_member(varRelid, varnos))
{
/* ignore the vars belonging to other relations */
vardata->rel = find_base_rel(root, varRelid);
- node = basenode; /* strip any relabeling */
+ node = basenode; /* strip any relabeling */
/* note: no point in expressional-index search here */
}
/* else treat it as a constant */
@@ -3064,13 +3052,13 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
if (onerel)
{
/*
- * We have an expression in vars of a single relation. Try to
- * match it to expressional index columns, in hopes of finding
- * some statistics.
+ * We have an expression in vars of a single relation. Try to match
+ * it to expressional index columns, in hopes of finding some
+ * statistics.
*
- * XXX it's conceivable that there are multiple matches with
- * different index opclasses; if so, we need to pick one that
- * matches the operator we are estimating for. FIXME later.
+ * XXX it's conceivable that there are multiple matches with different
+ * index opclasses; if so, we need to pick one that matches the
+ * operator we are estimating for. FIXME later.
*/
ListCell *ilist;
@@ -3105,8 +3093,8 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
if (equal(node, indexkey))
{
/*
- * Found a match ... is it a unique index? Tests
- * here should match has_unique_index().
+ * Found a match ... is it a unique index? Tests here
+ * should match has_unique_index().
*/
if (index->unique &&
index->ncolumns == 1 &&
@@ -3114,8 +3102,8 @@ examine_variable(PlannerInfo *root, Node *node, int varRelid,
vardata->isunique = true;
/* Has it got stats? */
vardata->statsTuple = SearchSysCache(STATRELATT,
- ObjectIdGetDatum(index->indexoid),
- Int16GetDatum(pos + 1),
+ ObjectIdGetDatum(index->indexoid),
+ Int16GetDatum(pos + 1),
0, 0);
if (vardata->statsTuple)
break;
@@ -3145,9 +3133,9 @@ get_variable_numdistinct(VariableStatData *vardata)
double ntuples;
/*
- * Determine the stadistinct value to use. There are cases where we
- * can get an estimate even without a pg_statistic entry, or can get a
- * better value than is in pg_statistic.
+ * Determine the stadistinct value to use. There are cases where we can
+ * get an estimate even without a pg_statistic entry, or can get a better
+ * value than is in pg_statistic.
*/
if (HeapTupleIsValid(vardata->statsTuple))
{
@@ -3162,16 +3150,15 @@ get_variable_numdistinct(VariableStatData *vardata)
/*
* Special-case boolean columns: presumably, two distinct values.
*
- * Are there any other datatypes we should wire in special estimates
- * for?
+ * Are there any other datatypes we should wire in special estimates for?
*/
stadistinct = 2.0;
}
else
{
/*
- * We don't keep statistics for system columns, but in some cases
- * we can infer distinctness anyway.
+ * We don't keep statistics for system columns, but in some cases we
+ * can infer distinctness anyway.
*/
if (vardata->var && IsA(vardata->var, Var))
{
@@ -3199,8 +3186,8 @@ get_variable_numdistinct(VariableStatData *vardata)
/*
* If there is a unique index for the variable, assume it is unique no
- * matter what pg_statistic says (the statistics could be out of
- * date). Can skip search if we already think it's unique.
+ * matter what pg_statistic says (the statistics could be out of date).
+ * Can skip search if we already think it's unique.
*/
if (stadistinct != -1.0)
{
@@ -3235,8 +3222,8 @@ get_variable_numdistinct(VariableStatData *vardata)
return floor((-stadistinct * ntuples) + 0.5);
/*
- * With no data, estimate ndistinct = ntuples if the table is small,
- * else use default.
+ * With no data, estimate ndistinct = ntuples if the table is small, else
+ * use default.
*/
if (ntuples < DEFAULT_NUM_DISTINCT)
return ntuples;
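A simplified sketch of how the stadistinct conventions discussed above become a row-count estimate: positive means an absolute count, negative means a fraction of ntuples, zero means unknown. The unique-index override and other refinements are omitted here.

#include <math.h>

#define DEFAULT_NUM_DISTINCT 200

static double
numdistinct_sketch(double stadistinct, double ntuples)
{
    if (stadistinct > 0.0)
        return stadistinct;                             /* absolute count */
    if (stadistinct < 0.0)
        return floor((-stadistinct * ntuples) + 0.5);   /* fraction of rows */
    /* no data: small tables are assumed all-distinct, else use the default */
    return (ntuples < DEFAULT_NUM_DISTINCT) ? ntuples : DEFAULT_NUM_DISTINCT;
}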
@@ -3276,12 +3263,10 @@ get_variable_maximum(PlannerInfo *root, VariableStatData *vardata,
get_typlenbyval(vardata->atttype, &typLen, &typByVal);
/*
- * If there is a histogram, grab the last or first value as
- * appropriate.
+ * If there is a histogram, grab the last or first value as appropriate.
*
- * If there is a histogram that is sorted with some other operator than
- * the one we want, fail --- this suggests that there is data we can't
- * use.
+ * If there is a histogram that is sorted with some other operator than the
+ * one we want, fail --- this suggests that there is data we can't use.
*/
if (get_attstatsslot(vardata->statsTuple,
vardata->atttype, vardata->atttypmod,
@@ -3327,9 +3312,9 @@ get_variable_maximum(PlannerInfo *root, VariableStatData *vardata,
/*
* If we have most-common-values info, look for a large MCV. This is
- * needed even if we also have a histogram, since the histogram
- * excludes the MCVs. However, usually the MCVs will not be the
- * extreme values, so avoid unnecessary data copying.
+ * needed even if we also have a histogram, since the histogram excludes
+ * the MCVs. However, usually the MCVs will not be the extreme values, so
+ * avoid unnecessary data copying.
*/
if (get_attstatsslot(vardata->statsTuple,
vardata->atttype, vardata->atttypmod,
@@ -3411,7 +3396,7 @@ like_fixed_prefix(Const *patt_const, bool case_insensitive,
if (typeid == BYTEAOID && case_insensitive)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("case insensitive matching not supported on type bytea")));
+ errmsg("case insensitive matching not supported on type bytea")));
if (typeid != BYTEAOID)
{
@@ -3453,16 +3438,16 @@ like_fixed_prefix(Const *patt_const, bool case_insensitive,
}
/*
- * XXX I suspect isalpha() is not an adequately locale-sensitive
- * test for characters that can vary under case folding?
+ * XXX I suspect isalpha() is not an adequately locale-sensitive test
+ * for characters that can vary under case folding?
*/
if (case_insensitive && isalpha((unsigned char) patt[pos]))
break;
/*
* NOTE: this code used to think that %% meant a literal %, but
- * textlike() itself does not think that, and the SQL92 spec
- * doesn't say any such thing either.
+ * textlike() itself does not think that, and the SQL92 spec doesn't
+ * say any such thing either.
*/
match[match_pos++] = patt[pos];
}
@@ -3487,8 +3472,7 @@ like_fixed_prefix(Const *patt_const, bool case_insensitive,
/* in LIKE, an empty pattern is an exact match! */
if (pos == pattlen)
- return Pattern_Prefix_Exact; /* reached end of pattern, so
- * exact */
+ return Pattern_Prefix_Exact; /* reached end of pattern, so exact */
if (match_pos > 0)
return Pattern_Prefix_Partial;
@@ -3511,14 +3495,14 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive,
Oid typeid = patt_const->consttype;
/*
- * Should be unnecessary, there are no bytea regex operators defined.
- * As such, it should be noted that the rest of this function has *not*
- * been made safe for binary (possibly NULL containing) strings.
+ * Should be unnecessary, there are no bytea regex operators defined. As
+ * such, it should be noted that the rest of this function has *not* been
+ * made safe for binary (possibly NULL containing) strings.
*/
if (typeid == BYTEAOID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("regular-expression matching not supported on type bytea")));
+ errmsg("regular-expression matching not supported on type bytea")));
/* the right-hand const is type text for all of these */
patt = DatumGetCString(DirectFunctionCall1(textout, patt_const->constvalue));
@@ -3535,8 +3519,8 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive,
}
/*
- * If unquoted | is present at paren level 0 in pattern, then there
- * are multiple alternatives for the start of the string.
+ * If unquoted | is present at paren level 0 in pattern, then there are
+ * multiple alternatives for the start of the string.
*/
paren_depth = 0;
for (pos = 1; patt[pos]; pos++)
@@ -3568,15 +3552,14 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive,
prev_match_pos = match_pos = 0;
/* note start at pos 1 to skip leading ^ */
- for (prev_pos = pos = 1; patt[pos]; )
+ for (prev_pos = pos = 1; patt[pos];)
{
- int len;
+ int len;
/*
- * Check for characters that indicate multiple possible matches
- * here. XXX I suspect isalpha() is not an adequately
- * locale-sensitive test for characters that can vary under case
- * folding?
+ * Check for characters that indicate multiple possible matches here.
+ * XXX I suspect isalpha() is not an adequately locale-sensitive test
+ * for characters that can vary under case folding?
*/
if (patt[pos] == '.' ||
patt[pos] == '(' ||
@@ -3586,8 +3569,8 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive,
break;
/*
- * In AREs, backslash followed by alphanumeric is an escape, not
- * a quoted character. Must treat it as having multiple possible
+ * In AREs, backslash followed by alphanumeric is an escape, not a
+ * quoted character. Must treat it as having multiple possible
* matches.
*/
if (patt[pos] == '\\' && isalnum((unsigned char) patt[pos + 1]))
@@ -3595,8 +3578,7 @@ regex_fixed_prefix(Const *patt_const, bool case_insensitive,
/*
* Check for quantifiers. Except for +, this means the preceding
- * character is optional, so we must remove it from the prefix
- * too!
+ * character is optional, so we must remove it from the prefix too!
*/
if (patt[pos] == '*' ||
patt[pos] == '?' ||
@@ -3716,8 +3698,8 @@ prefix_selectivity(PlannerInfo *root, Node *variable,
/* Assume scalargtsel is appropriate for all supported types */
prefixsel = DatumGetFloat8(DirectFunctionCall4(scalargtsel,
PointerGetDatum(root),
- ObjectIdGetDatum(cmpopr),
- PointerGetDatum(cmpargs),
+ ObjectIdGetDatum(cmpopr),
+ PointerGetDatum(cmpargs),
Int32GetDatum(0)));
/*-------
@@ -3738,13 +3720,13 @@ prefix_selectivity(PlannerInfo *root, Node *variable,
/* Assume scalarltsel is appropriate for all supported types */
topsel = DatumGetFloat8(DirectFunctionCall4(scalarltsel,
PointerGetDatum(root),
- ObjectIdGetDatum(cmpopr),
- PointerGetDatum(cmpargs),
+ ObjectIdGetDatum(cmpopr),
+ PointerGetDatum(cmpargs),
Int32GetDatum(0)));
/*
- * Merge the two selectivities in the same way as for a range
- * query (see clauselist_selectivity()).
+ * Merge the two selectivities in the same way as for a range query
+ * (see clauselist_selectivity()).
*/
prefixsel = topsel + prefixsel - 1.0;
@@ -3752,21 +3734,20 @@ prefix_selectivity(PlannerInfo *root, Node *variable,
prefixsel += nulltestsel(root, IS_NULL, variable, 0);
/*
- * A zero or slightly negative prefixsel should be converted into
- * a small positive value; we probably are dealing with a very
- * tight range and got a bogus result due to roundoff errors.
- * However, if prefixsel is very negative, then we probably have
- * default selectivity estimates on one or both sides of the
- * range. In that case, insert a not-so-wildly-optimistic default
- * estimate.
+ * A zero or slightly negative prefixsel should be converted into a
+ * small positive value; we probably are dealing with a very tight
+ * range and got a bogus result due to roundoff errors. However, if
+ * prefixsel is very negative, then we probably have default
+ * selectivity estimates on one or both sides of the range. In that
+ * case, insert a not-so-wildly-optimistic default estimate.
*/
if (prefixsel <= 0.0)
{
if (prefixsel < -0.01)
{
/*
- * No data available --- use a default estimate that is
- * small, but not real small.
+ * No data available --- use a default estimate that is small,
+ * but not real small.
*/
prefixsel = 0.005;
}
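A minimal sketch of the merge-and-clamp step described above; gte_sel and lt_sel stand for the two range-bound selectivities, and the 1.0e-10 floor is an illustrative stand-in for the small positive fallback.

static double
merge_prefix_range(double gte_sel, double lt_sel, double null_sel)
{
    double  prefixsel = gte_sel + lt_sel - 1.0;

    prefixsel += null_sel;                  /* add back the null fraction */
    if (prefixsel <= 0.0)
        prefixsel = (prefixsel < -0.01) ? 0.005 : 1.0e-10;
    return prefixsel;
}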
@@ -3795,8 +3776,7 @@ prefix_selectivity(PlannerInfo *root, Node *variable,
#define FIXED_CHAR_SEL 0.20 /* about 1/5 */
#define CHAR_RANGE_SEL 0.25
-#define ANY_CHAR_SEL 0.9 /* not 1, since it won't match
- * end-of-string */
+#define ANY_CHAR_SEL 0.9 /* not 1, since it won't match end-of-string */
#define FULL_WILDCARD_SEL 5.0
#define PARTIAL_WILDCARD_SEL 2.0
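A rough sketch of how constants like these combine into a pattern selectivity, multiplying one factor per pattern character and capping at 1.0; escape handling and the exact backend rules are omitted.

static double
pattern_sel_sketch(const char *patt)
{
    double  sel = 1.0;

    for (; *patt; patt++)
    {
        if (*patt == '%')
            sel *= FULL_WILDCARD_SEL;
        else if (*patt == '_')
            sel *= ANY_CHAR_SEL;
        else
            sel *= FIXED_CHAR_SEL;
    }
    return (sel > 1.0) ? 1.0 : sel;
}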
@@ -3816,7 +3796,7 @@ like_selectivity(Const *patt_const, bool case_insensitive)
if (typeid == BYTEAOID && case_insensitive)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("case insensitive matching not supported on type bytea")));
+ errmsg("case insensitive matching not supported on type bytea")));
if (typeid != BYTEAOID)
{
@@ -3895,8 +3875,8 @@ regex_selectivity_sub(char *patt, int pattlen, bool case_insensitive)
else if (patt[pos] == '|' && paren_depth == 0)
{
/*
- * If unquoted | is present at paren level 0 in pattern, we
- * have multiple alternatives; sum their probabilities.
+ * If unquoted | is present at paren level 0 in pattern, we have
+ * multiple alternatives; sum their probabilities.
*/
sel += regex_selectivity_sub(patt + (pos + 1),
pattlen - (pos + 1),
@@ -3970,14 +3950,14 @@ regex_selectivity(Const *patt_const, bool case_insensitive)
Oid typeid = patt_const->consttype;
/*
- * Should be unnecessary, there are no bytea regex operators defined.
- * As such, it should be noted that the rest of this function has *not*
- * been made safe for binary (possibly NULL containing) strings.
+ * Should be unnecessary, there are no bytea regex operators defined. As
+ * such, it should be noted that the rest of this function has *not* been
+ * made safe for binary (possibly NULL containing) strings.
*/
if (typeid == BYTEAOID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("regular-expression matching not supported on type bytea")));
+ errmsg("regular-expression matching not supported on type bytea")));
/* the right-hand const is type text for all of these */
patt = DatumGetCString(DirectFunctionCall1(textout, patt_const->constvalue));
@@ -4062,7 +4042,7 @@ make_greater_string(const Const *str_const)
if (datatype == NAMEOID)
{
workstr = DatumGetCString(DirectFunctionCall1(nameout,
- str_const->constvalue));
+ str_const->constvalue));
len = strlen(workstr);
}
else if (datatype == BYTEAOID)
@@ -4084,7 +4064,7 @@ make_greater_string(const Const *str_const)
else
{
workstr = DatumGetCString(DirectFunctionCall1(textout,
- str_const->constvalue));
+ str_const->constvalue));
len = strlen(workstr);
}
@@ -4120,8 +4100,8 @@ make_greater_string(const Const *str_const)
*lastchar = savelastchar;
/*
- * Truncate off the last character, which might be more than 1
- * byte, depending on the character encoding.
+ * Truncate off the last character, which might be more than 1 byte,
+ * depending on the character encoding.
*/
if (datatype != BYTEAOID && pg_database_encoding_max_length() > 1)
len = pg_mbcliplen(workstr, len, len - 1);
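A single-byte, C-locale simplification of the idea above: bump the last byte of the prefix, and if it cannot be bumped, truncate and retry. The real code also verifies that the candidate actually sorts greater and handles multibyte encodings via pg_mbcliplen; this sketch does neither.

#include <stdlib.h>
#include <string.h>

static char *
make_greater_string_sketch(const char *s)
{
    size_t  len = strlen(s);
    char   *work = strdup(s);

    if (work == NULL)
        return NULL;
    while (len > 0)
    {
        unsigned char *lastchar = (unsigned char *) &work[len - 1];

        if (*lastchar < 255)
        {
            (*lastchar)++;          /* candidate "greater" string */
            return work;
        }
        work[--len] = '\0';         /* cannot bump: truncate and try again */
    }
    free(work);
    return NULL;                    /* ran out of characters */
}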
@@ -4221,27 +4201,27 @@ genericcostestimate(PlannerInfo *root,
List *selectivityQuals;
/*
- * If the index is partial, AND the index predicate with the
- * explicitly given indexquals to produce a more accurate idea of the
- * index selectivity. This may produce redundant clauses. We get rid
- * of exact duplicates in the code below. We expect that most cases
- * of partial redundancy (such as "x < 4" from the qual and "x < 5"
- * from the predicate) will be recognized and handled correctly by
- * clauselist_selectivity(). This assumption is somewhat fragile,
- * since it depends on predicate_implied_by() and clauselist_selectivity()
+ * If the index is partial, AND the index predicate with the explicitly
+ * given indexquals to produce a more accurate idea of the index
+ * selectivity. This may produce redundant clauses. We get rid of exact
+ * duplicates in the code below. We expect that most cases of partial
+ * redundancy (such as "x < 4" from the qual and "x < 5" from the
+ * predicate) will be recognized and handled correctly by
+ * clauselist_selectivity(). This assumption is somewhat fragile, since
+ * it depends on predicate_implied_by() and clauselist_selectivity()
* having similar capabilities, and there are certainly many cases where
- * we will end up with a too-low selectivity estimate. This will bias the
+ * we will end up with a too-low selectivity estimate. This will bias the
* system in favor of using partial indexes where possible, which is not
* necessarily a bad thing. But it'd be nice to do better someday.
*
- * Note that index->indpred and indexQuals are both in implicit-AND form,
- * so ANDing them together just takes merging the lists. However,
- * eliminating duplicates is a bit trickier because indexQuals
- * contains RestrictInfo nodes and the indpred does not. It is okay
- * to pass a mixed list to clauselist_selectivity, but we have to work
- * a bit to generate a list without logical duplicates. (We could
- * just list_union indpred and strippedQuals, but then we'd not get
- * caching of per-qual selectivity estimates.)
+ * Note that index->indpred and indexQuals are both in implicit-AND form, so
+ * ANDing them together just takes merging the lists. However,
+ * eliminating duplicates is a bit trickier because indexQuals contains
+ * RestrictInfo nodes and the indpred does not. It is okay to pass a
+ * mixed list to clauselist_selectivity, but we have to work a bit to
+ * generate a list without logical duplicates. (We could just list_union
+ * indpred and strippedQuals, but then we'd not get caching of per-qual
+ * selectivity estimates.)
*/
if (index->indpred != NIL)
{
@@ -4269,8 +4249,8 @@ genericcostestimate(PlannerInfo *root,
numIndexTuples = *indexSelectivity * index->rel->tuples;
/*
- * We can bound the number of tuples by the index size in any case.
- * Also, always estimate at least one tuple is touched, even when
+ * We can bound the number of tuples by the index size in any case. Also,
+ * always estimate at least one tuple is touched, even when
* indexSelectivity estimate is tiny.
*/
if (numIndexTuples > index->tuples)
@@ -4281,9 +4261,9 @@ genericcostestimate(PlannerInfo *root,
/*
* Estimate the number of index pages that will be retrieved.
*
- * For all currently-supported index types, the first page of the index
- * is a metadata page, and we should figure on fetching that plus a
- * pro-rated fraction of the remaining pages.
+ * For all currently-supported index types, the first page of the index is a
+ * metadata page, and we should figure on fetching that plus a pro-rated
+ * fraction of the remaining pages.
*/
if (index->pages > 1 && index->tuples > 0)
{
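Purely illustrative arithmetic for the page estimate described above (the exact formula used by genericcostestimate is not reproduced here): one metadata page plus a selectivity-prorated share of the remaining pages.

#include <math.h>

static double
index_pages_sketch(double indexSelectivity, double pages, double tuples)
{
    if (pages > 1 && tuples > 0)
        return 1.0 + ceil(indexSelectivity * (pages - 1.0));
    return 1.0;
}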
@@ -4304,15 +4284,15 @@ genericcostestimate(PlannerInfo *root,
/*
* CPU cost: any complex expressions in the indexquals will need to be
- * evaluated once at the start of the scan to reduce them to runtime
- * keys to pass to the index AM (see nodeIndexscan.c). We model the
- * per-tuple CPU costs as cpu_index_tuple_cost plus one
- * cpu_operator_cost per indexqual operator.
+ * evaluated once at the start of the scan to reduce them to runtime keys
+ * to pass to the index AM (see nodeIndexscan.c). We model the per-tuple
+ * CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost per
+ * indexqual operator.
*
- * Note: this neglects the possible costs of rechecking lossy operators
- * and OR-clause expressions. Detecting that that might be needed
- * seems more expensive than it's worth, though, considering all the
- * other inaccuracies here ...
+ * Note: this neglects the possible costs of rechecking lossy operators and
+ * OR-clause expressions. Detecting that that might be needed seems more
+ * expensive than it's worth, though, considering all the other
+ * inaccuracies here ...
*/
cost_qual_eval(&index_qual_cost, indexQuals);
qual_op_cost = cpu_operator_cost * list_length(indexQuals);
@@ -4351,15 +4331,14 @@ btcostestimate(PG_FUNCTION_ARGS)
ListCell *l;
/*
- * For a btree scan, only leading '=' quals plus inequality quals
- * for the immediately next attribute contribute to index selectivity
- * (these are the "boundary quals" that determine the starting and
- * stopping points of the index scan). Additional quals can suppress
- * visits to the heap, so it's OK to count them in indexSelectivity,
- * but they should not count for estimating numIndexTuples. So we must
- * examine the given indexQuals to find out which ones count as boundary
- * quals. We rely on the knowledge that they are given in index column
- * order.
+ * For a btree scan, only leading '=' quals plus inequality quals for the
+ * immediately next attribute contribute to index selectivity (these are
+ * the "boundary quals" that determine the starting and stopping points of
+ * the index scan). Additional quals can suppress visits to the heap, so
+ * it's OK to count them in indexSelectivity, but they should not count
+ * for estimating numIndexTuples. So we must examine the given indexQuals
+ * to find out which ones count as boundary quals. We rely on the
+ * knowledge that they are given in index column order.
*/
indexBoundQuals = NIL;
indexcol = 0;
@@ -4367,9 +4346,9 @@ btcostestimate(PG_FUNCTION_ARGS)
foreach(l, indexQuals)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
- Expr *clause;
- Oid clause_op;
- int op_strategy;
+ Expr *clause;
+ Oid clause_op;
+ int op_strategy;
Assert(IsA(rinfo, RestrictInfo));
clause = rinfo->clause;
@@ -4409,15 +4388,15 @@ btcostestimate(PG_FUNCTION_ARGS)
}
op_strategy = get_op_opclass_strategy(clause_op,
index->classlist[indexcol]);
- Assert(op_strategy != 0); /* not a member of opclass?? */
+ Assert(op_strategy != 0); /* not a member of opclass?? */
if (op_strategy == BTEqualStrategyNumber)
eqQualHere = true;
indexBoundQuals = lappend(indexBoundQuals, rinfo);
}
/*
- * If index is unique and we found an '=' clause for each column,
- * we can just assume numIndexTuples = 1 and skip the expensive
+ * If index is unique and we found an '=' clause for each column, we can
+ * just assume numIndexTuples = 1 and skip the expensive
* clauselist_selectivity calculations.
*/
if (index->unique && indexcol == index->ncolumns - 1 && eqQualHere)
@@ -4437,13 +4416,12 @@ btcostestimate(PG_FUNCTION_ARGS)
indexSelectivity, indexCorrelation);
/*
- * If we can get an estimate of the first column's ordering
- * correlation C from pg_statistic, estimate the index correlation as
- * C for a single-column index, or C * 0.75 for multiple columns.
- * (The idea here is that multiple columns dilute the importance of
- * the first column's ordering, but don't negate it entirely. Before
- * 8.0 we divided the correlation by the number of columns, but that
- * seems too strong.)
+ * If we can get an estimate of the first column's ordering correlation C
+ * from pg_statistic, estimate the index correlation as C for a
+ * single-column index, or C * 0.75 for multiple columns. (The idea here
+ * is that multiple columns dilute the importance of the first column's
+ * ordering, but don't negate it entirely. Before 8.0 we divided the
+ * correlation by the number of columns, but that seems too strong.)
*/
if (index->indexkeys[0] != 0)
{
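The rule in the comment above, as a one-line helper; varCorrelation stands for the first column's ordering correlation from pg_statistic.

static double
index_correlation_sketch(double varCorrelation, int ncolumns)
{
    /* single-column index: use C directly; multicolumn: dilute to C * 0.75 */
    return (ncolumns > 1) ? varCorrelation * 0.75 : varCorrelation;
}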
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 73e7bb8ea8a..d3090413c4e 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.154 2005/10/09 17:21:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.155 2005/10/15 02:49:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -116,7 +116,7 @@ timestamp_in(PG_FUNCTION_ARGS)
case DTK_INVALID:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"%s\" is no longer supported", str)));
+ errmsg("date/time value \"%s\" is no longer supported", str)));
TIMESTAMP_NOEND(result);
break;
@@ -138,7 +138,7 @@ timestamp_in(PG_FUNCTION_ARGS)
Datum
timestamp_out(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
char *result;
struct pg_tm tt,
*tm = &tt;
@@ -169,11 +169,12 @@ Datum
timestamp_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
int32 typmod = PG_GETARG_INT32(2);
- Timestamp timestamp;
+ Timestamp timestamp;
struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
@@ -203,7 +204,7 @@ timestamp_recv(PG_FUNCTION_ARGS)
Datum
timestamp_send(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
StringInfoData buf;
pq_begintypsend(&buf);
@@ -223,7 +224,7 @@ timestamp_send(PG_FUNCTION_ARGS)
Datum
timestamp_scale(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
int32 typmod = PG_GETARG_INT32(1);
Timestamp result;
@@ -257,7 +258,6 @@ AdjustTimestampForTypmod(Timestamp *time, int32 typmod)
INT64CONST(5),
INT64CONST(0)
};
-
#else
static const double TimestampScales[MAX_TIMESTAMP_PRECISION + 1] = {
1,
@@ -276,21 +276,21 @@ AdjustTimestampForTypmod(Timestamp *time, int32 typmod)
if (typmod < 0 || typmod > MAX_TIMESTAMP_PRECISION)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("timestamp(%d) precision must be between %d and %d",
- typmod, 0, MAX_TIMESTAMP_PRECISION)));
+ errmsg("timestamp(%d) precision must be between %d and %d",
+ typmod, 0, MAX_TIMESTAMP_PRECISION)));
/*
- * Note: this round-to-nearest code is not completely consistent
- * about rounding values that are exactly halfway between integral
- * values. On most platforms, rint() will implement
- * round-to-nearest-even, but the integer code always rounds up
- * (away from zero). Is it worth trying to be consistent?
+ * Note: this round-to-nearest code is not completely consistent about
+ * rounding values that are exactly halfway between integral values.
+ * On most platforms, rint() will implement round-to-nearest-even, but
+ * the integer code always rounds up (away from zero). Is it worth
+ * trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
if (*time >= INT64CONST(0))
{
*time = ((*time + TimestampOffsets[typmod]) / TimestampScales[typmod]) *
- TimestampScales[typmod];
+ TimestampScales[typmod];
}
else
{
@@ -298,7 +298,7 @@ AdjustTimestampForTypmod(Timestamp *time, int32 typmod)
* TimestampScales[typmod]);
}
#else
- *time = rint((double)*time * TimestampScales[typmod]) / TimestampScales[typmod];
+ *time = rint((double) *time * TimestampScales[typmod]) / TimestampScales[typmod];
#endif
}
}
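A side-by-side sketch of the two rounding styles the comment contrasts: the int64 path adds a half-step offset and truncates (rounding halfway cases away from zero), while the float path defers to rint(), which is typically round-to-nearest-even.

#include <math.h>
#include <stdint.h>

static int64_t
round_to_scale_int64(int64_t t, int64_t scale)
{
    int64_t offset = scale / 2;

    if (t >= 0)
        return ((t + offset) / scale) * scale;
    return -(((-t + offset) / scale) * scale);
}

static double
round_to_scale_double(double t, double scale)
{
    return rint(t * scale) / scale;
}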
@@ -359,7 +359,7 @@ timestamptz_in(PG_FUNCTION_ARGS)
case DTK_INVALID:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"%s\" is no longer supported", str)));
+ errmsg("date/time value \"%s\" is no longer supported", str)));
TIMESTAMP_NOEND(result);
break;
@@ -413,6 +413,7 @@ Datum
timestamptz_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -540,7 +541,7 @@ interval_in(PG_FUNCTION_ARGS)
case DTK_INVALID:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("date/time value \"%s\" is no longer supported", str)));
+ errmsg("date/time value \"%s\" is no longer supported", str)));
break;
default:
@@ -583,6 +584,7 @@ Datum
interval_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -643,6 +645,7 @@ interval_scale(PG_FUNCTION_ARGS)
PG_RETURN_INTERVAL_P(result);
}
+
/*
* Adjust interval for specified precision, in both YEAR to SECOND
* range and sub-second precision.
@@ -670,7 +673,6 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
INT64CONST(5),
INT64CONST(0)
};
-
#else
static const double IntervalScales[MAX_INTERVAL_PRECISION + 1] = {
1,
@@ -684,8 +686,8 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
#endif
/*
- * Unspecified range and precision? Then not necessary to adjust.
- * Setting typmod to -1 is the convention for all types.
+ * Unspecified range and precision? Then not necessary to adjust. Setting
+ * typmod to -1 is the convention for all types.
*/
if (typmod != -1)
{
@@ -727,9 +729,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
#ifdef HAVE_INT64_TIMESTAMP
interval->time = (interval->time / USECS_PER_HOUR) *
- USECS_PER_HOUR;
+ USECS_PER_HOUR;
#else
- interval->time = ((int)(interval->time / SECS_PER_HOUR)) * (double)SECS_PER_HOUR;
+ interval->time = ((int) (interval->time / SECS_PER_HOUR)) * (double) SECS_PER_HOUR;
#endif
}
else if (range == INTERVAL_MASK(MINUTE))
@@ -747,10 +749,10 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
hour = interval->time / USECS_PER_HOUR;
interval->time -= hour * USECS_PER_HOUR;
interval->time = (interval->time / USECS_PER_MINUTE) *
- USECS_PER_MINUTE;
+ USECS_PER_MINUTE;
#else
- TMODULO(interval->time, hour, (double)SECS_PER_HOUR);
- interval->time = ((int)(interval->time / SECS_PER_MINUTE)) * (double)SECS_PER_MINUTE;
+ TMODULO(interval->time, hour, (double) SECS_PER_HOUR);
+ interval->time = ((int) (interval->time / SECS_PER_MINUTE)) * (double) SECS_PER_MINUTE;
#endif
}
else if (range == INTERVAL_MASK(SECOND))
@@ -768,7 +770,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
minute = interval->time / USECS_PER_MINUTE;
interval->time -= minute * USECS_PER_MINUTE;
#else
- TMODULO(interval->time, minute, (double)SECS_PER_MINUTE);
+ TMODULO(interval->time, minute, (double) SECS_PER_MINUTE);
/* return subseconds too */
#endif
}
@@ -780,9 +782,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
#ifdef HAVE_INT64_TIMESTAMP
interval->time = (interval->time / USECS_PER_HOUR) *
- USECS_PER_HOUR;
+ USECS_PER_HOUR;
#else
- interval->time = ((int) (interval->time / SECS_PER_HOUR)) * (double)SECS_PER_HOUR;
+ interval->time = ((int) (interval->time / SECS_PER_HOUR)) * (double) SECS_PER_HOUR;
#endif
}
/* DAY TO MINUTE */
@@ -794,9 +796,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
#ifdef HAVE_INT64_TIMESTAMP
interval->time = (interval->time / USECS_PER_MINUTE) *
- USECS_PER_MINUTE;
+ USECS_PER_MINUTE;
#else
- interval->time = ((int)(interval->time / SECS_PER_MINUTE)) * (double)SECS_PER_MINUTE;
+ interval->time = ((int) (interval->time / SECS_PER_MINUTE)) * (double) SECS_PER_MINUTE;
#endif
}
/* DAY TO SECOND */
@@ -815,9 +817,9 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
#ifdef HAVE_INT64_TIMESTAMP
interval->time = (interval->time / USECS_PER_MINUTE) *
- USECS_PER_MINUTE;
+ USECS_PER_MINUTE;
#else
- interval->time = ((int)(interval->time / SECS_PER_MINUTE)) * (double)SECS_PER_MINUTE;
+ interval->time = ((int) (interval->time / SECS_PER_MINUTE)) * (double) SECS_PER_MINUTE;
#endif
}
/* HOUR TO SECOND */
@@ -835,7 +837,6 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
{
#ifdef HAVE_INT64_TIMESTAMP
int64 hour;
-
#else
double hour;
#endif
@@ -847,7 +848,7 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
hour = interval->time / USECS_PER_HOUR;
interval->time -= hour * USECS_PER_HOUR;
#else
- TMODULO(interval->time, hour, (double)SECS_PER_HOUR);
+ TMODULO(interval->time, hour, (double) SECS_PER_HOUR);
#endif
}
else
@@ -859,36 +860,35 @@ AdjustIntervalForTypmod(Interval *interval, int32 typmod)
if (precision < 0 || precision > MAX_INTERVAL_PRECISION)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("interval(%d) precision must be between %d and %d",
- precision, 0, MAX_INTERVAL_PRECISION)));
+ errmsg("interval(%d) precision must be between %d and %d",
+ precision, 0, MAX_INTERVAL_PRECISION)));
/*
- * Note: this round-to-nearest code is not completely
- * consistent about rounding values that are exactly halfway
- * between integral values. On most platforms, rint() will
- * implement round-to-nearest-even, but the integer code
- * always rounds up (away from zero). Is it worth trying to
- * be consistent?
+ * Note: this round-to-nearest code is not completely consistent
+ * about rounding values that are exactly halfway between integral
+ * values. On most platforms, rint() will implement
+ * round-to-nearest-even, but the integer code always rounds up
+ * (away from zero). Is it worth trying to be consistent?
*/
#ifdef HAVE_INT64_TIMESTAMP
if (interval->time >= INT64CONST(0))
{
interval->time = ((interval->time +
- IntervalOffsets[precision]) /
- IntervalScales[precision]) *
- IntervalScales[precision];
+ IntervalOffsets[precision]) /
+ IntervalScales[precision]) *
+ IntervalScales[precision];
}
else
{
interval->time = -(((-interval->time +
- IntervalOffsets[precision]) /
+ IntervalOffsets[precision]) /
IntervalScales[precision]) *
- IntervalScales[precision]);
+ IntervalScales[precision]);
}
#else
interval->time = rint(((double) interval->time) *
- IntervalScales[precision]) /
- IntervalScales[precision];
+ IntervalScales[precision]) /
+ IntervalScales[precision];
#endif
}
}
@@ -1016,16 +1016,16 @@ dt2time(Timestamp jd, int *hour, int *min, int *sec, fsec_t *fsec)
* timezone) will be used.
*/
int
-timestamp2tm(Timestamp dt, int *tzp, struct pg_tm *tm, fsec_t *fsec, char **tzn, pg_tz *attimezone)
+timestamp2tm(Timestamp dt, int *tzp, struct pg_tm * tm, fsec_t *fsec, char **tzn, pg_tz *attimezone)
{
- Timestamp date;
+ Timestamp date;
Timestamp time;
pg_time_t utime;
/*
- * If HasCTZSet is true then we have a brute force time zone
- * specified. Go ahead and rotate to the local time zone since we will
- * later bypass any calls which adjust the tm fields.
+ * If HasCTZSet is true then we have a brute force time zone specified. Go
+ * ahead and rotate to the local time zone since we will later bypass any
+ * calls which adjust the tm fields.
*/
if (attimezone == NULL && HasCTZSet && tzp != NULL)
{
@@ -1057,7 +1057,7 @@ timestamp2tm(Timestamp dt, int *tzp, struct pg_tm *tm, fsec_t *fsec, char **tzn,
dt2time(time, &tm->tm_hour, &tm->tm_min, &tm->tm_sec, fsec);
#else
time = dt;
- TMODULO(time, date, (double)SECS_PER_DAY);
+ TMODULO(time, date, (double) SECS_PER_DAY);
if (time < 0)
{
@@ -1082,7 +1082,7 @@ recalc_t:
if (*fsec >= 1.0)
{
time = ceil(time);
- if (time >= (double)SECS_PER_DAY)
+ if (time >= (double) SECS_PER_DAY)
{
time = 0;
date += 1;
@@ -1104,8 +1104,8 @@ recalc_t:
}
/*
- * We have a brute force time zone per SQL99? Then use it without
- * change since we have already rotated to the time zone.
+ * We have a brute force time zone per SQL99? Then use it without change
+ * since we have already rotated to the time zone.
*/
if (attimezone == NULL && HasCTZSet)
{
@@ -1119,14 +1119,14 @@ recalc_t:
}
/*
- * If the time falls within the range of pg_time_t, use pg_localtime()
- * to rotate to the local time zone.
+ * If the time falls within the range of pg_time_t, use pg_localtime() to
+ * rotate to the local time zone.
*
* First, convert to an integral timestamp, avoiding possibly
* platform-specific roundoff-in-wrong-direction errors, and adjust to
- * Unix epoch. Then see if we can convert to pg_time_t without loss.
- * This coding avoids hardwiring any assumptions about the width of
- * pg_time_t, so it should behave sanely on machines without int64.
+ * Unix epoch. Then see if we can convert to pg_time_t without loss. This
+ * coding avoids hardwiring any assumptions about the width of pg_time_t,
+ * so it should behave sanely on machines without int64.
*/
#ifdef HAVE_INT64_TIMESTAMP
dt = (dt - *fsec) / USECS_PER_SEC +
@@ -1139,7 +1139,7 @@ recalc_t:
if ((Timestamp) utime == dt)
{
struct pg_tm *tx = pg_localtime(&utime,
- attimezone ? attimezone : global_timezone);
+ attimezone ? attimezone : global_timezone);
tm->tm_year = tx->tm_year + 1900;
tm->tm_mon = tx->tm_mon + 1;
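A simplified stand-in for the loss check just shown, using the 10957-day gap between the PostgreSQL epoch (2000-01-01) and the Unix epoch; time_t plays the role of pg_time_t here, and the function name is illustrative.

#include <time.h>

#define SECS_PER_DAY            86400
#define PG_UNIX_EPOCH_OFFSET    (10957.0 * SECS_PER_DAY)    /* 2000-01-01 minus 1970-01-01 */

/* Returns 0 and sets *out if the value round-trips through time_t without
 * loss, -1 otherwise, mirroring the check above. */
static int
pg_seconds_to_unix(double pg_seconds, time_t *out)
{
    double  unixsecs = pg_seconds + PG_UNIX_EPOCH_OFFSET;
    time_t  utime = (time_t) unixsecs;

    if ((double) utime != unixsecs)
        return -1;
    *out = utime;
    return 0;
}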
@@ -1180,13 +1180,13 @@ recalc_t:
* Returns -1 on failure (value out of range).
*/
int
-tm2timestamp(struct pg_tm *tm, fsec_t fsec, int *tzp, Timestamp *result)
+tm2timestamp(struct pg_tm * tm, fsec_t fsec, int *tzp, Timestamp *result)
{
#ifdef HAVE_INT64_TIMESTAMP
- int date;
+ int date;
int64 time;
#else
- double date,
+ double date,
time;
#endif
@@ -1220,7 +1220,7 @@ tm2timestamp(struct pg_tm *tm, fsec_t fsec, int *tzp, Timestamp *result)
* Convert a interval data type to a tm structure.
*/
int
-interval2tm(Interval span, struct pg_tm *tm, fsec_t *fsec)
+interval2tm(Interval span, struct pg_tm * tm, fsec_t *fsec)
{
#ifdef HAVE_INT64_TIMESTAMP
int64 time;
@@ -1242,8 +1242,8 @@ interval2tm(Interval span, struct pg_tm *tm, fsec_t *fsec)
*fsec = time - (tm->tm_sec * USECS_PER_SEC);
#else
recalc:
- TMODULO(time, tm->tm_hour, (double)SECS_PER_HOUR);
- TMODULO(time, tm->tm_min, (double)SECS_PER_MINUTE);
+ TMODULO(time, tm->tm_hour, (double) SECS_PER_HOUR);
+ TMODULO(time, tm->tm_min, (double) SECS_PER_MINUTE);
TMODULO(time, tm->tm_sec, 1.0);
time = TSROUND(time);
/* roundoff may need to propagate to higher-order fields */
@@ -1259,18 +1259,18 @@ recalc:
}
int
-tm2interval(struct pg_tm *tm, fsec_t fsec, Interval *span)
+tm2interval(struct pg_tm * tm, fsec_t fsec, Interval *span)
{
span->month = tm->tm_year * MONTHS_PER_YEAR + tm->tm_mon;
- span->day = tm->tm_mday;
+ span->day = tm->tm_mday;
#ifdef HAVE_INT64_TIMESTAMP
span->time = (((((tm->tm_hour * INT64CONST(60)) +
- tm->tm_min) * INT64CONST(60)) +
- tm->tm_sec) * USECS_PER_SEC) + fsec;
+ tm->tm_min) * INT64CONST(60)) +
+ tm->tm_sec) * USECS_PER_SEC) + fsec;
#else
- span->time = (((tm->tm_hour * (double)MINS_PER_HOUR) +
- tm->tm_min) * (double)SECS_PER_MINUTE) +
- tm->tm_sec + fsec;
+ span->time = (((tm->tm_hour * (double) MINS_PER_HOUR) +
+ tm->tm_min) * (double) SECS_PER_MINUTE) +
+ tm->tm_sec + fsec;
#endif
return 0;
@@ -1282,7 +1282,6 @@ time2t(const int hour, const int min, const int sec, const fsec_t fsec)
{
return (((((hour * MINS_PER_HOUR) + min) * SECS_PER_MINUTE) + sec) * USECS_PER_SEC) + fsec;
} /* time2t() */
-
#else
static double
time2t(const int hour, const int min, const int sec, const fsec_t fsec)
@@ -1311,7 +1310,7 @@ dt2local(Timestamp dt, int tz)
Datum
timestamp_finite(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
PG_RETURN_BOOL(!TIMESTAMP_NOT_FINITE(timestamp));
}
@@ -1328,7 +1327,7 @@ interval_finite(PG_FUNCTION_ARGS)
*---------------------------------------------------------*/
void
-GetEpochTime(struct pg_tm *tm)
+GetEpochTime(struct pg_tm * tm)
{
struct pg_tm *t0;
pg_time_t epoch = 0;
@@ -1379,8 +1378,8 @@ timestamp_cmp_internal(Timestamp dt1, Timestamp dt2)
* When using float representation, we have to be wary of NaNs.
*
* We consider all NANs to be equal and larger than any non-NAN. This is
- * somewhat arbitrary; the important thing is to have a consistent
- * sort order.
+ * somewhat arbitrary; the important thing is to have a consistent sort
+ * order.
*/
if (isnan(dt1))
{
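The NaN ordering rule spelled out above, as a small comparator sketch for the float-timestamp case.

#include <math.h>

static int
timestamp_cmp_sketch(double dt1, double dt2)
{
    if (isnan(dt1))
        return isnan(dt2) ? 0 : 1;      /* NaNs equal each other, beat non-NaNs */
    if (isnan(dt2))
        return -1;
    return (dt1 < dt2) ? -1 : (dt1 > dt2) ? 1 : 0;
}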
@@ -1667,10 +1666,10 @@ interval_cmp_internal(Interval *interval1, Interval *interval2)
span2 += interval2->month * INT64CONST(30) * USECS_PER_DAY;
span2 += interval2->day * INT64CONST(24) * USECS_PER_HOUR;
#else
- span1 += interval1->month * ((double)DAYS_PER_MONTH * SECS_PER_DAY);
- span1 += interval1->day * ((double)HOURS_PER_DAY * SECS_PER_HOUR);
- span2 += interval2->month * ((double)DAYS_PER_MONTH * SECS_PER_DAY);
- span2 += interval2->day * ((double)HOURS_PER_DAY * SECS_PER_HOUR);
+ span1 += interval1->month * ((double) DAYS_PER_MONTH * SECS_PER_DAY);
+ span1 += interval1->day * ((double) HOURS_PER_DAY * SECS_PER_HOUR);
+ span2 += interval2->month * ((double) DAYS_PER_MONTH * SECS_PER_DAY);
+ span2 += interval2->day * ((double) HOURS_PER_DAY * SECS_PER_HOUR);
#endif
return ((span1 < span2) ? -1 : (span1 > span2) ? 1 : 0);
@@ -1749,11 +1748,11 @@ interval_hash(PG_FUNCTION_ARGS)
/*
* Specify hash length as sizeof(double) + sizeof(int4), not as
- * sizeof(Interval), so that any garbage pad bytes in the structure
- * won't be included in the hash!
+ * sizeof(Interval), so that any garbage pad bytes in the structure won't
+ * be included in the hash!
*/
- return hash_any((unsigned char *) key,
- sizeof(key->time) + sizeof(key->day) + sizeof(key->month));
+ return hash_any((unsigned char *) key,
+ sizeof(key->time) + sizeof(key->day) + sizeof(key->month));
}
/* overlaps_timestamp() --- implements the SQL92 OVERLAPS operator.
@@ -1766,9 +1765,9 @@ Datum
overlaps_timestamp(PG_FUNCTION_ARGS)
{
/*
- * The arguments are Timestamps, but we leave them as generic Datums
- * to avoid unnecessary conversions between value and reference forms
- * --- not to mention possible dereferences of null pointers.
+ * The arguments are Timestamps, but we leave them as generic Datums to
+ * avoid unnecessary conversions between value and reference forms --- not
+ * to mention possible dereferences of null pointers.
*/
Datum ts1 = PG_GETARG_DATUM(0);
Datum te1 = PG_GETARG_DATUM(1);
@@ -1785,9 +1784,9 @@ overlaps_timestamp(PG_FUNCTION_ARGS)
DatumGetBool(DirectFunctionCall2(timestamp_lt,t1,t2))
/*
- * If both endpoints of interval 1 are null, the result is null
- * (unknown). If just one endpoint is null, take ts1 as the non-null
- * one. Otherwise, take ts1 as the lesser endpoint.
+ * If both endpoints of interval 1 are null, the result is null (unknown).
+ * If just one endpoint is null, take ts1 as the non-null one. Otherwise,
+ * take ts1 as the lesser endpoint.
*/
if (ts1IsNull)
{
@@ -1835,8 +1834,8 @@ overlaps_timestamp(PG_FUNCTION_ARGS)
if (TIMESTAMP_GT(ts1, ts2))
{
/*
- * This case is ts1 < te2 OR te1 < te2, which may look redundant
- * but in the presence of nulls it's not quite completely so.
+ * This case is ts1 < te2 OR te1 < te2, which may look redundant but
+ * in the presence of nulls it's not quite completely so.
*/
if (te2IsNull)
PG_RETURN_NULL();
@@ -1846,8 +1845,8 @@ overlaps_timestamp(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
/*
- * If te1 is not null then we had ts1 <= te1 above, and we just
- * found ts1 >= te2, hence te1 >= te2.
+ * If te1 is not null then we had ts1 <= te1 above, and we just found
+ * ts1 >= te2, hence te1 >= te2.
*/
PG_RETURN_BOOL(false);
}
@@ -1862,8 +1861,8 @@ overlaps_timestamp(PG_FUNCTION_ARGS)
PG_RETURN_NULL();
/*
- * If te2 is not null then we had ts2 <= te2 above, and we just
- * found ts2 >= te1, hence te2 >= te1.
+ * If te2 is not null then we had ts2 <= te2 above, and we just found
+ * ts2 >= te1, hence te2 >= te1.
*/
PG_RETURN_BOOL(false);
}
@@ -1871,8 +1870,7 @@ overlaps_timestamp(PG_FUNCTION_ARGS)
{
/*
* For ts1 = ts2 the spec says te1 <> te2 OR te1 = te2, which is a
- * rather silly way of saying "true if both are nonnull, else
- * null".
+ * rather silly way of saying "true if both are nonnull, else null".
*/
if (te1IsNull || te2IsNull)
PG_RETURN_NULL();
@@ -1938,13 +1936,13 @@ timestamp_mi(PG_FUNCTION_ARGS)
result->day = 0;
result = DatumGetIntervalP(DirectFunctionCall1(interval_justify_hours,
- IntervalPGetDatum(result)));
+ IntervalPGetDatum(result)));
PG_RETURN_INTERVAL_P(result);
}
/* interval_justify_hours()
* Adjust interval so 'time' contains less than a whole day, and
- * 'day' contains an integral number of days. This is useful for
+ * 'day' contains an integral number of days. This is useful for
* situations (such as non-TZ) where '1 day' = '24 hours' is valid,
* e.g. interval subtraction and division. The SQL standard requires
* such conversion in these cases, but not the conversion of days to months.
@@ -1952,8 +1950,8 @@ timestamp_mi(PG_FUNCTION_ARGS)
Datum
interval_justify_hours(PG_FUNCTION_ARGS)
{
- Interval *span = PG_GETARG_INTERVAL_P(0);
- Interval *result;
+ Interval *span = PG_GETARG_INTERVAL_P(0);
+ Interval *result;
result = (Interval *) palloc(sizeof(Interval));
result->month = span->month;
@@ -1963,8 +1961,8 @@ interval_justify_hours(PG_FUNCTION_ARGS)
result->time += span->day * USECS_PER_DAY;
TMODULO(result->time, result->day, USECS_PER_DAY);
#else
- result->time += span->day * (double)SECS_PER_DAY;
- TMODULO(result->time, result->day, (double)SECS_PER_DAY);
+ result->time += span->day * (double) SECS_PER_DAY;
+ TMODULO(result->time, result->day, (double) SECS_PER_DAY);
#endif
PG_RETURN_INTERVAL_P(result);
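A compact sketch of the justify_hours idea under the int64 representation: fold whole 24-hour chunks of the time field into the day field. The struct layout here is illustrative, not the backend Interval definition.

#include <stdint.h>

#define USECS_PER_DAY   INT64_C(86400000000)

typedef struct
{
    int64_t time;       /* microseconds */
    int32_t day;
    int32_t month;
} IntervalSketch;

static void
justify_hours_sketch(IntervalSketch *span)
{
    span->time += (int64_t) span->day * USECS_PER_DAY;
    span->day = (int32_t) (span->time / USECS_PER_DAY);
    span->time -= (int64_t) span->day * USECS_PER_DAY;
}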
@@ -1977,8 +1975,8 @@ interval_justify_hours(PG_FUNCTION_ARGS)
Datum
interval_justify_days(PG_FUNCTION_ARGS)
{
- Interval *span = PG_GETARG_INTERVAL_P(0);
- Interval *result;
+ Interval *span = PG_GETARG_INTERVAL_P(0);
+ Interval *result;
result = (Interval *) palloc(sizeof(Interval));
result->day = span->day;
@@ -2003,7 +2001,7 @@ interval_justify_days(PG_FUNCTION_ARGS)
Datum
timestamp_pl_interval(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
Interval *span = PG_GETARG_INTERVAL_P(1);
Timestamp result;
@@ -2050,7 +2048,7 @@ timestamp_pl_interval(PG_FUNCTION_ARGS)
*tm = &tt;
fsec_t fsec;
int julian;
-
+
if (timestamp2tm(timestamp, NULL, tm, &fsec, NULL, NULL) != 0)
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
@@ -2076,7 +2074,7 @@ timestamp_pl_interval(PG_FUNCTION_ARGS)
Datum
timestamp_mi_interval(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
Interval *span = PG_GETARG_INTERVAL_P(1);
Interval tspan;
@@ -2277,7 +2275,9 @@ interval_mul(PG_FUNCTION_ARGS)
{
Interval *span = PG_GETARG_INTERVAL_P(0);
float8 factor = PG_GETARG_FLOAT8(1);
- double month_remainder, day_remainder, month_remainder_days;
+ double month_remainder,
+ day_remainder,
+ month_remainder_days;
Interval *result;
result = (Interval *) palloc(sizeof(Interval));
@@ -2303,7 +2303,7 @@ interval_mul(PG_FUNCTION_ARGS)
#endif
result = DatumGetIntervalP(DirectFunctionCall1(interval_justify_hours,
- IntervalPGetDatum(result)));
+ IntervalPGetDatum(result)));
PG_RETURN_INTERVAL_P(result);
}
@@ -2322,7 +2322,9 @@ interval_div(PG_FUNCTION_ARGS)
{
Interval *span = PG_GETARG_INTERVAL_P(0);
float8 factor = PG_GETARG_FLOAT8(1);
- double month_remainder, day_remainder, month_remainder_days;
+ double month_remainder,
+ day_remainder,
+ month_remainder_days;
Interval *result;
result = (Interval *) palloc(sizeof(Interval));
@@ -2354,7 +2356,7 @@ interval_div(PG_FUNCTION_ARGS)
#endif
result = DatumGetIntervalP(DirectFunctionCall1(interval_justify_hours,
- IntervalPGetDatum(result)));
+ IntervalPGetDatum(result)));
PG_RETURN_INTERVAL_P(result);
}
@@ -2386,10 +2388,10 @@ interval_accum(PG_FUNCTION_ARGS)
elog(ERROR, "expected 2-element interval array");
/*
- * XXX memcpy, instead of just extracting a pointer, to work around
- * buggy array code: it won't ensure proper alignment of Interval
- * objects on machines where double requires 8-byte alignment. That
- * should be fixed, but in the meantime...
+ * XXX memcpy, instead of just extracting a pointer, to work around buggy
+ * array code: it won't ensure proper alignment of Interval objects on
+ * machines where double requires 8-byte alignment. That should be fixed,
+ * but in the meantime...
*
* Note: must use DatumGetPointer here, not DatumGetIntervalP, else some
* compilers optimize into double-aligned load/store anyway.
@@ -2398,8 +2400,8 @@ interval_accum(PG_FUNCTION_ARGS)
memcpy((void *) &N, DatumGetPointer(transdatums[1]), sizeof(Interval));
newsum = DatumGetIntervalP(DirectFunctionCall2(interval_pl,
- IntervalPGetDatum(&sumX),
- IntervalPGetDatum(newval)));
+ IntervalPGetDatum(&sumX),
+ IntervalPGetDatum(newval)));
N.time += 1;
transdatums[0] = IntervalPGetDatum(newsum);
@@ -2427,10 +2429,10 @@ interval_avg(PG_FUNCTION_ARGS)
elog(ERROR, "expected 2-element interval array");
/*
- * XXX memcpy, instead of just extracting a pointer, to work around
- * buggy array code: it won't ensure proper alignment of Interval
- * objects on machines where double requires 8-byte alignment. That
- * should be fixed, but in the meantime...
+ * XXX memcpy, instead of just extracting a pointer, to work around buggy
+ * array code: it won't ensure proper alignment of Interval objects on
+ * machines where double requires 8-byte alignment. That should be fixed,
+ * but in the meantime...
*
* Note: must use DatumGetPointer here, not DatumGetIntervalP, else some
* compilers optimize into double-aligned load/store anyway.
@@ -2689,7 +2691,7 @@ Datum
timestamp_text(PG_FUNCTION_ARGS)
{
/* Input is a Timestamp, but may as well leave it in Datum form */
- Datum timestamp = PG_GETARG_DATUM(0);
+ Datum timestamp = PG_GETARG_DATUM(0);
text *result;
char *str;
int len;
@@ -2728,7 +2730,7 @@ text_timestamp(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
errmsg("invalid input syntax for type timestamp: \"%s\"",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(str))))));
+ PointerGetDatum(str))))));
sp = VARDATA(str);
dp = dstr;
@@ -2750,7 +2752,7 @@ Datum
timestamptz_text(PG_FUNCTION_ARGS)
{
/* Input is a Timestamp, but may as well leave it in Datum form */
- Datum timestamp = PG_GETARG_DATUM(0);
+ Datum timestamp = PG_GETARG_DATUM(0);
text *result;
char *str;
int len;
@@ -2788,7 +2790,7 @@ text_timestamptz(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
errmsg("invalid input syntax for type timestamp with time zone: \"%s\"",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(str))))));
+ PointerGetDatum(str))))));
sp = VARDATA(str);
dp = dstr;
@@ -2815,7 +2817,7 @@ interval_text(PG_FUNCTION_ARGS)
int len;
str = DatumGetCString(DirectFunctionCall1(interval_out,
- IntervalPGetDatum(interval)));
+ IntervalPGetDatum(interval)));
len = strlen(str) + VARHDRSZ;
@@ -2849,7 +2851,7 @@ text_interval(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
errmsg("invalid input syntax for type interval: \"%s\"",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(str))))));
+ PointerGetDatum(str))))));
sp = VARDATA(str);
dp = dstr;
@@ -2870,7 +2872,7 @@ Datum
timestamp_trunc(PG_FUNCTION_ARGS)
{
text *units = PG_GETARG_TEXT_P(0);
- Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
Timestamp result;
int type,
val;
@@ -2898,26 +2900,27 @@ timestamp_trunc(PG_FUNCTION_ARGS)
switch (val)
{
case DTK_WEEK:
- {
- int woy;
-
- woy = date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday);
- /*
- * If it is week 52/53 and the month is January,
- * then the week must belong to the previous year.
- * Also, some December dates belong to the next year.
- */
- if (woy >= 52 && tm->tm_mon == 1)
- --tm->tm_year;
- if (woy <= 1 && tm->tm_mon == MONTHS_PER_YEAR)
- ++tm->tm_year;
- isoweek2date(woy, &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));
- tm->tm_hour = 0;
- tm->tm_min = 0;
- tm->tm_sec = 0;
- fsec = 0;
- break;
- }
+ {
+ int woy;
+
+ woy = date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday);
+
+ /*
+ * If it is week 52/53 and the month is January, then the
+ * week must belong to the previous year. Also, some
+ * December dates belong to the next year.
+ */
+ if (woy >= 52 && tm->tm_mon == 1)
+ --tm->tm_year;
+ if (woy <= 1 && tm->tm_mon == MONTHS_PER_YEAR)
+ ++tm->tm_year;
+ isoweek2date(woy, &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));
+ tm->tm_hour = 0;
+ tm->tm_min = 0;
+ tm->tm_sec = 0;
+ fsec = 0;
+ break;
+ }
case DTK_MILLENNIUM:
/* see comments in timestamptz_trunc */
if (tm->tm_year > 0)
@@ -3032,34 +3035,35 @@ timestamptz_trunc(PG_FUNCTION_ARGS)
switch (val)
{
case DTK_WEEK:
- {
- int woy;
-
- woy = date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday);
- /*
- * If it is week 52/53 and the month is January,
- * then the week must belong to the previous year.
- * Also, some December dates belong to the next year.
- */
- if (woy >= 52 && tm->tm_mon == 1)
- --tm->tm_year;
- if (woy <= 1 && tm->tm_mon == MONTHS_PER_YEAR)
- ++tm->tm_year;
- isoweek2date(woy, &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));
- tm->tm_hour = 0;
- tm->tm_min = 0;
- tm->tm_sec = 0;
- fsec = 0;
- redotz = true;
- break;
- }
+ {
+ int woy;
+
+ woy = date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday);
+
+ /*
+ * If it is week 52/53 and the month is January, then the
+ * week must belong to the previous year. Also, some
+ * December dates belong to the next year.
+ */
+ if (woy >= 52 && tm->tm_mon == 1)
+ --tm->tm_year;
+ if (woy <= 1 && tm->tm_mon == MONTHS_PER_YEAR)
+ ++tm->tm_year;
+ isoweek2date(woy, &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));
+ tm->tm_hour = 0;
+ tm->tm_min = 0;
+ tm->tm_sec = 0;
+ fsec = 0;
+ redotz = true;
+ break;
+ }
/* one may consider DTK_THOUSAND and DTK_HUNDRED... */
case DTK_MILLENNIUM:
/*
* truncating to the millennium? what is this supposed to
- * mean? let us put the first year of the millennium...
- * i.e. -1000, 1, 1001, 2001...
+ * mean? let us put the first year of the millennium... i.e.
+ * -1000, 1, 1001, 2001...
*/
if (tm->tm_year > 0)
tm->tm_year = ((tm->tm_year + 999) / 1000) * 1000 - 999;
@@ -3076,8 +3080,8 @@ timestamptz_trunc(PG_FUNCTION_ARGS)
case DTK_DECADE:
/*
- * truncating to the decade? first year of the decade.
- * must not be applied if year was truncated before!
+ * truncating to the decade? first year of the decade. must
+ * not be applied if year was truncated before!
*/
if (val != DTK_MILLENNIUM && val != DTK_CENTURY)
{
@@ -3126,8 +3130,8 @@ timestamptz_trunc(PG_FUNCTION_ARGS)
default:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("timestamp with time zone units \"%s\" not "
- "supported", lowunits)));
+ errmsg("timestamp with time zone units \"%s\" not "
+ "supported", lowunits)));
result = 0;
}
@@ -3143,8 +3147,8 @@ timestamptz_trunc(PG_FUNCTION_ARGS)
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("timestamp with time zone units \"%s\" not recognized",
- lowunits)));
+ errmsg("timestamp with time zone units \"%s\" not recognized",
+ lowunits)));
result = 0;
}
@@ -3181,7 +3185,7 @@ interval_trunc(PG_FUNCTION_ARGS)
{
switch (val)
{
- /* fall through */
+ /* fall through */
case DTK_MILLENNIUM:
/* caution: C division may have negative remainder */
tm->tm_year = (tm->tm_year / 1000) * 1000;
@@ -3241,7 +3245,7 @@ interval_trunc(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("interval units \"%s\" not recognized",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ PointerGetDatum(units))))));
*result = *interval;
}
@@ -3263,7 +3267,7 @@ isoweek2date(int woy, int *year, int *mon, int *mday)
if (!*year)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("cannot calculate week number without year information")));
+ errmsg("cannot calculate week number without year information")));
/* fourth day of current year */
day4 = date2j(*year, 1, 4);
@@ -3298,8 +3302,8 @@ date2isoweek(int year, int mon, int mday)
day0 = j2day(day4 - 1);
/*
- * We need the first week containing a Thursday, otherwise this day
- * falls into the previous year for purposes of counting weeks
+ * We need the first week containing a Thursday, otherwise this day falls
+ * into the previous year for purposes of counting weeks
*/
if (dayn < day4 - day0)
{
@@ -3312,8 +3316,8 @@ date2isoweek(int year, int mon, int mday)
result = (dayn - (day4 - day0)) / 7 + 1;
/*
- * Sometimes the last few days in a year will fall into the first week
- * of the next year, so check for this.
+ * Sometimes the last few days in a year will fall into the first week of
+ * the next year, so check for this.
*/
if (result >= 52)
{
@@ -3352,8 +3356,8 @@ date2isoyear(int year, int mon, int mday)
day0 = j2day(day4 - 1);
/*
- * We need the first week containing a Thursday, otherwise this day
- * falls into the previous year for purposes of counting weeks
+ * We need the first week containing a Thursday, otherwise this day falls
+ * into the previous year for purposes of counting weeks
*/
if (dayn < day4 - day0)
{
@@ -3368,8 +3372,8 @@ date2isoyear(int year, int mon, int mday)
result = (dayn - (day4 - day0)) / 7 + 1;
/*
- * Sometimes the last few days in a year will fall into the first week
- * of the next year, so check for this.
+ * Sometimes the last few days in a year will fall into the first week of
+ * the next year, so check for this.
*/
if (result >= 52)
{
@@ -3393,7 +3397,7 @@ Datum
timestamp_part(PG_FUNCTION_ARGS)
{
text *units = PG_GETARG_TEXT_P(0);
- Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
float8 result;
int type,
val;
@@ -3484,9 +3488,9 @@ timestamp_part(PG_FUNCTION_ARGS)
case DTK_DECADE:
/*
- * what is a decade wrt dates? let us assume that decade
- * 199 is 1990 thru 1999... decade 0 starts on year 1 BC,
- * and -1 is 11 BC thru 2 BC...
+ * what is a decade wrt dates? let us assume that decade 199
+ * is 1990 thru 1999... decade 0 starts on year 1 BC, and -1
+ * is 11 BC thru 2 BC...
*/
if (tm->tm_year >= 0)
result = tm->tm_year / 10;
@@ -3521,10 +3525,10 @@ timestamp_part(PG_FUNCTION_ARGS)
result = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday);
#ifdef HAVE_INT64_TIMESTAMP
result += ((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) +
- tm->tm_sec + (fsec / 1000000.0)) / (double)SECS_PER_DAY;
+ tm->tm_sec + (fsec / 1000000.0)) / (double) SECS_PER_DAY;
#else
result += ((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) +
- tm->tm_sec + fsec) / (double)SECS_PER_DAY;
+ tm->tm_sec + fsec) / (double) SECS_PER_DAY;
#endif
break;
@@ -3549,20 +3553,19 @@ timestamp_part(PG_FUNCTION_ARGS)
TimestampTz timestamptz;
/*
- * convert to timestamptz to produce consistent
- * results
+ * convert to timestamptz to produce consistent results
*/
if (timestamp2tm(timestamp, NULL, tm, &fsec, NULL, NULL) != 0)
ereport(ERROR,
- (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("timestamp out of range")));
+ (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+ errmsg("timestamp out of range")));
tz = DetermineTimeZoneOffset(tm, global_timezone);
if (tm2timestamp(tm, fsec, &tz, &timestamptz) != 0)
ereport(ERROR,
- (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
- errmsg("timestamp out of range")));
+ (errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
+ errmsg("timestamp out of range")));
#ifdef HAVE_INT64_TIMESTAMP
result = (timestamptz - SetEpochTimestamp()) / 1000000.0;
@@ -3601,7 +3604,7 @@ timestamp_part(PG_FUNCTION_ARGS)
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("timestamp units \"%s\" not recognized", lowunits)));
+ errmsg("timestamp units \"%s\" not recognized", lowunits)));
result = 0;
}
@@ -3657,12 +3660,12 @@ timestamptz_part(PG_FUNCTION_ARGS)
case DTK_TZ_MINUTE:
result = -tz;
result /= MINS_PER_HOUR;
- FMODULO(result, dummy, (double)MINS_PER_HOUR);
+ FMODULO(result, dummy, (double) MINS_PER_HOUR);
break;
case DTK_TZ_HOUR:
dummy = -tz;
- FMODULO(dummy, result, (double)SECS_PER_HOUR);
+ FMODULO(dummy, result, (double) SECS_PER_HOUR);
break;
case DTK_MICROSEC:
@@ -3749,18 +3752,18 @@ timestamptz_part(PG_FUNCTION_ARGS)
result = date2j(tm->tm_year, tm->tm_mon, tm->tm_mday);
#ifdef HAVE_INT64_TIMESTAMP
result += ((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) +
- tm->tm_sec + (fsec / 1000000.0)) / (double)SECS_PER_DAY;
+ tm->tm_sec + (fsec / 1000000.0)) / (double) SECS_PER_DAY;
#else
result += ((((tm->tm_hour * MINS_PER_HOUR) + tm->tm_min) * SECS_PER_MINUTE) +
- tm->tm_sec + fsec) / (double)SECS_PER_DAY;
+ tm->tm_sec + fsec) / (double) SECS_PER_DAY;
#endif
break;
default:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("timestamp with time zone units \"%s\" not supported",
- lowunits)));
+ errmsg("timestamp with time zone units \"%s\" not supported",
+ lowunits)));
result = 0;
}
@@ -3771,7 +3774,7 @@ timestamptz_part(PG_FUNCTION_ARGS)
{
case DTK_EPOCH:
#ifdef HAVE_INT64_TIMESTAMP
- result = (timestamp - SetEpochTimestamp()) /1000000.0;
+ result = (timestamp - SetEpochTimestamp()) / 1000000.0;
#else
result = timestamp - SetEpochTimestamp();
#endif
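For reference, the DTK_EPOCH case above subtracts SetEpochTimestamp() (the timestamp value of 1970-01-01) and, with integer timestamps, divides by 1,000,000 because values are stored as microseconds since 2000-01-01. Below is a standalone sketch of that arithmetic, not part of the patch; the offset constant and helper are local names, not backend symbols.

#include <stdio.h>
#include <stdint.h>

/* Seconds between 1970-01-01 and 2000-01-01 (10957 days). */
#define UNIX_TO_PG_EPOCH_SECS 946684800L

/* extract(epoch) for an integer timestamp counted in microseconds since 2000-01-01. */
static double
pg_timestamp_to_unix_epoch(int64_t ts_usec)
{
    return ts_usec / 1000000.0 + UNIX_TO_PG_EPOCH_SECS;
}

int main(void)
{
    /* 2005-10-15 02:49:30 UTC is 182659770 seconds after 2000-01-01 */
    int64_t ts = INT64_C(182659770) * 1000000;

    printf("%.0f\n", pg_timestamp_to_unix_epoch(ts));   /* 1129344570 */
    return 0;
}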
@@ -3797,8 +3800,8 @@ timestamptz_part(PG_FUNCTION_ARGS)
default:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("timestamp with time zone units \"%s\" not supported",
- lowunits)));
+ errmsg("timestamp with time zone units \"%s\" not supported",
+ lowunits)));
result = 0;
}
}
@@ -3806,8 +3809,8 @@ timestamptz_part(PG_FUNCTION_ARGS)
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("timestamp with time zone units \"%s\" not recognized",
- lowunits)));
+ errmsg("timestamp with time zone units \"%s\" not recognized",
+ lowunits)));
result = 0;
}
@@ -3913,8 +3916,8 @@ interval_part(PG_FUNCTION_ARGS)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("interval units \"%s\" not supported",
- DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ DatumGetCString(DirectFunctionCall1(textout,
+ PointerGetDatum(units))))));
result = 0;
}
@@ -3933,7 +3936,7 @@ interval_part(PG_FUNCTION_ARGS)
result = interval->time;
#endif
result += (DAYS_PER_YEAR * SECS_PER_DAY) * (interval->month / MONTHS_PER_YEAR);
- result += ((double)DAYS_PER_MONTH * SECS_PER_DAY) * (interval->month % MONTHS_PER_YEAR);
+ result += ((double) DAYS_PER_MONTH * SECS_PER_DAY) * (interval->month % MONTHS_PER_YEAR);
result += interval->day * SECS_PER_DAY;
}
else
@@ -3942,7 +3945,7 @@ interval_part(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("interval units \"%s\" not recognized",
DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(units))))));
+ PointerGetDatum(units))))));
result = 0;
}
@@ -3950,9 +3953,9 @@ interval_part(PG_FUNCTION_ARGS)
}
-/* timestamp_zone()
- * Encode timestamp type with specified time zone.
- * This function is just timestamp2timestamptz() except instead of
+/* timestamp_zone()
+ * Encode timestamp type with specified time zone.
+ * This function is just timestamp2timestamptz() except instead of
* shifting to the global timezone, we shift to the specified timezone.
* This is different from the other AT TIME ZONE cases because instead
 * of shifting _to_ a new time zone, it sets the time to _be_ the
@@ -3963,20 +3966,20 @@ timestamp_zone(PG_FUNCTION_ARGS)
{
text *zone = PG_GETARG_TEXT_P(0);
Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
- TimestampTz result;
+ TimestampTz result;
int tz;
- pg_tz *tzp;
- char tzname[TZ_STRLEN_MAX + 1];
- int len;
-
+ pg_tz *tzp;
+ char tzname[TZ_STRLEN_MAX + 1];
+ int len;
+
if (TIMESTAMP_NOT_FINITE(timestamp))
PG_RETURN_TIMESTAMPTZ(timestamp);
/*
- * Look up the requested timezone. First we look in the timezone
- * database (to handle cases like "America/New_York"), and if that
- * fails, we look in the date token table (to handle cases like "EST").
- */
+ * Look up the requested timezone. First we look in the timezone database
+ * (to handle cases like "America/New_York"), and if that fails, we look
+ * in the date token table (to handle cases like "EST").
+ */
len = Min(VARSIZE(zone) - VARHDRSZ, TZ_STRLEN_MAX);
memcpy(tzname, VARDATA(zone), len);
tzname[len] = '\0';
@@ -3985,7 +3988,7 @@ timestamp_zone(PG_FUNCTION_ARGS)
{
/* Apply the timezone change */
struct pg_tm tm;
- fsec_t fsec;
+ fsec_t fsec;
if (timestamp2tm(timestamp, NULL, &tm, &fsec, NULL, tzp) != 0)
ereport(ERROR,
@@ -4032,7 +4035,7 @@ Datum
timestamp_izone(PG_FUNCTION_ARGS)
{
Interval *zone = PG_GETARG_INTERVAL_P(0);
- Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(1);
TimestampTz result;
int tz;
@@ -4042,9 +4045,9 @@ timestamp_izone(PG_FUNCTION_ARGS)
if (zone->month != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("interval time zone \"%s\" must not specify month",
- DatumGetCString(DirectFunctionCall1(interval_out,
- PointerGetDatum(zone))))));
+ errmsg("interval time zone \"%s\" must not specify month",
+ DatumGetCString(DirectFunctionCall1(interval_out,
+ PointerGetDatum(zone))))));
#ifdef HAVE_INT64_TIMESTAMP
tz = zone->time / USECS_PER_SEC;
@@ -4063,7 +4066,7 @@ timestamp_izone(PG_FUNCTION_ARGS)
Datum
timestamp_timestamptz(PG_FUNCTION_ARGS)
{
- Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
+ Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
PG_RETURN_TIMESTAMPTZ(timestamp2timestamptz(timestamp));
}
@@ -4139,17 +4142,17 @@ timestamptz_zone(PG_FUNCTION_ARGS)
Timestamp result;
int tz;
pg_tz *tzp;
- char tzname[TZ_STRLEN_MAX + 1];
- int len;
+ char tzname[TZ_STRLEN_MAX + 1];
+ int len;
if (TIMESTAMP_NOT_FINITE(timestamp))
PG_RETURN_TIMESTAMP(timestamp);
/*
- * Look up the requested timezone. First we look in the timezone
- * database (to handle cases like "America/New_York"), and if that
- * fails, we look in the date token table (to handle cases like "EST").
- */
+ * Look up the requested timezone. First we look in the timezone database
+ * (to handle cases like "America/New_York"), and if that fails, we look
+ * in the date token table (to handle cases like "EST").
+ */
len = Min(VARSIZE(zone) - VARHDRSZ, TZ_STRLEN_MAX);
memcpy(tzname, VARDATA(zone), len);
tzname[len] = '\0';
@@ -4158,7 +4161,7 @@ timestamptz_zone(PG_FUNCTION_ARGS)
{
/* Apply the timezone change */
struct pg_tm tm;
- fsec_t fsec;
+ fsec_t fsec;
if (timestamp2tm(timestamp, &tz, &tm, &fsec, NULL, tzp) != 0)
ereport(ERROR,
@@ -4215,9 +4218,9 @@ timestamptz_izone(PG_FUNCTION_ARGS)
if (zone->month != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("interval time zone \"%s\" must not specify month",
- DatumGetCString(DirectFunctionCall1(interval_out,
- PointerGetDatum(zone))))));
+ errmsg("interval time zone \"%s\" must not specify month",
+ DatumGetCString(DirectFunctionCall1(interval_out,
+ PointerGetDatum(zone))))));
#ifdef HAVE_INT64_TIMESTAMP
tz = -(zone->time / USECS_PER_SEC);
diff --git a/src/backend/utils/adt/varbit.c b/src/backend/utils/adt/varbit.c
index 370d3e81101..7dbbed16f69 100644
--- a/src/backend/utils/adt/varbit.c
+++ b/src/backend/utils/adt/varbit.c
@@ -9,7 +9,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varbit.c,v 1.46 2005/09/24 17:53:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varbit.c,v 1.47 2005/10/15 02:49:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -83,8 +83,8 @@ bit_in(PG_FUNCTION_ARGS)
else
{
/*
- * Otherwise it's binary. This allows things like cast('1001' as
- * bit) to work transparently.
+ * Otherwise it's binary. This allows things like cast('1001' as bit)
+ * to work transparently.
*/
bit_not_hex = true;
sp = input_string;
@@ -98,16 +98,16 @@ bit_in(PG_FUNCTION_ARGS)
bitlen = slen * 4;
/*
- * Sometimes atttypmod is not supplied. If it is supplied we need to
- * make sure that the bitstring fits.
+ * Sometimes atttypmod is not supplied. If it is supplied we need to make
+ * sure that the bitstring fits.
*/
if (atttypmod <= 0)
atttypmod = bitlen;
else if (bitlen != atttypmod)
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_LENGTH_MISMATCH),
- errmsg("bit string length %d does not match type bit(%d)",
- bitlen, atttypmod)));
+ errmsg("bit string length %d does not match type bit(%d)",
+ bitlen, atttypmod)));
len = VARBITTOTALLEN(atttypmod);
/* set to 0 so that *r is always initialised and string is zero-padded */
@@ -204,8 +204,8 @@ bit_out(PG_FUNCTION_ARGS)
}
/*
- * Go back one step if we printed a hex number that was not part of
- * the bitstring anymore
+ * Go back one step if we printed a hex number that was not part of the
+ * bitstring anymore
*/
if (i > len)
r--;
@@ -222,6 +222,7 @@ Datum
bit_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -239,14 +240,14 @@ bit_recv(PG_FUNCTION_ARGS)
errmsg("invalid length in external bit string")));
/*
- * Sometimes atttypmod is not supplied. If it is supplied we need to
- * make sure that the bitstring fits.
+ * Sometimes atttypmod is not supplied. If it is supplied we need to make
+ * sure that the bitstring fits.
*/
if (atttypmod > 0 && bitlen != atttypmod)
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_LENGTH_MISMATCH),
- errmsg("bit string length %d does not match type bit(%d)",
- bitlen, atttypmod)));
+ errmsg("bit string length %d does not match type bit(%d)",
+ bitlen, atttypmod)));
len = VARBITTOTALLEN(bitlen);
result = (VarBit *) palloc(len);
@@ -301,8 +302,8 @@ bit(PG_FUNCTION_ARGS)
if (!isExplicit)
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_LENGTH_MISMATCH),
- errmsg("bit string length %d does not match type bit(%d)",
- VARBITLEN(arg), len)));
+ errmsg("bit string length %d does not match type bit(%d)",
+ VARBITLEN(arg), len)));
rlen = VARBITTOTALLEN(len);
/* set to 0 so that string is zero-padded */
@@ -314,9 +315,9 @@ bit(PG_FUNCTION_ARGS)
Min(VARBITBYTES(result), VARBITBYTES(arg)));
/*
- * Make sure last byte is zero-padded if needed. This is useless but
- * safe if source data was shorter than target length (we assume the
- * last byte of the source data was itself correctly zero-padded).
+ * Make sure last byte is zero-padded if needed. This is useless but safe
+ * if source data was shorter than target length (we assume the last byte
+ * of the source data was itself correctly zero-padded).
*/
ipad = VARBITPAD(result);
if (ipad > 0)
@@ -378,8 +379,8 @@ varbit_in(PG_FUNCTION_ARGS)
bitlen = slen * 4;
/*
- * Sometimes atttypmod is not supplied. If it is supplied we need to
- * make sure that the bitstring fits.
+ * Sometimes atttypmod is not supplied. If it is supplied we need to make
+ * sure that the bitstring fits.
*/
if (atttypmod <= 0)
atttypmod = bitlen;
@@ -500,6 +501,7 @@ Datum
varbit_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -517,8 +519,8 @@ varbit_recv(PG_FUNCTION_ARGS)
errmsg("invalid length in external bit string")));
/*
- * Sometimes atttypmod is not supplied. If it is supplied we need to
- * make sure that the bitstring fits.
+ * Sometimes atttypmod is not supplied. If it is supplied we need to make
+ * sure that the bitstring fits.
*/
if (atttypmod > 0 && bitlen > atttypmod)
ereport(ERROR,
@@ -874,8 +876,8 @@ bitsubstr(PG_FUNCTION_ARGS)
else
{
/*
- * OK, we've got a true substring starting at position s1-1 and
- * ending at position e1-1
+ * OK, we've got a true substring starting at position s1-1 and ending
+ * at position e1-1
*/
rbitlen = e1 - s1;
len = VARBITTOTALLEN(rbitlen);
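The comment rewrapped in bit() above notes that the last byte must stay zero-padded when the source bit string was shorter than the target length. Here is a minimal sketch of that masking step, assuming the usual varbit layout (bits packed most-significant-bit first, unused low-order bits in the final byte); it is not part of the patch and the names are local, not varbit.c macros.

#include <stdint.h>
#include <stdio.h>

/* Force the unused low-order bits of the last byte of a bit string to zero. */
static void
zero_pad_last_byte(uint8_t *data, int bitlen)
{
    int pad = (8 - bitlen % 8) % 8;         /* unused bits in the last byte */

    if (pad > 0)
        data[(bitlen + 7) / 8 - 1] &= (uint8_t) (0xFF << pad);
}

int main(void)
{
    uint8_t bits[2] = {0xA5, 0xFF};         /* pretend only 10 bits are significant */

    zero_pad_last_byte(bits, 10);
    printf("%02X %02X\n", bits[0], bits[1]);    /* A5 C0 */
    return 0;
}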
diff --git a/src/backend/utils/adt/varchar.c b/src/backend/utils/adt/varchar.c
index 84fcc97ccdb..1377e7cc6d2 100644
--- a/src/backend/utils/adt/varchar.c
+++ b/src/backend/utils/adt/varchar.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varchar.c,v 1.112 2005/07/29 12:59:15 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varchar.c,v 1.113 2005/10/15 02:49:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -81,7 +81,7 @@ bpchar_input(const char *s, size_t len, int32 atttypmod)
maxlen = len;
else
{
- size_t charlen; /* number of CHARACTERS in the input */
+ size_t charlen; /* number of CHARACTERS in the input */
maxlen = atttypmod - VARHDRSZ;
charlen = pg_mbstrlen_with_len(s, len);
@@ -106,16 +106,16 @@ bpchar_input(const char *s, size_t len, int32 atttypmod)
}
/*
- * Now we set maxlen to the necessary byte length, not
- * the number of CHARACTERS!
+ * Now we set maxlen to the necessary byte length, not the number
+ * of CHARACTERS!
*/
maxlen = len = mbmaxlen;
}
else
{
/*
- * Now we set maxlen to the necessary byte length, not
- * the number of CHARACTERS!
+ * Now we set maxlen to the necessary byte length, not the number
+ * of CHARACTERS!
*/
maxlen = len + (maxlen - charlen);
}
@@ -141,6 +141,7 @@ Datum
bpcharin(PG_FUNCTION_ARGS)
{
char *s = PG_GETARG_CSTRING(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -178,6 +179,7 @@ Datum
bpcharrecv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -226,8 +228,8 @@ bpchar(PG_FUNCTION_ARGS)
char *r;
char *s;
int i;
- int charlen; /* number of characters in the input string
- * + VARHDRSZ */
+ int charlen; /* number of characters in the input string +
+ * VARHDRSZ */
/* No work if typmod is invalid */
if (maxlen < (int32) VARHDRSZ)
@@ -254,24 +256,24 @@ bpchar(PG_FUNCTION_ARGS)
for (i = maxmblen - VARHDRSZ; i < len - VARHDRSZ; i++)
if (*(VARDATA(source) + i) != ' ')
ereport(ERROR,
- (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
- errmsg("value too long for type character(%d)",
- maxlen - VARHDRSZ)));
+ (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
+ errmsg("value too long for type character(%d)",
+ maxlen - VARHDRSZ)));
}
len = maxmblen;
/*
- * XXX: at this point, maxlen is the necessary byte
- * length+VARHDRSZ, not the number of CHARACTERS!
+ * XXX: at this point, maxlen is the necessary byte length+VARHDRSZ,
+ * not the number of CHARACTERS!
*/
maxlen = len;
}
else
{
/*
- * XXX: at this point, maxlen is the necessary byte
- * length+VARHDRSZ, not the number of CHARACTERS!
+ * XXX: at this point, maxlen is the necessary byte length+VARHDRSZ,
+ * not the number of CHARACTERS!
*/
maxlen = len + (maxlen - charlen);
}
@@ -407,8 +409,8 @@ varchar_input(const char *s, size_t len, int32 atttypmod)
if (s[j] != ' ')
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
- errmsg("value too long for type character varying(%d)",
- (int) maxlen)));
+ errmsg("value too long for type character varying(%d)",
+ (int) maxlen)));
}
len = mbmaxlen;
@@ -429,6 +431,7 @@ Datum
varcharin(PG_FUNCTION_ARGS)
{
char *s = PG_GETARG_CSTRING(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
@@ -466,11 +469,12 @@ Datum
varcharrecv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
+
#ifdef NOT_USED
Oid typelem = PG_GETARG_OID(1);
#endif
int32 atttypmod = PG_GETARG_INT32(2);
- VarChar *result;
+ VarChar *result;
char *str;
int nbytes;
@@ -531,8 +535,8 @@ varchar(PG_FUNCTION_ARGS)
if (*(VARDATA(source) + i) != ' ')
ereport(ERROR,
(errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
- errmsg("value too long for type character varying(%d)",
- maxlen - VARHDRSZ)));
+ errmsg("value too long for type character varying(%d)",
+ maxlen - VARHDRSZ)));
}
len = maxmblen + VARHDRSZ;
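bpchar() and varchar() above share one rule when a value exceeds the declared length: truncation is silent only if every character being cut is a space, otherwise an error is raised (the byte-accurate clipping itself goes through pg_mbcharcliplen()). A single-byte-encoding sketch of that check follows; it is not part of the patch and uses local names only.

#include <stdbool.h>
#include <stdio.h>

/*
 * Would casting "s" (len characters) to character varying(maxchars) lose data?
 * Only trailing spaces may be dropped silently.
 */
static bool
truncation_is_error(const char *s, int len, int maxchars)
{
    int i;

    for (i = maxchars; i < len; i++)
        if (s[i] != ' ')
            return true;        /* a non-space character would be cut */
    return false;
}

int main(void)
{
    printf("%d\n", truncation_is_error("abc   ", 6, 3));   /* 0: only spaces cut */
    printf("%d\n", truncation_is_error("abcdef", 6, 3));   /* 1: would raise error */
    return 0;
}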
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index dcd2b7ff42c..07ba4dc6848 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.135 2005/09/24 17:53:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.136 2005/10/15 02:49:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -147,8 +147,7 @@ byteain(PG_FUNCTION_ARGS)
else
{
/*
- * We should never get here. The first pass should not allow
- * it.
+ * We should never get here. The first pass should not allow it.
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
@@ -550,8 +549,8 @@ text_substring(Datum str, int32 start, int32 length, bool length_not_specified)
{
S1 = Max(S, 1);
- if (length_not_specified) /* special case - get length to
- * end of string */
+ if (length_not_specified) /* special case - get length to end of
+ * string */
L1 = -1;
else
{
@@ -559,18 +558,18 @@ text_substring(Datum str, int32 start, int32 length, bool length_not_specified)
int E = S + length;
/*
- * A negative value for L is the only way for the end position
- * to be before the start. SQL99 says to throw an error.
+ * A negative value for L is the only way for the end position to
+ * be before the start. SQL99 says to throw an error.
*/
if (E < S)
ereport(ERROR,
(errcode(ERRCODE_SUBSTRING_ERROR),
- errmsg("negative substring length not allowed")));
+ errmsg("negative substring length not allowed")));
/*
- * A zero or negative value for the end position can happen if
- * the start was negative or one. SQL99 says to return a
- * zero-length string.
+ * A zero or negative value for the end position can happen if the
+ * start was negative or one. SQL99 says to return a zero-length
+ * string.
*/
if (E < 1)
return PG_STR_GET_TEXT("");
@@ -579,9 +578,9 @@ text_substring(Datum str, int32 start, int32 length, bool length_not_specified)
}
/*
- * If the start position is past the end of the string, SQL99 says
- * to return a zero-length string -- PG_GETARG_TEXT_P_SLICE() will
- * do that for us. Convert to zero-based starting position
+ * If the start position is past the end of the string, SQL99 says to
+ * return a zero-length string -- PG_GETARG_TEXT_P_SLICE() will do
+ * that for us. Convert to zero-based starting position
*/
return DatumGetTextPSlice(str, S1 - 1, L1);
}
@@ -589,8 +588,8 @@ text_substring(Datum str, int32 start, int32 length, bool length_not_specified)
{
/*
* When encoding max length is > 1, we can't get LC without
- * detoasting, so we'll grab a conservatively large slice now and
- * go back later to do the right thing
+ * detoasting, so we'll grab a conservatively large slice now and go
+ * back later to do the right thing
*/
int32 slice_start;
int32 slice_size;
@@ -603,38 +602,38 @@ text_substring(Datum str, int32 start, int32 length, bool length_not_specified)
text *ret;
/*
- * if S is past the end of the string, the tuple toaster will
- * return a zero-length string to us
+ * if S is past the end of the string, the tuple toaster will return a
+ * zero-length string to us
*/
S1 = Max(S, 1);
/*
- * We need to start at position zero because there is no way to
- * know in advance which byte offset corresponds to the supplied
- * start position.
+ * We need to start at position zero because there is no way to know
+ * in advance which byte offset corresponds to the supplied start
+ * position.
*/
slice_start = 0;
- if (length_not_specified) /* special case - get length to
- * end of string */
+ if (length_not_specified) /* special case - get length to end of
+ * string */
slice_size = L1 = -1;
else
{
int E = S + length;
/*
- * A negative value for L is the only way for the end position
- * to be before the start. SQL99 says to throw an error.
+ * A negative value for L is the only way for the end position to
+ * be before the start. SQL99 says to throw an error.
*/
if (E < S)
ereport(ERROR,
(errcode(ERRCODE_SUBSTRING_ERROR),
- errmsg("negative substring length not allowed")));
+ errmsg("negative substring length not allowed")));
/*
- * A zero or negative value for the end position can happen if
- * the start was negative or one. SQL99 says to return a
- * zero-length string.
+ * A zero or negative value for the end position can happen if the
+ * start was negative or one. SQL99 says to return a zero-length
+ * string.
*/
if (E < 1)
return PG_STR_GET_TEXT("");
@@ -646,9 +645,8 @@ text_substring(Datum str, int32 start, int32 length, bool length_not_specified)
L1 = E - S1;
/*
- * Total slice size in bytes can't be any longer than the
- * start position plus substring length times the encoding max
- * length.
+ * Total slice size in bytes can't be any longer than the start
+ * position plus substring length times the encoding max length.
*/
slice_size = (S1 + L1) * eml;
}
@@ -662,16 +660,15 @@ text_substring(Datum str, int32 start, int32 length, bool length_not_specified)
slice_strlen = pg_mbstrlen_with_len(VARDATA(slice), VARSIZE(slice) - VARHDRSZ);
/*
- * Check that the start position wasn't > slice_strlen. If so,
- * SQL99 says to return a zero-length string.
+ * Check that the start position wasn't > slice_strlen. If so, SQL99
+ * says to return a zero-length string.
*/
if (S1 > slice_strlen)
return PG_STR_GET_TEXT("");
/*
- * Adjust L1 and E1 now that we know the slice string length.
- * Again remember that S1 is one based, and slice_start is zero
- * based.
+ * Adjust L1 and E1 now that we know the slice string length. Again
+ * remember that S1 is one based, and slice_start is zero based.
*/
if (L1 > -1)
E1 = Min(S1 + L1, slice_start + 1 + slice_strlen);
@@ -679,8 +676,7 @@ text_substring(Datum str, int32 start, int32 length, bool length_not_specified)
E1 = slice_start + 1 + slice_strlen;
/*
- * Find the start position in the slice; remember S1 is not zero
- * based
+ * Find the start position in the slice; remember S1 is not zero based
*/
p = VARDATA(slice);
for (i = 0; i < S1 - 1; i++)
@@ -834,11 +830,10 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
int result;
/*
- * Unfortunately, there is no strncoll(), so in the non-C locale case
- * we have to do some memory copying. This turns out to be
- * significantly slower, so we optimize the case where LC_COLLATE is
- * C. We also try to optimize relatively-short strings by avoiding
- * palloc/pfree overhead.
+ * Unfortunately, there is no strncoll(), so in the non-C locale case we
+ * have to do some memory copying. This turns out to be significantly
+ * slower, so we optimize the case where LC_COLLATE is C. We also try to
+ * optimize relatively-short strings by avoiding palloc/pfree overhead.
*/
if (lc_collate_is_c())
{
@@ -859,11 +854,11 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
/* Win32 does not have UTF-8, so we need to map to UTF-16 */
if (GetDatabaseEncoding() == PG_UTF8)
{
- int a1len;
- int a2len;
- int r;
+ int a1len;
+ int a2len;
+ int r;
- if (len1 >= STACKBUFLEN/2)
+ if (len1 >= STACKBUFLEN / 2)
{
a1len = len1 * 2 + 2;
a1p = palloc(a1len);
@@ -873,7 +868,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
a1len = STACKBUFLEN;
a1p = a1buf;
}
- if (len2 >= STACKBUFLEN/2)
+ if (len2 >= STACKBUFLEN / 2)
{
a2len = len2 * 2 + 2;
a2p = palloc(a2len);
@@ -890,7 +885,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
else
{
r = MultiByteToWideChar(CP_UTF8, 0, arg1, len1,
- (LPWSTR) a1p, a1len/2);
+ (LPWSTR) a1p, a1len / 2);
if (!r)
ereport(ERROR,
(errmsg("could not convert string to UTF16: %lu",
@@ -903,7 +898,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
else
{
r = MultiByteToWideChar(CP_UTF8, 0, arg2, len2,
- (LPWSTR) a2p, a2len/2);
+ (LPWSTR) a2p, a2len / 2);
if (!r)
ereport(ERROR,
(errmsg("could not convert string to UTF16: %lu",
@@ -913,7 +908,8 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
errno = 0;
result = wcscoll((LPWSTR) a1p, (LPWSTR) a2p);
- if (result == 2147483647) /* _NLSCMPERROR; missing from mingw headers */
+ if (result == 2147483647) /* _NLSCMPERROR; missing from mingw
+ * headers */
ereport(ERROR,
(errmsg("could not compare unicode strings: %d",
errno)));
@@ -925,7 +921,7 @@ varstr_cmp(char *arg1, int len1, char *arg2, int len2)
return result;
}
-#endif /* WIN32 */
+#endif /* WIN32 */
if (len1 >= STACKBUFLEN)
a1p = (char *) palloc(len1 + 1);
@@ -1349,9 +1345,8 @@ bytea_substr(PG_FUNCTION_ARGS)
if (fcinfo->nargs == 2)
{
/*
- * Not passed a length - PG_GETARG_BYTEA_P_SLICE() grabs
- * everything to the end of the string if we pass it a negative
- * value for length.
+ * Not passed a length - PG_GETARG_BYTEA_P_SLICE() grabs everything to
+ * the end of the string if we pass it a negative value for length.
*/
L1 = -1;
}
@@ -1361,8 +1356,8 @@ bytea_substr(PG_FUNCTION_ARGS)
int E = S + PG_GETARG_INT32(2);
/*
- * A negative value for L is the only way for the end position to
- * be before the start. SQL99 says to throw an error.
+ * A negative value for L is the only way for the end position to be
+ * before the start. SQL99 says to throw an error.
*/
if (E < S)
ereport(ERROR,
@@ -1382,8 +1377,8 @@ bytea_substr(PG_FUNCTION_ARGS)
/*
* If the start position is past the end of the string, SQL99 says to
- * return a zero-length string -- PG_GETARG_TEXT_P_SLICE() will do
- * that for us. Convert to zero-based starting position
+ * return a zero-length string -- PG_GETARG_TEXT_P_SLICE() will do that
+ * for us. Convert to zero-based starting position
*/
PG_RETURN_BYTEA_P(PG_GETARG_BYTEA_P_SLICE(0, S1 - 1, L1));
}
@@ -1686,7 +1681,7 @@ textToQualifiedNameList(text *textval)
/* Convert to C string (handles possible detoasting). */
/* Note we rely on being able to modify rawname below. */
rawname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(textval)));
+ PointerGetDatum(textval)));
if (!SplitIdentifierString(rawname, '.', &namelist))
ereport(ERROR,
@@ -1788,14 +1783,13 @@ SplitIdentifierString(char *rawstring, char separator,
return false; /* empty unquoted name not allowed */
/*
- * Downcase the identifier, using same code as main lexer
- * does.
+ * Downcase the identifier, using same code as main lexer does.
*
* XXX because we want to overwrite the input in-place, we cannot
- * support a downcasing transformation that increases the
- * string length. This is not a problem given the current
- * implementation of downcase_truncate_identifier, but we'll
- * probably have to do something about this someday.
+ * support a downcasing transformation that increases the string
+ * length. This is not a problem given the current implementation
+ * of downcase_truncate_identifier, but we'll probably have to do
+ * something about this someday.
*/
len = endp - curname;
downname = downcase_truncate_identifier(curname, len, false);
@@ -2083,12 +2077,14 @@ check_replace_text_has_escape_char(const text *replace_text)
if (pg_database_encoding_max_length() == 1)
{
for (; p < p_end; p++)
- if (*p == '\\') return true;
+ if (*p == '\\')
+ return true;
}
else
{
for (; p < p_end; p += pg_mblen(p))
- if (*p == '\\') return true;
+ if (*p == '\\')
+ return true;
}
return false;
@@ -2100,7 +2096,7 @@ check_replace_text_has_escape_char(const text *replace_text)
*/
static void
appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
- regmatch_t *pmatch, text *src_text)
+ regmatch_t *pmatch, text *src_text)
{
const char *p = VARDATA(replace_text);
const char *p_end = p + (VARSIZE(replace_text) - VARHDRSZ);
@@ -2129,19 +2125,20 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
}
/*
- * Copy the text when there is a text in the left of escape char
- * or escape char is not found.
+ * Copy the text when there is a text in the left of escape char or
+ * escape char is not found.
*/
if (ch_cnt)
{
- text *append_text = text_substring(PointerGetDatum(replace_text),
- substr_start, ch_cnt, false);
+ text *append_text = text_substring(PointerGetDatum(replace_text),
+ substr_start, ch_cnt, false);
+
appendStringInfoText(str, append_text);
pfree(append_text);
}
substr_start += ch_cnt + 1;
- if (p >= p_end) /* When escape char is not found. */
+ if (p >= p_end) /* When escape char is not found. */
break;
/* See the next character of escape char. */
@@ -2151,7 +2148,8 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
if (*p >= '1' && *p <= '9')
{
/* Use the back reference of regexp. */
- int idx = *p - '0';
+ int idx = *p - '0';
+
so = pmatch[idx].rm_so;
eo = pmatch[idx].rm_eo;
p++;
@@ -2169,8 +2167,9 @@ appendStringInfoRegexpSubstr(StringInfo str, text *replace_text,
if (so != -1 && eo != -1)
{
/* Copy the text that is back reference of regexp. */
- text *append_text = text_substring(PointerGetDatum(src_text),
- so + 1, (eo - so), false);
+ text *append_text = text_substring(PointerGetDatum(src_text),
+ so + 1, (eo - so), false);
+
appendStringInfoText(str, append_text);
pfree(append_text);
}
@@ -2189,9 +2188,9 @@ replace_text_regexp(PG_FUNCTION_ARGS)
text *ret_text;
text *src_text = PG_GETARG_TEXT_P(0);
int src_text_len = VARSIZE(src_text) - VARHDRSZ;
- regex_t *re = (regex_t *)PG_GETARG_POINTER(1);
+ regex_t *re = (regex_t *) PG_GETARG_POINTER(1);
text *replace_text = PG_GETARG_TEXT_P(2);
- bool global = PG_GETARG_BOOL(3);
+ bool global = PG_GETARG_BOOL(3);
StringInfo str = makeStringInfo();
int regexec_result;
regmatch_t pmatch[REGEXP_REPLACE_BACKREF_CNT];
@@ -2214,33 +2213,34 @@ replace_text_regexp(PG_FUNCTION_ARGS)
data,
data_len,
search_start,
- NULL, /* no details */
+ NULL, /* no details */
REGEXP_REPLACE_BACKREF_CNT,
pmatch,
0);
if (regexec_result != REG_OKAY && regexec_result != REG_NOMATCH)
{
- char errMsg[100];
+ char errMsg[100];
/* re failed??? */
pg_regerror(regexec_result, re, errMsg, sizeof(errMsg));
ereport(ERROR,
- (errcode(ERRCODE_INVALID_REGULAR_EXPRESSION),
- errmsg("regular expression failed: %s", errMsg)));
+ (errcode(ERRCODE_INVALID_REGULAR_EXPRESSION),
+ errmsg("regular expression failed: %s", errMsg)));
}
if (regexec_result == REG_NOMATCH)
break;
- /*
- * Copy the text when there is a text in the left of matched position.
- */
+ /*
+ * Copy the text when there is a text in the left of matched position.
+ */
if (pmatch[0].rm_so - data_pos > 0)
{
- text *left_text = text_substring(PointerGetDatum(src_text),
- data_pos + 1,
- pmatch[0].rm_so - data_pos, false);
+ text *left_text = text_substring(PointerGetDatum(src_text),
+ data_pos + 1,
+ pmatch[0].rm_so - data_pos, false);
+
appendStringInfoText(str, left_text);
pfree(left_text);
}
@@ -2270,13 +2270,14 @@ replace_text_regexp(PG_FUNCTION_ARGS)
}
/*
- * Copy the text when there is a text at the right of last matched
- * or regexp is not matched.
+ * Copy the text when there is a text at the right of last matched or
+ * regexp is not matched.
*/
if (data_pos < data_len)
{
- text *right_text = text_substring(PointerGetDatum(src_text),
- data_pos + 1, -1, true);
+ text *right_text = text_substring(PointerGetDatum(src_text),
+ data_pos + 1, -1, true);
+
appendStringInfoText(str, right_text);
pfree(right_text);
}
@@ -2392,7 +2393,7 @@ text_to_array(PG_FUNCTION_ARGS)
*/
if (fldsep_len < 1)
PG_RETURN_ARRAYTYPE_P(create_singleton_array(fcinfo, TEXTOID,
- CStringGetDatum(inputstring), 1));
+ CStringGetDatum(inputstring), 1));
/* start with end position holding the initial start position */
end_posn = 0;
@@ -2409,17 +2410,17 @@ text_to_array(PG_FUNCTION_ARGS)
if (fldnum == 1)
{
/*
- * first element return one element, 1D, array using the
- * input string
+ * first element return one element, 1D, array using the input
+ * string
*/
PG_RETURN_ARRAYTYPE_P(create_singleton_array(fcinfo, TEXTOID,
- CStringGetDatum(inputstring), 1));
+ CStringGetDatum(inputstring), 1));
}
else
{
/* otherwise create array and exit */
PG_RETURN_ARRAYTYPE_P(makeArrayResult(astate,
- CurrentMemoryContext));
+ CurrentMemoryContext));
}
}
else if (start_posn == 0)
@@ -2439,7 +2440,7 @@ text_to_array(PG_FUNCTION_ARGS)
/* interior field requested */
result_text = text_substring(PointerGetDatum(inputstring),
start_posn + fldsep_len,
- end_posn - start_posn - fldsep_len,
+ end_posn - start_posn - fldsep_len,
false);
}
@@ -2489,14 +2490,14 @@ array_to_text(PG_FUNCTION_ARGS)
/*
* We arrange to look up info about element type, including its output
- * conversion proc, only once per series of calls, assuming the
- * element type doesn't change underneath us.
+ * conversion proc, only once per series of calls, assuming the element
+ * type doesn't change underneath us.
*/
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
{
fcinfo->flinfo->fn_extra = MemoryContextAlloc(fcinfo->flinfo->fn_mcxt,
- sizeof(ArrayMetaState));
+ sizeof(ArrayMetaState));
my_extra = (ArrayMetaState *) fcinfo->flinfo->fn_extra;
my_extra->element_type = InvalidOid;
}
@@ -2504,8 +2505,7 @@ array_to_text(PG_FUNCTION_ARGS)
if (my_extra->element_type != element_type)
{
/*
- * Get info about element type, including its output conversion
- * proc
+ * Get info about element type, including its output conversion proc
*/
get_type_io_data(element_type, IOFunc_output,
&my_extra->typlen, &my_extra->typbyval,
@@ -2606,7 +2606,7 @@ md5_text(PG_FUNCTION_ARGS)
{
text *in_text = PG_GETARG_TEXT_P(0);
size_t len;
- char hexsum[MD5_HASH_LEN + 1];
+ char hexsum[MD5_HASH_LEN + 1];
text *result_text;
/* Calculate the length of the buffer using varlena metadata */
@@ -2661,7 +2661,7 @@ pg_column_size(PG_FUNCTION_ARGS)
if (fcinfo->flinfo->fn_extra == NULL)
{
/* Lookup the datatype of the supplied argument */
- Oid argtypeid = get_fn_expr_argtype(fcinfo->flinfo, 0);
+ Oid argtypeid = get_fn_expr_argtype(fcinfo->flinfo, 0);
typlen = get_typlen(argtypeid);
if (typlen == 0) /* should not happen */
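The comments reflowed in text_substring() and bytea_substr() spell out the SQL99 bounds rules: a negative length is an error, while a range that ends before position 1 or starts past the end of the string yields an empty result. A standalone sketch of just that position arithmetic (1-based positions, as in the code); it is not part of the patch.

#include <stdio.h>

/*
 * Compute the 1-based start and character count for substring(str from S for L)
 * over a string of str_chars characters.  Returns -1 for the SQL99
 * "negative substring length" error, 0 otherwise.
 */
static int
substr_range(int S, int L, int str_chars, int *start, int *count)
{
    int S1 = (S > 1) ? S : 1;   /* clamp the start position to 1 */
    int E = S + L;              /* one past the last requested position */

    if (E < S)
        return -1;              /* L was negative: error per SQL99 */

    if (E < 1 || S1 > str_chars)
    {
        *start = 1;
        *count = 0;             /* zero-length result */
        return 0;
    }

    *start = S1;
    *count = E - S1;
    if (*count > str_chars - S1 + 1)
        *count = str_chars - S1 + 1;    /* don't run past the end */
    return 0;
}

int main(void)
{
    int start, count;

    substr_range(-2, 5, 6, &start, &count);
    printf("start=%d count=%d\n", start, count);    /* start=1 count=2 */
    return 0;
}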
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 2ffcee77695..918ab7c081a 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.124 2005/09/24 22:54:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.125 2005/10/15 02:49:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -35,7 +35,7 @@
#include "utils/syscache.h"
-/* #define CACHEDEBUG */ /* turns DEBUG elogs on */
+ /* #define CACHEDEBUG */ /* turns DEBUG elogs on */
/*
* Constants related to size of the catcache.
@@ -187,22 +187,22 @@ CatalogCacheComputeHashValue(CatCache *cache, int nkeys, ScanKey cur_skey)
case 4:
hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[3],
- cur_skey[3].sk_argument)) << 9;
+ cur_skey[3].sk_argument)) << 9;
/* FALLTHROUGH */
case 3:
hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[2],
- cur_skey[2].sk_argument)) << 6;
+ cur_skey[2].sk_argument)) << 6;
/* FALLTHROUGH */
case 2:
hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[1],
- cur_skey[1].sk_argument)) << 3;
+ cur_skey[1].sk_argument)) << 3;
/* FALLTHROUGH */
case 1:
hashValue ^=
DatumGetUInt32(DirectFunctionCall1(cache->cc_hashfunc[0],
- cur_skey[0].sk_argument));
+ cur_skey[0].sk_argument));
break;
default:
elog(FATAL, "wrong number of hash keys: %d", nkeys);
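The hunk above only re-wraps continuation lines, but it shows how CatalogCacheComputeHashValue() folds up to four per-key hashes into one value: each key's 32-bit hash is XORed in at a position-dependent shift (0, 3, 6 or 9 bits). A standalone sketch of the folding step, not part of the patch; the per-key hashes would come from the cc_hashfunc calls in the real code.

#include <stdint.h>
#include <stdio.h>

/* Fold up to four per-key hash values into a single catcache hash. */
static uint32_t
combine_cache_hash(const uint32_t keyhash[], int nkeys)
{
    static const int shift[4] = {0, 3, 6, 9};
    uint32_t hash = 0;
    int i;

    for (i = 0; i < nkeys; i++)
        hash ^= keyhash[i] << shift[i];
    return hash;
}

int main(void)
{
    uint32_t keys[2] = {0x12345678, 0x12345678};

    /* identical keys in different positions still combine to a nonzero hash */
    printf("%08X\n", combine_cache_hash(keys, 2));
    return 0;
}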
@@ -448,8 +448,8 @@ CatalogCacheIdInvalidate(int cacheId,
/*
* We don't bother to check whether the cache has finished
- * initialization yet; if not, there will be no entries in it so
- * no problem.
+ * initialization yet; if not, there will be no entries in it so no
+ * problem.
*/
/*
@@ -522,15 +522,15 @@ void
CreateCacheMemoryContext(void)
{
/*
- * Purely for paranoia, check that context doesn't exist; caller
- * probably did so already.
+ * Purely for paranoia, check that context doesn't exist; caller probably
+ * did so already.
*/
if (!CacheMemoryContext)
CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
"CacheMemoryContext",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
}
@@ -768,7 +768,6 @@ do { \
cp->cc_reloid, cp->cc_indexoid, cp->id, \
cp->cc_nkeys, cp->cc_nbuckets); \
} while(0)
-
#else
#define InitCatCache_DEBUG2
#endif
@@ -786,8 +785,8 @@ InitCatCache(int id,
int i;
/*
- * first switch to the cache context so our allocations do not vanish
- * at the end of a transaction
+ * first switch to the cache context so our allocations do not vanish at
+ * the end of a transaction
*/
if (!CacheMemoryContext)
CreateCacheMemoryContext();
@@ -878,7 +877,6 @@ do { \
i+1, cache->cc_nkeys, cache->cc_key[i]); \
} \
} while(0)
-
#else
#define CatalogCacheInitializeCache_DEBUG1
#define CatalogCacheInitializeCache_DEBUG2
@@ -895,15 +893,15 @@ CatalogCacheInitializeCache(CatCache *cache)
CatalogCacheInitializeCache_DEBUG1;
/*
- * Open the relation without locking --- we only need the tupdesc,
- * which we assume will never change ...
+ * Open the relation without locking --- we only need the tupdesc, which
+ * we assume will never change ...
*/
relation = heap_open(cache->cc_reloid, NoLock);
Assert(RelationIsValid(relation));
/*
- * switch to the cache context so our allocations do not vanish at the
- * end of a transaction
+ * switch to the cache context so our allocations do not vanish at the end
+ * of a transaction
*/
Assert(CacheMemoryContext != NULL);
@@ -915,8 +913,8 @@ CatalogCacheInitializeCache(CatCache *cache)
tupdesc = CreateTupleDescCopyConstr(RelationGetDescr(relation));
/*
- * save the relation's name and relisshared flag, too (cc_relname
- * is used only for debugging purposes)
+ * save the relation's name and relisshared flag, too (cc_relname is used
+ * only for debugging purposes)
*/
cache->cc_relname = pstrdup(RelationGetRelationName(relation));
cache->cc_relisshared = RelationGetForm(relation)->relisshared;
@@ -957,8 +955,8 @@ CatalogCacheInitializeCache(CatCache *cache)
cache->cc_isname[i] = (keytype == NAMEOID);
/*
- * Do equality-function lookup (we assume this won't need a
- * catalog lookup for any supported type)
+ * Do equality-function lookup (we assume this won't need a catalog
+ * lookup for any supported type)
*/
fmgr_info_cxt(eqfunc,
&cache->cc_skey[i].sk_func,
@@ -1026,9 +1024,9 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
if (cache->id == INDEXRELID)
{
/*
- * Since the OIDs of indexes aren't hardwired, it's painful to
- * figure out which is which. Just force all pg_index searches to
- * be heap scans while building the relcaches.
+ * Since the OIDs of indexes aren't hardwired, it's painful to figure
+ * out which is which. Just force all pg_index searches to be heap
+ * scans while building the relcaches.
*/
if (!criticalRelcachesBuilt)
return false;
@@ -1037,10 +1035,10 @@ IndexScanOK(CatCache *cache, ScanKey cur_skey)
cache->id == AMNAME)
{
/*
- * Always do heap scans in pg_am, because it's so small there's
- * not much point in an indexscan anyway. We *must* do this when
- * initially building critical relcache entries, but we might as
- * well just always do it.
+ * Always do heap scans in pg_am, because it's so small there's not
+ * much point in an indexscan anyway. We *must* do this when
+ * initially building critical relcache entries, but we might as well
+ * just always do it.
*/
return false;
}
@@ -1146,18 +1144,18 @@ SearchCatCache(CatCache *cache,
continue;
/*
- * we found a match in the cache: move it to the front of the
- * global LRU list. We also move it to the front of the list for
- * its hashbucket, in order to speed subsequent searches. (The
- * most frequently accessed elements in any hashbucket will tend
- * to be near the front of the hashbucket's list.)
+ * we found a match in the cache: move it to the front of the global
+ * LRU list. We also move it to the front of the list for its
+ * hashbucket, in order to speed subsequent searches. (The most
+ * frequently accessed elements in any hashbucket will tend to be near
+ * the front of the hashbucket's list.)
*/
DLMoveToFront(&ct->lrulist_elem);
DLMoveToFront(&ct->cache_elem);
/*
- * If it's a positive entry, bump its refcount and return it. If
- * it's negative, we can report failure to the caller.
+ * If it's a positive entry, bump its refcount and return it. If it's
+ * negative, we can report failure to the caller.
*/
if (!ct->negative)
{
@@ -1188,19 +1186,19 @@ SearchCatCache(CatCache *cache,
}
/*
- * Tuple was not found in cache, so we have to try to retrieve it
- * directly from the relation. If found, we will add it to the cache;
- * if not found, we will add a negative cache entry instead.
+ * Tuple was not found in cache, so we have to try to retrieve it directly
+ * from the relation. If found, we will add it to the cache; if not
+ * found, we will add a negative cache entry instead.
*
- * NOTE: it is possible for recursive cache lookups to occur while
- * reading the relation --- for example, due to shared-cache-inval
- * messages being processed during heap_open(). This is OK. It's
- * even possible for one of those lookups to find and enter the very
- * same tuple we are trying to fetch here. If that happens, we will
- * enter a second copy of the tuple into the cache. The first copy
- * will never be referenced again, and will eventually age out of the
- * cache, so there's no functional problem. This case is rare enough
- * that it's not worth expending extra cycles to detect.
+ * NOTE: it is possible for recursive cache lookups to occur while reading
+ * the relation --- for example, due to shared-cache-inval messages being
+ * processed during heap_open(). This is OK. It's even possible for one
+ * of those lookups to find and enter the very same tuple we are trying to
+ * fetch here. If that happens, we will enter a second copy of the tuple
+ * into the cache. The first copy will never be referenced again, and
+ * will eventually age out of the cache, so there's no functional problem.
+ * This case is rare enough that it's not worth expending extra cycles to
+ * detect.
*/
relation = heap_open(cache->cc_reloid, AccessShareLock);
@@ -1231,13 +1229,13 @@ SearchCatCache(CatCache *cache,
/*
* If tuple was not found, we need to build a negative cache entry
- * containing a fake tuple. The fake tuple has the correct key
- * columns, but nulls everywhere else.
+ * containing a fake tuple. The fake tuple has the correct key columns,
+ * but nulls everywhere else.
*
- * In bootstrap mode, we don't build negative entries, because the
- * cache invalidation mechanism isn't alive and can't clear them
- * if the tuple gets created later. (Bootstrap doesn't do UPDATEs,
- * so it doesn't need cache inval for that.)
+ * In bootstrap mode, we don't build negative entries, because the cache
+ * invalidation mechanism isn't alive and can't clear them if the tuple
+ * gets created later. (Bootstrap doesn't do UPDATEs, so it doesn't need
+ * cache inval for that.)
*/
if (ct == NULL)
{
@@ -1256,8 +1254,8 @@ SearchCatCache(CatCache *cache,
cache->cc_relname, hashIndex);
/*
- * We are not returning the negative entry to the caller, so leave
- * its refcount zero.
+ * We are not returning the negative entry to the caller, so leave its
+ * refcount zero.
*/
return NULL;
@@ -1331,7 +1329,7 @@ SearchCatCacheList(CatCache *cache,
Dlelem *elt;
CatCList *cl;
CatCTup *ct;
- List * volatile ctlist;
+ List *volatile ctlist;
ListCell *ctlist_item;
int nmembers;
bool ordered;
@@ -1362,8 +1360,8 @@ SearchCatCacheList(CatCache *cache,
/*
* compute a hash value of the given keys for faster search. We don't
- * presently divide the CatCList items into buckets, but this still
- * lets us skip non-matching items quickly most of the time.
+ * presently divide the CatCList items into buckets, but this still lets
+ * us skip non-matching items quickly most of the time.
*/
lHashValue = CatalogCacheComputeHashValue(cache, nkeys, cur_skey);
@@ -1399,11 +1397,11 @@ SearchCatCacheList(CatCache *cache,
/*
* We found a matching list: mark it as touched since the last
- * CatalogCacheCleanup() sweep. Also move the list to the front
- * of the cache's list-of-lists, to speed subsequent searches.
- * (We do not move the members to the fronts of their hashbucket
- * lists, however, since there's no point in that unless they are
- * searched for individually.)
+ * CatalogCacheCleanup() sweep. Also move the list to the front of
+ * the cache's list-of-lists, to speed subsequent searches. (We do not
+ * move the members to the fronts of their hashbucket lists, however,
+ * since there's no point in that unless they are searched for
+ * individually.)
*/
cl->touched = true;
DLMoveToFront(&cl->cache_elem);
@@ -1428,10 +1426,10 @@ SearchCatCacheList(CatCache *cache,
* relation. For each matching tuple found in the relation, use an
* existing cache entry if possible, else build a new one.
*
- * We have to bump the member refcounts temporarily to ensure they
- * won't get dropped from the cache while loading other members.
- * We use a PG_TRY block to ensure we can undo those refcounts if
- * we get an error before we finish constructing the CatCList.
+ * We have to bump the member refcounts temporarily to ensure they won't get
+ * dropped from the cache while loading other members. We use a PG_TRY
+ * block to ensure we can undo those refcounts if we get an error before
+ * we finish constructing the CatCList.
*/
ResourceOwnerEnlargeCatCacheListRefs(CurrentResourceOwner);
@@ -1473,13 +1471,13 @@ SearchCatCacheList(CatCache *cache,
ct = (CatCTup *) DLE_VAL(elt);
if (ct->dead || ct->negative)
- continue; /* ignore dead and negative entries */
+ continue; /* ignore dead and negative entries */
if (ct->hash_value != hashValue)
- continue; /* quickly skip entry if wrong hash val */
+ continue; /* quickly skip entry if wrong hash val */
if (!ItemPointerEquals(&(ct->tuple.t_self), &(ntp->t_self)))
- continue; /* not same tuple */
+ continue; /* not same tuple */
/*
* Found a match, but can't use it if it belongs to another
@@ -1526,9 +1524,9 @@ SearchCatCacheList(CatCache *cache,
heap_freetuple(ntp);
/*
- * We are now past the last thing that could trigger an elog before
- * we have finished building the CatCList and remembering it in the
- * resource owner. So it's OK to fall out of the PG_TRY, and indeed
+ * We are now past the last thing that could trigger an elog before we
+ * have finished building the CatCList and remembering it in the
+ * resource owner. So it's OK to fall out of the PG_TRY, and indeed
* we'd better do so before we start marking the members as belonging
* to the list.
*/
@@ -1629,8 +1627,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
MemoryContext oldcxt;
/*
- * Allocate CatCTup header in cache memory, and copy the tuple there
- * too.
+ * Allocate CatCTup header in cache memory, and copy the tuple there too.
*/
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
ct = (CatCTup *) palloc(sizeof(CatCTup));
@@ -1658,9 +1655,9 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp,
CacheHdr->ch_ntup++;
/*
- * If we've exceeded the desired size of the caches, try to throw away
- * the least recently used entry(s). NB: be careful not to throw away
- * the newly-built entry...
+ * If we've exceeded the desired size of the caches, try to throw away the
+ * least recently used entry(s). NB: be careful not to throw away the
+ * newly-built entry...
*/
if (CacheHdr->ch_ntup > CacheHdr->ch_maxtup)
CatalogCacheCleanup(ct);
@@ -1684,22 +1681,22 @@ CatalogCacheCleanup(CatCTup *savect)
*prevelt;
/*
- * Each time we have to do this, try to cut the cache size down to
- * about 90% of the maximum.
+ * Each time we have to do this, try to cut the cache size down to about
+ * 90% of the maximum.
*/
tup_target = (CacheHdr->ch_maxtup * 9) / 10;
/*
- * Our strategy for managing CatCLists is that, each time we have to
- * throw away some cache entries, we first move-to-front all the members
- * of CatCLists that have been touched since the last cleanup sweep.
- * Then we do strict LRU elimination by individual tuples, zapping a list
- * if any of its members gets zapped. Before PostgreSQL 8.1, we moved
- * members to front each time their owning list was touched, which was
- * arguably more fair in balancing list members against standalone tuples
- * --- but the overhead for large lists was horrendous. This scheme is
- * more heavily biased towards preserving lists, but that is not
- * necessarily bad either.
+ * Our strategy for managing CatCLists is that, each time we have to throw
+ * away some cache entries, we first move-to-front all the members of
+ * CatCLists that have been touched since the last cleanup sweep. Then we
+ * do strict LRU elimination by individual tuples, zapping a list if any
+ * of its members gets zapped. Before PostgreSQL 8.1, we moved members to
+ * front each time their owning list was touched, which was arguably more
+ * fair in balancing list members against standalone tuples --- but the
+ * overhead for large lists was horrendous. This scheme is more heavily
+ * biased towards preserving lists, but that is not necessarily bad
+ * either.
*/
for (ccp = CacheHdr->ch_caches; ccp; ccp = ccp->cc_next)
{
@@ -1710,7 +1707,7 @@ CatalogCacheCleanup(CatCTup *savect)
Assert(cl->cl_magic == CL_MAGIC);
if (cl->touched && !cl->dead)
{
- int i;
+ int i;
for (i = 0; i < cl->n_members; i++)
DLMoveToFront(&cl->members[i]->lrulist_elem);
@@ -1775,9 +1772,9 @@ build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
if (attindex > 0)
{
/*
- * Here we must be careful in case the caller passed a C
- * string where a NAME is wanted: convert the given argument
- * to a correctly padded NAME. Otherwise the memcpy() done in
+ * Here we must be careful in case the caller passed a C string
+ * where a NAME is wanted: convert the given argument to a
+ * correctly padded NAME. Otherwise the memcpy() done in
* heap_formtuple could fall off the end of memory.
*/
if (cache->cc_isname[i])
@@ -1840,7 +1837,7 @@ build_dummy_tuple(CatCache *cache, int nkeys, ScanKey skeys)
void
PrepareToInvalidateCacheTuple(Relation relation,
HeapTuple tuple,
- void (*function) (int, uint32, ItemPointer, Oid))
+ void (*function) (int, uint32, ItemPointer, Oid))
{
CatCache *ccp;
Oid reloid;
diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c
index da0ffad16b2..59250feac1a 100644
--- a/src/backend/utils/cache/inval.c
+++ b/src/backend/utils/cache/inval.c
@@ -53,10 +53,10 @@
*
* Also, whenever we see an operation on a pg_class or pg_attribute tuple,
* we register a relcache flush operation for the relation described by that
- * tuple. pg_class updates trigger an smgr flush operation as well.
+ * tuple. pg_class updates trigger an smgr flush operation as well.
*
* We keep the relcache and smgr flush requests in lists separate from the
- * catcache tuple flush requests. This allows us to issue all the pending
+ * catcache tuple flush requests. This allows us to issue all the pending
* catcache flushes before we issue relcache flushes, which saves us from
* loading a catcache tuple during relcache load only to flush it again
* right away. Also, we avoid queuing multiple relcache flush requests for
@@ -80,7 +80,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.72 2005/06/17 22:32:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.73 2005/10/15 02:49:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -142,7 +142,7 @@ typedef struct TransInvalidationInfo
struct TransInvalidationInfo *parent;
/* Subtransaction nesting depth */
- int my_level;
+ int my_level;
/* head of current-command event list */
InvalidationListHeader CurrentCmdInvalidMsgs;
@@ -173,9 +173,9 @@ static struct CACHECALLBACK
static int cache_callback_count = 0;
/* info values for 2PC callback */
-#define TWOPHASE_INFO_MSG 0 /* SharedInvalidationMessage */
-#define TWOPHASE_INFO_FILE_BEFORE 1 /* relcache file inval */
-#define TWOPHASE_INFO_FILE_AFTER 2 /* relcache file inval */
+#define TWOPHASE_INFO_MSG 0 /* SharedInvalidationMessage */
+#define TWOPHASE_INFO_FILE_BEFORE 1 /* relcache file inval */
+#define TWOPHASE_INFO_FILE_AFTER 2 /* relcache file inval */
static void PersistInvalidationMessage(SharedInvalidationMessage *msg);
@@ -208,7 +208,7 @@ AddInvalidationMessage(InvalidationChunk **listHdr,
chunk = (InvalidationChunk *)
MemoryContextAlloc(CurTransactionContext,
sizeof(InvalidationChunk) +
- (FIRSTCHUNKSIZE - 1) *sizeof(SharedInvalidationMessage));
+ (FIRSTCHUNKSIZE - 1) *sizeof(SharedInvalidationMessage));
chunk->nitems = 0;
chunk->maxitems = FIRSTCHUNKSIZE;
chunk->next = *listHdr;
@@ -222,7 +222,7 @@ AddInvalidationMessage(InvalidationChunk **listHdr,
chunk = (InvalidationChunk *)
MemoryContextAlloc(CurTransactionContext,
sizeof(InvalidationChunk) +
- (chunksize - 1) *sizeof(SharedInvalidationMessage));
+ (chunksize - 1) *sizeof(SharedInvalidationMessage));
chunk->nitems = 0;
chunk->maxitems = chunksize;
chunk->next = *listHdr;
@@ -316,7 +316,7 @@ AddRelcacheInvalidationMessage(InvalidationListHeader *hdr,
ProcessMessageList(hdr->rclist,
if (msg->rc.id == SHAREDINVALRELCACHE_ID &&
msg->rc.relId == relId)
- return);
+ return);
/* OK, add the item */
msg.rc.id = SHAREDINVALRELCACHE_ID;
@@ -338,7 +338,7 @@ AddSmgrInvalidationMessage(InvalidationListHeader *hdr,
ProcessMessageList(hdr->rclist,
if (msg->sm.id == SHAREDINVALSMGR_ID &&
RelFileNodeEquals(msg->sm.rnode, rnode))
- return);
+ return);
/* OK, add the item */
msg.sm.id = SHAREDINVALSMGR_ID;
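Both hunks above use the same pattern: before queuing a relcache or smgr invalidation, the pending list is scanned and the add is skipped when an equivalent message is already there. A simplified sketch of that dedup-then-append idea, not part of the patch, with a plain linked list standing in for the chunked InvalidationListHeader.

#include <stdio.h>
#include <stdlib.h>

typedef struct RelInvalMsg
{
    unsigned int relid;
    struct RelInvalMsg *next;
} RelInvalMsg;

/* Add a relcache-style invalidation for relid unless one is already queued. */
static void
add_rel_inval(RelInvalMsg **head, unsigned int relid)
{
    RelInvalMsg *m;

    for (m = *head; m != NULL; m = m->next)
        if (m->relid == relid)
            return;             /* duplicate: nothing to do */

    m = malloc(sizeof(RelInvalMsg));
    if (m == NULL)
        abort();
    m->relid = relid;
    m->next = *head;
    *head = m;
}

int main(void)
{
    RelInvalMsg *list = NULL;
    int n = 0;

    add_rel_inval(&list, 1259);     /* pg_class */
    add_rel_inval(&list, 1259);     /* ignored as a duplicate */
    for (; list != NULL; list = list->next)
        n++;
    printf("%d\n", n);              /* 1 */
    return 0;
}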
@@ -470,8 +470,8 @@ LocalExecuteInvalidationMessage(SharedInvalidationMessage *msg)
else if (msg->id == SHAREDINVALSMGR_ID)
{
/*
- * We could have smgr entries for relations of other databases,
- * so no short-circuit test is possible here.
+ * We could have smgr entries for relations of other databases, so no
+ * short-circuit test is possible here.
*/
smgrclosenode(msg->sm.rnode);
}
@@ -523,17 +523,16 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
return;
/*
- * We only need to worry about invalidation for tuples that are in
- * system relations; user-relation tuples are never in catcaches and
- * can't affect the relcache either.
+ * We only need to worry about invalidation for tuples that are in system
+ * relations; user-relation tuples are never in catcaches and can't affect
+ * the relcache either.
*/
if (!IsSystemRelation(relation))
return;
/*
- * TOAST tuples can likewise be ignored here. Note that TOAST tables
- * are considered system relations so they are not filtered by the
- * above test.
+ * TOAST tuples can likewise be ignored here. Note that TOAST tables are
+ * considered system relations so they are not filtered by the above test.
*/
if (IsToastRelation(relation))
return;
@@ -561,16 +560,15 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
databaseId = MyDatabaseId;
/*
- * We need to send out an smgr inval as well as a relcache inval.
- * This is needed because other backends might possibly possess
- * smgr cache but not relcache entries for the target relation.
+ * We need to send out an smgr inval as well as a relcache inval. This
+ * is needed because other backends might possibly possess smgr cache
+ * but not relcache entries for the target relation.
*
- * Note: during a pg_class row update that assigns a new
- * relfilenode or reltablespace value, we will be called on both
- * the old and new tuples, and thus will broadcast invalidation
- * messages showing both the old and new RelFileNode values. This
- * ensures that other backends will close smgr references to the
- * old file.
+ * Note: during a pg_class row update that assigns a new relfilenode or
+ * reltablespace value, we will be called on both the old and new
+ * tuples, and thus will broadcast invalidation messages showing both
+ * the old and new RelFileNode values. This ensures that other
+ * backends will close smgr references to the old file.
*
* XXX possible future cleanup: it might be better to trigger smgr
* flushes explicitly, rather than indirectly from pg_class updates.
@@ -590,13 +588,12 @@ PrepareForTupleInvalidation(Relation relation, HeapTuple tuple)
relationId = atttup->attrelid;
/*
- * KLUGE ALERT: we always send the relcache event with
- * MyDatabaseId, even if the rel in question is shared (which we
- * can't easily tell). This essentially means that only backends
- * in this same database will react to the relcache flush request.
- * This is in fact appropriate, since only those backends could
- * see our pg_attribute change anyway. It looks a bit ugly
- * though.
+ * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
+ * even if the rel in question is shared (which we can't easily tell).
+ * This essentially means that only backends in this same database
+ * will react to the relcache flush request. This is in fact
+ * appropriate, since only those backends could see our pg_attribute
+ * change anyway. It looks a bit ugly though.
*/
databaseId = MyDatabaseId;
}
@@ -646,7 +643,7 @@ AtStart_Inval(void)
/*
* AtPrepare_Inval
- * Save the inval lists state at 2PC transaction prepare.
+ * Save the inval lists state at 2PC transaction prepare.
*
* In this phase we just generate 2PC records for all the pending invalidation
* work.
@@ -658,8 +655,8 @@ AtPrepare_Inval(void)
Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);
/*
- * Relcache init file invalidation requires processing both before
- * and after we send the SI messages.
+ * Relcache init file invalidation requires processing both before and
+ * after we send the SI messages.
*/
if (transInvalInfo->RelcacheInitFileInval)
RegisterTwoPhaseRecord(TWOPHASE_RM_INVAL_ID, TWOPHASE_INFO_FILE_BEFORE,
@@ -678,7 +675,7 @@ AtPrepare_Inval(void)
/*
* PostPrepare_Inval
- * Clean up after successful PREPARE.
+ * Clean up after successful PREPARE.
*
* Here, we want to act as though the transaction aborted, so that we will
* undo any syscache changes it made, thereby bringing us into sync with the
@@ -714,7 +711,7 @@ AtSubStart_Inval(void)
/*
* PersistInvalidationMessage
- * Write an invalidation message to the 2PC state file.
+ * Write an invalidation message to the 2PC state file.
*/
static void
PersistInvalidationMessage(SharedInvalidationMessage *msg)
@@ -736,7 +733,7 @@ inval_twophase_postcommit(TransactionId xid, uint16 info,
switch (info)
{
case TWOPHASE_INFO_MSG:
- msg = (SharedInvalidationMessage *) recdata;
+ msg = (SharedInvalidationMessage *) recdata;
Assert(len == sizeof(SharedInvalidationMessage));
SendSharedInvalidMessage(msg);
break;
@@ -786,15 +783,15 @@ AtEOXact_Inval(bool isCommit)
Assert(transInvalInfo != NULL && transInvalInfo->parent == NULL);
/*
- * Relcache init file invalidation requires processing both before
- * and after we send the SI messages. However, we need not do
- * anything unless we committed.
+ * Relcache init file invalidation requires processing both before and
+ * after we send the SI messages. However, we need not do anything
+ * unless we committed.
*/
if (transInvalInfo->RelcacheInitFileInval)
RelationCacheInitFileInvalidate(true);
AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
- &transInvalInfo->CurrentCmdInvalidMsgs);
+ &transInvalInfo->CurrentCmdInvalidMsgs);
ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
SendSharedInvalidMessage);
@@ -897,9 +894,9 @@ void
CommandEndInvalidationMessages(void)
{
/*
- * You might think this shouldn't be called outside any transaction,
- * but bootstrap does it, and also ABORT issued when not in a
- * transaction. So just quietly return if no state to work on.
+ * You might think this shouldn't be called outside any transaction, but
+ * bootstrap does it, and also ABORT issued when not in a transaction. So
+ * just quietly return if no state to work on.
*/
if (transInvalInfo == NULL)
return;
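
The inval.c hunks above touch the three 2PC record kinds and the postcommit dispatch for them. A sketch of how those records are consumed at COMMIT PREPARED, mirroring the TWOPHASE_INFO_MSG case shown in the diff; the FILE_BEFORE/FILE_AFTER handling is assumed here by analogy with AtEOXact_Inval, which brackets the SI sends with RelationCacheInitFileInvalidate(true/false), and the real function also takes the prepared transaction's xid:

    /* Sketch of the postcommit dispatch (backend types; not stand-alone). */
    static void
    postcommit_dispatch_sketch(uint16 info, void *recdata, uint32 len)
    {
        SharedInvalidationMessage *msg;

        switch (info)
        {
            case TWOPHASE_INFO_MSG:
                msg = (SharedInvalidationMessage *) recdata;
                Assert(len == sizeof(SharedInvalidationMessage));
                SendSharedInvalidMessage(msg);
                break;
            case TWOPHASE_INFO_FILE_BEFORE:     /* assumed: inval before SI sends */
                RelationCacheInitFileInvalidate(true);
                break;
            case TWOPHASE_INFO_FILE_AFTER:      /* assumed: inval after SI sends */
                RelationCacheInitFileInvalidate(false);
                break;
        }
    }
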
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index ebb884dc258..096a3cb942b 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.128 2005/10/11 17:27:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.129 2005/10/15 02:49:31 momjian Exp $
*
* NOTES
* Eventually, the index information should go through here, too.
@@ -149,10 +149,10 @@ get_op_hash_function(Oid opno)
Oid opclass = InvalidOid;
/*
- * Search pg_amop to see if the target operator is registered as the
- * "=" operator of any hash opclass. If the operator is registered in
- * multiple opclasses, assume we can use the associated hash function
- * from any one.
+ * Search pg_amop to see if the target operator is registered as the "="
+ * operator of any hash opclass. If the operator is registered in
+ * multiple opclasses, assume we can use the associated hash function from
+ * any one.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(opno),
@@ -1223,9 +1223,9 @@ getTypeIOParam(HeapTuple typeTuple)
Form_pg_type typeStruct = (Form_pg_type) GETSTRUCT(typeTuple);
/*
- * Array types get their typelem as parameter; everybody else gets
- * their own type OID as parameter. (This is a change from 8.0,
- * in which only composite types got their own OID as parameter.)
+ * Array types get their typelem as parameter; everybody else gets their
+ * own type OID as parameter. (This is a change from 8.0, in which only
+ * composite types got their own OID as parameter.)
*/
if (OidIsValid(typeStruct->typelem))
return typeStruct->typelem;
@@ -1414,7 +1414,7 @@ get_typdefault(Oid typid)
/* Convert C string to a value of the given type */
datum = OidFunctionCall3(type->typinput,
CStringGetDatum(strDefaultVal),
- ObjectIdGetDatum(getTypeIOParam(typeTuple)),
+ ObjectIdGetDatum(getTypeIOParam(typeTuple)),
Int32GetDatum(-1));
/* Build a Const node containing the value */
expr = (Node *) makeConst(typid,
@@ -1501,8 +1501,8 @@ get_typavgwidth(Oid typid, int32 typmod)
{
/*
* For BPCHAR, the max width is also the only width. Otherwise we
- * need to guess about the typical data width given the max. A
- * sliding scale for percentage of max width seems reasonable.
+ * need to guess about the typical data width given the max. A sliding
+ * scale for percentage of max width seems reasonable.
*/
if (typid == BPCHAROID)
return maxwidth;
@@ -1513,8 +1513,8 @@ get_typavgwidth(Oid typid, int32 typmod)
/*
* Beyond 1000, assume we're looking at something like
- * "varchar(10000)" where the limit isn't actually reached often,
- * and use a fixed estimate.
+ * "varchar(10000)" where the limit isn't actually reached often, and
+ * use a fixed estimate.
*/
return 32 + (1000 - 32) / 2;
}
@@ -1905,9 +1905,9 @@ get_attstatsslot(HeapTuple statstuple,
values, nvalues);
/*
- * If the element type is pass-by-reference, we now have a bunch
- * of Datums that are pointers into the syscache value. Copy them
- * to avoid problems if syscache decides to drop the entry.
+ * If the element type is pass-by-reference, we now have a bunch of
+ * Datums that are pointers into the syscache value. Copy them to
+ * avoid problems if syscache decides to drop the entry.
*/
if (!typeForm->typbyval)
{
@@ -1938,9 +1938,9 @@ get_attstatsslot(HeapTuple statstuple,
statarray = DatumGetArrayTypeP(val);
/*
- * We expect the array to be a 1-D float4 array; verify that. We
- * don't need to use deconstruct_array() since the array data is
- * just going to look like a C array of float4 values.
+ * We expect the array to be a 1-D float4 array; verify that. We don't
+ * need to use deconstruct_array() since the array data is just going
+ * to look like a C array of float4 values.
*/
narrayelem = ARR_DIMS(statarray)[0];
if (ARR_NDIM(statarray) != 1 || narrayelem <= 0 ||
@@ -2038,7 +2038,7 @@ get_roleid(const char *rolname)
Oid
get_roleid_checked(const char *rolname)
{
- Oid roleid;
+ Oid roleid;
roleid = get_roleid(rolname);
if (!OidIsValid(roleid))
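
The get_typavgwidth hunks above describe the width heuristic only in prose: BPCHAR keeps its declared width, short limits are assumed fully used, very large limits fall back to a fixed estimate. A stand-alone sketch of that logic; the 50% midpoint rule for limits between 32 and 1000 is an assumption filled in here, while the <= 32 and >= 1000 cases follow directly from the hunks:

    /* Sketch of the typical-width guess discussed above. */
    static int
    avg_width_guess(int maxwidth, int is_bpchar)
    {
        if (is_bpchar)
            return maxwidth;                    /* fixed-width: max is the only width */
        if (maxwidth <= 32)
            return maxwidth;                    /* short limits tend to be fully used */
        if (maxwidth < 1000)
            return 32 + (maxwidth - 32) / 2;    /* sliding scale (assumed 50%) */
        return 32 + (1000 - 32) / 2;            /* huge limits are rarely reached */
    }
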
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index d74982dcb0c..e877c1f828b 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.229 2005/09/16 04:13:18 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.230 2005/10/15 02:49:31 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -192,7 +192,7 @@ static bool load_relcache_init_file(void);
static void write_relcache_init_file(void);
static void formrdesc(const char *relationName, Oid relationReltype,
- bool hasoids, int natts, FormData_pg_attribute *att);
+ bool hasoids, int natts, FormData_pg_attribute *att);
static HeapTuple ScanPgRelation(Oid targetRelId, bool indexOK);
static Relation AllocateRelationDesc(Relation relation, Form_pg_class relp);
@@ -241,9 +241,9 @@ ScanPgRelation(Oid targetRelId, bool indexOK)
/*
* Open pg_class and fetch a tuple. Force heap scan if we haven't yet
- * built the critical relcache entries (this includes initdb and
- * startup without a pg_internal.init file). The caller can also
- * force a heap scan by setting indexOK == false.
+ * built the critical relcache entries (this includes initdb and startup
+ * without a pg_internal.init file). The caller can also force a heap
+ * scan by setting indexOK == false.
*/
pg_class_desc = heap_open(RelationRelationId, AccessShareLock);
pg_class_scan = systable_beginscan(pg_class_desc, ClassOidIndexId,
@@ -303,12 +303,11 @@ AllocateRelationDesc(Relation relation, Form_pg_class relp)
/*
* Copy the relation tuple form
*
- * We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE.
- * relacl is NOT stored in the relcache --- there'd be little point in
- * it, since we don't copy the tuple's nullvalues bitmap and hence
- * wouldn't know if the value is valid ... bottom line is that relacl
- * *cannot* be retrieved from the relcache. Get it from the syscache
- * if you need it.
+ * We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE. relacl
+ * is NOT stored in the relcache --- there'd be little point in it, since
+ * we don't copy the tuple's nullvalues bitmap and hence wouldn't know if
+ * the value is valid ... bottom line is that relacl *cannot* be retrieved
+ * from the relcache. Get it from the syscache if you need it.
*/
relationForm = (Form_pg_class) palloc(CLASS_TUPLE_SIZE);
@@ -355,8 +354,8 @@ RelationBuildTupleDesc(Relation relation)
/*
* Form a scan key that selects only user attributes (attnum > 0).
- * (Eliminating system attribute rows at the index level is lots
- * faster than fetching them.)
+ * (Eliminating system attribute rows at the index level is lots faster
+ * than fetching them.)
*/
ScanKeyInit(&skey[0],
Anum_pg_attribute_attrelid,
@@ -368,9 +367,9 @@ RelationBuildTupleDesc(Relation relation)
Int16GetDatum(0));
/*
- * Open pg_attribute and begin a scan. Force heap scan if we haven't
- * yet built the critical relcache entries (this includes initdb and
- * startup without a pg_internal.init file).
+ * Open pg_attribute and begin a scan. Force heap scan if we haven't yet
+ * built the critical relcache entries (this includes initdb and startup
+ * without a pg_internal.init file).
*/
pg_attribute_desc = heap_open(AttributeRelationId, AccessShareLock);
pg_attribute_scan = systable_beginscan(pg_attribute_desc,
@@ -445,9 +444,8 @@ RelationBuildTupleDesc(Relation relation)
/*
* However, we can easily set the attcacheoff value for the first
- * attribute: it must be zero. This eliminates the need for special
- * cases for attnum=1 that used to exist in fastgetattr() and
- * index_getattr().
+ * attribute: it must be zero. This eliminates the need for special cases
+ * for attnum=1 that used to exist in fastgetattr() and index_getattr().
*/
if (relation->rd_rel->relnatts > 0)
relation->rd_att->attrs[0]->attcacheoff = 0;
@@ -477,7 +475,7 @@ RelationBuildTupleDesc(Relation relation)
constr->num_check = relation->rd_rel->relchecks;
constr->check = (ConstrCheck *)
MemoryContextAllocZero(CacheMemoryContext,
- constr->num_check * sizeof(ConstrCheck));
+ constr->num_check * sizeof(ConstrCheck));
CheckConstraintFetch(relation);
}
else
@@ -521,8 +519,8 @@ RelationBuildRuleLock(Relation relation)
int maxlocks;
/*
- * Make the private context. Parameters are set on the assumption
- * that it'll probably not contain much data.
+ * Make the private context. Parameters are set on the assumption that
+ * it'll probably not contain much data.
*/
rulescxt = AllocSetContextCreate(CacheMemoryContext,
RelationGetRelationName(relation),
@@ -532,8 +530,8 @@ RelationBuildRuleLock(Relation relation)
relation->rd_rulescxt = rulescxt;
/*
- * allocate an array to hold the rewrite rules (the array is extended
- * if necessary)
+ * allocate an array to hold the rewrite rules (the array is extended if
+ * necessary)
*/
maxlocks = 4;
rules = (RewriteRule **)
@@ -551,10 +549,10 @@ RelationBuildRuleLock(Relation relation)
/*
* open pg_rewrite and begin a scan
*
- * Note: since we scan the rules using RewriteRelRulenameIndexId,
- * we will be reading the rules in name order, except possibly during
- * emergency-recovery operations (ie, IsIgnoringSystemIndexes). This
- * in turn ensures that rules will be fired in name order.
+ * Note: since we scan the rules using RewriteRelRulenameIndexId, we will be
+ * reading the rules in name order, except possibly during
+ * emergency-recovery operations (ie, IsIgnoringSystemIndexes). This in
+ * turn ensures that rules will be fired in name order.
*/
rewrite_desc = heap_open(RewriteRelationId, AccessShareLock);
rewrite_tupdesc = RelationGetDescr(rewrite_desc);
@@ -602,7 +600,7 @@ RelationBuildRuleLock(Relation relation)
&isnull);
Assert(!isnull);
rule_evqual_str = DatumGetCString(DirectFunctionCall1(textout,
- rule_evqual));
+ rule_evqual));
oldcxt = MemoryContextSwitchTo(rulescxt);
rule->qual = (Node *) stringToNode(rule_evqual_str);
MemoryContextSwitchTo(oldcxt);
@@ -647,8 +645,8 @@ equalRuleLocks(RuleLock *rlock1, RuleLock *rlock2)
/*
* As of 7.3 we assume the rule ordering is repeatable, because
- * RelationBuildRuleLock should read 'em in a consistent order. So
- * just compare corresponding slots.
+ * RelationBuildRuleLock should read 'em in a consistent order. So just
+ * compare corresponding slots.
*/
if (rlock1 != NULL)
{
@@ -717,8 +715,8 @@ RelationBuildDesc(Oid targetRelId, Relation oldrelation)
relp = (Form_pg_class) GETSTRUCT(pg_class_tuple);
/*
- * allocate storage for the relation descriptor, and copy
- * pg_class_tuple to relation->rd_rel.
+ * allocate storage for the relation descriptor, and copy pg_class_tuple
+ * to relation->rd_rel.
*/
relation = AllocateRelationDesc(oldrelation, relp);
@@ -733,10 +731,9 @@ RelationBuildDesc(Oid targetRelId, Relation oldrelation)
RelationGetRelid(relation) = relid;
/*
- * normal relations are not nailed into the cache; nor can a
- * pre-existing relation be new. It could be temp though. (Actually,
- * it could be new too, but it's okay to forget that fact if forced to
- * flush the entry.)
+ * normal relations are not nailed into the cache; nor can a pre-existing
+ * relation be new. It could be temp though. (Actually, it could be new
+ * too, but it's okay to forget that fact if forced to flush the entry.)
*/
relation->rd_refcnt = 0;
relation->rd_isnailed = false;
@@ -834,9 +831,8 @@ RelationInitIndexAccessInfo(Relation relation)
/*
* Make a copy of the pg_index entry for the index. Since pg_index
- * contains variable-length and possibly-null fields, we have to do
- * this honestly rather than just treating it as a Form_pg_index
- * struct.
+ * contains variable-length and possibly-null fields, we have to do this
+ * honestly rather than just treating it as a Form_pg_index struct.
*/
tuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(RelationGetRelid(relation)),
@@ -851,9 +847,9 @@ RelationInitIndexAccessInfo(Relation relation)
ReleaseSysCache(tuple);
/*
- * indclass cannot be referenced directly through the C struct, because
- * it is after the variable-width indkey field. Therefore we extract
- * the datum the hard way and provide a direct link in the relcache.
+ * indclass cannot be referenced directly through the C struct, because it
+ * is after the variable-width indkey field. Therefore we extract the
+ * datum the hard way and provide a direct link in the relcache.
*/
indclassDatum = fastgetattr(relation->rd_indextuple,
Anum_pg_index_indclass,
@@ -884,9 +880,9 @@ RelationInitIndexAccessInfo(Relation relation)
amsupport = aform->amsupport;
/*
- * Make the private context to hold index access info. The reason we
- * need a context, and not just a couple of pallocs, is so that we
- * won't leak any subsidiary info attached to fmgr lookup records.
+ * Make the private context to hold index access info. The reason we need
+ * a context, and not just a couple of pallocs, is so that we won't leak
+ * any subsidiary info attached to fmgr lookup records.
*
* Context parameters are set on the assumption that it'll probably not
* contain much data.
@@ -931,7 +927,7 @@ RelationInitIndexAccessInfo(Relation relation)
relation->rd_supportinfo = supportinfo;
/*
- * Fill the operator and support procedure OID arrays. (aminfo and
+ * Fill the operator and support procedure OID arrays. (aminfo and
* supportinfo are left as zeroes, and are filled on-the-fly when used)
*/
IndexSupportInitialize(relation->rd_indclass,
@@ -1070,17 +1066,17 @@ LookupOpclassInfo(Oid operatorClassOid,
opcentry->supportProcs = NULL;
/*
- * To avoid infinite recursion during startup, force heap scans if
- * we're looking up info for the opclasses used by the indexes we
- * would like to reference here.
+ * To avoid infinite recursion during startup, force heap scans if we're
+ * looking up info for the opclasses used by the indexes we would like to
+ * reference here.
*/
indexOK = criticalRelcachesBuilt ||
(operatorClassOid != OID_BTREE_OPS_OID &&
operatorClassOid != INT2_BTREE_OPS_OID);
/*
- * Scan pg_amop to obtain operators for the opclass. We only fetch
- * the default ones (those with subtype zero).
+ * Scan pg_amop to obtain operators for the opclass. We only fetch the
+ * default ones (those with subtype zero).
*/
if (numStrats > 0)
{
@@ -1113,8 +1109,8 @@ LookupOpclassInfo(Oid operatorClassOid,
}
/*
- * Scan pg_amproc to obtain support procs for the opclass. We only
- * fetch the default ones (those with subtype zero).
+ * Scan pg_amproc to obtain support procs for the opclass. We only fetch
+ * the default ones (those with subtype zero).
*/
if (numSupport > 0)
{
@@ -1193,8 +1189,8 @@ formrdesc(const char *relationName, Oid relationReltype,
relation->rd_refcnt = 1;
/*
- * all entries built with this routine are nailed-in-cache; none are
- * for new or temp relations.
+ * all entries built with this routine are nailed-in-cache; none are for
+ * new or temp relations.
*/
relation->rd_isnailed = true;
relation->rd_createSubid = InvalidSubTransactionId;
@@ -1203,9 +1199,9 @@ formrdesc(const char *relationName, Oid relationReltype,
/*
* initialize relation tuple form
*
- * The data we insert here is pretty incomplete/bogus, but it'll serve to
- * get us launched. RelationCacheInitializePhase2() will read the
- * real data from pg_class and replace what we've done here.
+ * The data we insert here is pretty incomplete/bogus, but it'll serve to get
+ * us launched. RelationCacheInitializePhase2() will read the real data
+ * from pg_class and replace what we've done here.
*/
relation->rd_rel = (Form_pg_class) palloc0(CLASS_TUPLE_SIZE);
@@ -1214,10 +1210,9 @@ formrdesc(const char *relationName, Oid relationReltype,
relation->rd_rel->reltype = relationReltype;
/*
- * It's important to distinguish between shared and non-shared
- * relations, even at bootstrap time, to make sure we know where they
- * are stored. At present, all relations that formrdesc is used for
- * are not shared.
+ * It's important to distinguish between shared and non-shared relations,
+ * even at bootstrap time, to make sure we know where they are stored. At
+ * present, all relations that formrdesc is used for are not shared.
*/
relation->rd_rel->relisshared = false;
@@ -1231,8 +1226,8 @@ formrdesc(const char *relationName, Oid relationReltype,
* initialize attribute tuple form
*
* Unlike the case with the relation tuple, this data had better be right
- * because it will never be replaced. The input values must be
- * correctly defined by macros in src/include/catalog/ headers.
+ * because it will never be replaced. The input values must be correctly
+ * defined by macros in src/include/catalog/ headers.
*/
relation->rd_att = CreateTemplateTupleDesc(natts, hasoids);
relation->rd_att->tdtypeid = relationReltype;
@@ -1361,8 +1356,8 @@ RelationIdGetRelation(Oid relationId)
return rd;
/*
- * no reldesc in the cache, so have RelationBuildDesc() build one and
- * add it.
+ * no reldesc in the cache, so have RelationBuildDesc() build one and add
+ * it.
*/
rd = RelationBuildDesc(relationId, NULL);
if (RelationIsValid(rd))
@@ -1454,11 +1449,12 @@ RelationReloadClassinfo(Relation relation)
/* Should be called only for invalidated nailed indexes */
Assert(relation->rd_isnailed && !relation->rd_isvalid &&
relation->rd_rel->relkind == RELKIND_INDEX);
+
/*
* Read the pg_class row
*
- * Don't try to use an indexscan of pg_class_oid_index to reload the
- * info for pg_class_oid_index ...
+ * Don't try to use an indexscan of pg_class_oid_index to reload the info for
+ * pg_class_oid_index ...
*/
indexOK = (RelationGetRelid(relation) != ClassOidIndexId);
pg_class_tuple = ScanPgRelation(RelationGetRelid(relation), indexOK);
@@ -1492,25 +1488,25 @@ RelationClearRelation(Relation relation, bool rebuild)
/*
* Make sure smgr and lower levels close the relation's files, if they
- * weren't closed already. If the relation is not getting deleted,
- * the next smgr access should reopen the files automatically. This
- * ensures that the low-level file access state is updated after, say,
- * a vacuum truncation.
+ * weren't closed already. If the relation is not getting deleted, the
+ * next smgr access should reopen the files automatically. This ensures
+ * that the low-level file access state is updated after, say, a vacuum
+ * truncation.
*/
RelationCloseSmgr(relation);
/*
- * Never, never ever blow away a nailed-in system relation, because
- * we'd be unable to recover. However, we must reset rd_targblock, in
- * case we got called because of a relation cache flush that was
- * triggered by VACUUM.
+ * Never, never ever blow away a nailed-in system relation, because we'd
+ * be unable to recover. However, we must reset rd_targblock, in case we
+ * got called because of a relation cache flush that was triggered by
+ * VACUUM.
*
- * If it's a nailed index, then we need to re-read the pg_class row to
- * see if its relfilenode changed. We can't necessarily do that here,
- * because we might be in a failed transaction. We assume it's okay
- * to do it if there are open references to the relcache entry (cf
- * notes for AtEOXact_RelationCache). Otherwise just mark the entry
- * as possibly invalid, and it'll be fixed when next opened.
+ * If it's a nailed index, then we need to re-read the pg_class row to see if
+ * its relfilenode changed. We can't necessarily do that here, because we
+ * might be in a failed transaction. We assume it's okay to do it if
+ * there are open references to the relcache entry (cf notes for
+ * AtEOXact_RelationCache). Otherwise just mark the entry as possibly
+ * invalid, and it'll be fixed when next opened.
*/
if (relation->rd_isnailed)
{
@@ -1542,8 +1538,8 @@ RelationClearRelation(Relation relation, bool rebuild)
* Free all the subsidiary data structures of the relcache entry. We
* cannot free rd_att if we are trying to rebuild the entry, however,
* because pointers to it may be cached in various places. The rule
- * manager might also have pointers into the rewrite rules. So to
- * begin with, we can only get rid of these fields:
+ * manager might also have pointers into the rewrite rules. So to begin
+ * with, we can only get rid of these fields:
*/
FreeTriggerDesc(relation->trigdesc);
if (relation->rd_indextuple)
@@ -1558,9 +1554,9 @@ RelationClearRelation(Relation relation, bool rebuild)
/*
* If we're really done with the relcache entry, blow it away. But if
- * someone is still using it, reconstruct the whole deal without
- * moving the physical RelationData record (so that the someone's
- * pointer is still valid).
+ * someone is still using it, reconstruct the whole deal without moving
+ * the physical RelationData record (so that the someone's pointer is
+ * still valid).
*/
if (!rebuild)
{
@@ -1574,12 +1570,12 @@ RelationClearRelation(Relation relation, bool rebuild)
else
{
/*
- * When rebuilding an open relcache entry, must preserve ref count
- * and rd_createSubid state. Also attempt to preserve the
- * tupledesc and rewrite-rule substructures in place.
+ * When rebuilding an open relcache entry, must preserve ref count and
+ * rd_createSubid state. Also attempt to preserve the tupledesc and
+ * rewrite-rule substructures in place.
*
- * Note that this process does not touch CurrentResourceOwner; which
- * is good because whatever ref counts the entry may have do not
+ * Note that this process does not touch CurrentResourceOwner; which is
+ * good because whatever ref counts the entry may have do not
* necessarily belong to that resource owner.
*/
Oid save_relid = RelationGetRelid(relation);
@@ -1773,8 +1769,8 @@ RelationCacheInvalidate(void)
{
/*
* Add this entry to list of stuff to rebuild in second pass.
- * pg_class_oid_index goes on the front of rebuildFirstList,
- * other nailed indexes on the back, and everything else into
+ * pg_class_oid_index goes on the front of rebuildFirstList, other
+ * nailed indexes on the back, and everything else into
* rebuildList (in no particular order).
*/
if (relation->rd_isnailed &&
@@ -1793,9 +1789,9 @@ RelationCacheInvalidate(void)
rebuildList = list_concat(rebuildFirstList, rebuildList);
/*
- * Now zap any remaining smgr cache entries. This must happen before
- * we start to rebuild entries, since that may involve catalog fetches
- * which will re-open catalog files.
+ * Now zap any remaining smgr cache entries. This must happen before we
+ * start to rebuild entries, since that may involve catalog fetches which
+ * will re-open catalog files.
*/
smgrcloseall();
@@ -1832,13 +1828,13 @@ AtEOXact_RelationCache(bool isCommit)
/*
* To speed up transaction exit, we want to avoid scanning the relcache
- * unless there is actually something for this routine to do. Other
- * than the debug-only Assert checks, most transactions don't create
- * any work for us to do here, so we keep a static flag that gets set
- * if there is anything to do. (Currently, this means either a relation
- * is created in the current xact, or an index list is forced.) For
- * simplicity, the flag remains set till end of top-level transaction,
- * even though we could clear it at subtransaction end in some cases.
+ * unless there is actually something for this routine to do. Other than
+ * the debug-only Assert checks, most transactions don't create any work
+ * for us to do here, so we keep a static flag that gets set if there is
+ * anything to do. (Currently, this means either a relation is created in
+ * the current xact, or an index list is forced.) For simplicity, the
+ * flag remains set till end of top-level transaction, even though we
+ * could clear it at subtransaction end in some cases.
*/
if (!need_eoxact_work
#ifdef USE_ASSERT_CHECKING
@@ -1857,10 +1853,9 @@ AtEOXact_RelationCache(bool isCommit)
* The relcache entry's ref count should be back to its normal
* not-in-a-transaction state: 0 unless it's nailed in cache.
*
- * In bootstrap mode, this is NOT true, so don't check it ---
- * the bootstrap code expects relations to stay open across
- * start/commit transaction calls. (That seems bogus, but it's
- * not worth fixing.)
+ * In bootstrap mode, this is NOT true, so don't check it --- the
+ * bootstrap code expects relations to stay open across start/commit
+ * transaction calls. (That seems bogus, but it's not worth fixing.)
*/
#ifdef USE_ASSERT_CHECKING
if (!IsBootstrapProcessingMode())
@@ -1939,8 +1934,8 @@ AtEOSubXact_RelationCache(bool isCommit, SubTransactionId mySubid,
/*
* Is it a relation created in the current subtransaction?
*
- * During subcommit, mark it as belonging to the parent, instead.
- * During subabort, simply delete the relcache entry.
+ * During subcommit, mark it as belonging to the parent, instead. During
+ * subabort, simply delete the relcache entry.
*/
if (relation->rd_createSubid == mySubid)
{
@@ -2041,11 +2036,10 @@ RelationBuildLocalRelation(const char *relname,
/*
* create a new tuple descriptor from the one passed in. We do this
- * partly to copy it into the cache context, and partly because the
- * new relation can't have any defaults or constraints yet; they have
- * to be added in later steps, because they require additions to
- * multiple system catalogs. We can copy attnotnull constraints here,
- * however.
+ * partly to copy it into the cache context, and partly because the new
+ * relation can't have any defaults or constraints yet; they have to be
+ * added in later steps, because they require additions to multiple system
+ * catalogs. We can copy attnotnull constraints here, however.
*/
rel->rd_att = CreateTupleDescCopy(tupDesc);
has_not_null = false;
@@ -2079,9 +2073,9 @@ RelationBuildLocalRelation(const char *relname,
rel->rd_rel->relowner = BOOTSTRAP_SUPERUSERID;
/*
- * Insert relation physical and logical identifiers (OIDs) into the
- * right places. Note that the physical ID (relfilenode) is initially
- * the same as the logical ID (OID).
+ * Insert relation physical and logical identifiers (OIDs) into the right
+ * places. Note that the physical ID (relfilenode) is initially the same
+ * as the logical ID (OID).
*/
rel->rd_rel->relisshared = shared_relation;
@@ -2157,8 +2151,8 @@ RelationCacheInitialize(void)
/*
* Try to load the relcache cache file. If successful, we're done for
- * now. Otherwise, initialize the cache with pre-made descriptors for
- * the critical "nailed-in" system catalogs.
+ * now. Otherwise, initialize the cache with pre-made descriptors for the
+ * critical "nailed-in" system catalogs.
*/
if (IsBootstrapProcessingMode() ||
!load_relcache_init_file())
@@ -2197,24 +2191,22 @@ RelationCacheInitializePhase2(void)
return;
/*
- * If we didn't get the critical system indexes loaded into relcache,
- * do so now. These are critical because the catcache depends on them
- * for catcache fetches that are done during relcache load. Thus, we
- * have an infinite-recursion problem. We can break the recursion by
- * doing heapscans instead of indexscans at certain key spots. To
- * avoid hobbling performance, we only want to do that until we have
- * the critical indexes loaded into relcache. Thus, the flag
- * criticalRelcachesBuilt is used to decide whether to do heapscan or
- * indexscan at the key spots, and we set it true after we've loaded
- * the critical indexes.
+ * If we didn't get the critical system indexes loaded into relcache, do
+ * so now. These are critical because the catcache depends on them for
+ * catcache fetches that are done during relcache load. Thus, we have an
+ * infinite-recursion problem. We can break the recursion by doing
+ * heapscans instead of indexscans at certain key spots. To avoid hobbling
+ * performance, we only want to do that until we have the critical indexes
+ * loaded into relcache. Thus, the flag criticalRelcachesBuilt is used to
+ * decide whether to do heapscan or indexscan at the key spots, and we set
+ * it true after we've loaded the critical indexes.
*
- * The critical indexes are marked as "nailed in cache", partly to make
- * it easy for load_relcache_init_file to count them, but mainly
- * because we cannot flush and rebuild them once we've set
- * criticalRelcachesBuilt to true. (NOTE: perhaps it would be
- * possible to reload them by temporarily setting
- * criticalRelcachesBuilt to false again. For now, though, we just
- * nail 'em in.)
+ * The critical indexes are marked as "nailed in cache", partly to make it
+ * easy for load_relcache_init_file to count them, but mainly because we
+ * cannot flush and rebuild them once we've set criticalRelcachesBuilt to
+ * true. (NOTE: perhaps it would be possible to reload them by
+ * temporarily setting criticalRelcachesBuilt to false again. For now,
+ * though, we just nail 'em in.)
*/
if (!criticalRelcachesBuilt)
{
@@ -2240,12 +2232,12 @@ RelationCacheInitializePhase2(void)
}
/*
- * Now, scan all the relcache entries and update anything that might
- * be wrong in the results from formrdesc or the relcache cache file.
- * If we faked up relcache entries using formrdesc, then read the real
- * pg_class rows and replace the fake entries with them. Also, if any
- * of the relcache entries have rules or triggers, load that info the
- * hard way since it isn't recorded in the cache file.
+ * Now, scan all the relcache entries and update anything that might be
+ * wrong in the results from formrdesc or the relcache cache file. If we
+ * faked up relcache entries using formrdesc, then read the real pg_class
+ * rows and replace the fake entries with them. Also, if any of the
+ * relcache entries have rules or triggers, load that info the hard way
+ * since it isn't recorded in the cache file.
*/
hash_seq_init(&status, RelationIdCache);
@@ -2262,7 +2254,7 @@ RelationCacheInitializePhase2(void)
Form_pg_class relp;
htup = SearchSysCache(RELOID,
- ObjectIdGetDatum(RelationGetRelid(relation)),
+ ObjectIdGetDatum(RelationGetRelid(relation)),
0, 0, 0);
if (!HeapTupleIsValid(htup))
elog(FATAL, "cache lookup failed for relation %u",
@@ -2311,11 +2303,10 @@ RelationCacheInitializePhase3(void)
if (needNewCacheFile)
{
/*
- * Force all the catcaches to finish initializing and thereby open
- * the catalogs and indexes they use. This will preload the
- * relcache with entries for all the most important system
- * catalogs and indexes, so that the init file will be most useful
- * for future backends.
+ * Force all the catcaches to finish initializing and thereby open the
+ * catalogs and indexes they use. This will preload the relcache with
+ * entries for all the most important system catalogs and indexes, so
+ * that the init file will be most useful for future backends.
*/
InitCatalogCachePhase2();
@@ -2349,7 +2340,7 @@ GetPgIndexDescriptor(void)
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
pgindexdesc = CreateTemplateTupleDesc(Natts_pg_index, false);
- pgindexdesc->tdtypeid = RECORDOID; /* not right, but we don't care */
+ pgindexdesc->tdtypeid = RECORDOID; /* not right, but we don't care */
pgindexdesc->tdtypmod = -1;
for (i = 0; i < Natts_pg_index; i++)
@@ -2405,7 +2396,7 @@ AttrDefaultFetch(Relation relation)
continue;
if (attrdef[i].adbin != NULL)
elog(WARNING, "multiple attrdef records found for attr %s of rel %s",
- NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname),
+ NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname),
RelationGetRelationName(relation));
else
found++;
@@ -2415,12 +2406,12 @@ AttrDefaultFetch(Relation relation)
adrel->rd_att, &isnull);
if (isnull)
elog(WARNING, "null adbin for attr %s of rel %s",
- NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname),
+ NameStr(relation->rd_att->attrs[adform->adnum - 1]->attname),
RelationGetRelationName(relation));
else
attrdef[i].adbin = MemoryContextStrdup(CacheMemoryContext,
- DatumGetCString(DirectFunctionCall1(textout,
- val)));
+ DatumGetCString(DirectFunctionCall1(textout,
+ val)));
break;
}
@@ -2472,7 +2463,7 @@ CheckConstraintFetch(Relation relation)
RelationGetRelationName(relation));
check[found].ccname = MemoryContextStrdup(CacheMemoryContext,
- NameStr(conform->conname));
+ NameStr(conform->conname));
/* Grab and test conbin is actually set */
val = fastgetattr(htup,
@@ -2483,8 +2474,8 @@ CheckConstraintFetch(Relation relation)
RelationGetRelationName(relation));
check[found].ccbin = MemoryContextStrdup(CacheMemoryContext,
- DatumGetCString(DirectFunctionCall1(textout,
- val)));
+ DatumGetCString(DirectFunctionCall1(textout,
+ val)));
found++;
}
@@ -2514,7 +2505,7 @@ CheckConstraintFetch(Relation relation)
*
* Since shared cache inval causes the relcache's copy of the list to go away,
* we return a copy of the list palloc'd in the caller's context. The caller
- * may list_free() the returned list after scanning it. This is necessary
+ * may list_free() the returned list after scanning it. This is necessary
* since the caller will typically be doing syscache lookups on the relevant
* indexes, and syscache lookup could cause SI messages to be processed!
*
@@ -2539,10 +2530,10 @@ RelationGetIndexList(Relation relation)
return list_copy(relation->rd_indexlist);
/*
- * We build the list we intend to return (in the caller's context)
- * while doing the scan. After successfully completing the scan, we
- * copy that list into the relcache entry. This avoids cache-context
- * memory leakage if we get some sort of error partway through.
+ * We build the list we intend to return (in the caller's context) while
+ * doing the scan. After successfully completing the scan, we copy that
+ * list into the relcache entry. This avoids cache-context memory leakage
+ * if we get some sort of error partway through.
*/
result = NIL;
oidIndex = InvalidOid;
@@ -2662,9 +2653,9 @@ RelationGetOidIndex(Relation relation)
List *ilist;
/*
- * If relation doesn't have OIDs at all, caller is probably confused.
- * (We could just silently return InvalidOid, but it seems better to
- * throw an assertion.)
+ * If relation doesn't have OIDs at all, caller is probably confused. (We
+ * could just silently return InvalidOid, but it seems better to throw an
+ * assertion.)
*/
Assert(relation->rd_rel->relhasoids);
@@ -2707,10 +2698,9 @@ RelationGetIndexExpressions(Relation relation)
return NIL;
/*
- * We build the tree we intend to return in the caller's context.
- * After successfully completing the work, we copy it into the
- * relcache entry. This avoids problems if we get some sort of error
- * partway through.
+ * We build the tree we intend to return in the caller's context. After
+ * successfully completing the work, we copy it into the relcache entry.
+ * This avoids problems if we get some sort of error partway through.
*/
exprsDatum = heap_getattr(relation->rd_indextuple,
Anum_pg_index_indexprs,
@@ -2775,10 +2765,9 @@ RelationGetIndexPredicate(Relation relation)
return NIL;
/*
- * We build the tree we intend to return in the caller's context.
- * After successfully completing the work, we copy it into the
- * relcache entry. This avoids problems if we get some sort of error
- * partway through.
+ * We build the tree we intend to return in the caller's context. After
+ * successfully completing the work, we copy it into the relcache entry.
+ * This avoids problems if we get some sort of error partway through.
*/
predDatum = heap_getattr(relation->rd_indextuple,
Anum_pg_index_indpred,
@@ -2795,8 +2784,8 @@ RelationGetIndexPredicate(Relation relation)
* will be comparing it to similarly-processed qual clauses, and may fail
* to detect valid matches without this. This must match the processing
* done to qual clauses in preprocess_expression()! (We can skip the
- * stuff involving subqueries, however, since we don't allow any in
- * index predicates.)
+ * stuff involving subqueries, however, since we don't allow any in index
+ * predicates.)
*/
result = (List *) eval_const_expressions((Node *) result);
@@ -2897,9 +2886,9 @@ load_relcache_init_file(void)
}
/*
- * Read the index relcache entries from the file. Note we will not
- * enter any of them into the cache if the read fails partway through;
- * this helps to guard against broken init files.
+ * Read the index relcache entries from the file. Note we will not enter
+ * any of them into the cache if the read fails partway through; this
+ * helps to guard against broken init files.
*/
max_rels = 100;
rels = (Relation *) palloc(max_rels * sizeof(Relation));
@@ -3086,10 +3075,10 @@ load_relcache_init_file(void)
/*
* Rules and triggers are not saved (mainly because the internal
- * format is complex and subject to change). They must be rebuilt
- * if needed by RelationCacheInitializePhase2. This is not
- * expected to be a big performance hit since few system catalogs
- * have such. Ditto for index expressions and predicates.
+ * format is complex and subject to change). They must be rebuilt if
+ * needed by RelationCacheInitializePhase2. This is not expected to
+ * be a big performance hit since few system catalogs have such.
+ * Ditto for index expressions and predicates.
*/
rel->rd_rules = NULL;
rel->rd_rulescxt = NULL;
@@ -3114,17 +3103,17 @@ load_relcache_init_file(void)
/*
* Recompute lock and physical addressing info. This is needed in
- * case the pg_internal.init file was copied from some other
- * database by CREATE DATABASE.
+ * case the pg_internal.init file was copied from some other database
+ * by CREATE DATABASE.
*/
RelationInitLockInfo(rel);
RelationInitPhysicalAddr(rel);
}
/*
- * We reached the end of the init file without apparent problem. Did
- * we get the right number of nailed items? (This is a useful
- * crosscheck in case the set of critical rels or indexes changes.)
+ * We reached the end of the init file without apparent problem. Did we
+ * get the right number of nailed items? (This is a useful crosscheck in
+ * case the set of critical rels or indexes changes.)
*/
if (nailed_rels != NUM_CRITICAL_RELS ||
nailed_indexes != NUM_CRITICAL_INDEXES)
@@ -3150,9 +3139,9 @@ load_relcache_init_file(void)
return true;
/*
- * init file is broken, so do it the hard way. We don't bother trying
- * to free the clutter we just allocated; it's not in the relcache so
- * it won't hurt.
+ * init file is broken, so do it the hard way. We don't bother trying to
+ * free the clutter we just allocated; it's not in the relcache so it
+ * won't hurt.
*/
read_failed:
pfree(rels);
@@ -3180,8 +3169,8 @@ write_relcache_init_file(void)
/*
* We must write a temporary file and rename it into place. Otherwise,
- * another backend starting at about the same time might crash trying
- * to read the partially-complete file.
+ * another backend starting at about the same time might crash trying to
+ * read the partially-complete file.
*/
snprintf(tempfilename, sizeof(tempfilename), "%s/%s.%d",
DatabasePath, RELCACHE_INIT_FILENAME, MyProcPid);
@@ -3201,7 +3190,7 @@ write_relcache_init_file(void)
(errcode_for_file_access(),
errmsg("could not create relation-cache initialization file \"%s\": %m",
tempfilename),
- errdetail("Continuing anyway, but there's something wrong.")));
+ errdetail("Continuing anyway, but there's something wrong.")));
return;
}
@@ -3308,11 +3297,11 @@ write_relcache_init_file(void)
/*
* Now we have to check whether the data we've so painstakingly
- * accumulated is already obsolete due to someone else's
- * just-committed catalog changes. If so, we just delete the temp
- * file and leave it to the next backend to try again. (Our own
- * relcache entries will be updated by SI message processing, but we
- * can't be sure whether what we wrote out was up-to-date.)
+ * accumulated is already obsolete due to someone else's just-committed
+ * catalog changes. If so, we just delete the temp file and leave it to
+ * the next backend to try again. (Our own relcache entries will be
+ * updated by SI message processing, but we can't be sure whether what we
+ * wrote out was up-to-date.)
*
* This mustn't run concurrently with RelationCacheInitFileInvalidate, so
* grab a serialization lock for the duration.
@@ -3323,8 +3312,8 @@ write_relcache_init_file(void)
AcceptInvalidationMessages();
/*
- * If we have received any SI relcache invals since backend start,
- * assume we may have written out-of-date data.
+ * If we have received any SI relcache invals since backend start, assume
+ * we may have written out-of-date data.
*/
if (relcacheInvalsReceived == 0L)
{
@@ -3332,10 +3321,10 @@ write_relcache_init_file(void)
* OK, rename the temp file to its final name, deleting any
* previously-existing init file.
*
- * Note: a failure here is possible under Cygwin, if some other
- * backend is holding open an unlinked-but-not-yet-gone init file.
- * So treat this as a noncritical failure; just remove the useless
- * temp file on failure.
+ * Note: a failure here is possible under Cygwin, if some other backend
+ * is holding open an unlinked-but-not-yet-gone init file. So treat
+ * this as a noncritical failure; just remove the useless temp file on
+ * failure.
*/
if (rename(tempfilename, finalfilename) < 0)
unlink(tempfilename);
@@ -3401,11 +3390,10 @@ RelationCacheInitFileInvalidate(bool beforeSend)
/*
* We need to interlock this against write_relcache_init_file, to
* guard against possibility that someone renames a new-but-
- * already-obsolete init file into place just after we unlink.
- * With the interlock, it's certain that write_relcache_init_file
- * will notice our SI inval message before renaming into place, or
- * else that we will execute second and successfully unlink the
- * file.
+ * already-obsolete init file into place just after we unlink. With
+ * the interlock, it's certain that write_relcache_init_file will
+ * notice our SI inval message before renaming into place, or else
+ * that we will execute second and successfully unlink the file.
*/
LWLockAcquire(RelCacheInitLock, LW_EXCLUSIVE);
unlink(initfilename);
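
The write_relcache_init_file hunks above describe the write-temp-then-rename pattern for pg_internal.init: write under a per-backend name, then rename into place only if no relcache invals arrived meanwhile, and treat a rename failure as noncritical. A stand-alone sketch of that pattern; the locking and the inval-counter check are represented only by comments, and the file names are illustrative:

    #include <stdio.h>
    #include <unistd.h>

    /* Sketch only: the real code uses AllocateFile and RelCacheInitLock. */
    static int
    write_init_file_sketch(const char *dir, int mypid)
    {
        char    tempname[256];
        char    finalname[256];
        FILE   *fp;

        snprintf(tempname, sizeof(tempname), "%s/pg_internal.init.%d", dir, mypid);
        snprintf(finalname, sizeof(finalname), "%s/pg_internal.init", dir);

        fp = fopen(tempname, "w");
        if (fp == NULL)
            return -1;                  /* noncritical: just give up */
        /* ... write the relcache entries here ... */
        fclose(fp);

        /*
         * In the real code this step runs under a serialization lock and only
         * if no shared-inval relcache messages arrived while we were writing.
         */
        if (rename(tempname, finalname) < 0)
        {
            unlink(tempname);           /* e.g. open-but-unlinked file on Cygwin */
            return -1;
        }
        return 0;
    }
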
diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c
index cd24460857f..1ee237fafd9 100644
--- a/src/backend/utils/cache/syscache.c
+++ b/src/backend/utils/cache/syscache.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.100 2005/06/28 05:09:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.101 2005/10/15 02:49:32 momjian Exp $
*
* NOTES
* These routines allow the parser/planner/executor to perform
@@ -56,7 +56,7 @@
Add your entry to the cacheinfo[] array below. All cache lists are
alphabetical, so add it in the proper place. Specify the relation
- OID, index OID, number of keys, and key attribute numbers. If the
+ OID, index OID, number of keys, and key attribute numbers. If the
relation contains tuples that are associated with a particular relation
(for example, its attributes, rules, triggers, etc) then specify the
attribute number that contains the OID of the associated relation.
@@ -92,7 +92,7 @@ struct cachedesc
};
static const struct cachedesc cacheinfo[] = {
- {AggregateRelationId, /* AGGFNOID */
+ {AggregateRelationId, /* AGGFNOID */
AggregateFnoidIndexId,
0,
1,
@@ -102,7 +102,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {AccessMethodRelationId, /* AMNAME */
+ {AccessMethodRelationId, /* AMNAME */
AmNameIndexId,
0,
1,
@@ -112,7 +112,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {AccessMethodRelationId, /* AMOID */
+ {AccessMethodRelationId, /* AMOID */
AmOidIndexId,
0,
1,
@@ -152,7 +152,7 @@ static const struct cachedesc cacheinfo[] = {
Anum_pg_amproc_amprocnum,
0
}},
- {AttributeRelationId, /* ATTNAME */
+ {AttributeRelationId, /* ATTNAME */
AttributeRelidNameIndexId,
Anum_pg_attribute_attrelid,
2,
@@ -162,7 +162,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {AttributeRelationId, /* ATTNUM */
+ {AttributeRelationId, /* ATTNUM */
AttributeRelidNumIndexId,
Anum_pg_attribute_attrelid,
2,
@@ -172,7 +172,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {AuthMemRelationId, /* AUTHMEMMEMROLE */
+ {AuthMemRelationId, /* AUTHMEMMEMROLE */
AuthMemMemRoleIndexId,
0,
2,
@@ -182,7 +182,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {AuthMemRelationId, /* AUTHMEMROLEMEM */
+ {AuthMemRelationId, /* AUTHMEMROLEMEM */
AuthMemRoleMemIndexId,
0,
2,
@@ -192,7 +192,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {AuthIdRelationId, /* AUTHNAME */
+ {AuthIdRelationId, /* AUTHNAME */
AuthIdRolnameIndexId,
0,
1,
@@ -202,7 +202,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {AuthIdRelationId, /* AUTHOID */
+ {AuthIdRelationId, /* AUTHOID */
AuthIdOidIndexId,
0,
1,
@@ -213,7 +213,7 @@ static const struct cachedesc cacheinfo[] = {
0
}},
{
- CastRelationId, /* CASTSOURCETARGET */
+ CastRelationId, /* CASTSOURCETARGET */
CastSourceTargetIndexId,
0,
2,
@@ -223,7 +223,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {OperatorClassRelationId, /* CLAAMNAMENSP */
+ {OperatorClassRelationId, /* CLAAMNAMENSP */
OpclassAmNameNspIndexId,
0,
3,
@@ -233,7 +233,7 @@ static const struct cachedesc cacheinfo[] = {
Anum_pg_opclass_opcnamespace,
0
}},
- {OperatorClassRelationId, /* CLAOID */
+ {OperatorClassRelationId, /* CLAOID */
OpclassOidIndexId,
0,
1,
@@ -243,7 +243,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {ConversionRelationId, /* CONDEFAULT */
+ {ConversionRelationId, /* CONDEFAULT */
ConversionDefaultIndexId,
0,
4,
@@ -253,7 +253,7 @@ static const struct cachedesc cacheinfo[] = {
Anum_pg_conversion_contoencoding,
ObjectIdAttributeNumber,
}},
- {ConversionRelationId, /* CONNAMENSP */
+ {ConversionRelationId, /* CONNAMENSP */
ConversionNameNspIndexId,
0,
2,
@@ -263,7 +263,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {ConversionRelationId, /* CONOID */
+ {ConversionRelationId, /* CONOID */
ConversionOidIndexId,
0,
1,
@@ -273,7 +273,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {IndexRelationId, /* INDEXRELID */
+ {IndexRelationId, /* INDEXRELID */
IndexRelidIndexId,
Anum_pg_index_indrelid,
1,
@@ -283,7 +283,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {InheritsRelationId, /* INHRELID */
+ {InheritsRelationId, /* INHRELID */
InheritsRelidSeqnoIndexId,
Anum_pg_inherits_inhrelid,
2,
@@ -293,7 +293,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {LanguageRelationId, /* LANGNAME */
+ {LanguageRelationId, /* LANGNAME */
LanguageNameIndexId,
0,
1,
@@ -303,7 +303,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {LanguageRelationId, /* LANGOID */
+ {LanguageRelationId, /* LANGOID */
LanguageOidIndexId,
0,
1,
@@ -313,7 +313,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {NamespaceRelationId, /* NAMESPACENAME */
+ {NamespaceRelationId, /* NAMESPACENAME */
NamespaceNameIndexId,
0,
1,
@@ -323,7 +323,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {NamespaceRelationId, /* NAMESPACEOID */
+ {NamespaceRelationId, /* NAMESPACEOID */
NamespaceOidIndexId,
0,
1,
@@ -333,7 +333,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {OperatorRelationId, /* OPERNAMENSP */
+ {OperatorRelationId, /* OPERNAMENSP */
OperatorNameNspIndexId,
0,
4,
@@ -343,7 +343,7 @@ static const struct cachedesc cacheinfo[] = {
Anum_pg_operator_oprright,
Anum_pg_operator_oprnamespace
}},
- {OperatorRelationId, /* OPEROID */
+ {OperatorRelationId, /* OPEROID */
OperatorOidIndexId,
0,
1,
@@ -353,7 +353,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {ProcedureRelationId, /* PROCNAMEARGSNSP */
+ {ProcedureRelationId, /* PROCNAMEARGSNSP */
ProcedureNameArgsNspIndexId,
0,
3,
@@ -363,7 +363,7 @@ static const struct cachedesc cacheinfo[] = {
Anum_pg_proc_pronamespace,
0
}},
- {ProcedureRelationId, /* PROCOID */
+ {ProcedureRelationId, /* PROCOID */
ProcedureOidIndexId,
0,
1,
@@ -373,7 +373,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {RelationRelationId, /* RELNAMENSP */
+ {RelationRelationId, /* RELNAMENSP */
ClassNameNspIndexId,
ObjectIdAttributeNumber,
2,
@@ -383,7 +383,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {RelationRelationId, /* RELOID */
+ {RelationRelationId, /* RELOID */
ClassOidIndexId,
ObjectIdAttributeNumber,
1,
@@ -393,7 +393,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {RewriteRelationId, /* RULERELNAME */
+ {RewriteRelationId, /* RULERELNAME */
RewriteRelRulenameIndexId,
Anum_pg_rewrite_ev_class,
2,
@@ -403,7 +403,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {StatisticRelationId, /* STATRELATT */
+ {StatisticRelationId, /* STATRELATT */
StatisticRelidAttnumIndexId,
Anum_pg_statistic_starelid,
2,
@@ -413,7 +413,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {TypeRelationId, /* TYPENAMENSP */
+ {TypeRelationId, /* TYPENAMENSP */
TypeNameNspIndexId,
Anum_pg_type_typrelid,
2,
@@ -423,7 +423,7 @@ static const struct cachedesc cacheinfo[] = {
0,
0
}},
- {TypeRelationId, /* TYPEOID */
+ {TypeRelationId, /* TYPEOID */
TypeOidIndexId,
Anum_pg_type_typrelid,
1,
@@ -435,7 +435,8 @@ static const struct cachedesc cacheinfo[] = {
}}
};
-static CatCache *SysCache[lengthof(cacheinfo)];
+static CatCache *SysCache[
+ lengthof(cacheinfo)];
static int SysCacheSize = lengthof(cacheinfo);
static bool CacheInitialized = false;
@@ -697,10 +698,10 @@ SysCacheGetAttr(int cacheId, HeapTuple tup,
bool *isNull)
{
/*
- * We just need to get the TupleDesc out of the cache entry, and then
- * we can apply heap_getattr(). We expect that the cache control data
- * is currently valid --- if the caller recently fetched the tuple,
- * then it should be.
+ * We just need to get the TupleDesc out of the cache entry, and then we
+ * can apply heap_getattr(). We expect that the cache control data is
+ * currently valid --- if the caller recently fetched the tuple, then it
+ * should be.
*/
if (cacheId < 0 || cacheId >= SysCacheSize)
elog(ERROR, "invalid cache id: %d", cacheId);
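
Each cacheinfo[] entry above declares the relation, index, and key columns for one cache id; callers then look tuples up by supplying datums for exactly those keys. A short usage sketch mirroring the RELOID lookups visible in the relcache.c hunks; the relid variable and the Anum_pg_class_relname attribute number are illustrative assumptions, and error handling is abbreviated:

    /* Sketch: typical syscache lookup/attr-fetch/release cycle (backend code). */
    HeapTuple   tup;
    Datum       relname;
    bool        isnull;

    tup = SearchSysCache(RELOID,
                         ObjectIdGetDatum(relid),   /* one key, per cacheinfo[] */
                         0, 0, 0);
    if (!HeapTupleIsValid(tup))
        elog(ERROR, "cache lookup failed for relation %u", relid);

    relname = SysCacheGetAttr(RELOID, tup, Anum_pg_class_relname, &isnull);

    ReleaseSysCache(tup);
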
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index b0b890516df..ff9cc975437 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.14 2005/05/29 04:23:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.15 2005/10/15 02:49:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -135,9 +135,9 @@ lookup_type_cache(Oid type_id, int flags)
if (typentry == NULL)
{
/*
- * If we didn't find one, we want to make one. But first look up
- * the pg_type row, just to make sure we don't make a cache entry
- * for an invalid type OID.
+ * If we didn't find one, we want to make one. But first look up the
+ * pg_type row, just to make sure we don't make a cache entry for an
+ * invalid type OID.
*/
HeapTuple tp;
Form_pg_type typtup;
@@ -190,8 +190,8 @@ lookup_type_cache(Oid type_id, int flags)
{
/*
* If we find a btree opclass where previously we only found a
- * hash opclass, forget the hash equality operator so we can
- * use the btree operator instead.
+ * hash opclass, forget the hash equality operator so we can use
+ * the btree operator instead.
*/
typentry->eq_opr = InvalidOid;
typentry->eq_opr_finfo.fn_oid = InvalidOid;
@@ -224,7 +224,7 @@ lookup_type_cache(Oid type_id, int flags)
if (typentry->btree_opc != InvalidOid)
typentry->gt_opr = get_opclass_member(typentry->btree_opc,
InvalidOid,
- BTGreaterStrategyNumber);
+ BTGreaterStrategyNumber);
}
if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
typentry->cmp_proc == InvalidOid)
@@ -238,9 +238,9 @@ lookup_type_cache(Oid type_id, int flags)
/*
* Set up fmgr lookup info as requested
*
- * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
- * which is not quite right (they're really in DynaHashContext) but
- * this will do for our purposes.
+ * Note: we tell fmgr the finfo structures live in CacheMemoryContext, which
+ * is not quite right (they're really in DynaHashContext) but this will do
+ * for our purposes.
*/
if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
typentry->eq_opr_finfo.fn_oid == InvalidOid &&
@@ -277,9 +277,9 @@ lookup_type_cache(Oid type_id, int flags)
Assert(rel->rd_rel->reltype == typentry->type_id);
/*
- * Notice that we simply store a link to the relcache's tupdesc.
- * Since we are relying on relcache to detect cache flush events,
- * there's not a lot of point to maintaining an independent copy.
+ * Notice that we simply store a link to the relcache's tupdesc. Since
+ * we are relying on relcache to detect cache flush events, there's
+ * not a lot of point to maintaining an independent copy.
*/
typentry->tupDesc = RelationGetDescr(rel);
@@ -316,12 +316,11 @@ lookup_default_opclass(Oid type_id, Oid am_id)
* (either exactly or binary-compatibly, but prefer an exact match).
*
* We could find more than one binary-compatible match, in which case we
- * require the user to specify which one he wants. If we find more
- * than one exact match, then someone put bogus entries in pg_opclass.
+ * require the user to specify which one he wants. If we find more than
+ * one exact match, then someone put bogus entries in pg_opclass.
*
- * This is the same logic as GetDefaultOpClass() in indexcmds.c, except
- * that we consider all opclasses, regardless of the current search
- * path.
+ * This is the same logic as GetDefaultOpClass() in indexcmds.c, except that
+ * we consider all opclasses, regardless of the current search path.
*/
rel = heap_open(OperatorClassRelationId, AccessShareLock);
@@ -361,8 +360,8 @@ lookup_default_opclass(Oid type_id, Oid am_id)
if (nexact != 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("there are multiple default operator classes for data type %s",
- format_type_be(type_id))));
+ errmsg("there are multiple default operator classes for data type %s",
+ format_type_be(type_id))));
if (ncompatible == 1)
return compatibleOid;
@@ -506,7 +505,7 @@ assign_record_type_typmod(TupleDesc tupDesc)
int32 newlen = RecordCacheArrayLen * 2;
RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray,
- newlen * sizeof(TupleDesc));
+ newlen * sizeof(TupleDesc));
RecordCacheArrayLen = newlen;
}
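
For illustration, the repalloc call above is the usual grow-by-doubling idiom for a dynamic array; a minimal standalone sketch, with realloc standing in for repalloc and all names hypothetical:

#include <stdio.h>
#include <stdlib.h>

/* Grow-by-doubling: the same amortized pattern assign_record_type_typmod
 * applies to RecordCacheArray, with realloc standing in for repalloc. */
static int *
ensure_capacity(int *array, int *len, int needed)
{
    if (needed > *len)
    {
        while (*len < needed)
            *len *= 2;
        array = realloc(array, *len * sizeof(int));
        if (array == NULL)
        {
            fprintf(stderr, "out of memory\n");
            exit(1);
        }
    }
    return array;
}

int
main(void)
{
    int     len = 64;
    int    *arr = malloc(len * sizeof(int));
    int     i;

    arr = ensure_capacity(arr, &len, 1000);
    for (i = 0; i < 1000; i++)
        arr[i] = i;
    printf("capacity grew to %d\n", len);
    free(arr);
    return 0;
}
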
diff --git a/src/backend/utils/error/assert.c b/src/backend/utils/error/assert.c
index 43205d07fda..d55c9d4f630 100644
--- a/src/backend/utils/error/assert.c
+++ b/src/backend/utils/error/assert.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/error/assert.c,v 1.30 2004/12/31 22:01:27 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/error/assert.c,v 1.31 2005/10/15 02:49:32 momjian Exp $
*
* NOTE
* This should eventually work with elog()
@@ -42,8 +42,8 @@ ExceptionalCondition(char *conditionName,
#ifdef SLEEP_ON_ASSERT
/*
- * It would be nice to use pg_usleep() here, but only does 2000 sec or
- * 33 minutes, which seems too short.
+ * It would be nice to use pg_usleep() here, but only does 2000 sec or 33
+ * minutes, which seems too short.
*/
sleep(1000000);
#endif
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
index d24242e8409..b4f1000be86 100644
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -25,7 +25,7 @@
* scenario of this sort is "out of memory"; and it's also the nastiest
* to handle because we'd likely also run out of memory while trying to
* report this error! Our escape hatch for this case is to reset the
- * ErrorContext to empty before trying to process the inner error. Since
+ * ErrorContext to empty before trying to process the inner error. Since
* ErrorContext is guaranteed to have at least 8K of space in it (see mcxt.c),
* we should be able to process an "out of memory" message successfully.
* Since we lose the prior error state due to the reset, we won't be able
@@ -42,7 +42,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/error/elog.c,v 1.164 2005/10/14 20:53:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/error/elog.c,v 1.165 2005/10/15 02:49:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -188,8 +188,8 @@ errstart(int elevel, const char *filename, int lineno,
/*
* Now decide whether we need to process this report at all; if it's
- * warning or less and not enabled for logging, just return FALSE
- * without starting up any error logging machinery.
+ * warning or less and not enabled for logging, just return FALSE without
+ * starting up any error logging machinery.
*/
/* Determine whether message is enabled for server log output */
@@ -256,8 +256,8 @@ errstart(int elevel, const char *filename, int lineno,
MemoryContextReset(ErrorContext);
/*
- * If we recurse more than once, the problem might be something
- * broken in a context traceback routine. Abandon them too.
+ * If we recurse more than once, the problem might be something broken
+ * in a context traceback routine. Abandon them too.
*/
if (recursion_depth > 2)
error_context_stack = NULL;
@@ -316,15 +316,15 @@ errfinish(int dummy,...)
CHECK_STACK_DEPTH();
/*
- * Do processing in ErrorContext, which we hope has enough reserved
- * space to report an error.
+ * Do processing in ErrorContext, which we hope has enough reserved space
+ * to report an error.
*/
oldcontext = MemoryContextSwitchTo(ErrorContext);
/*
* Call any context callback functions. Errors occurring in callback
- * functions will be treated as recursive errors --- this ensures we
- * will avoid infinite recursion (see errstart).
+ * functions will be treated as recursive errors --- this ensures we will
+ * avoid infinite recursion (see errstart).
*/
for (econtext = error_context_stack;
econtext != NULL;
@@ -333,34 +333,32 @@ errfinish(int dummy,...)
/*
* If ERROR (not more nor less) we pass it off to the current handler.
- * Printing it and popping the stack is the responsibility of
- * the handler.
+ * Printing it and popping the stack is the responsibility of the handler.
*/
if (elevel == ERROR)
{
/*
- * We do some minimal cleanup before longjmp'ing so that handlers
- * can execute in a reasonably sane state.
+ * We do some minimal cleanup before longjmp'ing so that handlers can
+ * execute in a reasonably sane state.
*/
/* This is just in case the error came while waiting for input */
ImmediateInterruptOK = false;
/*
- * Reset InterruptHoldoffCount in case we ereport'd from
- * inside an interrupt holdoff section. (We assume here that
- * no handler will itself be inside a holdoff section. If
- * necessary, such a handler could save and restore
- * InterruptHoldoffCount for itself, but this should make life
- * easier for most.)
+ * Reset InterruptHoldoffCount in case we ereport'd from inside an
+ * interrupt holdoff section. (We assume here that no handler will
+ * itself be inside a holdoff section. If necessary, such a handler
+ * could save and restore InterruptHoldoffCount for itself, but this
+ * should make life easier for most.)
*/
InterruptHoldoffCount = 0;
- CritSectionCount = 0; /* should be unnecessary, but... */
+ CritSectionCount = 0; /* should be unnecessary, but... */
/*
- * Note that we leave CurrentMemoryContext set to ErrorContext.
- * The handler should reset it to something else soon.
+ * Note that we leave CurrentMemoryContext set to ErrorContext. The
+ * handler should reset it to something else soon.
*/
recursion_depth--;
@@ -370,12 +368,11 @@ errfinish(int dummy,...)
/*
* If we are doing FATAL or PANIC, abort any old-style COPY OUT in
* progress, so that we can report the message before dying. (Without
- * this, pq_putmessage will refuse to send the message at all, which
- * is what we want for NOTICE messages, but not for fatal exits.) This
- * hack is necessary because of poor design of old-style copy
- * protocol. Note we must do this even if client is fool enough to
- * have set client_min_messages above FATAL, so don't look at
- * output_to_client.
+ * this, pq_putmessage will refuse to send the message at all, which is
+ * what we want for NOTICE messages, but not for fatal exits.) This hack
+ * is necessary because of poor design of old-style copy protocol. Note
+ * we must do this even if client is fool enough to have set
+ * client_min_messages above FATAL, so don't look at output_to_client.
*/
if (elevel >= FATAL && whereToSendOutput == Remote)
pq_endcopyout(true);
@@ -412,28 +409,27 @@ errfinish(int dummy,...)
ImmediateInterruptOK = false;
/*
- * If we just reported a startup failure, the client will
- * disconnect on receiving it, so don't send any more to the
- * client.
+ * If we just reported a startup failure, the client will disconnect
+ * on receiving it, so don't send any more to the client.
*/
if (PG_exception_stack == NULL && whereToSendOutput == Remote)
whereToSendOutput = None;
/*
* fflush here is just to improve the odds that we get to see the
- * error message, in case things are so hosed that proc_exit
- * crashes. Any other code you might be tempted to add here
- * should probably be in an on_proc_exit callback instead.
+ * error message, in case things are so hosed that proc_exit crashes.
+ * Any other code you might be tempted to add here should probably be
+ * in an on_proc_exit callback instead.
*/
fflush(stdout);
fflush(stderr);
/*
- * If proc_exit is already running, we exit with nonzero exit code
- * to indicate that something's pretty wrong. We also want to
- * exit with nonzero exit code if not running under the postmaster
- * (for example, if we are being run from the initdb script, we'd
- * better return an error status).
+ * If proc_exit is already running, we exit with nonzero exit code to
+ * indicate that something's pretty wrong. We also want to exit with
+ * nonzero exit code if not running under the postmaster (for example,
+ * if we are being run from the initdb script, we'd better return an
+ * error status).
*/
proc_exit(proc_exit_inprogress || !IsUnderPostmaster);
}
@@ -441,8 +437,8 @@ errfinish(int dummy,...)
if (elevel >= PANIC)
{
/*
- * Serious crash time. Postmaster will observe nonzero process
- * exit status and kill the other backends too.
+ * Serious crash time. Postmaster will observe nonzero process exit
+ * status and kill the other backends too.
*
* XXX: what if we are *in* the postmaster? abort() won't kill our
* children...
@@ -977,8 +973,8 @@ CopyErrorData(void)
ErrorData *newedata;
/*
- * we don't increment recursion_depth because out-of-memory here does
- * not indicate a problem within the error subsystem.
+ * we don't increment recursion_depth because out-of-memory here does not
+ * indicate a problem within the error subsystem.
*/
CHECK_STACK_DEPTH();
@@ -1037,9 +1033,9 @@ void
FlushErrorState(void)
{
/*
- * Reset stack to empty. The only case where it would be more than
- * one deep is if we serviced an error that interrupted construction
- * of another message. We assume control escaped out of that message
+ * Reset stack to empty. The only case where it would be more than one
+ * deep is if we serviced an error that interrupted construction of
+ * another message. We assume control escaped out of that message
* construction and won't ever go back.
*/
errordata_stack_depth = -1;
@@ -1117,7 +1113,7 @@ DebugFileOpen(void)
0666)) < 0)
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\": %m", OutputFileName)));
+ errmsg("could not open file \"%s\": %m", OutputFileName)));
istty = isatty(fd);
close(fd);
@@ -1131,17 +1127,17 @@ DebugFileOpen(void)
OutputFileName)));
/*
- * If the file is a tty and we're running under the postmaster,
- * try to send stdout there as well (if it isn't a tty then stderr
- * will block out stdout, so we may as well let stdout go wherever
- * it was going before).
+ * If the file is a tty and we're running under the postmaster, try to
+ * send stdout there as well (if it isn't a tty then stderr will block
+ * out stdout, so we may as well let stdout go wherever it was going
+ * before).
*/
if (istty && IsUnderPostmaster)
if (!freopen(OutputFileName, "a", stdout))
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not reopen file \"%s\" as stdout: %m",
- OutputFileName)));
+ errmsg("could not reopen file \"%s\" as stdout: %m",
+ OutputFileName)));
}
}
@@ -1156,13 +1152,13 @@ void
set_syslog_parameters(const char *ident, int facility)
{
/*
- * guc.c is likely to call us repeatedly with same parameters, so
- * don't thrash the syslog connection unnecessarily. Also, we do not
- * re-open the connection until needed, since this routine will get called
- * whether or not Log_destination actually mentions syslog.
+ * guc.c is likely to call us repeatedly with same parameters, so don't
+ * thrash the syslog connection unnecessarily. Also, we do not re-open
+ * the connection until needed, since this routine will get called whether
+ * or not Log_destination actually mentions syslog.
*
- * Note that we make our own copy of the ident string rather than relying
- * on guc.c's. This may be overly paranoid, but it ensures that we cannot
+ * Note that we make our own copy of the ident string rather than relying on
+ * guc.c's. This may be overly paranoid, but it ensures that we cannot
* accidentally free a string that syslog is still using.
*/
if (syslog_ident == NULL || strcmp(syslog_ident, ident) != 0 ||
@@ -1212,13 +1208,12 @@ write_syslog(int level, const char *line)
seq++;
/*
- * Our problem here is that many syslog implementations don't handle
- * long messages in an acceptable manner. While this function doesn't
- * help that fact, it does work around by splitting up messages into
- * smaller pieces.
+ * Our problem here is that many syslog implementations don't handle long
+ * messages in an acceptable manner. While this function doesn't help that
+ * fact, it does work around by splitting up messages into smaller pieces.
*
- * We divide into multiple syslog() calls if message is too long
- * or if the message contains embedded NewLine(s) '\n'.
+ * We divide into multiple syslog() calls if message is too long or if the
+ * message contains embedded NewLine(s) '\n'.
*/
len = strlen(line);
if (len > PG_SYSLOG_LIMIT || strchr(line, '\n') != NULL)
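
For illustration, a standalone sketch of that splitting strategy: break on newlines, then chop anything still longer than the limit, preferring a space as the break point. The 128-byte limit and the emit_chunk helper are stand-ins for PG_SYSLOG_LIMIT and the real syslog() call, and the space-search is simplified:

#include <stdio.h>
#include <string.h>

#define CHUNK_LIMIT 128         /* stand-in for PG_SYSLOG_LIMIT */

/* The real code hands each piece to syslog() with a sequence number;
 * here we just print it. */
static void
emit_chunk(int chunk_nr, const char *buf, size_t len)
{
    printf("[%d] %.*s\n", chunk_nr, (int) len, buf);
}

static void
emit_split(const char *line)
{
    int     chunk_nr = 1;

    while (*line)
    {
        size_t  len = strcspn(line, "\n");

        /* chop over-long lines, breaking at a space when we can */
        while (len > CHUNK_LIMIT)
        {
            size_t  piece = CHUNK_LIMIT;
            size_t  i;

            for (i = CHUNK_LIMIT; i > 0; i--)
            {
                if (line[i - 1] == ' ')
                {
                    piece = i;
                    break;
                }
            }
            emit_chunk(chunk_nr++, line, piece);
            line += piece;
            len -= piece;
        }
        emit_chunk(chunk_nr++, line, len);
        line += len;
        if (*line == '\n')
            line++;
    }
}

int
main(void)
{
    emit_split("first line\nsecond line that is short\n");
    return 0;
}
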
@@ -1290,7 +1285,7 @@ write_syslog(int level, const char *line)
static void
write_eventlog(int level, const char *line)
{
- int eventlevel = EVENTLOG_ERROR_TYPE;
+ int eventlevel = EVENTLOG_ERROR_TYPE;
static HANDLE evtHandle = INVALID_HANDLE_VALUE;
if (evtHandle == INVALID_HANDLE_VALUE)
@@ -1356,9 +1351,9 @@ log_line_prefix(StringInfo buf)
int i;
/*
- * This is one of the few places where we'd rather not inherit a
- * static variable's value from the postmaster. But since we will,
- * reset it when MyProcPid changes.
+ * This is one of the few places where we'd rather not inherit a static
+ * variable's value from the postmaster. But since we will, reset it when
+ * MyProcPid changes.
*/
if (log_my_pid != MyProcPid)
{
@@ -1412,8 +1407,8 @@ log_line_prefix(StringInfo buf)
if (MyProcPort)
{
appendStringInfo(buf, "%lx.%x",
- (long) (MyProcPort->session_start.tv_sec),
- MyProcPid);
+ (long) (MyProcPort->session_start.tv_sec),
+ MyProcPid);
}
break;
case 'p':
@@ -1425,21 +1420,22 @@ log_line_prefix(StringInfo buf)
case 'm':
{
/*
- * Note: for %m, %t, and %s we deliberately use the
- * C library's strftime/localtime, and not the
- * equivalent functions from src/timezone. This
- * ensures that all backends will report log entries
- * in the same timezone, namely whatever C-library
- * setting they inherit from the postmaster. If we
- * used src/timezone then local settings of the
- * TimeZone GUC variable would confuse the log.
+ * Note: for %m, %t, and %s we deliberately use the C
+ * library's strftime/localtime, and not the equivalent
+ * functions from src/timezone. This ensures that all
+ * backends will report log entries in the same timezone,
+ * namely whatever C-library setting they inherit from the
+ * postmaster. If we used src/timezone then local
+ * settings of the TimeZone GUC variable would confuse the
+ * log.
*/
- time_t stamp_time;
- char strfbuf[128], msbuf[8];
+ time_t stamp_time;
+ char strfbuf[128],
+ msbuf[8];
struct timeval tv;
gettimeofday(&tv, NULL);
- stamp_time = tv.tv_sec;
+ stamp_time = tv.tv_sec;
strftime(strfbuf, sizeof(strfbuf),
/* leave room for milliseconds... */
@@ -1452,8 +1448,8 @@ log_line_prefix(StringInfo buf)
localtime(&stamp_time));
/* 'paste' milliseconds into place... */
- sprintf(msbuf, ".%03d", (int) (tv.tv_usec/1000));
- strncpy(strfbuf+19, msbuf, 4);
+ sprintf(msbuf, ".%03d", (int) (tv.tv_usec / 1000));
+ strncpy(strfbuf + 19, msbuf, 4);
appendStringInfoString(buf, strfbuf);
}
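
For illustration, the same technique as a standalone program: format the seconds with the C library's strftime/localtime and paste the milliseconds from gettimeofday() in afterwards (the format string and buffer sizes here are illustrative):

#include <stdio.h>
#include <sys/time.h>
#include <time.h>

int
main(void)
{
    struct timeval tv;
    time_t  stamp_time;
    char    strfbuf[64];
    char    msbuf[8];

    gettimeofday(&tv, NULL);
    stamp_time = tv.tv_sec;

    /* "YYYY-MM-DD HH:MM:SS" -- C-library localtime, as the comment advises,
     * so every process reports in the zone it inherited from its parent */
    strftime(strfbuf, sizeof(strfbuf), "%Y-%m-%d %H:%M:%S",
             localtime(&stamp_time));

    /* 'paste' milliseconds into place */
    snprintf(msbuf, sizeof(msbuf), ".%03d", (int) (tv.tv_usec / 1000));
    printf("%s%s\n", strfbuf, msbuf);
    return 0;
}
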
@@ -1535,7 +1531,7 @@ log_line_prefix(StringInfo buf)
char *
unpack_sql_state(int sql_state)
{
- static char buf[12];
+ static char buf[12];
int i;
for (i = 0; i < 5; i++)
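
For context, the loop walks five 6-bit fields of the packed SQLSTATE; a standalone sketch of that packing scheme (the shift-by-six, relative-to-'0' layout matches what elog.h's MAKE_SQLSTATE/PGUNSIXBIT macros implement, as best I recall; the helpers here are illustrative):

#include <stdio.h>

/* Pack/unpack a 5-character SQLSTATE into an int, six bits per character. */
static int
pack_sql_state(const char code[5])
{
    int     val = 0;
    int     i;

    for (i = 4; i >= 0; i--)
        val = (val << 6) | ((code[i] - '0') & 0x3F);
    return val;
}

static void
unpack(int val, char buf[6])
{
    int     i;

    for (i = 0; i < 5; i++)
    {
        buf[i] = (char) ((val & 0x3F) + '0');
        val >>= 6;
    }
    buf[5] = '\0';
}

int
main(void)
{
    char    buf[6];

    unpack(pack_sql_state("23505"), buf);   /* unique_violation */
    printf("%s\n", buf);
    return 0;
}
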
@@ -1629,8 +1625,7 @@ send_message_to_server_log(ErrorData *edata)
}
/*
- * If the user wants the query that generated this error logged, do
- * it.
+ * If the user wants the query that generated this error logged, do it.
*/
if (edata->elevel >= log_min_error_statement && debug_query_string != NULL)
{
@@ -1692,12 +1687,13 @@ send_message_to_server_log(ErrorData *edata)
if ((Log_destination & LOG_DESTINATION_STDERR) || whereToSendOutput == Debug)
{
#ifdef WIN32
+
/*
* In a win32 service environment, there is no usable stderr. Capture
* anything going there and write it to the eventlog instead.
*
- * If stderr redirection is active, it's ok to write to stderr
- * because that's really a pipe to the syslogger process.
+ * If stderr redirection is active, it's ok to write to stderr because
+ * that's really a pipe to the syslogger process.
*/
if ((!Redirect_stderr || am_syslogger) && pgwin32_is_service())
write_eventlog(edata->elevel, buf.data);
@@ -1847,12 +1843,12 @@ send_message_to_frontend(ErrorData *edata)
pq_endmessage(&msgbuf);
/*
- * This flush is normally not necessary, since postgres.c will flush
- * out waiting data when control returns to the main loop. But it
- * seems best to leave it here, so that the client has some clue what
- * happened if the backend dies before getting back to the main loop
- * ... error/notice messages should not be a performance-critical path
- * anyway, so an extra flush won't hurt much ...
+ * This flush is normally not necessary, since postgres.c will flush out
+ * waiting data when control returns to the main loop. But it seems best
+ * to leave it here, so that the client has some clue what happened if the
+ * backend dies before getting back to the main loop ... error/notice
+ * messages should not be a performance-critical path anyway, so an extra
+ * flush won't hurt much ...
*/
pq_flush();
}
@@ -1887,9 +1883,9 @@ expand_fmt_string(const char *fmt, ErrorData *edata)
if (*cp == 'm')
{
/*
- * Replace %m by system error string. If there are any
- * %'s in the string, we'd better double them so that
- * vsnprintf won't misinterpret.
+ * Replace %m by system error string. If there are any %'s in
+ * the string, we'd better double them so that vsnprintf won't
+ * misinterpret.
*/
const char *cp2;
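
For illustration, a standalone sketch of that substitution: splice in the strerror() text for %m and double any '%' it contains so a later vsnprintf pass cannot misinterpret it (the helper name and the simplified %% handling are illustrative):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
expand_m(const char *fmt, int saved_errno)
{
    const char *err = strerror(saved_errno);
    const char *cp;
    char   *out,
           *dst;
    size_t  cap;
    int     nm = 0;

    /* worst-case size: every %m replaced by err with all %'s doubled */
    for (cp = fmt; (cp = strstr(cp, "%m")) != NULL; cp += 2)
        nm++;
    cap = strlen(fmt) + (size_t) nm * 2 * strlen(err) + 1;

    out = dst = malloc(cap);
    if (out == NULL)
        abort();

    for (cp = fmt; *cp; cp++)
    {
        if (cp[0] == '%' && cp[1] == '%')
        {
            *dst++ = *cp++;     /* copy literal %% untouched */
            *dst++ = *cp;
        }
        else if (cp[0] == '%' && cp[1] == 'm')
        {
            const char *e;

            for (e = err; *e; e++)
            {
                if (*e == '%')
                    *dst++ = '%';   /* double %'s in the error text */
                *dst++ = *e;
            }
            cp++;               /* skip the 'm' */
        }
        else
            *dst++ = *cp;
    }
    *dst = '\0';
    return out;
}

int
main(void)
{
    char   *s = expand_m("could not open file \"%s\": %m", ENOENT);

    puts(s);    /* could not open file "%s": No such file or directory */
    free(s);
    return 0;
}
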
@@ -1934,8 +1930,8 @@ useful_strerror(int errnum)
str = strerror(errnum);
/*
- * Some strerror()s return an empty string for out-of-range errno.
- * This is ANSI C spec compliant, but not exactly useful.
+ * Some strerror()s return an empty string for out-of-range errno. This is
+ * ANSI C spec compliant, but not exactly useful.
*/
if (str == NULL || *str == '\0')
{
diff --git a/src/backend/utils/fmgr/dfmgr.c b/src/backend/utils/fmgr/dfmgr.c
index 3c33fbfa6f0..2212f49fc46 100644
--- a/src/backend/utils/fmgr/dfmgr.c
+++ b/src/backend/utils/fmgr/dfmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.80 2005/05/11 01:26:02 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.81 2005/10/15 02:49:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -30,8 +30,8 @@ typedef struct df_files
{
struct df_files *next; /* List link */
dev_t device; /* Device file is on */
-#ifndef WIN32 /* ensures we never again depend on this
- * under win32 */
+#ifndef WIN32 /* ensures we never again depend on this under
+ * win32 */
ino_t inode; /* Inode number of file */
#endif
void *handle; /* a handle for pg_dl* functions */
@@ -200,8 +200,8 @@ load_file(char *filename)
/*
* We need to do stat() in order to determine whether this is the same
- * file as a previously loaded file; it's also handy so as to give a
- * good error message if bogus file name given.
+ * file as a previously loaded file; it's also handy so as to give a good
+ * error message if bogus file name given.
*/
if (stat(fullname, &stat_buf) == -1)
ereport(ERROR,
@@ -209,8 +209,8 @@ load_file(char *filename)
errmsg("could not access file \"%s\": %m", fullname)));
/*
- * We have to zap all entries in the list that match on either
- * filename or inode, else load_external_function() won't do anything.
+ * We have to zap all entries in the list that match on either filename or
+ * inode, else load_external_function() won't do anything.
*/
prv = NULL;
for (file_scanner = file_list; file_scanner != NULL; file_scanner = nxt)
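
For illustration, the same-file test this list walk relies on, in standalone form: two paths name the same file when stat(2) reports the same device and inode (the diff above notes the inode half is deliberately skipped under win32):

#include <stdio.h>
#include <sys/stat.h>

static int
same_file(const char *a, const char *b)
{
    struct stat sa,
                sb;

    if (stat(a, &sa) != 0 || stat(b, &sb) != 0)
        return 0;               /* treat unstat-able files as different */
    return sa.st_dev == sb.st_dev && sa.st_ino == sb.st_ino;
}

int
main(int argc, char **argv)
{
    if (argc == 3)
        printf("%s\n", same_file(argv[1], argv[2]) ? "same" : "different");
    return 0;
}
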
@@ -351,7 +351,7 @@ substitute_libpath_macro(const char *name)
strncmp(name, "$libdir", strlen("$libdir")) != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("invalid macro name in dynamic library path: %s", name)));
+ errmsg("invalid macro name in dynamic library path: %s", name)));
ret = palloc(strlen(pkglib_path) + strlen(sep_ptr) + 1);
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
index dd6134ccfd0..4e5dcc3002b 100644
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.96 2005/06/28 05:09:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.97 2005/10/15 02:49:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -41,9 +41,9 @@
* some warnings about int->pointer conversions...
*/
#if (defined(__mc68000__) || (defined(__m68k__))) && defined(__ELF__)
-typedef int32 (*func_ptr) ();
+typedef int32 (*func_ptr) ();
#else
-typedef char * (*func_ptr) ();
+typedef char *(*func_ptr) ();
#endif
/*
@@ -52,8 +52,8 @@ typedef char * (*func_ptr) ();
typedef struct
{
func_ptr func; /* Address of the oldstyle function */
- bool arg_toastable[FUNC_MAX_ARGS]; /* is n'th arg of a
- * toastable datatype? */
+ bool arg_toastable[FUNC_MAX_ARGS]; /* is n'th arg of a toastable
+ * datatype? */
} Oldstyle_fnextra;
/*
@@ -95,8 +95,8 @@ fmgr_isbuiltin(Oid id)
int high = fmgr_nbuiltins - 1;
/*
- * Loop invariant: low is the first index that could contain target
- * entry, and high is the last index that could contain it.
+ * Loop invariant: low is the first index that could contain target entry,
+ * and high is the last index that could contain it.
*/
while (low <= high)
{
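
For illustration, that loop invariant in standalone form: low is the first index that could still hold the target and high is the last, so the search terminates when they cross (the array contents are arbitrary sample OIDs):

#include <stdio.h>

static int
search_sorted(const int *arr, int n, int target)
{
    int     low = 0;
    int     high = n - 1;

    while (low <= high)
    {
        int     i = (high + low) / 2;

        if (target == arr[i])
            return i;
        else if (target > arr[i])
            low = i + 1;
        else
            high = i - 1;
    }
    return -1;                  /* not found */
}

int
main(void)
{
    int     oids[] = {31, 33, 34, 35, 38, 39, 40, 41};

    printf("%d\n", search_sorted(oids, 8, 38));     /* prints 4 */
    return 0;
}
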
@@ -177,9 +177,9 @@ fmgr_info_cxt_security(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt,
char *prosrc;
/*
- * fn_oid *must* be filled in last. Some code assumes that if fn_oid
- * is valid, the whole struct is valid. Some FmgrInfo struct's do
- * survive elogs.
+ * fn_oid *must* be filled in last. Some code assumes that if fn_oid is
+ * valid, the whole struct is valid. Some FmgrInfo struct's do survive
+ * elogs.
*/
finfo->fn_oid = InvalidOid;
finfo->fn_extra = NULL;
@@ -189,8 +189,7 @@ fmgr_info_cxt_security(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt,
if ((fbp = fmgr_isbuiltin(functionId)) != NULL)
{
/*
- * Fast path for builtin functions: don't bother consulting
- * pg_proc
+ * Fast path for builtin functions: don't bother consulting pg_proc
*/
finfo->fn_nargs = fbp->nargs;
finfo->fn_strict = fbp->strict;
@@ -227,11 +226,11 @@ fmgr_info_cxt_security(Oid functionId, FmgrInfo *finfo, MemoryContext mcxt,
/*
* For an ordinary builtin function, we should never get here
* because the isbuiltin() search above will have succeeded.
- * However, if the user has done a CREATE FUNCTION to create
- * an alias for a builtin function, we can end up here. In
- * that case we have to look up the function by name. The
- * name of the internal function is stored in prosrc (it
- * doesn't have to be the same as the name of the alias!)
+ * However, if the user has done a CREATE FUNCTION to create an
+ * alias for a builtin function, we can end up here. In that case
+ * we have to look up the function by name. The name of the
+ * internal function is stored in prosrc (it doesn't have to be
+ * the same as the name of the alias!)
*/
prosrcdatum = SysCacheGetAttr(PROCOID, procedureTuple,
Anum_pg_proc_prosrc, &isnull);
@@ -300,8 +299,7 @@ fmgr_info_C_lang(Oid functionId, FmgrInfo *finfo, HeapTuple procedureTuple)
void *libraryhandle;
/*
- * Get prosrc and probin strings (link symbol and library
- * filename)
+ * Get prosrc and probin strings (link symbol and library filename)
*/
prosrcattr = SysCacheGetAttr(PROCOID, procedureTuple,
Anum_pg_proc_prosrc, &isnull);
@@ -605,14 +603,13 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
fnextra = (Oldstyle_fnextra *) fcinfo->flinfo->fn_extra;
/*
- * Result is NULL if any argument is NULL, but we still call the
- * function (peculiar, but that's the way it worked before, and after
- * all this is a backwards-compatibility wrapper). Note, however,
- * that we'll never get here with NULL arguments if the function is
- * marked strict.
+ * Result is NULL if any argument is NULL, but we still call the function
+ * (peculiar, but that's the way it worked before, and after all this is a
+ * backwards-compatibility wrapper). Note, however, that we'll never get
+ * here with NULL arguments if the function is marked strict.
*
- * We also need to detoast any TOAST-ed inputs, since it's unlikely that
- * an old-style function knows about TOASTing.
+ * We also need to detoast any TOAST-ed inputs, since it's unlikely that an
+ * old-style function knows about TOASTing.
*/
isnull = false;
for (i = 0; i < n_arguments; i++)
@@ -634,9 +631,9 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
case 1:
/*
- * nullvalue() used to use isNull to check if arg is NULL;
- * perhaps there are other functions still out there that also
- * rely on this undocumented hack?
+ * nullvalue() used to use isNull to check if arg is NULL; perhaps
+ * there are other functions still out there that also rely on
+ * this undocumented hack?
*/
returnValue = (*user_fn) (fcinfo->arg[0], &fcinfo->isnull);
break;
@@ -744,16 +741,16 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
default:
/*
- * Increasing FUNC_MAX_ARGS doesn't automatically add cases to
- * the above code, so mention the actual value in this error
- * not FUNC_MAX_ARGS. You could add cases to the above if you
- * needed to support old-style functions with many arguments,
- * but making 'em be new-style is probably a better idea.
+ * Increasing FUNC_MAX_ARGS doesn't automatically add cases to the
+ * above code, so mention the actual value in this error not
+ * FUNC_MAX_ARGS. You could add cases to the above if you needed
+ * to support old-style functions with many arguments, but making
+ * 'em be new-style is probably a better idea.
*/
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
- errmsg("function %u has too many arguments (%d, maximum is %d)",
- fcinfo->flinfo->fn_oid, n_arguments, 16)));
+ errmsg("function %u has too many arguments (%d, maximum is %d)",
+ fcinfo->flinfo->fn_oid, n_arguments, 16)));
returnValue = NULL; /* keep compiler quiet */
break;
}
@@ -769,7 +766,7 @@ fmgr_oldstyle(PG_FUNCTION_ARGS)
struct fmgr_security_definer_cache
{
FmgrInfo flinfo;
- Oid userid;
+ Oid userid;
};
/*
@@ -785,8 +782,8 @@ fmgr_security_definer(PG_FUNCTION_ARGS)
{
Datum result;
FmgrInfo *save_flinfo;
- struct fmgr_security_definer_cache * volatile fcache;
- Oid save_userid;
+ struct fmgr_security_definer_cache *volatile fcache;
+ Oid save_userid;
HeapTuple tuple;
if (!fcinfo->flinfo->fn_extra)
@@ -1719,8 +1716,8 @@ fmgr(Oid procedureId,...)
if (n_arguments > FUNC_MAX_ARGS)
ereport(ERROR,
(errcode(ERRCODE_TOO_MANY_ARGUMENTS),
- errmsg("function %u has too many arguments (%d, maximum is %d)",
- flinfo.fn_oid, n_arguments, FUNC_MAX_ARGS)));
+ errmsg("function %u has too many arguments (%d, maximum is %d)",
+ flinfo.fn_oid, n_arguments, FUNC_MAX_ARGS)));
va_start(pvar, procedureId);
for (i = 0; i < n_arguments; i++)
fcinfo.arg[i] = (Datum) va_arg(pvar, char *);
@@ -1760,10 +1757,10 @@ Int64GetDatum(int64 X)
#else /* INT64_IS_BUSTED */
/*
- * On a machine with no 64-bit-int C datatype, sizeof(int64) will not
- * be 8, but we want Int64GetDatum to return an 8-byte object anyway,
- * with zeroes in the unused bits. This is needed so that, for
- * example, hash join of int8 will behave properly.
+ * On a machine with no 64-bit-int C datatype, sizeof(int64) will not be
+ * 8, but we want Int64GetDatum to return an 8-byte object anyway, with
+ * zeroes in the unused bits. This is needed so that, for example, hash
+ * join of int8 will behave properly.
*/
int64 *retval = (int64 *) palloc0(Max(sizeof(int64), 8));
@@ -1846,8 +1843,8 @@ get_fn_expr_rettype(FmgrInfo *flinfo)
Node *expr;
/*
- * can't return anything useful if we have no FmgrInfo or if its
- * fn_expr node has not been initialized
+ * can't return anything useful if we have no FmgrInfo or if its fn_expr
+ * node has not been initialized
*/
if (!flinfo || !flinfo->fn_expr)
return InvalidOid;
@@ -1866,8 +1863,8 @@ Oid
get_fn_expr_argtype(FmgrInfo *flinfo, int argnum)
{
/*
- * can't return anything useful if we have no FmgrInfo or if its
- * fn_expr node has not been initialized
+ * can't return anything useful if we have no FmgrInfo or if its fn_expr
+ * node has not been initialized
*/
if (!flinfo || !flinfo->fn_expr)
return InvalidOid;
@@ -1909,8 +1906,8 @@ get_call_expr_argtype(Node *expr, int argnum)
argtype = exprType((Node *) list_nth(args, argnum));
/*
- * special hack for ScalarArrayOpExpr: what the underlying function
- * will actually get passed is the element type of the array.
+ * special hack for ScalarArrayOpExpr: what the underlying function will
+ * actually get passed is the element type of the array.
*/
if (IsA(expr, ScalarArrayOpExpr) &&
argnum == 1)
diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c
index 598168a70a0..0a51f7ae0f2 100644
--- a/src/backend/utils/fmgr/funcapi.c
+++ b/src/backend/utils/fmgr/funcapi.c
@@ -7,7 +7,7 @@
* Copyright (c) 2002-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/funcapi.c,v 1.25 2005/10/06 19:51:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/funcapi.c,v 1.26 2005/10/15 02:49:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -29,13 +29,13 @@
static void shutdown_MultiFuncCall(Datum arg);
static TypeFuncClass internal_get_result_type(Oid funcid,
- Node *call_expr,
- ReturnSetInfo *rsinfo,
- Oid *resultTypeId,
- TupleDesc *resultTupleDesc);
+ Node *call_expr,
+ ReturnSetInfo *rsinfo,
+ Oid *resultTypeId,
+ TupleDesc *resultTupleDesc);
static bool resolve_polymorphic_tupdesc(TupleDesc tupdesc,
- oidvector *declared_args,
- Node *call_expr);
+ oidvector *declared_args,
+ Node *call_expr);
static TypeFuncClass get_type_func_class(Oid typid);
@@ -89,8 +89,8 @@ init_MultiFuncCall(PG_FUNCTION_ARGS)
fcinfo->flinfo->fn_extra = retval;
/*
- * Ensure we will get shut down cleanly if the exprcontext is not
- * run to completion.
+ * Ensure we will get shut down cleanly if the exprcontext is not run
+ * to completion.
*/
RegisterExprContextCallback(rsi->econtext,
shutdown_MultiFuncCall,
@@ -119,16 +119,16 @@ per_MultiFuncCall(PG_FUNCTION_ARGS)
FuncCallContext *retval = (FuncCallContext *) fcinfo->flinfo->fn_extra;
/*
- * Clear the TupleTableSlot, if present. This is for safety's sake:
- * the Slot will be in a long-lived context (it better be, if the
+ * Clear the TupleTableSlot, if present. This is for safety's sake: the
+ * Slot will be in a long-lived context (it better be, if the
* FuncCallContext is pointing to it), but in most usage patterns the
- * tuples stored in it will be in the function's per-tuple context. So
- * at the beginning of each call, the Slot will hold a dangling
- * pointer to an already-recycled tuple. We clear it out here.
+ * tuples stored in it will be in the function's per-tuple context. So at
+ * the beginning of each call, the Slot will hold a dangling pointer to an
+ * already-recycled tuple. We clear it out here.
*
* Note: use of retval->slot is obsolete as of 8.0, and we expect that it
- * will always be NULL. This is just here for backwards compatibility
- * in case someone creates a slot anyway.
+ * will always be NULL. This is just here for backwards compatibility in
+ * case someone creates a slot anyway.
*/
if (retval->slot != NULL)
ExecClearTuple(retval->slot);
@@ -168,8 +168,8 @@ shutdown_MultiFuncCall(Datum arg)
flinfo->fn_extra = NULL;
/*
- * Caller is responsible to free up memory for individual struct
- * elements other than att_in_funcinfo and elements.
+ * Caller is responsible to free up memory for individual struct elements
+ * other than att_in_funcinfo and elements.
*/
if (funcctx->attinmeta != NULL)
pfree(funcctx->attinmeta);
@@ -183,14 +183,14 @@ shutdown_MultiFuncCall(Datum arg)
* Given a function's call info record, determine the kind of datatype
* it is supposed to return. If resultTypeId isn't NULL, *resultTypeId
* receives the actual datatype OID (this is mainly useful for scalar
- * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
+ * result types). If resultTupleDesc isn't NULL, *resultTupleDesc
* receives a pointer to a TupleDesc when the result is of a composite
* type, or NULL when it's a scalar result. NB: the tupledesc should
* be copied if it is to be accessed over a long period.
*
* One hard case that this handles is resolution of actual rowtypes for
* functions returning RECORD (from either the function's OUT parameter
- * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned
+ * list, or a ReturnSetInfo context node). TYPEFUNC_RECORD is returned
* only when we couldn't resolve the actual rowtype for lack of information.
*
* The other hard case that this handles is resolution of polymorphism.
@@ -238,7 +238,7 @@ get_expr_result_type(Node *expr,
else
{
/* handle as a generic expression; no chance to resolve RECORD */
- Oid typid = exprType(expr);
+ Oid typid = exprType(expr);
if (resultTypeId)
*resultTypeId = typid;
@@ -273,7 +273,7 @@ get_func_result_type(Oid functionId,
/*
* internal_get_result_type -- workhorse code implementing all the above
*
- * funcid must always be supplied. call_expr and rsinfo can be NULL if not
+ * funcid must always be supplied. call_expr and rsinfo can be NULL if not
* available. We will return TYPEFUNC_RECORD, and store NULL into
* *resultTupleDesc, if we cannot deduce the complete result rowtype from
* the available information.
@@ -306,9 +306,9 @@ internal_get_result_type(Oid funcid,
if (tupdesc)
{
/*
- * It has OUT parameters, so it's basically like a regular
- * composite type, except we have to be able to resolve any
- * polymorphic OUT parameters.
+ * It has OUT parameters, so it's basically like a regular composite
+ * type, except we have to be able to resolve any polymorphic OUT
+ * parameters.
*/
if (resultTypeId)
*resultTypeId = rettype;
@@ -341,7 +341,7 @@ internal_get_result_type(Oid funcid,
*/
if (rettype == ANYARRAYOID || rettype == ANYELEMENTOID)
{
- Oid newrettype = exprType(call_expr);
+ Oid newrettype = exprType(call_expr);
if (newrettype == InvalidOid) /* this probably should not happen */
ereport(ERROR,
@@ -355,7 +355,7 @@ internal_get_result_type(Oid funcid,
if (resultTypeId)
*resultTypeId = rettype;
if (resultTupleDesc)
- *resultTupleDesc = NULL; /* default result */
+ *resultTupleDesc = NULL; /* default result */
/* Classify the result type */
result = get_type_func_class(rettype);
@@ -391,7 +391,7 @@ internal_get_result_type(Oid funcid,
/*
* Given the result tuple descriptor for a function with OUT parameters,
* replace any polymorphic columns (ANYELEMENT/ANYARRAY) with correct data
- * types deduced from the input arguments. Returns TRUE if able to deduce
+ * types deduced from the input arguments. Returns TRUE if able to deduce
* all types, FALSE if not.
*/
static bool
@@ -425,7 +425,7 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args,
return true;
/*
- * Otherwise, extract actual datatype(s) from input arguments. (We assume
+ * Otherwise, extract actual datatype(s) from input arguments. (We assume
* the parser already validated consistency of the arguments.)
*/
if (!call_expr)
@@ -468,14 +468,14 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args,
switch (tupdesc->attrs[i]->atttypid)
{
case ANYELEMENTOID:
- TupleDescInitEntry(tupdesc, i+1,
+ TupleDescInitEntry(tupdesc, i + 1,
NameStr(tupdesc->attrs[i]->attname),
anyelement_type,
-1,
0);
break;
case ANYARRAYOID:
- TupleDescInitEntry(tupdesc, i+1,
+ TupleDescInitEntry(tupdesc, i + 1,
NameStr(tupdesc->attrs[i]->attname),
anyarray_type,
-1,
@@ -492,7 +492,7 @@ resolve_polymorphic_tupdesc(TupleDesc tupdesc, oidvector *declared_args,
/*
* Given the declared argument types and modes for a function,
* replace any polymorphic types (ANYELEMENT/ANYARRAY) with correct data
- * types deduced from the input arguments. Returns TRUE if able to deduce
+ * types deduced from the input arguments. Returns TRUE if able to deduce
* all types, FALSE if not. This is the same logic as
* resolve_polymorphic_tupdesc, but with a different argument representation.
*
@@ -513,7 +513,7 @@ resolve_polymorphic_argtypes(int numargs, Oid *argtypes, char *argmodes,
inargno = 0;
for (i = 0; i < numargs; i++)
{
- char argmode = argmodes ? argmodes[i] : PROARGMODE_IN;
+ char argmode = argmodes ? argmodes[i] : PROARGMODE_IN;
switch (argtypes[i])
{
@@ -612,10 +612,11 @@ get_type_func_class(Oid typid)
case 'p':
if (typid == RECORDOID)
return TYPEFUNC_RECORD;
+
/*
* We treat VOID and CSTRING as legitimate scalar datatypes,
- * mostly for the convenience of the JDBC driver (which wants
- * to be able to do "SELECT * FROM foo()" for all legitimately
+ * mostly for the convenience of the JDBC driver (which wants to
+ * be able to do "SELECT * FROM foo()" for all legitimately
* user-callable functions).
*/
if (typid == VOIDOID || typid == CSTRINGOID)
@@ -681,14 +682,14 @@ get_func_result_name(Oid functionId)
* since the array data is just going to look like a C array of
* values.
*/
- arr = DatumGetArrayTypeP(proargmodes); /* ensure not toasted */
+ arr = DatumGetArrayTypeP(proargmodes); /* ensure not toasted */
numargs = ARR_DIMS(arr)[0];
if (ARR_NDIM(arr) != 1 ||
numargs < 0 ||
ARR_ELEMTYPE(arr) != CHAROID)
elog(ERROR, "proargmodes is not a 1-D char array");
argmodes = (char *) ARR_DATA_PTR(arr);
- arr = DatumGetArrayTypeP(proargnames); /* ensure not toasted */
+ arr = DatumGetArrayTypeP(proargnames); /* ensure not toasted */
if (ARR_NDIM(arr) != 1 ||
ARR_DIMS(arr)[0] != numargs ||
ARR_ELEMTYPE(arr) != TEXTOID)
@@ -769,7 +770,7 @@ build_function_result_tupdesc_t(HeapTuple procTuple)
Anum_pg_proc_proargnames,
&isnull);
if (isnull)
- proargnames = PointerGetDatum(NULL); /* just to be sure */
+ proargnames = PointerGetDatum(NULL); /* just to be sure */
return build_function_result_tupdesc_d(proallargtypes,
proargmodes,
@@ -848,7 +849,7 @@ build_function_result_tupdesc_d(Datum proallargtypes,
numoutargs = 0;
for (i = 0; i < numargs; i++)
{
- char *pname;
+ char *pname;
if (argmodes[i] == PROARGMODE_IN)
continue;
@@ -879,7 +880,7 @@ build_function_result_tupdesc_d(Datum proallargtypes,
desc = CreateTemplateTupleDesc(numoutargs, false);
for (i = 0; i < numoutargs; i++)
{
- TupleDescInitEntry(desc, i+1,
+ TupleDescInitEntry(desc, i + 1,
outargnames[i],
outargtypes[i],
-1,
@@ -986,7 +987,7 @@ TypeGetTupleDesc(Oid typeoid, List *colaliases)
if (list_length(colaliases) != 1)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("number of aliases does not match number of columns")));
+ errmsg("number of aliases does not match number of columns")));
/* OK, get the column alias */
attname = strVal(linitial(colaliases));
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 66be64a4e56..292673ac26a 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.64 2005/08/20 23:26:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.65 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -115,14 +115,14 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
HASHHDR *hctl;
/*
- * For shared hash tables, we have a local hash header (HTAB struct)
- * that we allocate in TopMemoryContext; all else is in shared memory.
+ * For shared hash tables, we have a local hash header (HTAB struct) that
+ * we allocate in TopMemoryContext; all else is in shared memory.
*
- * For non-shared hash tables, everything including the hash header
- * is in a memory context created specially for the hash table ---
- * this makes hash_destroy very simple. The memory context is made
- * a child of either a context specified by the caller, or
- * TopMemoryContext if nothing is specified.
+ * For non-shared hash tables, everything including the hash header is in a
+ * memory context created specially for the hash table --- this makes
+ * hash_destroy very simple. The memory context is made a child of either
+ * a context specified by the caller, or TopMemoryContext if nothing is
+ * specified.
*/
if (flags & HASH_SHARED_MEM)
{
@@ -144,7 +144,7 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
}
/* Initialize the hash header, plus a copy of the table name */
- hashp = (HTAB *) DynaHashAlloc(sizeof(HTAB) + strlen(tabname) + 1);
+ hashp = (HTAB *) DynaHashAlloc(sizeof(HTAB) + strlen(tabname) +1);
MemSet(hashp, 0, sizeof(HTAB));
hashp->tabname = (char *) (hashp + 1);
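
For illustration, the allocation trick being reindented here, as a standalone sketch: the header and the copy of the table name share one allocation, with the name placed immediately after the struct (malloc stands in for DynaHashAlloc, and the struct is a cut-down stand-in for HTAB):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct
{
    char   *tabname;            /* points just past this struct */
    long    nentries;
} Header;

static Header *
make_header(const char *tabname)
{
    Header *h = malloc(sizeof(Header) + strlen(tabname) + 1);

    if (h == NULL)
        return NULL;
    memset(h, 0, sizeof(Header));
    h->tabname = (char *) (h + 1);
    strcpy(h->tabname, tabname);
    return h;
}

int
main(void)
{
    Header *h = make_header("LOCK hash");

    printf("%s\n", h->tabname);
    free(h);                    /* one free releases header and name */
    return 0;
}
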
@@ -156,10 +156,9 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
hashp->hash = string_hash; /* default hash function */
/*
- * If you don't specify a match function, it defaults to strncmp() if
- * you used string_hash (either explicitly or by default) and to
- * memcmp() otherwise. (Prior to PostgreSQL 7.4, memcmp() was always
- * used.)
+ * If you don't specify a match function, it defaults to strncmp() if you
+ * used string_hash (either explicitly or by default) and to memcmp()
+ * otherwise. (Prior to PostgreSQL 7.4, memcmp() was always used.)
*/
if (flags & HASH_COMPARE)
hashp->match = info->match;
@@ -186,8 +185,8 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
if (flags & HASH_SHARED_MEM)
{
/*
- * ctl structure is preallocated for shared memory tables. Note
- * that HASH_DIRSIZE and HASH_ALLOC had better be set as well.
+ * ctl structure is preallocated for shared memory tables. Note that
+ * HASH_DIRSIZE and HASH_ALLOC had better be set as well.
*/
hashp->hctl = info->hctl;
hashp->dir = info->dir;
@@ -243,8 +242,8 @@ hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
}
/*
- * hash table now allocates space for key and data but you have to say
- * how much space to allocate
+ * hash table now allocates space for key and data but you have to say how
+ * much space to allocate
*/
if (flags & HASH_ELEM)
{
@@ -318,8 +317,8 @@ init_htab(HTAB *hashp, long nelem)
/*
* Divide number of elements by the fill factor to determine a desired
- * number of buckets. Allocate space for the next greater power of
- * two number of buckets
+ * number of buckets. Allocate space for the next greater power of two
+ * number of buckets
*/
lnbuckets = (nelem - 1) / hctl->ffactor + 1;
@@ -329,15 +328,14 @@ init_htab(HTAB *hashp, long nelem)
hctl->high_mask = (nbuckets << 1) - 1;
/*
- * Figure number of directory segments needed, round up to a power of
- * 2
+ * Figure number of directory segments needed, round up to a power of 2
*/
nsegs = (nbuckets - 1) / hctl->ssize + 1;
nsegs = 1 << my_log2(nsegs);
/*
- * Make sure directory is big enough. If pre-allocated directory is
- * too small, choke (caller screwed up).
+ * Make sure directory is big enough. If pre-allocated directory is too
+ * small, choke (caller screwed up).
*/
if (nsegs > hctl->dsize)
{
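
For illustration, the sizing arithmetic these comments describe, as a standalone sketch: divide the expected entries by the fill factor, then round both the bucket count and the directory segment count up to powers of two. The fill factor of 1 and segment size of 256 are assumed defaults, and my_log2 is reproduced from memory:

#include <stdio.h>

/* ceil(log2(num)), as dynahash's my_log2 computes it */
static int
my_log2(long num)
{
    int     i;
    long    limit;

    for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
        ;
    return i;
}

int
main(void)
{
    long    nelem = 1000;       /* expected entries */
    int     ffactor = 1;        /* assumed fill factor */
    long    ssize = 256;        /* assumed bucket slots per segment */
    long    lnbuckets,
            nbuckets,
            nsegs;

    /* desired bucket count, rounded up to the next power of two */
    lnbuckets = (nelem - 1) / ffactor + 1;
    nbuckets = 1L << my_log2(lnbuckets);

    /* directory segments needed, likewise rounded up to a power of two */
    nsegs = (nbuckets - 1) / ssize + 1;
    nsegs = 1L << my_log2(nsegs);

    printf("buckets=%ld segments=%ld low_mask=%lx high_mask=%lx\n",
           nbuckets, nsegs,
           (unsigned long) (nbuckets - 1),
           (unsigned long) ((nbuckets << 1) - 1));
    return 0;
}
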
@@ -418,7 +416,7 @@ hash_estimate_size(long num_entries, Size entrysize)
size = add_size(size, mul_size(nDirEntries, sizeof(HASHSEGMENT)));
/* segments */
size = add_size(size, mul_size(nSegments,
- MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET))));
+ MAXALIGN(DEF_SEGSIZE * sizeof(HASHBUCKET))));
/* elements --- allocated in groups of up to HASHELEMENT_ALLOC_MAX */
elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
elementAllocCnt = Min(num_entries, HASHELEMENT_ALLOC_MAX);
@@ -528,7 +526,7 @@ calc_bucket(HASHHDR *hctl, uint32 hash_val)
* the result is a dangling pointer that shouldn't be dereferenced!)
*
* HASH_ENTER will normally ereport a generic "out of memory" error if
- * it is unable to create a new entry. The HASH_ENTER_NULL operation is
+ * it is unable to create a new entry. The HASH_ENTER_NULL operation is
* the same except it will return NULL if out of memory. Note that
* HASH_ENTER_NULL cannot be used with the default palloc-based allocator,
* since palloc internally ereports on out-of-memory.
@@ -623,8 +621,8 @@ hash_search(HTAB *hashp,
/*
* better hope the caller is synchronizing access to this
- * element, because someone else is going to reuse it the
- * next time something is added to the table
+ * element, because someone else is going to reuse it the next
+ * time something is added to the table
*/
return (void *) ELEMENTKEY(currBucket);
}
@@ -680,9 +678,8 @@ hash_search(HTAB *hashp,
if (++hctl->nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor)
{
/*
- * NOTE: failure to expand table is not a fatal error, it
- * just means we have to run at higher fill factor than we
- * wanted.
+ * NOTE: failure to expand table is not a fatal error, it just
+ * means we have to run at higher fill factor than we wanted.
*/
expand_table(hashp);
}
@@ -731,7 +728,7 @@ hash_seq_search(HASH_SEQ_STATUS *status)
{
/* Continuing scan of curBucket... */
status->curEntry = curElem->link;
- if (status->curEntry == NULL) /* end of this bucket */
+ if (status->curEntry == NULL) /* end of this bucket */
++status->curBucket;
return (void *) ELEMENTKEY(curElem);
}
@@ -746,7 +743,7 @@ hash_seq_search(HASH_SEQ_STATUS *status)
max_bucket = hctl->max_bucket;
if (curBucket > max_bucket)
- return NULL; /* search is done */
+ return NULL; /* search is done */
/*
* first find the right segment in the table directory.
@@ -768,7 +765,7 @@ hash_seq_search(HASH_SEQ_STATUS *status)
if (++curBucket > max_bucket)
{
status->curBucket = curBucket;
- return NULL; /* search is done */
+ return NULL; /* search is done */
}
if (++segment_ndx >= ssize)
{
@@ -833,10 +830,9 @@ expand_table(HTAB *hashp)
/*
* *Before* changing masks, find old bucket corresponding to same hash
- * values; values in that bucket may need to be relocated to new
- * bucket. Note that new_bucket is certainly larger than low_mask at
- * this point, so we can skip the first step of the regular hash mask
- * calc.
+ * values; values in that bucket may need to be relocated to new bucket.
+ * Note that new_bucket is certainly larger than low_mask at this point,
+ * so we can skip the first step of the regular hash mask calc.
*/
old_bucket = (new_bucket & hctl->low_mask);
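
For context, the bucket-address calculation that makes this single-bucket split work (the calc_bucket body below is a from-memory sketch of the two-mask scheme, not a verbatim copy): hashes are masked with high_mask first and fall back to low_mask when that lands on a bucket that does not exist yet, so when bucket max_bucket+1 is created, only bucket (max_bucket+1) & low_mask can hold entries that have to move.

#include <stdio.h>

static unsigned
calc_bucket(unsigned hash, unsigned max_bucket,
            unsigned low_mask, unsigned high_mask)
{
    unsigned    bucket = hash & high_mask;

    if (bucket > max_bucket)
        bucket &= low_mask;     /* bucket doesn't exist yet: fold back */
    return bucket;
}

int
main(void)
{
    unsigned    max_bucket = 5;         /* buckets 0..5 exist */
    unsigned    low_mask = 3;           /* 4 - 1 */
    unsigned    high_mask = 7;          /* 8 - 1 */
    unsigned    new_bucket = max_bucket + 1;
    unsigned    h;

    printf("splitting old bucket %u into new bucket %u\n",
           new_bucket & low_mask, new_bucket);
    for (h = 0; h < 16; h++)
        printf("hash %2u -> bucket %u\n",
               h, calc_bucket(h, max_bucket, low_mask, high_mask));
    return 0;
}
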
@@ -850,10 +846,10 @@ expand_table(HTAB *hashp)
}
/*
- * Relocate records to the new bucket. NOTE: because of the way the
- * hash masking is done in calc_bucket, only one old bucket can need
- * to be split at this point. With a different way of reducing the
- * hash value, that might not be true!
+ * Relocate records to the new bucket. NOTE: because of the way the hash
+ * masking is done in calc_bucket, only one old bucket can need to be
+ * split at this point. With a different way of reducing the hash value,
+ * that might not be true!
*/
old_segnum = old_bucket >> hctl->sshift;
old_segndx = MOD(old_bucket, hctl->ssize);
diff --git a/src/backend/utils/hash/hashfn.c b/src/backend/utils/hash/hashfn.c
index c5968658161..43dac9daad1 100644
--- a/src/backend/utils/hash/hashfn.c
+++ b/src/backend/utils/hash/hashfn.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/hash/hashfn.c,v 1.24 2005/06/08 23:02:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/hash/hashfn.c,v 1.25 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -64,7 +64,7 @@ uint32
bitmap_hash(const void *key, Size keysize)
{
Assert(keysize == sizeof(Bitmapset *));
- return bms_hash_value(*((const Bitmapset * const *) key));
+ return bms_hash_value(*((const Bitmapset *const *) key));
}
/*
@@ -74,6 +74,6 @@ int
bitmap_match(const void *key1, const void *key2, Size keysize)
{
Assert(keysize == sizeof(Bitmapset *));
- return !bms_equal(*((const Bitmapset * const *) key1),
- *((const Bitmapset * const *) key2));
+ return !bms_equal(*((const Bitmapset *const *) key1),
+ *((const Bitmapset *const *) key2));
}
diff --git a/src/backend/utils/hash/pg_crc.c b/src/backend/utils/hash/pg_crc.c
index 211da1aa729..2cfdff44e22 100644
--- a/src/backend/utils/hash/pg_crc.c
+++ b/src/backend/utils/hash/pg_crc.c
@@ -19,7 +19,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/hash/pg_crc.c,v 1.13 2005/06/02 05:55:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/hash/pg_crc.c,v 1.14 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -378,7 +378,6 @@ const uint32 pg_crc64_table1[256] = {
0x5DEDC41A, 0x1F1D25F1,
0xD80C07CD, 0x9AFCE626
};
-
#else /* int64 works */
const uint64 pg_crc64_table[256] = {
@@ -511,7 +510,6 @@ const uint64 pg_crc64_table[256] = {
UINT64CONST(0x5DEDC41A34BBEEB2), UINT64CONST(0x1F1D25F19D51D821),
UINT64CONST(0xD80C07CD676F8394), UINT64CONST(0x9AFCE626CE85B507)
};
-
#endif /* INT64_IS_BUSTED */
-#endif /* PROVIDE_64BIT_CRC */
+#endif /* PROVIDE_64BIT_CRC */
diff --git a/src/backend/utils/init/flatfiles.c b/src/backend/utils/init/flatfiles.c
index 7d9d2e6cb25..9906682c320 100644
--- a/src/backend/utils/init/flatfiles.c
+++ b/src/backend/utils/init/flatfiles.c
@@ -4,9 +4,9 @@
* Routines for maintaining "flat file" images of the shared catalogs.
*
* We use flat files so that the postmaster and not-yet-fully-started
- * backends can look at the contents of pg_database, pg_authid, and
- * pg_auth_members for authentication purposes. This module is
- * responsible for keeping the flat-file images as nearly in sync with
+ * backends can look at the contents of pg_database, pg_authid, and
+ * pg_auth_members for authentication purposes. This module is
+ * responsible for keeping the flat-file images as nearly in sync with
* database reality as possible.
*
* The tricky part of the write_xxx_file() routines in this module is that
@@ -23,7 +23,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/utils/init/flatfiles.c,v 1.14 2005/08/11 21:11:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/flatfiles.c,v 1.15 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -55,7 +55,7 @@
#define AUTH_FLAT_FILE "global/pg_auth"
/* Info bits in a flatfiles 2PC record */
-#define FF_BIT_DATABASE 1
+#define FF_BIT_DATABASE 1
#define FF_BIT_AUTH 2
@@ -181,8 +181,8 @@ write_database_file(Relation drel)
/*
* Create a temporary filename to be renamed later. This prevents the
- * backend from clobbering the flat file while the postmaster
- * might be reading from it.
+ * backend from clobbering the flat file while the postmaster might be
+ * reading from it.
*/
filename = database_getflatfilename();
bufsize = strlen(filename) + 12;
@@ -209,7 +209,7 @@ write_database_file(Relation drel)
Oid datoid;
Oid dattablespace;
TransactionId datfrozenxid,
- datvacuumxid;
+ datvacuumxid;
datname = NameStr(dbform->datname);
datoid = HeapTupleGetOid(tuple);
@@ -219,7 +219,7 @@ write_database_file(Relation drel)
/*
* Identify the oldest datfrozenxid, ignoring databases that are not
- * connectable (we assume they are safely frozen). This must match
+ * connectable (we assume they are safely frozen). This must match
* the logic in vac_truncate_clog() in vacuum.c.
*/
if (dbform->datallowconn &&
@@ -262,8 +262,8 @@ write_database_file(Relation drel)
tempname)));
/*
- * Rename the temp file to its final name, deleting the old flat file.
- * We expect that rename(2) is an atomic action.
+ * Rename the temp file to its final name, deleting the old flat file. We
+ * expect that rename(2) is an atomic action.
*/
if (rename(tempname, filename))
ereport(ERROR,
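
For illustration, the write-temp-then-rename idiom this relies on, in standalone form: readers see either the complete old flat file or the complete new one, never a partial write, because rename(2) replaces the target atomically. The pid-suffixed temp name mirrors the code above; the file name and contents are placeholders:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static void
rewrite_file(const char *filename, const char *contents)
{
    char    tempname[1024];
    FILE   *fp;

    /* temp name in the same directory, so the rename stays on one filesystem */
    snprintf(tempname, sizeof(tempname), "%s.%d", filename, (int) getpid());

    fp = fopen(tempname, "w");
    if (fp == NULL || fputs(contents, fp) == EOF || fclose(fp) == EOF)
    {
        perror("could not write temporary file");
        exit(1);
    }

    /* atomic replacement: the old file stays intact until this succeeds */
    if (rename(tempname, filename) != 0)
    {
        perror("could not rename temporary file into place");
        exit(1);
    }
}

int
main(void)
{
    rewrite_file("flatfile.txt", "one line of flat-file content\n");
    return 0;
}
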
@@ -295,16 +295,18 @@ write_database_file(Relation drel)
* and build data structures in-memory before writing the file.
*/
-typedef struct {
+typedef struct
+{
Oid roleid;
bool rolcanlogin;
- char* rolname;
- char* rolpassword;
- char* rolvaliduntil;
- List* member_of;
+ char *rolname;
+ char *rolpassword;
+ char *rolvaliduntil;
+ List *member_of;
} auth_entry;
-typedef struct {
+typedef struct
+{
Oid roleid;
Oid memberid;
} authmem_entry;
@@ -314,11 +316,13 @@ typedef struct {
static int
oid_compar(const void *a, const void *b)
{
- const auth_entry *a_auth = (const auth_entry*) a;
- const auth_entry *b_auth = (const auth_entry*) b;
+ const auth_entry *a_auth = (const auth_entry *) a;
+ const auth_entry *b_auth = (const auth_entry *) b;
- if (a_auth->roleid < b_auth->roleid) return -1;
- if (a_auth->roleid > b_auth->roleid) return 1;
+ if (a_auth->roleid < b_auth->roleid)
+ return -1;
+ if (a_auth->roleid > b_auth->roleid)
+ return 1;
return 0;
}
@@ -326,21 +330,23 @@ oid_compar(const void *a, const void *b)
static int
name_compar(const void *a, const void *b)
{
- const auth_entry *a_auth = (const auth_entry*) a;
- const auth_entry *b_auth = (const auth_entry*) b;
+ const auth_entry *a_auth = (const auth_entry *) a;
+ const auth_entry *b_auth = (const auth_entry *) b;
- return strcmp(a_auth->rolname,b_auth->rolname);
+ return strcmp(a_auth->rolname, b_auth->rolname);
}
/* qsort comparator for sorting authmem_entry array by memberid */
static int
mem_compar(const void *a, const void *b)
{
- const authmem_entry *a_auth = (const authmem_entry*) a;
- const authmem_entry *b_auth = (const authmem_entry*) b;
+ const authmem_entry *a_auth = (const authmem_entry *) a;
+ const authmem_entry *b_auth = (const authmem_entry *) b;
- if (a_auth->memberid < b_auth->memberid) return -1;
- if (a_auth->memberid > b_auth->memberid) return 1;
+ if (a_auth->memberid < b_auth->memberid)
+ return -1;
+ if (a_auth->memberid > b_auth->memberid)
+ return 1;
return 0;
}
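
For illustration, how comparators of this shape are typically used once the arrays are sorted: the qsort calls appear further down in this file's hunks; the bsearch lookup and the sample roles here are illustrative rather than lifted from flatfiles.c.

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int Oid;       /* stand-in for the real Oid typedef */

typedef struct
{
    Oid         roleid;
    const char *rolname;
} auth_entry;

/* same shape as oid_compar above: order entries by roleid */
static int
oid_compar(const void *a, const void *b)
{
    const auth_entry *a_auth = (const auth_entry *) a;
    const auth_entry *b_auth = (const auth_entry *) b;

    if (a_auth->roleid < b_auth->roleid)
        return -1;
    if (a_auth->roleid > b_auth->roleid)
        return 1;
    return 0;
}

int
main(void)
{
    auth_entry  roles[] = {
        {10, "postgres"},
        {16384, "app"},
        {16385, "reporting"},
    };
    auth_entry  key = {16385, NULL};
    auth_entry *hit;

    qsort(roles, 3, sizeof(auth_entry), oid_compar);
    hit = bsearch(&key, roles, 3, sizeof(auth_entry), oid_compar);
    printf("%s\n", hit ? hit->rolname : "not found");
    return 0;
}
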
@@ -354,7 +360,7 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
char *filename,
*tempname;
int bufsize;
- BlockNumber totalblocks;
+ BlockNumber totalblocks;
FILE *fp;
mode_t oumask;
HeapScanDesc scan;
@@ -364,13 +370,13 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
int curr_mem = 0;
int total_mem = 0;
int est_rows;
- auth_entry *auth_info;
+ auth_entry *auth_info;
authmem_entry *authmem_info;
/*
* Create a temporary filename to be renamed later. This prevents the
- * backend from clobbering the flat file while the postmaster might
- * be reading from it.
+ * backend from clobbering the flat file while the postmaster might be
+ * reading from it.
*/
filename = auth_getflatfilename();
bufsize = strlen(filename) + 12;
@@ -387,29 +393,29 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
tempname)));
/*
- * Read pg_authid and fill temporary data structures. Note we must
- * read all roles, even those without rolcanlogin.
+ * Read pg_authid and fill temporary data structures. Note we must read
+ * all roles, even those without rolcanlogin.
*/
totalblocks = RelationGetNumberOfBlocks(rel_authid);
totalblocks = totalblocks ? totalblocks : 1;
- est_rows = totalblocks * (BLCKSZ / (sizeof(HeapTupleHeaderData)+sizeof(FormData_pg_authid)));
- auth_info = (auth_entry*) palloc(est_rows*sizeof(auth_entry));
+ est_rows = totalblocks * (BLCKSZ / (sizeof(HeapTupleHeaderData) + sizeof(FormData_pg_authid)));
+ auth_info = (auth_entry *) palloc(est_rows * sizeof(auth_entry));
scan = heap_beginscan(rel_authid, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
Form_pg_authid aform = (Form_pg_authid) GETSTRUCT(tuple);
HeapTupleHeader tup = tuple->t_data;
- char *tp; /* ptr to tuple data */
- long off; /* offset in tuple data */
+ char *tp; /* ptr to tuple data */
+ long off; /* offset in tuple data */
bits8 *bp = tup->t_bits; /* ptr to null bitmask in tuple */
Datum datum;
if (curr_role >= est_rows)
{
est_rows *= 2;
- auth_info = (auth_entry*)
- repalloc(auth_info, est_rows*sizeof(auth_entry));
+ auth_info = (auth_entry *)
+ repalloc(auth_info, est_rows * sizeof(auth_entry));
}
auth_info[curr_role].roleid = HeapTupleGetOid(tuple);
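
The hunk above pre-sizes auth_info from a per-block upper bound: BLCKSZ (8192 by default) divided by the tuple header size plus the fixed part of a pg_authid row gives a generous rows-per-block estimate, and the scan still doubles the array with repalloc whenever the guess proves too small. A standalone sketch of the estimate-then-double pattern, with malloc/realloc standing in for palloc/repalloc and made-up sizes:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative numbers only: BLCKSZ defaults to 8192 in PostgreSQL,
 * the two struct sizes below are invented for the sketch. */
#define BLCKSZ            8192
#define TUPLE_HEADER_SIZE   24
#define FIXED_ROW_SIZE     100

typedef struct
{
    unsigned int roleid;
} auth_entry;

int
main(void)
{
    int         totalblocks = 10;   /* pretend the relation has 10 blocks */
    int         est_rows;
    int         curr_role = 0;
    auth_entry *auth_info;
    int         i;

    totalblocks = totalblocks ? totalblocks : 1;    /* never allocate zero */
    est_rows = totalblocks * (BLCKSZ / (TUPLE_HEADER_SIZE + FIXED_ROW_SIZE));
    auth_info = malloc(est_rows * sizeof(auth_entry));
    /* (allocation failures are not checked in this sketch) */

    /* pretend the scan returns far more rows than estimated */
    for (i = 0; i < 2000; i++)
    {
        if (curr_role >= est_rows)
        {
            est_rows *= 2;      /* grow geometrically, like the repalloc above */
            auth_info = realloc(auth_info, est_rows * sizeof(auth_entry));
        }
        auth_info[curr_role++].roleid = (unsigned int) i;
    }

    printf("stored %d rows, capacity grew to %d\n", curr_role, est_rows);
    free(auth_info);
    return 0;
}

Doubling keeps the number of reallocations logarithmic in the final row count.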
@@ -418,10 +424,10 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
auth_info[curr_role].member_of = NIL;
/*
- * We can't use heap_getattr() here because during startup we will
- * not have any tupdesc for pg_authid. Fortunately it's not too
- * hard to work around this. rolpassword is the first possibly-null
- * field so we can compute its offset directly.
+ * We can't use heap_getattr() here because during startup we will not
+ * have any tupdesc for pg_authid. Fortunately it's not too hard to
+ * work around this. rolpassword is the first possibly-null field so
+ * we can compute its offset directly.
*/
tp = (char *) tup + tup->t_hoff;
off = offsetof(FormData_pg_authid, rolpassword);
@@ -438,8 +444,8 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
datum = PointerGetDatum(tp + off);
/*
- * The password probably shouldn't ever be out-of-line toasted;
- * if it is, ignore it, since we can't handle that in startup mode.
+ * The password probably shouldn't ever be out-of-line toasted; if
+ * it is, ignore it, since we can't handle that in startup mode.
*/
if (VARATT_IS_EXTERNAL(DatumGetPointer(datum)))
auth_info[curr_role].rolpassword = pstrdup("");
@@ -495,8 +501,8 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
*/
totalblocks = RelationGetNumberOfBlocks(rel_authmem);
totalblocks = totalblocks ? totalblocks : 1;
- est_rows = totalblocks * (BLCKSZ / (sizeof(HeapTupleHeaderData)+sizeof(FormData_pg_auth_members)));
- authmem_info = (authmem_entry*) palloc(est_rows*sizeof(authmem_entry));
+ est_rows = totalblocks * (BLCKSZ / (sizeof(HeapTupleHeaderData) + sizeof(FormData_pg_auth_members)));
+ authmem_info = (authmem_entry *) palloc(est_rows * sizeof(authmem_entry));
scan = heap_beginscan(rel_authmem, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
@@ -506,8 +512,8 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
if (curr_mem >= est_rows)
{
est_rows *= 2;
- authmem_info = (authmem_entry*)
- repalloc(authmem_info, est_rows*sizeof(authmem_entry));
+ authmem_info = (authmem_entry *)
+ repalloc(authmem_info, est_rows * sizeof(authmem_entry));
}
authmem_info[curr_mem].roleid = memform->roleid;
@@ -518,8 +524,8 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
heap_endscan(scan);
/*
- * Search for memberships. We can skip all this if pg_auth_members
- * is empty.
+ * Search for memberships. We can skip all this if pg_auth_members is
+ * empty.
*/
if (total_mem > 0)
{
@@ -528,22 +534,23 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
*/
qsort(auth_info, total_roles, sizeof(auth_entry), oid_compar);
qsort(authmem_info, total_mem, sizeof(authmem_entry), mem_compar);
+
/*
* For each role, find what it belongs to.
*/
for (curr_role = 0; curr_role < total_roles; curr_role++)
{
- List *roles_list;
- List *roles_names_list = NIL;
- ListCell *mem;
+ List *roles_list;
+ List *roles_names_list = NIL;
+ ListCell *mem;
/* We can skip this for non-login roles */
if (!auth_info[curr_role].rolcanlogin)
continue;
/*
- * This search algorithm is the same as in is_member_of_role;
- * we are just working with a different input data structure.
+ * This search algorithm is the same as in is_member_of_role; we
+ * are just working with a different input data structure.
*/
roles_list = list_make1_oid(auth_info[curr_role].roleid);
@@ -551,17 +558,20 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
{
authmem_entry key;
authmem_entry *found_mem;
- int first_found, last_found, i;
+ int first_found,
+ last_found,
+ i;
key.memberid = lfirst_oid(mem);
found_mem = bsearch(&key, authmem_info, total_mem,
sizeof(authmem_entry), mem_compar);
if (!found_mem)
continue;
+
/*
- * bsearch found a match for us; but if there were
- * multiple matches it could have found any one of them.
- * Locate first and last match.
+ * bsearch found a match for us; but if there were multiple
+ * matches it could have found any one of them. Locate first
+ * and last match.
*/
first_found = last_found = (found_mem - authmem_info);
while (first_found > 0 &&
@@ -570,30 +580,31 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
while (last_found + 1 < total_mem &&
mem_compar(&key, &authmem_info[last_found + 1]) == 0)
last_found++;
+
/*
* Now add all the new roles to roles_list.
*/
for (i = first_found; i <= last_found; i++)
roles_list = list_append_unique_oid(roles_list,
- authmem_info[i].roleid);
+ authmem_info[i].roleid);
}
/*
- * Convert list of role Oids to list of role names.
- * We must do this before re-sorting auth_info.
+ * Convert list of role Oids to list of role names. We must do
+ * this before re-sorting auth_info.
*
- * We skip the first list element (curr_role itself) since there
- * is no point in writing that a role is a member of itself.
+ * We skip the first list element (curr_role itself) since there is
+ * no point in writing that a role is a member of itself.
*/
for_each_cell(mem, lnext(list_head(roles_list)))
{
- auth_entry key_auth;
+ auth_entry key_auth;
auth_entry *found_role;
key_auth.roleid = lfirst_oid(mem);
found_role = bsearch(&key_auth, auth_info, total_roles,
sizeof(auth_entry), oid_compar);
- if (found_role) /* paranoia */
+ if (found_role) /* paranoia */
roles_names_list = lappend(roles_names_list,
found_role->rolname);
}
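
The loop above flattens role memberships: roles_list starts with the role's own OID, each OID is looked up in the memberid-sorted authmem_info array, and because bsearch may land on any element of a run of equal keys the code walks outward to the first and last match before appending the containing roles uniquely to roles_list, which the surrounding loop keeps traversing until the transitive closure is complete. A minimal sketch of the bsearch-plus-run-expansion step over a small sorted array:

#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    unsigned int memberid;
    unsigned int roleid;        /* a role this member belongs to */
} mem_entry;

static int
mem_compar_sketch(const void *a, const void *b)
{
    const mem_entry *ma = (const mem_entry *) a;
    const mem_entry *mb = (const mem_entry *) b;

    if (ma->memberid < mb->memberid)
        return -1;
    if (ma->memberid > mb->memberid)
        return 1;
    return 0;
}

int
main(void)
{
    /* already sorted by memberid; memberid 5 appears three times */
    mem_entry   tab[] = {{1, 10}, {5, 20}, {5, 21}, {5, 22}, {9, 30}};
    int         total = (int) (sizeof(tab) / sizeof(tab[0]));
    mem_entry   key = {5, 0};
    mem_entry  *hit;
    int         first, last, i;

    hit = bsearch(&key, tab, total, sizeof(mem_entry), mem_compar_sketch);
    if (hit == NULL)
        return 0;               /* member of nothing */

    /* bsearch may have found any of the equal-keyed entries;
     * expand to the whole run, exactly as the flat-file code does. */
    first = last = (int) (hit - tab);
    while (first > 0 && mem_compar_sketch(&key, &tab[first - 1]) == 0)
        first--;
    while (last + 1 < total && mem_compar_sketch(&key, &tab[last + 1]) == 0)
        last++;

    for (i = first; i <= last; i++)
        printf("member 5 belongs to role %u\n", tab[i].roleid);
    return 0;
}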
@@ -613,7 +624,7 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
if (arole->rolcanlogin)
{
- ListCell *mem;
+ ListCell *mem;
fputs_quote(arole->rolname, fp);
fputs(" ", fp);
@@ -638,8 +649,8 @@ write_auth_file(Relation rel_authid, Relation rel_authmem)
tempname)));
/*
- * Rename the temp file to its final name, deleting the old flat file.
- * We expect that rename(2) is an atomic action.
+ * Rename the temp file to its final name, deleting the old flat file. We
+ * expect that rename(2) is an atomic action.
*/
if (rename(tempname, filename))
ereport(ERROR,
@@ -671,11 +682,13 @@ BuildFlatFiles(bool database_only)
{
ResourceOwner owner;
RelFileNode rnode;
- Relation rel_db, rel_authid, rel_authmem;
+ Relation rel_db,
+ rel_authid,
+ rel_authmem;
/*
- * We don't have any hope of running a real relcache, but we can use
- * the same fake-relcache facility that WAL replay uses.
+ * We don't have any hope of running a real relcache, but we can use the
+ * same fake-relcache facility that WAL replay uses.
*/
XLogInitRelationCache();
@@ -749,21 +762,21 @@ AtEOXact_UpdateFlatFiles(bool isCommit)
}
/*
- * Advance command counter to be certain we see all effects of the
- * current transaction.
+ * Advance command counter to be certain we see all effects of the current
+ * transaction.
*/
CommandCounterIncrement();
/*
- * We use ExclusiveLock to ensure that only one backend writes the
- * flat file(s) at a time. That's sufficient because it's okay to
- * allow plain reads of the tables in parallel. There is some chance
- * of a deadlock here (if we were triggered by a user update of one
- * of the tables, which likely won't have gotten a strong enough lock),
- * so get the locks we need before writing anything.
+ * We use ExclusiveLock to ensure that only one backend writes the flat
+ * file(s) at a time. That's sufficient because it's okay to allow plain
+ * reads of the tables in parallel. There is some chance of a deadlock
+ * here (if we were triggered by a user update of one of the tables, which
+ * likely won't have gotten a strong enough lock), so get the locks we
+ * need before writing anything.
*
- * For writing the auth file, it's sufficient to ExclusiveLock pg_authid;
- * we take just regular AccessShareLock on pg_auth_members.
+ * For writing the auth file, it's sufficient to ExclusiveLock pg_authid; we
+ * take just regular AccessShareLock on pg_auth_members.
*/
if (database_file_update_subid != InvalidSubTransactionId)
drel = heap_open(DatabaseRelationId, ExclusiveLock);
@@ -863,7 +876,7 @@ AtEOSubXact_UpdateFlatFiles(bool isCommit,
* or pg_auth_members via general-purpose INSERT/UPDATE/DELETE commands.
*
* It is sufficient for this to be a STATEMENT trigger since we don't
- * care which individual rows changed. It doesn't much matter whether
+ * care which individual rows changed. It doesn't much matter whether
* it's a BEFORE or AFTER trigger.
*/
Datum
@@ -906,11 +919,11 @@ flatfile_twophase_postcommit(TransactionId xid, uint16 info,
void *recdata, uint32 len)
{
/*
- * Set flags to do the needed file updates at the end of my own
- * current transaction. (XXX this has some issues if my own
- * transaction later rolls back, or if there is any significant
- * delay before I commit. OK for now because we disallow
- * COMMIT PREPARED inside a transaction block.)
+ * Set flags to do the needed file updates at the end of my own current
+ * transaction. (XXX this has some issues if my own transaction later
+ * rolls back, or if there is any significant delay before I commit. OK
+ * for now because we disallow COMMIT PREPARED inside a transaction
+ * block.)
*/
if (info & FF_BIT_DATABASE)
database_file_update_needed();
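
The write_auth_file() hunks above build the flat file under a temporary name and then rename(2) it over the real one, relying on rename being atomic so a reader such as the postmaster sees either the old or the new file, never a partial one. A minimal sketch of the write-temp-then-rename pattern, with hypothetical file names and content:

#include <stdio.h>

/*
 * Write "content" to "filename" atomically: write a temp file in the same
 * directory, then rename() it over the target.  The names used in main()
 * are made up for the sketch.
 */
static int
write_file_atomically(const char *filename, const char *content)
{
    char    tempname[1024];
    FILE   *fp;

    snprintf(tempname, sizeof(tempname), "%s.tmp", filename);

    fp = fopen(tempname, "w");
    if (fp == NULL)
        return -1;
    if (fputs(content, fp) == EOF || fclose(fp) == EOF)
    {
        remove(tempname);
        return -1;
    }

    /* On POSIX systems rename() atomically replaces the old file. */
    if (rename(tempname, filename) != 0)
    {
        remove(tempname);
        return -1;
    }
    return 0;
}

int
main(void)
{
    if (write_file_atomically("pg_auth_sketch", "\"alice\" \"\" \n") != 0)
        perror("write_file_atomically");
    return 0;
}

The temporary file has to live in the same directory (and therefore the same filesystem) as the target for the rename to behave this way.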
diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c
index 148e2609734..5c6f2f95d5f 100644
--- a/src/backend/utils/init/miscinit.c
+++ b/src/backend/utils/init/miscinit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.149 2005/08/17 22:14:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.150 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -37,7 +37,7 @@
#include "storage/ipc.h"
#include "storage/pg_shmem.h"
#include "storage/proc.h"
-#include "storage/procarray.h"
+#include "storage/procarray.h"
#include "utils/builtins.h"
#include "utils/guc.h"
#include "utils/lsyscache.h"
@@ -295,10 +295,10 @@ make_absolute_path(const char *path)
* DEFINER functions, as well as locally in some specialized commands.
* ----------------------------------------------------------------
*/
-static Oid AuthenticatedUserId = InvalidOid;
-static Oid SessionUserId = InvalidOid;
-static Oid OuterUserId = InvalidOid;
-static Oid CurrentUserId = InvalidOid;
+static Oid AuthenticatedUserId = InvalidOid;
+static Oid SessionUserId = InvalidOid;
+static Oid OuterUserId = InvalidOid;
+static Oid CurrentUserId = InvalidOid;
/* We also have to remember the superuser state of some of these levels */
static bool AuthenticatedUserIsSuperuser = false;
@@ -418,8 +418,8 @@ InitializeSessionUserId(const char *rolename)
/*
* These next checks are not enforced when in standalone mode, so that
- * there is a way to recover from sillinesses like
- * "UPDATE pg_authid SET rolcanlogin = false;".
+ * there is a way to recover from sillinesses like "UPDATE pg_authid SET
+ * rolcanlogin = false;".
*
* We do not enforce them for the autovacuum process either.
*/
@@ -433,15 +433,16 @@ InitializeSessionUserId(const char *rolename)
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("role \"%s\" is not permitted to log in",
rolename)));
+
/*
* Check connection limit for this role.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs. If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
- * exactly seems more trouble than it is worth, however; instead
- * we just document that the connection limit is approximate.
+ * exactly seems more trouble than it is worth, however; instead we
+ * just document that the connection limit is approximate.
*/
if (rform->rolconnlimit >= 0 &&
!AuthenticatedUserIsSuperuser &&
@@ -451,7 +452,7 @@ InitializeSessionUserId(const char *rolename)
errmsg("too many connections for role \"%s\"",
rolename)));
}
-
+
/* Record username and superuser status as GUC settings too */
SetConfigOption("session_authorization", rolename,
PGC_BACKEND, PGC_S_OVERRIDE);
@@ -460,9 +461,8 @@ InitializeSessionUserId(const char *rolename)
PGC_INTERNAL, PGC_S_OVERRIDE);
/*
- * Set up user-specific configuration variables. This is a good place
- * to do it so we don't have to read pg_authid twice during session
- * startup.
+ * Set up user-specific configuration variables. This is a good place to
+ * do it so we don't have to read pg_authid twice during session startup.
*/
datum = SysCacheGetAttr(AUTHNAME, roleTup,
Anum_pg_authid_rolconfig, &isnull);
@@ -534,7 +534,7 @@ SetSessionAuthorization(Oid userid, bool is_superuser)
!AuthenticatedUserIsSuperuser)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to set session authorization")));
+ errmsg("permission denied to set session authorization")));
SetSessionUserId(userid, is_superuser);
@@ -562,7 +562,7 @@ GetCurrentRoleId(void)
* Change Role ID while running (SET ROLE)
*
* If roleid is InvalidOid, we are doing SET ROLE NONE: revert to the
- * session user authorization. In this case the is_superuser argument
+ * session user authorization. In this case the is_superuser argument
* is ignored.
*
* When roleid is not InvalidOid, the caller must have checked whether
@@ -686,17 +686,17 @@ CreateLockFile(const char *filename, bool amPostmaster,
pid_t my_pid = getpid();
/*
- * We need a loop here because of race conditions. But don't loop
- * forever (for example, a non-writable $PGDATA directory might cause
- * a failure that won't go away). 100 tries seems like plenty.
+ * We need a loop here because of race conditions. But don't loop forever
+ * (for example, a non-writable $PGDATA directory might cause a failure
+ * that won't go away). 100 tries seems like plenty.
*/
for (ntries = 0;; ntries++)
{
/*
* Try to create the lock file --- O_EXCL makes this atomic.
*
- * Think not to make the file protection weaker than 0600. See
- * comments below.
+ * Think not to make the file protection weaker than 0600. See comments
+ * below.
*/
fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);
if (fd >= 0)
@@ -745,38 +745,38 @@ CreateLockFile(const char *filename, bool amPostmaster,
/*
* Check to see if the other process still exists
*
- * If the PID in the lockfile is our own PID or our parent's PID,
- * then the file must be stale (probably left over from a previous
- * system boot cycle). We need this test because of the likelihood
- * that a reboot will assign exactly the same PID as we had in the
- * previous reboot. Also, if there is just one more process launch
- * in this reboot than in the previous one, the lockfile might mention
- * our parent's PID. We can reject that since we'd never be launched
- * directly by a competing postmaster. We can't detect grandparent
- * processes unfortunately, but if the init script is written carefully
- * then all but the immediate parent shell will be root-owned processes
- * and so the kill test will fail with EPERM.
+ * If the PID in the lockfile is our own PID or our parent's PID, then
+ * the file must be stale (probably left over from a previous system
+ * boot cycle). We need this test because of the likelihood that a
+ * reboot will assign exactly the same PID as we had in the previous
+ * reboot. Also, if there is just one more process launch in this
+ * reboot than in the previous one, the lockfile might mention our
+ * parent's PID. We can reject that since we'd never be launched
+ * directly by a competing postmaster. We can't detect grandparent
+ * processes unfortunately, but if the init script is written
+ * carefully then all but the immediate parent shell will be
+ * root-owned processes and so the kill test will fail with EPERM.
*
* We can treat the EPERM-error case as okay because that error implies
* that the existing process has a different userid than we do, which
* means it cannot be a competing postmaster. A postmaster cannot
* successfully attach to a data directory owned by a userid other
- * than its own. (This is now checked directly in checkDataDir(),
- * but has been true for a long time because of the restriction that
- * the data directory isn't group- or world-accessible.) Also,
- * since we create the lockfiles mode 600, we'd have failed above
- * if the lockfile belonged to another userid --- which means that
- * whatever process kill() is reporting about isn't the one that
- * made the lockfile. (NOTE: this last consideration is the only
- * one that keeps us from blowing away a Unix socket file belonging
- * to an instance of Postgres being run by someone else, at least
- * on machines where /tmp hasn't got a stickybit.)
+ * than its own. (This is now checked directly in checkDataDir(), but
+ * has been true for a long time because of the restriction that the
+ * data directory isn't group- or world-accessible.) Also, since we
+ * create the lockfiles mode 600, we'd have failed above if the
+ * lockfile belonged to another userid --- which means that whatever
+ * process kill() is reporting about isn't the one that made the
+ * lockfile. (NOTE: this last consideration is the only one that
+ * keeps us from blowing away a Unix socket file belonging to an
+ * instance of Postgres being run by someone else, at least on
+ * machines where /tmp hasn't got a stickybit.)
*
- * Windows hasn't got getppid(), but doesn't need it since it's not
- * using real kill() either...
+ * Windows hasn't got getppid(), but doesn't need it since it's not using
+ * real kill() either...
*
- * Normally kill() will fail with ESRCH if the given PID doesn't
- * exist. BeOS returns EINVAL for some silly reason, however.
+ * Normally kill() will fail with ESRCH if the given PID doesn't exist.
+ * BeOS returns EINVAL for some silly reason, however.
*/
if (other_pid != my_pid
#ifndef WIN32
@@ -811,11 +811,11 @@ CreateLockFile(const char *filename, bool amPostmaster,
}
/*
- * No, the creating process did not exist. However, it could be
- * that the postmaster crashed (or more likely was kill -9'd by a
- * clueless admin) but has left orphan backends behind. Check for
- * this by looking to see if there is an associated shmem segment
- * that is still in use.
+ * No, the creating process did not exist. However, it could be that
+ * the postmaster crashed (or more likely was kill -9'd by a clueless
+ * admin) but has left orphan backends behind. Check for this by
+ * looking to see if there is an associated shmem segment that is
+ * still in use.
*/
if (isDDLock)
{
@@ -833,23 +833,23 @@ CreateLockFile(const char *filename, bool amPostmaster,
if (PGSharedMemoryIsInUse(id1, id2))
ereport(FATAL,
(errcode(ERRCODE_LOCK_FILE_EXISTS),
- errmsg("pre-existing shared memory block "
- "(key %lu, ID %lu) is still in use",
- id1, id2),
- errhint("If you're sure there are no old "
- "server processes still running, remove "
- "the shared memory block with "
- "the command \"ipcclean\", \"ipcrm\", "
- "or just delete the file \"%s\".",
- filename)));
+ errmsg("pre-existing shared memory block "
+ "(key %lu, ID %lu) is still in use",
+ id1, id2),
+ errhint("If you're sure there are no old "
+ "server processes still running, remove "
+ "the shared memory block with "
+ "the command \"ipcclean\", \"ipcrm\", "
+ "or just delete the file \"%s\".",
+ filename)));
}
}
}
/*
- * Looks like nobody's home. Unlink the file and try again to
- * create it. Need a loop because of possible race condition
- * against other would-be creators.
+ * Looks like nobody's home. Unlink the file and try again to create
+ * it. Need a loop because of possible race condition against other
+ * would-be creators.
*/
if (unlink(filename) < 0)
ereport(FATAL,
@@ -857,7 +857,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
errmsg("could not remove old lock file \"%s\": %m",
filename),
errhint("The file seems accidentally left over, but "
- "it could not be removed. Please remove the file "
+ "it could not be removed. Please remove the file "
"by hand and try again.")));
}
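
The CreateLockFile() hunks above rely on open() with O_CREAT | O_EXCL being atomic: exactly one process can create the lock file, and a process that loses the race either defers to a live holder or, after the staleness checks, unlinks the file and retries, with the whole loop bounded at 100 attempts. A reduced standalone sketch of that skeleton, assuming POSIX open/unlink and a hypothetical lock path; the PID and shared-memory checks are collapsed into a stub:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>

/* Stub: a real implementation reads the stored PID and probes it. */
static int
lock_looks_stale(const char *filename)
{
    (void) filename;
    return 0;                   /* assume the holder is alive */
}

static int
create_lock_file(const char *filename)
{
    int     ntries;

    for (ntries = 0; ntries < 100; ntries++)
    {
        /* O_EXCL makes creation atomic; mode 0600 keeps it private. */
        int     fd = open(filename, O_RDWR | O_CREAT | O_EXCL, 0600);

        if (fd >= 0)
        {
            char    buf[32];

            snprintf(buf, sizeof(buf), "%d\n", (int) getpid());
            if (write(fd, buf, strlen(buf)) != (ssize_t) strlen(buf))
            {
                close(fd);
                unlink(filename);
                return -1;
            }
            close(fd);
            return 0;           /* we hold the lock */
        }

        if (errno != EEXIST)
            return -1;          /* e.g. unwritable directory: give up */

        if (!lock_looks_stale(filename))
            return -1;          /* somebody else really holds it */

        /* Stale: remove it and loop to race for re-creation. */
        if (unlink(filename) < 0 && errno != ENOENT)
            return -1;
    }
    return -1;
}

int
main(void)
{
    puts(create_lock_file("/tmp/example.lock") == 0
         ? "lock acquired" : "could not acquire lock");
    return 0;
}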
@@ -878,7 +878,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
errno = save_errno ? save_errno : ENOSPC;
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not write lock file \"%s\": %m", filename)));
+ errmsg("could not write lock file \"%s\": %m", filename)));
}
if (close(fd))
{
@@ -888,7 +888,7 @@ CreateLockFile(const char *filename, bool amPostmaster,
errno = save_errno;
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not write lock file \"%s\": %m", filename)));
+ errmsg("could not write lock file \"%s\": %m", filename)));
}
/*
@@ -939,10 +939,10 @@ TouchSocketLockFile(void)
if (socketLockFile[0] != '\0')
{
/*
- * utime() is POSIX standard, utimes() is a common alternative; if
- * we have neither, fall back to actually reading the file (which
- * only sets the access time not mod time, but that should be
- * enough in most cases). In all paths, we ignore errors.
+ * utime() is POSIX standard, utimes() is a common alternative; if we
+ * have neither, fall back to actually reading the file (which only
+ * sets the access time not mod time, but that should be enough in
+ * most cases). In all paths, we ignore errors.
*/
#ifdef HAVE_UTIME
utime(socketLockFile, NULL);
@@ -1093,7 +1093,7 @@ ValidatePgVersion(const char *path)
else
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\": %m", full_path)));
+ errmsg("could not open file \"%s\": %m", full_path)));
}
ret = fscanf(file, "%ld.%ld", &file_major, &file_minor);
@@ -1113,7 +1113,7 @@ ValidatePgVersion(const char *path)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("database files are incompatible with server"),
errdetail("The data directory was initialized by PostgreSQL version %ld.%ld, "
- "which is not compatible with this version %s.",
+ "which is not compatible with this version %s.",
file_major, file_minor, version_string)));
}
@@ -1149,7 +1149,7 @@ process_preload_libraries(char *preload_libraries_string)
list_free(elemlist);
ereport(LOG,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("invalid list syntax for parameter \"preload_libraries\"")));
+ errmsg("invalid list syntax for parameter \"preload_libraries\"")));
return;
}
@@ -1164,9 +1164,8 @@ process_preload_libraries(char *preload_libraries_string)
if (sep)
{
/*
- * a colon separator implies there is an initialization
- * function that we need to run in addition to loading the
- * library
+ * a colon separator implies there is an initialization function
+ * that we need to run in addition to loading the library
*/
size_t filename_len = sep - tok;
size_t funcname_len = strlen(tok) - filename_len - 1;
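
The final hunk rewraps the comment on the colon convention in preload_libraries: an element of the form "library:function" asks for the named initialization function to be called after the library is loaded, and the code splits the token at the separator using the two length computations shown. A small sketch of that split, assuming a hypothetical "mylib:my_init" element:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
    const char *tok = "mylib:my_init";  /* hypothetical list element */
    const char *sep = strchr(tok, ':');
    char       *filename;
    char       *funcname = NULL;

    if (sep)
    {
        size_t  filename_len = (size_t) (sep - tok);
        size_t  funcname_len = strlen(tok) - filename_len - 1;

        filename = malloc(filename_len + 1);
        memcpy(filename, tok, filename_len);
        filename[filename_len] = '\0';

        funcname = malloc(funcname_len + 1);
        memcpy(funcname, sep + 1, funcname_len);
        funcname[funcname_len] = '\0';
    }
    else
    {
        filename = malloc(strlen(tok) + 1);     /* no init function requested */
        strcpy(filename, tok);
    }

    printf("library=%s function=%s\n", filename,
           funcname ? funcname : "(none)");
    free(filename);
    free(funcname);
    return 0;
}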
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index 73fedbdd477..3c763e39292 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.157 2005/08/11 21:11:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.158 2005/10/15 02:49:33 momjian Exp $
*
*
*-------------------------------------------------------------------------
@@ -112,7 +112,7 @@ FindMyDatabase(const char *name, Oid *db_id, Oid *db_tablespace)
*
* Since FindMyDatabase cannot lock pg_database, the information it read
* could be stale; for example we might have attached to a database that's in
- * process of being destroyed by dropdb(). This routine is called after
+ * process of being destroyed by dropdb(). This routine is called after
* we have all the locking and other infrastructure running --- now we can
* check that we are really attached to a valid database.
*
@@ -134,14 +134,14 @@ static void
ReverifyMyDatabase(const char *name)
{
Relation pgdbrel;
- SysScanDesc pgdbscan;
+ SysScanDesc pgdbscan;
ScanKeyData key;
HeapTuple tup;
Form_pg_database dbform;
/*
- * Because we grab RowShareLock here, we can be sure that dropdb()
- * is not running in parallel with us (any more).
+ * Because we grab RowShareLock here, we can be sure that dropdb() is not
+ * running in parallel with us (any more).
*/
pgdbrel = heap_open(DatabaseRelationId, RowShareLock);
@@ -161,17 +161,17 @@ ReverifyMyDatabase(const char *name)
heap_close(pgdbrel, RowShareLock);
/*
- * The only real problem I could have created is to load dirty
- * buffers for the dead database into shared buffer cache; if I
- * did, some other backend will eventually try to write them and
- * die in mdblindwrt. Flush any such pages to forestall trouble.
+ * The only real problem I could have created is to load dirty buffers
+ * for the dead database into shared buffer cache; if I did, some
+ * other backend will eventually try to write them and die in
+ * mdblindwrt. Flush any such pages to forestall trouble.
*/
DropBuffers(MyDatabaseId);
/* Now I can commit hara-kiri with a clear conscience... */
ereport(FATAL,
(errcode(ERRCODE_UNDEFINED_DATABASE),
- errmsg("database \"%s\", OID %u, has disappeared from pg_database",
- name, MyDatabaseId)));
+ errmsg("database \"%s\", OID %u, has disappeared from pg_database",
+ name, MyDatabaseId)));
}
dbform = (Form_pg_database) GETSTRUCT(tup);
@@ -191,17 +191,18 @@ ReverifyMyDatabase(const char *name)
if (!dbform->datallowconn)
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("database \"%s\" is not currently accepting connections",
- name)));
+ errmsg("database \"%s\" is not currently accepting connections",
+ name)));
+
/*
* Check connection limit for this database.
*
* There is a race condition here --- we create our PGPROC before
- * checking for other PGPROCs. If two backends did this at about the
+ * checking for other PGPROCs. If two backends did this at about the
* same time, they might both think they were over the limit, while
* ideally one should succeed and one fail. Getting that to work
- * exactly seems more trouble than it is worth, however; instead
- * we just document that the connection limit is approximate.
+ * exactly seems more trouble than it is worth, however; instead we
+ * just document that the connection limit is approximate.
*/
if (dbform->datconnlimit >= 0 &&
!superuser() &&
@@ -213,8 +214,8 @@ ReverifyMyDatabase(const char *name)
}
/*
- * OK, we're golden. Next to-do item is to save the encoding
- * info out of the pg_database tuple.
+ * OK, we're golden. Next to-do item is to save the encoding info out of
+ * the pg_database tuple.
*/
SetDatabaseEncoding(dbform->encoding);
/* Record it as a GUC internal option, too */
@@ -264,8 +265,8 @@ InitCommunication(void)
if (!IsUnderPostmaster) /* postmaster already did this */
{
/*
- * We're running a postgres bootstrap process or a standalone
- * backend. Create private "shmem" and semaphores.
+ * We're running a postgres bootstrap process or a standalone backend.
+ * Create private "shmem" and semaphores.
*/
CreateSharedMemoryAndSemaphores(true, 0);
}
@@ -309,7 +310,7 @@ BaseInit(void)
* The return value indicates whether the userID is a superuser. (That
* can only be tested inside a transaction, so we want to do it during
* the startup transaction rather than doing a separate one in postgres.c.)
- *
+ *
* Note:
* Be very careful with the order of calls in the InitPostgres function.
* --------------------------------
@@ -324,8 +325,8 @@ InitPostgres(const char *dbname, const char *username)
/*
* Set up the global variables holding database id and path.
*
- * We take a shortcut in the bootstrap case, otherwise we have to look up
- * the db name in pg_database.
+ * We take a shortcut in the bootstrap case, otherwise we have to look up the
+ * db name in pg_database.
*/
if (bootstrap)
{
@@ -338,13 +339,12 @@ InitPostgres(const char *dbname, const char *username)
char *fullpath;
/*
- * Formerly we validated DataDir here, but now that's done
- * earlier.
+ * Formerly we validated DataDir here, but now that's done earlier.
*/
/*
- * Find oid and tablespace of the database we're about to open.
- * Since we're not yet up and running we have to use the hackish
+ * Find oid and tablespace of the database we're about to open. Since
+ * we're not yet up and running we have to use the hackish
* FindMyDatabase.
*/
if (!FindMyDatabase(dbname, &MyDatabaseId, &MyDatabaseTableSpace))
@@ -364,8 +364,8 @@ InitPostgres(const char *dbname, const char *username)
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("database \"%s\" does not exist",
dbname),
- errdetail("The database subdirectory \"%s\" is missing.",
- fullpath)));
+ errdetail("The database subdirectory \"%s\" is missing.",
+ fullpath)));
else
ereport(FATAL,
(errcode_for_file_access(),
@@ -383,17 +383,17 @@ InitPostgres(const char *dbname, const char *username)
*/
/*
- * Set up my per-backend PGPROC struct in shared memory. (We need
- * to know MyDatabaseId before we can do this, since it's entered into
- * the PGPROC struct.)
+ * Set up my per-backend PGPROC struct in shared memory. (We need to
+ * know MyDatabaseId before we can do this, since it's entered into the
+ * PGPROC struct.)
*/
InitProcess();
/*
* Initialize my entry in the shared-invalidation manager's array of
- * per-backend data. (Formerly this came before InitProcess, but now
- * it must happen after, because it uses MyProc.) Once I have done
- * this, I am visible to other backends!
+ * per-backend data. (Formerly this came before InitProcess, but now it
+ * must happen after, because it uses MyProc.) Once I have done this, I
+ * am visible to other backends!
*
* Sets up MyBackendId, a unique backend identifier.
*/
@@ -410,22 +410,22 @@ InitPostgres(const char *dbname, const char *username)
InitBufferPoolBackend();
/*
- * Initialize local process's access to XLOG. In bootstrap case we
- * may skip this since StartupXLOG() was run instead.
+ * Initialize local process's access to XLOG. In bootstrap case we may
+ * skip this since StartupXLOG() was run instead.
*/
if (!bootstrap)
InitXLOGAccess();
/*
- * Initialize the relation descriptor cache. This must create at
- * least the minimum set of "nailed-in" cache entries. No catalog
- * access happens here.
+ * Initialize the relation descriptor cache. This must create at least
+ * the minimum set of "nailed-in" cache entries. No catalog access
+ * happens here.
*/
RelationCacheInitialize();
/*
- * Initialize all the system catalog caches. Note that no catalog
- * access happens here; we only set up the cache structure.
+ * Initialize all the system catalog caches. Note that no catalog access
+ * happens here; we only set up the cache structure.
*/
InitCatalogCache();
@@ -433,14 +433,13 @@ InitPostgres(const char *dbname, const char *username)
EnablePortalManager();
/*
- * Set up process-exit callback to do pre-shutdown cleanup. This
- * has to be after we've initialized all the low-level modules
- * like the buffer manager, because during shutdown this has to
- * run before the low-level modules start to close down. On the
- * other hand, we want it in place before we begin our first
- * transaction --- if we fail during the initialization transaction,
- * as is entirely possible, we need the AbortTransaction call to
- * clean up.
+ * Set up process-exit callback to do pre-shutdown cleanup. This has to
+ * be after we've initialized all the low-level modules like the buffer
+ * manager, because during shutdown this has to run before the low-level
+ * modules start to close down. On the other hand, we want it in place
+ * before we begin our first transaction --- if we fail during the
+ * initialization transaction, as is entirely possible, we need the
+ * AbortTransaction call to clean up.
*/
on_shmem_exit(ShutdownPostgres, 0);
@@ -479,18 +478,18 @@ InitPostgres(const char *dbname, const char *username)
}
/*
- * Unless we are bootstrapping, double-check that InitMyDatabaseInfo()
- * got a correct result. We can't do this until all the
- * database-access infrastructure is up. (Also, it wants to know if
- * the user is a superuser, so the above stuff has to happen first.)
+ * Unless we are bootstrapping, double-check that InitMyDatabaseInfo() got
+ * a correct result. We can't do this until all the database-access
+ * infrastructure is up. (Also, it wants to know if the user is a
+ * superuser, so the above stuff has to happen first.)
*/
if (!bootstrap)
ReverifyMyDatabase(dbname);
/*
* Final phase of relation cache startup: write a new cache file if
- * necessary. This is done after ReverifyMyDatabase to avoid writing
- * a cache file into a dead database.
+ * necessary. This is done after ReverifyMyDatabase to avoid writing a
+ * cache file into a dead database.
*/
RelationCacheInitializePhase3();
@@ -555,8 +554,8 @@ ShutdownPostgres(int code, Datum arg)
AbortOutOfAnyTransaction();
/*
- * User locks are not released by transaction end, so be sure to
- * release them explicitly.
+ * User locks are not released by transaction end, so be sure to release
+ * them explicitly.
*/
#ifdef USER_LOCKS
LockReleaseAll(USER_LOCKMETHOD, true);
diff --git a/src/backend/utils/mb/conv.c b/src/backend/utils/mb/conv.c
index 02082db5ef7..a395384c931 100644
--- a/src/backend/utils/mb/conv.c
+++ b/src/backend/utils/mb/conv.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conv.c,v 1.54 2005/09/24 17:53:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conv.c,v 1.55 2005/10/15 02:49:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -223,8 +223,8 @@ pg_mic2ascii(unsigned char *mic, unsigned char *p, int len)
void
latin2mic_with_table(
unsigned char *l, /* local charset string (source) */
- unsigned char *p, /* pointer to store mule internal
- * code (destination) */
+ unsigned char *p, /* pointer to store mule internal code
+ * (destination) */
int len, /* length of l */
int lc, /* leading character of p */
unsigned char *tab /* code conversion table */
@@ -265,8 +265,7 @@ latin2mic_with_table(
*/
void
mic2latin_with_table(
- unsigned char *mic, /* mule internal code
- * (source) */
+ unsigned char *mic, /* mule internal code (source) */
unsigned char *p, /* local code (destination) */
int len, /* length of p */
int lc, /* leading character */
@@ -380,8 +379,8 @@ UtfToLocal(unsigned char *utf, unsigned char *iso,
{
ereport(WARNING,
(errcode(ERRCODE_UNTRANSLATABLE_CHARACTER),
- errmsg("ignoring unconvertible UTF8 character 0x%04x",
- iutf)));
+ errmsg("ignoring unconvertible UTF8 character 0x%04x",
+ iutf)));
continue;
}
if (p->code & 0xff000000)
diff --git a/src/backend/utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c b/src/backend/utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c
index 277224103ac..3b215b2c4fb 100644
--- a/src/backend/utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c
+++ b/src/backend/utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c,v 1.12 2005/09/24 17:53:18 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_jp_and_sjis/euc_jp_and_sjis.c,v 1.13 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -462,7 +462,7 @@ euc_jp2sjis(unsigned char *euc, unsigned char *p, int len)
while (euc_end >= euc && (c1 = *euc++))
{
- if(c1 < 0x80)
+ if (c1 < 0x80)
{
/* should be ASCII */
*p++ = c1;
@@ -487,7 +487,8 @@ euc_jp2sjis(unsigned char *euc, unsigned char *p, int len)
}
else
{
- int i, k2;
+ int i,
+ k2;
/* IBM kanji */
for (i = 0;; i++)
@@ -508,9 +509,9 @@ euc_jp2sjis(unsigned char *euc, unsigned char *p, int len)
}
}
}
- }
+ }
else
- {
+ {
/* JIS X0208 kanji? */
c2 = *euc++;
k = (c1 << 8) | (c2 & 0xff);
@@ -543,7 +544,7 @@ sjis2euc_jp(unsigned char *sjis, unsigned char *p, int len)
while (sjis_end >= sjis && (c1 = *sjis++))
{
- if(c1 < 0x80)
+ if (c1 < 0x80)
{
/* should be ASCII */
*p++ = c1;
@@ -643,4 +644,3 @@ sjis2euc_jp(unsigned char *sjis, unsigned char *p, int len)
}
*p = '\0';
}
-
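
The euc_jp2sjis/sjis2euc_jp hunks above show the general shape of these converters: bytes below 0x80 pass straight through as ASCII, while anything else is treated as a lead byte that consumes one or more following bytes and, in the awkward cases, is mapped by scanning a small translation table. A toy sketch of that dispatch with a made-up two-entry table (not real EUC-JP or SJIS data), assuming well-formed input:

#include <stdio.h>

typedef struct
{
    unsigned short from;        /* two-byte code in the source encoding */
    unsigned short to;          /* two-byte code in the target encoding */
} pair;

/* Entirely invented mappings, just to show the table scan. */
static const pair table[] = {
    {0xa1a1, 0x8140},
    {0xa1a2, 0x8141},
};

static void
convert(const unsigned char *src, unsigned char *dst, int len)
{
    const unsigned char *end = src + len;

    while (src < end && *src)
    {
        unsigned char c1 = *src++;

        if (c1 < 0x80)
        {
            *dst++ = c1;        /* ASCII passes through unchanged */
            continue;
        }

        {
            unsigned short code = (unsigned short) ((c1 << 8) | *src++);
            size_t      i;

            for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            {
                if (table[i].from == code)
                {
                    *dst++ = (unsigned char) (table[i].to >> 8);
                    *dst++ = (unsigned char) (table[i].to & 0xff);
                    break;
                }
            }
            /* unknown codes are silently dropped in this toy version */
        }
    }
    *dst = '\0';
}

int
main(void)
{
    const unsigned char src[] = {'A', 0xa1, 0xa2, 'B', 0};
    unsigned char dst[16];

    convert(src, dst, 4);
    printf("%02x %02x %02x %02x\n", dst[0], dst[1], dst[2], dst[3]);
    return 0;
}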
diff --git a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
index 4bdbfe95635..0447c2a9e7d 100644
--- a/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
+++ b/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
@@ -7,7 +7,7 @@
*
* 1999/1/15 Tatsuo Ishii
*
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c,v 1.5 2004/08/30 02:54:40 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c,v 1.6 2005/10/15 02:49:34 momjian Exp $
*/
/* can be used in either frontend or backend */
@@ -19,7 +19,7 @@ typedef struct
{
unsigned short code,
peer;
-} codes_t;
+} codes_t;
/* map Big5 Level 1 to CNS 11643-1992 Plane 1 */
static codes_t big5Level1ToCnsPlane1[25] = { /* range */
@@ -205,7 +205,7 @@ static unsigned short b2c3[][2] = {
};
static unsigned short BinarySearchRange
- (codes_t *array, int high, unsigned short code)
+ (codes_t * array, int high, unsigned short code)
{
int low,
mid,
@@ -230,24 +230,24 @@ static unsigned short BinarySearchRange
/*
* NOTE: big5 high_byte: 0xa1-0xfe, low_byte: 0x40-0x7e,
- * 0xa1-0xfe (radicals: 0x00-0x3e, 0x3f-0x9c) big5 radix
- * is 0x9d. [region_low, region_high]
- * We should remember big5 has two different regions
- * (above). There is a bias for the distance between these
- * regions. 0xa1 - 0x7e + bias = 1 (Distance between 0xa1
- * and 0x7e is 1.) bias = - 0x22.
+ * 0xa1-0xfe (radicals: 0x00-0x3e, 0x3f-0x9c) big5 radix is
+ * 0x9d. [region_low, region_high] We
+ * should remember big5 has two different regions (above).
+ * There is a bias for the distance between these regions.
+ * 0xa1 - 0x7e + bias = 1 (Distance between 0xa1 and 0x7e is
+ * 1.) bias = - 0x22.
*/
distance = tmp * 0x9d + high - low +
(high >= 0xa1 ? (low >= 0xa1 ? 0 : -0x22)
: (low >= 0xa1 ? +0x22 : 0));
/*
- * NOTE: we have to convert the distance into a code
- * point. The code point's low_byte is 0x21 plus mod_0x5e.
- * In the first, we extract the mod_0x5e of the starting
- * code point, subtracting 0x21, and add distance to it.
- * Then we calculate again mod_0x5e of them, and restore
- * the final codepoint, adding 0x21.
+ * NOTE: we have to convert the distance into a code point.
+ * The code point's low_byte is 0x21 plus mod_0x5e. In the
+ * first, we extract the mod_0x5e of the starting code point,
+ * subtracting 0x21, and add distance to it. Then we calculate
+ * again mod_0x5e of them, and restore the final codepoint,
+ * adding 0x21.
*/
tmp = (array[mid].peer & 0x00ff) + distance - 0x21;
tmp = (array[mid].peer & 0xff00) + ((tmp / 0x5e) << 8)
@@ -260,9 +260,8 @@ static unsigned short BinarySearchRange
tmp = ((code & 0xff00) - (array[mid].code & 0xff00)) >> 8;
/*
- * NOTE: ISO charsets ranges between 0x21-0xfe
- * (94charset). Its radix is 0x5e. But there is no
- * distance bias like big5.
+ * NOTE: ISO charsets ranges between 0x21-0xfe (94charset).
+ * Its radix is 0x5e. But there is no distance bias like big5.
*/
distance = tmp * 0x5e
+ ((int) (code & 0x00ff) - (int) (array[mid].code & 0x00ff));
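
The reformatted comments above describe the arithmetic in BinarySearchRange: once the matching range is found, a linear distance from the start of the range is computed in the source encoding's radix (0x9d for Big5, with a bias of -0x22 between its two low-byte regions; 0x5e for the 94-character sets), and that distance is then re-encoded as a target code point whose low byte starts at 0x21 with radix 0x5e. A small sketch of the re-encoding step, advancing a 94-charset code point by a distance under the 0x21..0x7e low-byte convention:

#include <stdio.h>

/*
 * Advance a two-byte 94-charset code point (high and low byte both in
 * 0x21..0x7e, radix 0x5e) by "distance" positions -- the same re-encoding
 * the comment above spells out.
 */
static unsigned short
advance_94(unsigned short start, int distance)
{
    int     tmp = (start & 0x00ff) + distance - 0x21;

    return (unsigned short) ((start & 0xff00)
                             + ((tmp / 0x5e) << 8)
                             + 0x21 + tmp % 0x5e);
}

int
main(void)
{
    /* 0x2121 advanced by 0x5e positions lands on 0x2221 (next row) */
    printf("0x%04x\n", advance_94(0x2121, 0x5e));
    /* and by one position on 0x2122 */
    printf("0x%04x\n", advance_94(0x2121, 1));
    return 0;
}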
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c b/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c
index 3330f89d5df..efab622c94f 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c,v 1.11 2005/09/24 17:53:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -75,7 +75,7 @@ koi8r_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapKOI8R,
- sizeof(LUmapKOI8R) / sizeof(pg_local_to_utf), PG_KOI8R, len);
+ sizeof(LUmapKOI8R) / sizeof(pg_local_to_utf), PG_KOI8R, len);
PG_RETURN_VOID();
}
@@ -109,7 +109,7 @@ win1251_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN1251,
- sizeof(LUmapWIN1251) / sizeof(pg_local_to_utf), PG_WIN1251, len);
+ sizeof(LUmapWIN1251) / sizeof(pg_local_to_utf), PG_WIN1251, len);
PG_RETURN_VOID();
}
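
Every wrapper in these conversion files hands its static map to LocalToUtf or UtfToLocal together with sizeof(map) / sizeof(element), the usual C idiom for the number of entries in a statically sized array, so no separate length constant can drift out of sync with the table. A trivial illustration with a made-up table type:

#include <stdio.h>

typedef struct
{
    unsigned int local;         /* code point in the local encoding */
    unsigned int utf;           /* corresponding UTF-8 value */
} local_to_utf_sketch;

static const local_to_utf_sketch map[] = {
    {0xa3, 0xc2a3},
    {0xc0, 0xc380},
    {0xff, 0xc3bf},
};

int
main(void)
{
    /* element count derived from the array itself */
    size_t  n = sizeof(map) / sizeof(map[0]);

    printf("%zu entries\n", n);
    return 0;
}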
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c b/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c
index 29196cb4e8d..9dcd87355a8 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c,v 1.11 2005/09/24 17:53:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_cn/utf8_and_euc_cn.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ euc_cn_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapEUC_CN,
- sizeof(LUmapEUC_CN) / sizeof(pg_local_to_utf), PG_EUC_CN, len);
+ sizeof(LUmapEUC_CN) / sizeof(pg_local_to_utf), PG_EUC_CN, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c b/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c
index bbe849de904..4231bc08dff 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c,v 1.11 2005/09/24 17:53:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_jp/utf8_and_euc_jp.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ euc_jp_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapEUC_JP,
- sizeof(LUmapEUC_JP) / sizeof(pg_local_to_utf), PG_EUC_JP, len);
+ sizeof(LUmapEUC_JP) / sizeof(pg_local_to_utf), PG_EUC_JP, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c b/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c
index db1505ab626..b197b064eeb 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c,v 1.11 2005/09/24 17:53:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_kr/utf8_and_euc_kr.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ euc_kr_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapEUC_KR,
- sizeof(LUmapEUC_KR) / sizeof(pg_local_to_utf), PG_EUC_KR, len);
+ sizeof(LUmapEUC_KR) / sizeof(pg_local_to_utf), PG_EUC_KR, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c b/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c
index cc05c64cc13..b2f7d465a77 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c,v 1.11 2005/09/24 17:53:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_euc_tw/utf8_and_euc_tw.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ euc_tw_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapEUC_TW,
- sizeof(LUmapEUC_TW) / sizeof(pg_local_to_utf), PG_EUC_TW, len);
+ sizeof(LUmapEUC_TW) / sizeof(pg_local_to_utf), PG_EUC_TW, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c b/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c
index 37f207ec153..6cc235e7327 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c,v 1.11 2005/09/24 17:53:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c,v 1.12 2005/10/15 02:49:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ gb18030_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapGB18030,
- sizeof(LUmapGB18030) / sizeof(pg_local_to_utf), PG_GB18030, len);
+ sizeof(LUmapGB18030) / sizeof(pg_local_to_utf), PG_GB18030, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
index b6c56324ec3..0038db58e62 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c,v 1.14 2005/09/24 17:53:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c,v 1.15 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,7 +65,7 @@ typedef struct
pg_utf_to_local *map2; /* from UTF8 map name */
int size1; /* size of map1 */
int size2; /* size of map2 */
-} pg_conv_map;
+} pg_conv_map;
static pg_conv_map maps[] = {
{PG_SQL_ASCII}, /* SQL/ASCII */
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c b/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c
index 9e93be9c496..3de91947af8 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c,v 1.11 2005/09/24 17:53:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_johab/utf8_and_johab.c,v 1.12 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ johab_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapJOHAB,
- sizeof(LUmapJOHAB) / sizeof(pg_local_to_utf), PG_JOHAB, len);
+ sizeof(LUmapJOHAB) / sizeof(pg_local_to_utf), PG_JOHAB, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_win1250/utf8_and_win1250.c b/src/backend/utils/mb/conversion_procs/utf8_and_win1250/utf8_and_win1250.c
index cd79893ae8c..6789ca7aaa5 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_win1250/utf8_and_win1250.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_win1250/utf8_and_win1250.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1250/utf8_and_win1250.c,v 1.12 2005/09/24 17:53:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1250/utf8_and_win1250.c,v 1.13 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,7 +63,7 @@ win1250_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN1250,
- sizeof(LUmapWIN1250) / sizeof(pg_local_to_utf), PG_WIN1250, len);
+ sizeof(LUmapWIN1250) / sizeof(pg_local_to_utf), PG_WIN1250, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_win1252/utf8_and_win1252.c b/src/backend/utils/mb/conversion_procs/utf8_and_win1252/utf8_and_win1252.c
index b7592b6c30c..b4d2b2375a9 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_win1252/utf8_and_win1252.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_win1252/utf8_and_win1252.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1252/utf8_and_win1252.c,v 1.4 2005/09/24 17:53:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1252/utf8_and_win1252.c,v 1.5 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,7 +63,7 @@ win1252_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN1252,
- sizeof(LUmapWIN1252) / sizeof(pg_local_to_utf), PG_WIN1252, len);
+ sizeof(LUmapWIN1252) / sizeof(pg_local_to_utf), PG_WIN1252, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_win1256/utf8_and_win1256.c b/src/backend/utils/mb/conversion_procs/utf8_and_win1256/utf8_and_win1256.c
index a0036d80e63..d6b83d8f837 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_win1256/utf8_and_win1256.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_win1256/utf8_and_win1256.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1256/utf8_and_win1256.c,v 1.12 2005/09/24 17:53:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1256/utf8_and_win1256.c,v 1.13 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,7 +63,7 @@ win1256_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN1256,
- sizeof(LUmapWIN1256) / sizeof(pg_local_to_utf), PG_WIN1256, len);
+ sizeof(LUmapWIN1256) / sizeof(pg_local_to_utf), PG_WIN1256, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_win1258/utf8_and_win1258.c b/src/backend/utils/mb/conversion_procs/utf8_and_win1258/utf8_and_win1258.c
index 3b74f81ba9c..7cdcfd3c120 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_win1258/utf8_and_win1258.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_win1258/utf8_and_win1258.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1258/utf8_and_win1258.c,v 1.2 2005/09/24 17:53:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win1258/utf8_and_win1258.c,v 1.3 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -45,7 +45,7 @@ win1258_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN1258,
- sizeof(LUmapWIN1258) / sizeof(pg_local_to_utf), PG_WIN1258, len);
+ sizeof(LUmapWIN1258) / sizeof(pg_local_to_utf), PG_WIN1258, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/conversion_procs/utf8_and_win874/utf8_and_win874.c b/src/backend/utils/mb/conversion_procs/utf8_and_win874/utf8_and_win874.c
index 48f30b2cad9..7eda096a9be 100644
--- a/src/backend/utils/mb/conversion_procs/utf8_and_win874/utf8_and_win874.c
+++ b/src/backend/utils/mb/conversion_procs/utf8_and_win874/utf8_and_win874.c
@@ -6,7 +6,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win874/utf8_and_win874.c,v 1.12 2005/09/24 17:53:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win874/utf8_and_win874.c,v 1.13 2005/10/15 02:49:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -63,7 +63,7 @@ win874_to_utf8(PG_FUNCTION_ARGS)
Assert(len >= 0);
LocalToUtf(src, dest, LUmapWIN874,
- sizeof(LUmapWIN874) / sizeof(pg_local_to_utf), PG_WIN874, len);
+ sizeof(LUmapWIN874) / sizeof(pg_local_to_utf), PG_WIN874, len);
PG_RETURN_VOID();
}
diff --git a/src/backend/utils/mb/encnames.c b/src/backend/utils/mb/encnames.c
index 5c0b15fd745..145343a881d 100644
--- a/src/backend/utils/mb/encnames.c
+++ b/src/backend/utils/mb/encnames.c
@@ -2,7 +2,7 @@
* Encoding names and routines for work with it. All
* in this file is shared bedween FE and BE.
*
- * $PostgreSQL: pgsql/src/backend/utils/mb/encnames.c,v 1.25 2005/03/14 18:31:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/encnames.c,v 1.26 2005/10/15 02:49:33 momjian Exp $
*/
#ifdef FRONTEND
#include "postgres_fe.h"
@@ -45,16 +45,16 @@ pg_encname pg_encname_tbl[] =
}, /* Big5; Chinese for Taiwan multibyte set */
{
"euccn", PG_EUC_CN
- }, /* EUC-CN; Extended Unix Code for
- * simplified Chinese */
+ }, /* EUC-CN; Extended Unix Code for simplified
+ * Chinese */
{
"eucjp", PG_EUC_JP
- }, /* EUC-JP; Extended UNIX Code fixed Width
- * for Japanese, standard OSF */
+ }, /* EUC-JP; Extended UNIX Code fixed Width for
+ * Japanese, standard OSF */
{
"euckr", PG_EUC_KR
- }, /* EUC-KR; Extended Unix Code for Korean ,
- * KS X 1001 standard */
+ }, /* EUC-KR; Extended Unix Code for Korean , KS
+ * X 1001 standard */
{
"euctw", PG_EUC_TW
}, /* EUC-TW; Extended Unix Code for
@@ -111,8 +111,8 @@ pg_encname pg_encname_tbl[] =
}, /* ISO-8859-9; RFC1345,KXS2 */
{
"johab", PG_JOHAB
- }, /* JOHAB; Extended Unix Code for
- * simplified Chinese */
+ }, /* JOHAB; Extended Unix Code for simplified
+ * Chinese */
{
"koi8", PG_KOI8R
}, /* _dirty_ alias for KOI8-R (backward
@@ -185,8 +185,8 @@ pg_encname pg_encname_tbl[] =
}, /* alias for WIN1258 */
{
"win", PG_WIN1251
- }, /* _dirty_ alias for windows-1251
- * (backward compatibility) */
+ }, /* _dirty_ alias for windows-1251 (backward
+ * compatibility) */
{
"win1250", PG_WIN1250
}, /* alias for Windows-1250 */
diff --git a/src/backend/utils/mb/mbutils.c b/src/backend/utils/mb/mbutils.c
index 8058fc8d27a..f8dc7a31922 100644
--- a/src/backend/utils/mb/mbutils.c
+++ b/src/backend/utils/mb/mbutils.c
@@ -4,7 +4,7 @@
* (currently mule internal code (mic) is used)
* Tatsuo Ishii
*
- * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.51 2005/09/24 17:53:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.52 2005/10/15 02:49:33 momjian Exp $
*/
#include "postgres.h"
@@ -107,12 +107,11 @@ SetClientEncoding(int encoding, bool doit)
}
/*
- * If we're not inside a transaction then we can't do catalog lookups,
- * so fail. After backend startup, this could only happen if we are
+ * If we're not inside a transaction then we can't do catalog lookups, so
+ * fail. After backend startup, this could only happen if we are
* re-reading postgresql.conf due to SIGHUP --- so basically this just
* constrains the ability to change client_encoding on the fly from
- * postgresql.conf. Which would probably be a stupid thing to do
- * anyway.
+ * postgresql.conf. Which would probably be a stupid thing to do anyway.
*/
if (!IsTransactionState())
return -1;
@@ -136,8 +135,8 @@ SetClientEncoding(int encoding, bool doit)
return 0;
/*
- * load the fmgr info into TopMemoryContext so that it survives
- * outside transaction.
+ * load the fmgr info into TopMemoryContext so that it survives outside
+ * transaction.
*/
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
to_server = palloc(sizeof(FmgrInfo));
@@ -180,8 +179,8 @@ InitializeClientEncoding(void)
if (SetClientEncoding(pending_client_encoding, true) < 0)
{
/*
- * Oops, the requested conversion is not available. We couldn't
- * fail before, but we can now.
+ * Oops, the requested conversion is not available. We couldn't fail
+ * before, but we can now.
*/
ereport(FATAL,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -256,8 +255,8 @@ pg_do_encoding_conversion(unsigned char *src, int len,
}
/*
- * XXX we should avoid throwing errors in OidFunctionCall. Otherwise
- * we are going into infinite loop! So we have to make sure that the
+ * XXX we should avoid throwing errors in OidFunctionCall. Otherwise we
+ * are going into infinite loop! So we have to make sure that the
* function exists before calling OidFunctionCall.
*/
if (!SearchSysCacheExists(PROCOID,
@@ -290,11 +289,11 @@ pg_convert(PG_FUNCTION_ARGS)
Datum string = PG_GETARG_DATUM(0);
Datum dest_encoding_name = PG_GETARG_DATUM(1);
Datum src_encoding_name = DirectFunctionCall1(
- namein, CStringGetDatum(DatabaseEncoding->name));
+ namein, CStringGetDatum(DatabaseEncoding->name));
Datum result;
result = DirectFunctionCall3(
- pg_convert2, string, src_encoding_name, dest_encoding_name);
+ pg_convert2, string, src_encoding_name, dest_encoding_name);
/* free memory allocated by namein */
pfree((void *) src_encoding_name);
@@ -343,8 +342,7 @@ pg_convert2(PG_FUNCTION_ARGS)
/*
* build text data type structure. we cannot use textin() here, since
- * textin assumes that input string encoding is same as database
- * encoding.
+ * textin assumes that input string encoding is same as database encoding.
*/
len = strlen((char *) result) + VARHDRSZ;
retval = palloc(len);
@@ -502,7 +500,7 @@ pg_mbstrlen_with_len(const char *mbstr, int limit)
while (limit > 0 && *mbstr)
{
- int l = pg_mblen(mbstr);
+ int l = pg_mblen(mbstr);
limit -= l;
mbstr += l;
diff --git a/src/backend/utils/mb/wchar.c b/src/backend/utils/mb/wchar.c
index 59116e2e818..e8866ba35ca 100644
--- a/src/backend/utils/mb/wchar.c
+++ b/src/backend/utils/mb/wchar.c
@@ -1,7 +1,7 @@
/*
* conversion functions between pg_wchar and multibyte streams.
* Tatsuo Ishii
- * $PostgreSQL: pgsql/src/backend/utils/mb/wchar.c,v 1.45 2005/09/24 17:53:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/wchar.c,v 1.46 2005/10/15 02:49:33 momjian Exp $
*
* WIN1250 client encoding updated by Pavel Behal
*
@@ -406,14 +406,14 @@ pg_utf_mblen(const unsigned char *s)
len = 1;
else if ((*s & 0xe0) == 0xc0)
len = 2;
- else if ((*s & 0xf0) == 0xe0)
- len = 3;
- else if ((*s & 0xf8) == 0xf0)
- len = 4;
- else if ((*s & 0xfc) == 0xf8)
- len = 5;
- else if ((*s & 0xfe) == 0xfc)
- len = 6;
+ else if ((*s & 0xf0) == 0xe0)
+ len = 3;
+ else if ((*s & 0xf8) == 0xf0)
+ len = 4;
+ else if ((*s & 0xfc) == 0xf8)
+ len = 5;
+ else if ((*s & 0xfe) == 0xfc)
+ len = 6;
return (len);
}
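A minimal standalone sketch of the lead-byte classification that the pg_utf_mblen() hunk above re-indents; the helper name utf8_seq_len and the main() driver are illustrative assumptions, not code from the tree:

#include <stdio.h>

static int
utf8_seq_len(unsigned char c)
{
    if ((c & 0x80) == 0)
        return 1;               /* 0xxxxxxx: plain ASCII */
    else if ((c & 0xe0) == 0xc0)
        return 2;               /* 110xxxxx */
    else if ((c & 0xf0) == 0xe0)
        return 3;               /* 1110xxxx */
    else if ((c & 0xf8) == 0xf0)
        return 4;               /* 11110xxx */
    else if ((c & 0xfc) == 0xf8)
        return 5;               /* historical 5-byte form */
    else if ((c & 0xfe) == 0xfc)
        return 6;               /* historical 6-byte form */
    return 1;                   /* invalid lead byte: treat as length 1 */
}

int
main(void)
{
    printf("%d\n", utf8_seq_len(0xe3));     /* prints 3 */
    return 0;
}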
@@ -727,8 +727,8 @@ pg_wchar_tbl pg_wchar_table[] = {
{pg_euckr2wchar_with_len, pg_euckr_mblen, pg_euckr_dsplen, 3}, /* 3; PG_EUC_KR */
{pg_euctw2wchar_with_len, pg_euctw_mblen, pg_euctw_dsplen, 3}, /* 4; PG_EUC_TW */
{pg_johab2wchar_with_len, pg_johab_mblen, pg_johab_dsplen, 3}, /* 5; PG_JOHAB */
- {pg_utf2wchar_with_len, pg_utf_mblen, pg_utf_dsplen, 4}, /* 6; PG_UTF8 */
- {pg_mule2wchar_with_len, pg_mule_mblen, pg_mule_dsplen, 3}, /* 7; PG_MULE_INTERNAL */
+ {pg_utf2wchar_with_len, pg_utf_mblen, pg_utf_dsplen, 4}, /* 6; PG_UTF8 */
+ {pg_mule2wchar_with_len, pg_mule_mblen, pg_mule_dsplen, 3}, /* 7; PG_MULE_INTERNAL */
{pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 8; PG_LATIN1 */
{pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 9; PG_LATIN2 */
{pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 10; PG_LATIN3 */
@@ -775,8 +775,8 @@ pg_encoding_mblen(int encoding, const char *mbstr)
return ((encoding >= 0 &&
encoding < sizeof(pg_wchar_table) / sizeof(pg_wchar_tbl)) ?
- ((*pg_wchar_table[encoding].mblen) ((const unsigned char *) mbstr)) :
- ((*pg_wchar_table[PG_SQL_ASCII].mblen) ((const unsigned char *) mbstr)));
+ ((*pg_wchar_table[encoding].mblen) ((const unsigned char *) mbstr)) :
+ ((*pg_wchar_table[PG_SQL_ASCII].mblen) ((const unsigned char *) mbstr)));
}
/*
@@ -789,8 +789,8 @@ pg_encoding_dsplen(int encoding, const char *mbstr)
return ((encoding >= 0 &&
encoding < sizeof(pg_wchar_table) / sizeof(pg_wchar_tbl)) ?
- ((*pg_wchar_table[encoding].dsplen) ((const unsigned char *) mbstr)) :
- ((*pg_wchar_table[PG_SQL_ASCII].dsplen) ((const unsigned char *) mbstr)));
+ ((*pg_wchar_table[encoding].dsplen) ((const unsigned char *) mbstr)) :
+ ((*pg_wchar_table[PG_SQL_ASCII].dsplen) ((const unsigned char *) mbstr)));
}
/*
@@ -806,28 +806,57 @@ pg_encoding_max_length(int encoding)
#ifndef FRONTEND
-bool pg_utf8_islegal(const unsigned char *source, int length) {
- unsigned char a;
- const unsigned char *srcptr = source+length;
- switch (length) {
- default: return false;
- /* Everything else falls through when "true"... */
- case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
- case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
- case 2: if ((a = (*--srcptr)) > 0xBF) return false;
- switch (*source) {
- /* no fall-through in this inner switch */
- case 0xE0: if (a < 0xA0) return false; break;
- case 0xED: if (a > 0x9F) return false; break;
- case 0xF0: if (a < 0x90) return false; break;
- case 0xF4: if (a > 0x8F) return false; break;
- default: if (a < 0x80) return false;
- }
-
- case 1: if (*source >= 0x80 && *source < 0xC2) return false;
- }
- if (*source > 0xF4) return false;
- return true;
+bool
+pg_utf8_islegal(const unsigned char *source, int length)
+{
+ unsigned char a;
+ const unsigned char *srcptr = source + length;
+
+ switch (length)
+ {
+ default:
+ return false;
+ /* Everything else falls through when "true"... */
+ case 4:
+ if ((a = (*--srcptr)) < 0x80 || a > 0xBF)
+ return false;
+ case 3:
+ if ((a = (*--srcptr)) < 0x80 || a > 0xBF)
+ return false;
+ case 2:
+ if ((a = (*--srcptr)) > 0xBF)
+ return false;
+ switch (*source)
+ {
+ /* no fall-through in this inner switch */
+ case 0xE0:
+ if (a < 0xA0)
+ return false;
+ break;
+ case 0xED:
+ if (a > 0x9F)
+ return false;
+ break;
+ case 0xF0:
+ if (a < 0x90)
+ return false;
+ break;
+ case 0xF4:
+ if (a > 0x8F)
+ return false;
+ break;
+ default:
+ if (a < 0x80)
+ return false;
+ }
+
+ case 1:
+ if (*source >= 0x80 && *source < 0xC2)
+ return false;
+ }
+ if (*source > 0xF4)
+ return false;
+ return true;
}
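The re-laid-out pg_utf8_islegal() above encodes the usual UTF-8 validity rules: after 0xE0 the second byte must be >= 0xA0 (rejecting overlong forms), after 0xED it must be <= 0x9F (rejecting surrogate code points), after 0xF0 it must be >= 0x90, and after 0xF4 it must be <= 0x8F (capping the range at U+10FFFF). A small driver, assuming the backend function is linked in, could exercise those branches as follows; the test vectors are chosen purely for illustration:

#include <stdio.h>
#include <stdbool.h>

extern bool pg_utf8_islegal(const unsigned char *source, int length);

int
main(void)
{
    const unsigned char ok[]        = {0xE3, 0x81, 0x82};  /* U+3042, legal */
    const unsigned char overlong[]  = {0xE0, 0x80, 0x81};  /* overlong form, illegal */
    const unsigned char surrogate[] = {0xED, 0xA0, 0x80};  /* U+D800, illegal */

    printf("%d %d %d\n",
           pg_utf8_islegal(ok, 3),
           pg_utf8_islegal(overlong, 3),
           pg_utf8_islegal(surrogate, 3));      /* expect: 1 0 0 */
    return 0;
}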
@@ -855,11 +884,11 @@ pg_verifymbstr(const char *mbstr, int len, bool noError)
while (len > 0 && *mbstr)
{
l = pg_mblen(mbstr);
-
+
/* special UTF-8 check */
if (encoding == PG_UTF8)
{
- if(!pg_utf8_islegal((const unsigned char *) mbstr, l))
+ if (!pg_utf8_islegal((const unsigned char *) mbstr, l))
{
if (noError)
return false;
@@ -868,7 +897,9 @@ pg_verifymbstr(const char *mbstr, int len, bool noError)
errmsg("invalid UNICODE byte sequence detected near byte 0x%02x",
(unsigned char) *mbstr)));
}
- } else {
+ }
+ else
+ {
for (i = 1; i < l; i++)
{
/*
@@ -878,23 +909,23 @@ pg_verifymbstr(const char *mbstr, int len, bool noError)
if (i >= len || (mbstr[i] & 0x80) == 0)
{
char buf[8 * 2 + 1];
- char *p = buf;
- int j,
- jlimit;
+ char *p = buf;
+ int j,
+ jlimit;
if (noError)
return false;
jlimit = Min(l, len);
- jlimit = Min(jlimit, 8); /* prevent buffer overrun */
+ jlimit = Min(jlimit, 8); /* prevent buffer overrun */
for (j = 0; j < jlimit; j++)
p += sprintf(p, "%02x", (unsigned char) mbstr[j]);
ereport(ERROR,
(errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE),
- errmsg("invalid byte sequence for encoding \"%s\": 0x%s",
- GetDatabaseEncodingName(), buf)));
+ errmsg("invalid byte sequence for encoding \"%s\": 0x%s",
+ GetDatabaseEncodingName(), buf)));
}
}
}
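The error path re-wrapped above builds a short hex dump of the offending bytes: at most 8 bytes are rendered as two hex digits each, so the 8 * 2 + 1 buffer always has room for the terminating NUL. A self-contained sketch of the same construction, where dump_prefix_hex and MIN are names introduced only for this example:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void
dump_prefix_hex(const char *mbstr, int seqlen, int remaining, char *out)
{
    int     jlimit = MIN(seqlen, remaining);
    char   *p = out;
    int     j;

    jlimit = MIN(jlimit, 8);        /* prevent buffer overrun */
    *p = '\0';                      /* stay terminated even when jlimit is 0 */
    for (j = 0; j < jlimit; j++)
        p += sprintf(p, "%02x", (unsigned char) mbstr[j]);
}

int
main(void)
{
    char    buf[8 * 2 + 1];

    dump_prefix_hex("\xe3\x81", 3, 2, buf);     /* sequence truncated after 2 bytes */
    printf("0x%s\n", buf);                      /* prints 0xe381 */
    return 0;
}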
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index 1ba1ac31d3f..3394fd77e31 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -10,7 +10,7 @@
* Written by Peter Eisentraut <peter_e@gmx.net>.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.292 2005/10/14 20:53:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.293 2005/10/15 02:49:36 momjian Exp $
*
*--------------------------------------------------------------------
*/
@@ -72,7 +72,7 @@
#define PG_KRB_SRVNAM ""
#endif
-#define CONFIG_FILENAME "postgresql.conf"
+#define CONFIG_FILENAME "postgresql.conf"
#define HBA_FILENAME "pg_hba.conf"
#define IDENT_FILENAME "pg_ident.conf"
@@ -94,9 +94,10 @@ extern DLLIMPORT bool check_function_bodies;
extern int CommitDelay;
extern int CommitSiblings;
extern char *default_tablespace;
-extern bool fullPageWrites;
+extern bool fullPageWrites;
+
#ifdef TRACE_SORT
-extern bool trace_sort;
+extern bool trace_sort;
#endif
static const char *assign_log_destination(const char *value,
@@ -106,9 +107,9 @@ static const char *assign_log_destination(const char *value,
static int syslog_facility = LOG_LOCAL0;
static const char *assign_syslog_facility(const char *facility,
- bool doit, GucSource source);
+ bool doit, GucSource source);
static const char *assign_syslog_ident(const char *ident,
- bool doit, GucSource source);
+ bool doit, GucSource source);
#endif
static const char *assign_defaultxactisolevel(const char *newval, bool doit,
@@ -157,8 +158,8 @@ bool Explain_pretty_print = true;
bool log_parser_stats = false;
bool log_planner_stats = false;
bool log_executor_stats = false;
-bool log_statement_stats = false; /* this is sort of all
- * three above together */
+bool log_statement_stats = false; /* this is sort of all three
+ * above together */
bool log_btree_build_stats = false;
bool SQL_inheritance = true;
@@ -181,9 +182,9 @@ char *HbaFileName;
char *IdentFileName;
char *external_pid_file;
-int tcp_keepalives_idle;
-int tcp_keepalives_interval;
-int tcp_keepalives_count;
+int tcp_keepalives_idle;
+int tcp_keepalives_interval;
+int tcp_keepalives_count;
/*
* These variables are all dummies that don't do anything, except in some
@@ -217,8 +218,8 @@ static int max_function_args;
static int max_index_keys;
static int max_identifier_length;
static int block_size;
-static bool integer_datetimes;
-static bool standard_conforming_strings;
+static bool integer_datetimes;
+static bool standard_conforming_strings;
/* should be static, but commands/variable.c needs to get at these */
char *role_string;
@@ -501,7 +502,7 @@ static struct config_bool ConfigureNamesBool[] =
{"fsync", PGC_SIGHUP, WAL_SETTINGS,
gettext_noop("Forces synchronization of updates to disk."),
gettext_noop("The server will use the fsync() system call in several places to make "
- "sure that updates are physically written to disk. This insures "
+ "sure that updates are physically written to disk. This insures "
"that a database cluster will recover to a consistent state after "
"an operating system or hardware crash.")
},
@@ -512,7 +513,7 @@ static struct config_bool ConfigureNamesBool[] =
{"zero_damaged_pages", PGC_SUSET, DEVELOPER_OPTIONS,
gettext_noop("Continues processing past damaged page headers."),
gettext_noop("Detection of a damaged page header normally causes PostgreSQL to "
- "report an error, aborting the current transaction. Setting "
+ "report an error, aborting the current transaction. Setting "
"zero_damaged_pages to true causes the system to instead report a "
"warning, zero out the damaged page, and continue processing. This "
"behavior will destroy data, namely all the rows on the damaged page."),
@@ -526,7 +527,7 @@ static struct config_bool ConfigureNamesBool[] =
gettext_noop("Writes full pages to WAL when first modified after a checkpoint."),
gettext_noop("A page write in process during an operating system crash might be "
"only partially written to disk. During recovery, the row changes"
- "stored in WAL are not enough to recover. This option writes "
+ "stored in WAL are not enough to recover. This option writes "
"pages when first modified after a checkpoint to WAL so full recovery "
"is possible.")
},
@@ -537,7 +538,7 @@ static struct config_bool ConfigureNamesBool[] =
{"silent_mode", PGC_POSTMASTER, LOGGING_WHEN,
gettext_noop("Runs the server silently."),
gettext_noop("If this parameter is set, the server will automatically run in the "
- "background and any controlling terminals are dissociated.")
+ "background and any controlling terminals are dissociated.")
},
&SilentMode,
false, NULL, NULL
@@ -693,7 +694,7 @@ static struct config_bool ConfigureNamesBool[] =
{"stats_command_string", PGC_SUSET, STATS_COLLECTOR,
gettext_noop("Collects statistics about executing commands."),
gettext_noop("Enables the collection of statistics on the currently "
- "executing command of each session, along with the time "
+ "executing command of each session, along with the time "
"at which that command began execution.")
},
&pgstat_collect_querystring,
@@ -722,7 +723,7 @@ static struct config_bool ConfigureNamesBool[] =
NULL
},
&autovacuum_start_daemon,
- false, NULL, NULL
+ false, NULL, NULL
},
{
@@ -779,8 +780,8 @@ static struct config_bool ConfigureNamesBool[] =
gettext_noop("Logs the host name in the connection logs."),
gettext_noop("By default, connection logs only show the IP address "
"of the connecting host. If you want them to show the host name you "
- "can turn this on, but depending on your host name resolution "
- "setup it might impose a non-negligible performance penalty.")
+ "can turn this on, but depending on your host name resolution "
+ "setup it might impose a non-negligible performance penalty.")
},
&log_hostname,
false, NULL, NULL
@@ -806,7 +807,7 @@ static struct config_bool ConfigureNamesBool[] =
{"password_encryption", PGC_USERSET, CONN_AUTH_SECURITY,
gettext_noop("Encrypt passwords."),
gettext_noop("When a password is specified in CREATE USER or "
- "ALTER USER without writing either ENCRYPTED or UNENCRYPTED, "
+ "ALTER USER without writing either ENCRYPTED or UNENCRYPTED, "
"this parameter determines whether the password is to be encrypted.")
},
&Password_encryption,
@@ -816,9 +817,9 @@ static struct config_bool ConfigureNamesBool[] =
{"transform_null_equals", PGC_USERSET, COMPAT_OPTIONS_CLIENT,
gettext_noop("Treats \"expr=NULL\" as \"expr IS NULL\"."),
gettext_noop("When turned on, expressions of the form expr = NULL "
- "(or NULL = expr) are treated as expr IS NULL, that is, they "
- "return true if expr evaluates to the null value, and false "
- "otherwise. The correct behavior of expr = NULL is to always "
+ "(or NULL = expr) are treated as expr IS NULL, that is, they "
+ "return true if expr evaluates to the null value, and false "
+ "otherwise. The correct behavior of expr = NULL is to always "
"return null (unknown).")
},
&Transform_null_equals,
@@ -979,7 +980,7 @@ static struct config_int ConfigureNamesInt[] =
{"default_statistics_target", PGC_USERSET, QUERY_TUNING_OTHER,
gettext_noop("Sets the default statistics target."),
gettext_noop("This applies to table columns that have not had a "
- "column-specific target set via ALTER TABLE SET STATISTICS.")
+ "column-specific target set via ALTER TABLE SET STATISTICS.")
},
&default_statistics_target,
10, 1, 1000, NULL, NULL
@@ -989,7 +990,7 @@ static struct config_int ConfigureNamesInt[] =
gettext_noop("Sets the FROM-list size beyond which subqueries are not "
"collapsed."),
gettext_noop("The planner will merge subqueries into upper "
- "queries if the resulting FROM list would have no more than "
+ "queries if the resulting FROM list would have no more than "
"this many items.")
},
&from_collapse_limit,
@@ -1000,7 +1001,7 @@ static struct config_int ConfigureNamesInt[] =
gettext_noop("Sets the FROM-list size beyond which JOIN constructs are not "
"flattened."),
gettext_noop("The planner will flatten explicit inner JOIN "
- "constructs into lists of FROM items whenever a list of no more "
+ "constructs into lists of FROM items whenever a list of no more "
"than this many items would result.")
},
&join_collapse_limit,
@@ -1052,12 +1053,12 @@ static struct config_int ConfigureNamesInt[] =
* Note: There is some postprocessing done in PostmasterMain() to make
* sure the buffers are at least twice the number of backends, so the
* constraints here are partially unused. Similarly, the superuser
- * reserved number is checked to ensure it is less than the max
- * backends number.
+ * reserved number is checked to ensure it is less than the max backends
+ * number.
*
* MaxBackends is limited to INT_MAX/4 because some places compute
- * 4*MaxBackends without any overflow check. Likewise we have to
- * limit NBuffers to INT_MAX/2.
+ * 4*MaxBackends without any overflow check. Likewise we have to limit
+ * NBuffers to INT_MAX/2.
*/
{
{"max_connections", PGC_POSTMASTER, CONN_AUTH_SETTINGS,
@@ -1121,7 +1122,7 @@ static struct config_int ConfigureNamesInt[] =
{"work_mem", PGC_USERSET, RESOURCES_MEM,
gettext_noop("Sets the maximum memory to be used for query workspaces."),
gettext_noop("This much memory may be used by each internal "
- "sort operation and hash table before switching to "
+ "sort operation and hash table before switching to "
"temporary disk files.")
},
&work_mem,
@@ -1278,7 +1279,7 @@ static struct config_int ConfigureNamesInt[] =
{"max_locks_per_transaction", PGC_POSTMASTER, LOCK_MANAGEMENT,
gettext_noop("Sets the maximum number of locks per transaction."),
gettext_noop("The shared lock table is sized on the assumption that "
- "at most max_locks_per_transaction * max_connections distinct "
+ "at most max_locks_per_transaction * max_connections distinct "
"objects will need to be locked at any one time.")
},
&max_locks_per_xact,
@@ -1328,7 +1329,7 @@ static struct config_int ConfigureNamesInt[] =
gettext_noop("Logs if filling of checkpoint segments happens more "
"frequently than this (in seconds)."),
gettext_noop("Write a message to the server log if checkpoints "
- "caused by the filling of checkpoint segment files happens more "
+ "caused by the filling of checkpoint segment files happens more "
"frequently than this number of seconds. Zero turns off the warning.")
},
&CheckPointWarning,
@@ -1368,7 +1369,7 @@ static struct config_int ConfigureNamesInt[] =
{"extra_float_digits", PGC_USERSET, CLIENT_CONN_LOCALE,
gettext_noop("Sets the number of digits displayed for floating-point values."),
gettext_noop("This affects real, double precision, and geometric data types. "
- "The parameter value is added to the standard number of digits "
+ "The parameter value is added to the standard number of digits "
"(FLT_DIG or DBL_DIG as appropriate).")
},
&extra_float_digits,
@@ -1497,29 +1498,29 @@ static struct config_int ConfigureNamesInt[] =
{
{"tcp_keepalives_idle", PGC_USERSET, CLIENT_CONN_OTHER,
- gettext_noop("Seconds between issuing TCP keepalives."),
- gettext_noop("A value of 0 uses the system default."),
- },
+ gettext_noop("Seconds between issuing TCP keepalives."),
+ gettext_noop("A value of 0 uses the system default."),
+ },
&tcp_keepalives_idle,
0, 0, INT_MAX, assign_tcp_keepalives_idle, show_tcp_keepalives_idle
},
{
{"tcp_keepalives_interval", PGC_USERSET, CLIENT_CONN_OTHER,
- gettext_noop("Seconds between TCP keepalive retransmits."),
- gettext_noop("A value of 0 uses the system default."),
- },
+ gettext_noop("Seconds between TCP keepalive retransmits."),
+ gettext_noop("A value of 0 uses the system default."),
+ },
&tcp_keepalives_interval,
0, 0, INT_MAX, assign_tcp_keepalives_interval, show_tcp_keepalives_interval
},
{
{"tcp_keepalives_count", PGC_USERSET, CLIENT_CONN_OTHER,
- gettext_noop("Maximum number of TCP keepalive retransmits."),
- gettext_noop("This controls the number of consecutive keepalive retransmits that can be "
- "lost before a connection is considered dead. A value of 0 uses the "
- "system default."),
- },
+ gettext_noop("Maximum number of TCP keepalive retransmits."),
+ gettext_noop("This controls the number of consecutive keepalive retransmits that can be "
+ "lost before a connection is considered dead. A value of 0 uses the "
+ "system default."),
+ },
&tcp_keepalives_count,
0, 0, INT_MAX, assign_tcp_keepalives_count, show_tcp_keepalives_count
},
@@ -1548,7 +1549,7 @@ static struct config_real ConfigureNamesReal[] =
gettext_noop("Sets the planner's estimate of the cost of a nonsequentially "
"fetched disk page."),
gettext_noop("This is measured as a multiple of the cost of a "
- "sequential page fetch. A higher value makes it more likely a "
+ "sequential page fetch. A higher value makes it more likely a "
"sequential scan will be used, a lower value makes it more likely an "
"index scan will be used.")
},
@@ -1683,7 +1684,7 @@ static struct config_string ConfigureNamesString[] =
{"log_min_messages", PGC_SUSET, LOGGING_WHEN,
gettext_noop("Sets the message levels that are logged."),
gettext_noop("Valid values are DEBUG5, DEBUG4, DEBUG3, DEBUG2, DEBUG1, "
- "INFO, NOTICE, WARNING, ERROR, LOG, FATAL, and PANIC. Each level "
+ "INFO, NOTICE, WARNING, ERROR, LOG, FATAL, and PANIC. Each level "
"includes all the levels that follow it.")
},
&log_min_messages_str,
@@ -2060,9 +2061,9 @@ static struct config_string ConfigureNamesString[] =
{
{"data_directory", PGC_POSTMASTER, FILE_LOCATIONS,
- gettext_noop("Sets the server's data directory."),
- NULL,
- GUC_SUPERUSER_ONLY
+ gettext_noop("Sets the server's data directory."),
+ NULL,
+ GUC_SUPERUSER_ONLY
},
&data_directory,
NULL, NULL, NULL
@@ -2070,9 +2071,9 @@ static struct config_string ConfigureNamesString[] =
{
{"config_file", PGC_POSTMASTER, FILE_LOCATIONS,
- gettext_noop("Sets the server's main configuration file."),
- NULL,
- GUC_DISALLOW_IN_FILE | GUC_SUPERUSER_ONLY
+ gettext_noop("Sets the server's main configuration file."),
+ NULL,
+ GUC_DISALLOW_IN_FILE | GUC_SUPERUSER_ONLY
},
&ConfigFileName,
NULL, NULL, NULL
@@ -2080,9 +2081,9 @@ static struct config_string ConfigureNamesString[] =
{
{"hba_file", PGC_POSTMASTER, FILE_LOCATIONS,
- gettext_noop("Sets the server's \"hba\" configuration file"),
- NULL,
- GUC_SUPERUSER_ONLY
+ gettext_noop("Sets the server's \"hba\" configuration file"),
+ NULL,
+ GUC_SUPERUSER_ONLY
},
&HbaFileName,
NULL, NULL, NULL
@@ -2090,9 +2091,9 @@ static struct config_string ConfigureNamesString[] =
{
{"ident_file", PGC_POSTMASTER, FILE_LOCATIONS,
- gettext_noop("Sets the server's \"ident\" configuration file"),
- NULL,
- GUC_SUPERUSER_ONLY
+ gettext_noop("Sets the server's \"ident\" configuration file"),
+ NULL,
+ GUC_SUPERUSER_ONLY
},
&IdentFileName,
NULL, NULL, NULL
@@ -2100,9 +2101,9 @@ static struct config_string ConfigureNamesString[] =
{
{"external_pid_file", PGC_POSTMASTER, FILE_LOCATIONS,
- gettext_noop("Writes the postmaster PID to the specified file."),
- NULL,
- GUC_SUPERUSER_ONLY
+ gettext_noop("Writes the postmaster PID to the specified file."),
+ NULL,
+ GUC_SUPERUSER_ONLY
},
&external_pid_file,
NULL, assign_canonical_path, NULL
@@ -2341,8 +2342,8 @@ static bool
is_custom_class(const char *name, int dotPos)
{
/*
- * assign_custom_variable_classes() has made sure no empty
- * identifiers or whitespace exists in the variable
+ * assign_custom_variable_classes() has made sure no empty identifiers or
+ * whitespace exists in the variable
*/
bool result = false;
const char *ccs = GetConfigOption("custom_variable_classes");
@@ -2472,21 +2473,21 @@ find_option(const char *name, int elevel)
Assert(name);
/*
- * By equating const char ** with struct config_generic *, we are
- * assuming the name field is first in config_generic.
+ * By equating const char ** with struct config_generic *, we are assuming
+ * the name field is first in config_generic.
*/
res = (struct config_generic **) bsearch((void *) &key,
(void *) guc_variables,
num_guc_variables,
- sizeof(struct config_generic *),
+ sizeof(struct config_generic *),
guc_var_compare);
if (res)
return *res;
/*
- * See if the name is an obsolete name for a variable. We assume that
- * the set of supported old names is short enough that a brute-force
- * search is the best way.
+ * See if the name is an obsolete name for a variable. We assume that the
+ * set of supported old names is short enough that a brute-force search is
+ * the best way.
*/
for (i = 0; map_old_guc_names[i] != NULL; i += 2)
{
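The comment at the top of this hunk leans on name being the first member of struct config_generic, so the address of a bare name pointer can be handed to bsearch() as if it pointed at a full entry. A self-contained illustration of that layout trick; the struct and variable names here are made up for the example:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry
{
    const char *name;           /* must remain the first field */
    int         value;
};

static struct entry table[] = {
    {"alpha", 1},
    {"beta", 2},
    {"gamma", 3},
};
static struct entry *pointers[3];

static int
entry_compare(const void *a, const void *b)
{
    const struct entry *ea = *(struct entry *const *) a;
    const struct entry *eb = *(struct entry *const *) b;

    return strcmp(ea->name, eb->name);
}

int
main(void)
{
    const char *keyname = "beta";
    const char **key = &keyname;    /* stands in for a struct entry * */
    struct entry **res;
    int         i;

    for (i = 0; i < 3; i++)
        pointers[i] = &table[i];    /* array is already sorted by name */

    res = (struct entry **) bsearch(&key, pointers, 3,
                                    sizeof(struct entry *), entry_compare);
    printf("%d\n", res ? (*res)->value : -1);   /* prints 2 */
    return 0;
}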
@@ -2495,8 +2496,8 @@ find_option(const char *name, int elevel)
}
/*
- * Check if the name is qualified, and if so, check if the qualifier
- * maps to a custom variable class.
+ * Check if the name is qualified, and if so, check if the qualifier maps
+ * to a custom variable class.
*/
dot = strchr(name, GUC_QUALIFIER_SEPARATOR);
if (dot != NULL && is_custom_class(name, dot - name))
@@ -2525,9 +2526,9 @@ static int
guc_name_compare(const char *namea, const char *nameb)
{
/*
- * The temptation to use strcasecmp() here must be resisted, because
- * the array ordering has to remain stable across setlocale() calls.
- * So, build our own with a simple ASCII-only downcasing.
+ * The temptation to use strcasecmp() here must be resisted, because the
+ * array ordering has to remain stable across setlocale() calls. So, build
+ * our own with a simple ASCII-only downcasing.
*/
while (*namea && *nameb)
{
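The comment above explains why strcasecmp() must be avoided here. A self-contained version of the same ASCII-only downcasing comparison (ascii_name_compare is an illustrative name) makes it clear that the result cannot shift when setlocale() changes ctype behaviour:

#include <stdio.h>

static int
ascii_name_compare(const char *namea, const char *nameb)
{
    while (*namea && *nameb)
    {
        unsigned char cha = (unsigned char) *namea++;
        unsigned char chb = (unsigned char) *nameb++;

        if (cha >= 'A' && cha <= 'Z')
            cha += 'a' - 'A';
        if (chb >= 'A' && chb <= 'Z')
            chb += 'a' - 'A';
        if (cha != chb)
            return (int) cha - (int) chb;
    }
    if (*namea)
        return 1;               /* a is longer */
    if (*nameb)
        return -1;              /* b is longer */
    return 0;
}

int
main(void)
{
    printf("%d\n", ascii_name_compare("Work_Mem", "work_mem"));    /* prints 0 */
    return 0;
}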
@@ -2656,8 +2657,7 @@ InitializeGUCOptions(void)
free(str);
/*
- * See notes in set_config_option about
- * casting
+ * See notes in set_config_option about casting
*/
str = (char *) newstr;
conf->reset_val = str;
@@ -2683,8 +2683,8 @@ InitializeGUCOptions(void)
PGC_POSTMASTER, PGC_S_OVERRIDE);
/*
- * For historical reasons, some GUC parameters can receive defaults
- * from environment variables. Process those settings.
+ * For historical reasons, some GUC parameters can receive defaults from
+ * environment variables. Process those settings.
*/
env = getenv("PGPORT");
@@ -2727,9 +2727,9 @@ SelectConfigFiles(const char *userDoption, const char *progname)
/*
* Find the configuration file: if config_file was specified on the
- * command line, use it, else use configdir/postgresql.conf. In any
- * case ensure the result is an absolute path, so that it will be
- * interpreted the same way by future backends.
+ * command line, use it, else use configdir/postgresql.conf. In any case
+ * ensure the result is an absolute path, so that it will be interpreted
+ * the same way by future backends.
*/
if (ConfigFileName)
fname = make_absolute_path(ConfigFileName);
@@ -2749,8 +2749,8 @@ SelectConfigFiles(const char *userDoption, const char *progname)
}
/*
- * Set the ConfigFileName GUC variable to its final value, ensuring
- * that it can't be overridden later.
+ * Set the ConfigFileName GUC variable to its final value, ensuring that
+ * it can't be overridden later.
*/
SetConfigOption("config_file", fname, PGC_POSTMASTER, PGC_S_OVERRIDE);
free(fname);
@@ -2771,8 +2771,8 @@ SelectConfigFiles(const char *userDoption, const char *progname)
* If the data_directory GUC variable has been set, use that as DataDir;
* otherwise use configdir if set; else punt.
*
- * Note: SetDataDir will copy and absolute-ize its argument,
- * so we don't have to.
+ * Note: SetDataDir will copy and absolute-ize its argument, so we don't have
+ * to.
*/
if (data_directory)
SetDataDir(data_directory);
@@ -2792,9 +2792,9 @@ SelectConfigFiles(const char *userDoption, const char *progname)
* Reflect the final DataDir value back into the data_directory GUC var.
* (If you are wondering why we don't just make them a single variable,
* it's because the EXEC_BACKEND case needs DataDir to be transmitted to
- * child backends specially. XXX is that still true? Given that we
- * now chdir to DataDir, EXEC_BACKEND can read the config file without
- * knowing DataDir in advance.)
+ * child backends specially. XXX is that still true? Given that we now
+ * chdir to DataDir, EXEC_BACKEND can read the config file without knowing
+ * DataDir in advance.)
*/
SetConfigOption("data_directory", DataDir, PGC_POSTMASTER, PGC_S_OVERRIDE);
@@ -2954,8 +2954,7 @@ ResetAllOptions(void)
else if (newstr != str)
{
/*
- * See notes in set_config_option about
- * casting
+ * See notes in set_config_option about casting
*/
str = (char *) newstr;
}
@@ -3005,8 +3004,8 @@ push_old_value(struct config_generic * gconf)
/*
* We keep all the stack entries in TopTransactionContext so as to
- * avoid allocation problems when a subtransaction back-fills
- * stack entries for upper transaction levels.
+ * avoid allocation problems when a subtransaction back-fills stack
+ * entries for upper transaction levels.
*/
stack = (GucStack *) MemoryContextAlloc(TopTransactionContext,
sizeof(GucStack));
@@ -3098,27 +3097,26 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
Assert(stack->nest_level == my_level);
/*
- * We will pop the stack entry. Start by restoring outer xact
- * status (since we may want to modify it below). Be careful to
- * use my_status to reference the inner xact status below this
- * point...
+ * We will pop the stack entry. Start by restoring outer xact status
+ * (since we may want to modify it below). Be careful to use
+ * my_status to reference the inner xact status below this point...
*/
gconf->status = stack->status;
/*
* We have two cases:
*
- * If commit and HAVE_TENTATIVE, set actual value to tentative (this
- * is to override a SET LOCAL if one occurred later than SET). We
- * keep the tentative value and propagate HAVE_TENTATIVE to the
- * parent status, allowing the SET's effect to percolate up. (But
- * if we're exiting the outermost transaction, we'll drop the
- * HAVE_TENTATIVE bit below.)
+ * If commit and HAVE_TENTATIVE, set actual value to tentative (this is
+ * to override a SET LOCAL if one occurred later than SET). We keep
+ * the tentative value and propagate HAVE_TENTATIVE to the parent
+ * status, allowing the SET's effect to percolate up. (But if we're
+ * exiting the outermost transaction, we'll drop the HAVE_TENTATIVE
+ * bit below.)
*
* Otherwise, we have a transaction that aborted or executed only SET
- * LOCAL (or no SET at all). In either case it should have no
- * further effect, so restore both tentative and actual values
- * from the stack entry.
+ * LOCAL (or no SET at all). In either case it should have no further
+ * effect, so restore both tentative and actual values from the stack
+ * entry.
*/
useTentative = isCommit && (my_status & GUC_HAVE_TENTATIVE) != 0;
@@ -3150,7 +3148,7 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
{
if (conf->assign_hook)
if (!(*conf->assign_hook) (newval,
- true, PGC_S_OVERRIDE))
+ true, PGC_S_OVERRIDE))
elog(LOG, "failed to commit %s",
conf->gen.name);
*conf->variable = newval;
@@ -3183,7 +3181,7 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
{
if (conf->assign_hook)
if (!(*conf->assign_hook) (newval,
- true, PGC_S_OVERRIDE))
+ true, PGC_S_OVERRIDE))
elog(LOG, "failed to commit %s",
conf->gen.name);
*conf->variable = newval;
@@ -3216,7 +3214,7 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
{
if (conf->assign_hook)
if (!(*conf->assign_hook) (newval,
- true, PGC_S_OVERRIDE))
+ true, PGC_S_OVERRIDE))
elog(LOG, "failed to commit %s",
conf->gen.name);
*conf->variable = newval;
@@ -3253,7 +3251,7 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
const char *newstr;
newstr = (*conf->assign_hook) (newval, true,
- PGC_S_OVERRIDE);
+ PGC_S_OVERRIDE);
if (newstr == NULL)
elog(LOG, "failed to commit %s",
conf->gen.name);
@@ -3263,8 +3261,7 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
* If newval should now be freed, it'll be
* taken care of below.
*
- * See notes in set_config_option about
- * casting
+ * See notes in set_config_option about casting
*/
newval = (char *) newstr;
}
@@ -3291,8 +3288,8 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
pfree(stack);
/*
- * If we're now out of all xact levels, forget TENTATIVE status
- * bit; there's nothing tentative about the value anymore.
+ * If we're now out of all xact levels, forget TENTATIVE status bit;
+ * there's nothing tentative about the value anymore.
*/
if (!isSubXact)
{
@@ -3306,10 +3303,10 @@ AtEOXact_GUC(bool isCommit, bool isSubXact)
}
/*
- * If we're now out of all xact levels, we can clear guc_dirty. (Note:
- * we cannot reset guc_dirty when exiting a subtransaction, because we
- * know that all outer transaction levels will have stacked values to
- * deal with.)
+ * If we're now out of all xact levels, we can clear guc_dirty. (Note: we
+ * cannot reset guc_dirty when exiting a subtransaction, because we know
+ * that all outer transaction levels will have stacked values to deal
+ * with.)
*/
if (!isSubXact)
guc_dirty = false;
@@ -3326,8 +3323,8 @@ BeginReportingGUCOptions(void)
int i;
/*
- * Don't do anything unless talking to an interactive frontend of
- * protocol 3.0 or later.
+ * Don't do anything unless talking to an interactive frontend of protocol
+ * 3.0 or later.
*/
if (whereToSendOutput != Remote ||
PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
@@ -3566,15 +3563,14 @@ set_config_option(const char *name, const char *value,
{
ereport(elevel,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
return false;
}
/*
- * Check if the option can be set at this time. See guc.h for the
- * precise rules. Note that we don't want to throw errors if we're in
- * the SIGHUP context. In that case we just ignore the attempt and
- * return true.
+ * Check if the option can be set at this time. See guc.h for the precise
+ * rules. Note that we don't want to throw errors if we're in the SIGHUP
+ * context. In that case we just ignore the attempt and return true.
*/
switch (record->context)
{
@@ -3613,22 +3609,22 @@ set_config_option(const char *name, const char *value,
}
/*
- * Hmm, the idea of the SIGHUP context is "ought to be global,
- * but can be changed after postmaster start". But there's
- * nothing that prevents a crafty administrator from sending
- * SIGHUP signals to individual backends only.
+ * Hmm, the idea of the SIGHUP context is "ought to be global, but
+ * can be changed after postmaster start". But there's nothing
+ * that prevents a crafty administrator from sending SIGHUP
+ * signals to individual backends only.
*/
break;
case PGC_BACKEND:
if (context == PGC_SIGHUP)
{
/*
- * If a PGC_BACKEND parameter is changed in the config
- * file, we want to accept the new value in the postmaster
- * (whence it will propagate to subsequently-started
- * backends), but ignore it in existing backends. This is
- * a tad klugy, but necessary because we don't re-read the
- * config file during backend start.
+ * If a PGC_BACKEND parameter is changed in the config file,
+ * we want to accept the new value in the postmaster (whence
+ * it will propagate to subsequently-started backends), but
+ * ignore it in existing backends. This is a tad klugy, but
+ * necessary because we don't re-read the config file during
+ * backend start.
*/
if (IsUnderPostmaster)
return true;
@@ -3647,8 +3643,8 @@ set_config_option(const char *name, const char *value,
{
ereport(elevel,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to set parameter \"%s\"",
- name)));
+ errmsg("permission denied to set parameter \"%s\"",
+ name)));
return false;
}
break;
@@ -3666,10 +3662,9 @@ set_config_option(const char *name, const char *value,
/*
* Ignore attempted set if overridden by previously processed setting.
* However, if changeVal is false then plow ahead anyway since we are
- * trying to find out if the value is potentially good, not actually
- * use it. Also keep going if makeDefault is true, since we may want
- * to set the reset/stacked values even if we can't set the variable
- * itself.
+ * trying to find out if the value is potentially good, not actually use
+ * it. Also keep going if makeDefault is true, since we may want to set
+ * the reset/stacked values even if we can't set the variable itself.
*/
if (record->source > source)
{
@@ -3698,8 +3693,8 @@ set_config_option(const char *name, const char *value,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"%s\" requires a Boolean value",
- name)));
+ errmsg("parameter \"%s\" requires a Boolean value",
+ name)));
return false;
}
}
@@ -3714,8 +3709,8 @@ set_config_option(const char *name, const char *value,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": %d",
- name, (int) newval)));
+ errmsg("invalid value for parameter \"%s\": %d",
+ name, (int) newval)));
return false;
}
@@ -3774,8 +3769,8 @@ set_config_option(const char *name, const char *value,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"%s\" requires an integer value",
- name)));
+ errmsg("parameter \"%s\" requires an integer value",
+ name)));
return false;
}
if (newval < conf->min || newval > conf->max)
@@ -3783,7 +3778,7 @@ set_config_option(const char *name, const char *value,
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("%d is outside the valid range for parameter \"%s\" (%d .. %d)",
- newval, name, conf->min, conf->max)));
+ newval, name, conf->min, conf->max)));
return false;
}
}
@@ -3798,8 +3793,8 @@ set_config_option(const char *name, const char *value,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": %d",
- name, newval)));
+ errmsg("invalid value for parameter \"%s\": %d",
+ name, newval)));
return false;
}
@@ -3858,8 +3853,8 @@ set_config_option(const char *name, const char *value,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"%s\" requires a numeric value",
- name)));
+ errmsg("parameter \"%s\" requires a numeric value",
+ name)));
return false;
}
if (newval < conf->min || newval > conf->max)
@@ -3867,7 +3862,7 @@ set_config_option(const char *name, const char *value,
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("%g is outside the valid range for parameter \"%s\" (%g .. %g)",
- newval, name, conf->min, conf->max)));
+ newval, name, conf->min, conf->max)));
return false;
}
}
@@ -3882,8 +3877,8 @@ set_config_option(const char *name, const char *value,
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": %g",
- name, newval)));
+ errmsg("invalid value for parameter \"%s\": %g",
+ name, newval)));
return false;
}
@@ -3945,9 +3940,8 @@ set_config_option(const char *name, const char *value,
else if (conf->reset_val)
{
/*
- * We could possibly avoid strdup here, but easier to
- * make this case work the same as the normal
- * assignment case.
+ * We could possibly avoid strdup here, but easier to make
+ * this case work the same as the normal assignment case.
*/
newval = guc_strdup(elevel, conf->reset_val);
if (newval == NULL)
@@ -3977,8 +3971,8 @@ set_config_option(const char *name, const char *value,
free(newval);
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": \"%s\"",
- name, value ? value : "")));
+ errmsg("invalid value for parameter \"%s\": \"%s\"",
+ name, value ? value : "")));
return false;
}
else if (hookresult != newval)
@@ -3986,13 +3980,12 @@ set_config_option(const char *name, const char *value,
free(newval);
/*
- * Having to cast away const here is annoying, but
- * the alternative is to declare assign_hooks as
- * returning char*, which would mean they'd have
- * to cast away const, or as both taking and
- * returning char*, which doesn't seem attractive
- * either --- we don't want them to scribble on
- * the passed str.
+ * Having to cast away const here is annoying, but the
+ * alternative is to declare assign_hooks as returning
+ * char*, which would mean they'd have to cast away
+ * const, or as both taking and returning char*, which
+ * doesn't seem attractive either --- we don't want
+ * them to scribble on the passed str.
*/
newval = (char *) hookresult;
}
@@ -4087,7 +4080,7 @@ GetConfigOption(const char *name)
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
if ((record->flags & GUC_SUPERUSER_ONLY) && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
@@ -4127,7 +4120,7 @@ GetConfigOptionResetString(const char *name)
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
if ((record->flags & GUC_SUPERUSER_ONLY) && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
@@ -4191,8 +4184,8 @@ flatten_set_variable_args(const char *name, List *args)
ListCell *l;
/*
- * Fast path if just DEFAULT. We do not check the variable name in
- * this case --- necessary for RESET ALL to work correctly.
+ * Fast path if just DEFAULT. We do not check the variable name in this
+ * case --- necessary for RESET ALL to work correctly.
*/
if (args == NIL)
return NULL;
@@ -4202,7 +4195,7 @@ flatten_set_variable_args(const char *name, List *args)
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
flags = record->flags;
@@ -4240,18 +4233,18 @@ flatten_set_variable_args(const char *name, List *args)
if (arg->typename != NULL)
{
/*
- * Must be a ConstInterval argument for TIME ZONE.
- * Coerce to interval and back to normalize the value
- * and account for any typmod.
+ * Must be a ConstInterval argument for TIME ZONE. Coerce
+ * to interval and back to normalize the value and account
+ * for any typmod.
*/
- Datum interval;
+ Datum interval;
char *intervalout;
interval =
- DirectFunctionCall3(interval_in,
- CStringGetDatum(val),
- ObjectIdGetDatum(InvalidOid),
- Int32GetDatum(arg->typename->typmod));
+ DirectFunctionCall3(interval_in,
+ CStringGetDatum(val),
+ ObjectIdGetDatum(InvalidOid),
+ Int32GetDatum(arg->typename->typmod));
intervalout =
DatumGetCString(DirectFunctionCall1(interval_out,
@@ -4261,8 +4254,8 @@ flatten_set_variable_args(const char *name, List *args)
else
{
/*
- * Plain string literal or identifier. For quote
- * mode, quote it if it's not a vanilla identifier.
+ * Plain string literal or identifier. For quote mode,
+ * quote it if it's not a vanilla identifier.
*/
if (flags & GUC_LIST_QUOTE)
appendStringInfoString(&buf, quote_identifier(val));
@@ -4325,8 +4318,8 @@ set_config_by_name(PG_FUNCTION_ARGS)
value = DatumGetCString(DirectFunctionCall1(textout, PG_GETARG_DATUM(1)));
/*
- * Get the desired state of is_local. Default to false if provided
- * value is NULL
+ * Get the desired state of is_local. Default to false if provided value
+ * is NULL
*/
if (PG_ARGISNULL(2))
is_local = false;
@@ -4359,11 +4352,11 @@ define_custom_variable(struct config_generic * variable)
const char *value;
struct config_string *pHolder;
struct config_generic **res = (struct config_generic **) bsearch(
- (void *) &nameAddr,
- (void *) guc_variables,
- num_guc_variables,
- sizeof(struct config_generic *),
- guc_var_compare);
+ (void *) &nameAddr,
+ (void *) guc_variables,
+ num_guc_variables,
+ sizeof(struct config_generic *),
+ guc_var_compare);
if (res == NULL)
{
@@ -4388,8 +4381,7 @@ define_custom_variable(struct config_generic * variable)
value = *pHolder->variable;
/*
- * Assign the string value stored in the placeholder to the real
- * variable.
+ * Assign the string value stored in the placeholder to the real variable.
*
* XXX this is not really good enough --- it should be a nontransactional
* assignment, since we don't want it to roll back if the current xact
@@ -4656,7 +4648,7 @@ ShowAllGUCConfig(DestReceiver *dest)
TEXTOID, -1, 0);
TupleDescInitEntry(tupdesc, (AttrNumber) 3, "description",
TEXTOID, -1, 0);
-
+
/* prepare for projection of tuples */
tstate = begin_tup_output_tupdesc(dest, tupdesc);
@@ -4698,7 +4690,7 @@ GetConfigOptionByName(const char *name, const char **varname)
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
if ((record->flags & GUC_SUPERUSER_ONLY) && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
@@ -4814,8 +4806,7 @@ GetConfigOptionByNum(int varnum, const char **values, bool *noshow)
default:
{
/*
- * should never get here, but in case we do, set 'em to
- * NULL
+ * should never get here, but in case we do, set 'em to NULL
*/
/* min_val */
@@ -4884,14 +4875,13 @@ show_all_settings(PG_FUNCTION_ARGS)
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
/*
- * need a tuple descriptor representing NUM_PG_SETTINGS_ATTS
- * columns of the appropriate types
+ * need a tuple descriptor representing NUM_PG_SETTINGS_ATTS columns
+ * of the appropriate types
*/
tupdesc = CreateTemplateTupleDesc(NUM_PG_SETTINGS_ATTS, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
@@ -4916,8 +4906,8 @@ show_all_settings(PG_FUNCTION_ARGS)
TEXTOID, -1, 0);
/*
- * Generate attribute metadata needed later to produce tuples from
- * raw C strings
+ * Generate attribute metadata needed later to produce tuples from raw
+ * C strings
*/
attinmeta = TupleDescGetAttInMetadata(tupdesc);
funcctx->attinmeta = attinmeta;
@@ -5144,8 +5134,8 @@ write_nondefault_variables(GucContext context)
}
/*
- * Put new file in place. This could delay on Win32, but we don't
- * hold any exclusive locks.
+ * Put new file in place. This could delay on Win32, but we don't hold
+ * any exclusive locks.
*/
rename(CONFIG_EXEC_PARAMS_NEW, CONFIG_EXEC_PARAMS);
}
@@ -5233,8 +5223,7 @@ read_nondefault_variables(void)
FreeFile(fp);
}
-
-#endif /* EXEC_BACKEND */
+#endif /* EXEC_BACKEND */
/*
@@ -5317,15 +5306,15 @@ ProcessGUCArray(ArrayType *array, GucSource source)
{
ereport(WARNING,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("could not parse setting for parameter \"%s\"", name)));
+ errmsg("could not parse setting for parameter \"%s\"", name)));
free(name);
continue;
}
/*
- * We process all these options at SUSET level. We assume that
- * the right to insert an option into pg_database or pg_authid was
- * checked when it was inserted.
+ * We process all these options at SUSET level. We assume that the
+ * right to insert an option into pg_database or pg_authid was checked
+ * when it was inserted.
*/
SetConfigOption(name, value, PGC_SUSET, source);
@@ -5515,7 +5504,7 @@ assign_log_destination(const char *value, bool doit, GucSource source)
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid list syntax for parameter \"log_destination\"")));
+ errmsg("invalid list syntax for parameter \"log_destination\"")));
return NULL;
}
@@ -5538,8 +5527,8 @@ assign_log_destination(const char *value, bool doit, GucSource source)
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("unrecognized \"log_destination\" key word: \"%s\"",
- tok)));
+ errmsg("unrecognized \"log_destination\" key word: \"%s\"",
+ tok)));
pfree(rawstring);
list_free(elemlist);
return NULL;
@@ -5560,7 +5549,7 @@ assign_log_destination(const char *value, bool doit, GucSource source)
static const char *
assign_syslog_facility(const char *facility, bool doit, GucSource source)
{
- int syslog_fac;
+ int syslog_fac;
if (pg_strcasecmp(facility, "LOCAL0") == 0)
syslog_fac = LOG_LOCAL0;
@@ -5599,8 +5588,7 @@ assign_syslog_ident(const char *ident, bool doit, GucSource source)
return ident;
}
-
-#endif /* HAVE_SYSLOG */
+#endif /* HAVE_SYSLOG */
static const char *
@@ -5690,8 +5678,8 @@ assign_msglvl(int *var, const char *newval, bool doit, GucSource source)
}
/*
- * Client_min_messages always prints 'info', but we allow it as a
- * value anyway.
+ * Client_min_messages always prints 'info', but we allow it as a value
+ * anyway.
*/
else if (pg_strcasecmp(newval, "info") == 0)
{
@@ -5784,8 +5772,8 @@ static const char *
show_num_temp_buffers(void)
{
/*
- * We show the GUC var until local buffers have been initialized,
- * and NLocBuffer afterwards.
+ * We show the GUC var until local buffers have been initialized, and
+ * NLocBuffer afterwards.
*/
static char nbuf[32];
@@ -5801,7 +5789,7 @@ assign_phony_autocommit(bool newval, bool doit, GucSource source)
if (doit && source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SET AUTOCOMMIT TO OFF is no longer supported")));
+ errmsg("SET AUTOCOMMIT TO OFF is no longer supported")));
return false;
}
return true;
@@ -5844,8 +5832,8 @@ assign_custom_variable_classes(const char *newval, bool doit, GucSource source)
if (hasSpaceAfterToken || !isalnum(c))
{
/*
- * Syntax error due to token following space after token or
- * non alpha numeric character
+ * Syntax error due to token following space after token or non
+ * alpha numeric character
*/
ereport(LOG,
(errcode(ERRCODE_SYNTAX_ERROR),
diff --git a/src/backend/utils/misc/pg_rusage.c b/src/backend/utils/misc/pg_rusage.c
index a4a6d9e586d..cf7bbb427cb 100644
--- a/src/backend/utils/misc/pg_rusage.c
+++ b/src/backend/utils/misc/pg_rusage.c
@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/pg_rusage.c,v 1.1 2005/10/03 22:52:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/pg_rusage.c,v 1.2 2005/10/15 02:49:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -65,9 +65,9 @@ pg_rusage_show(const PGRUsage *ru0)
snprintf(result, sizeof(result),
"CPU %d.%02ds/%d.%02du sec elapsed %d.%02d sec",
(int) (ru1.ru.ru_stime.tv_sec - ru0->ru.ru_stime.tv_sec),
- (int) (ru1.ru.ru_stime.tv_usec - ru0->ru.ru_stime.tv_usec) / 10000,
+ (int) (ru1.ru.ru_stime.tv_usec - ru0->ru.ru_stime.tv_usec) / 10000,
(int) (ru1.ru.ru_utime.tv_sec - ru0->ru.ru_utime.tv_sec),
- (int) (ru1.ru.ru_utime.tv_usec - ru0->ru.ru_utime.tv_usec) / 10000,
+ (int) (ru1.ru.ru_utime.tv_usec - ru0->ru.ru_utime.tv_usec) / 10000,
(int) (ru1.tv.tv_sec - ru0->tv.tv_sec),
(int) (ru1.tv.tv_usec - ru0->tv.tv_usec) / 10000);
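The /10000 divisions re-aligned above turn microsecond deltas into hundredths of a second for the %02d fields. A tiny self-contained example of the same arithmetic, using made-up timestamps:

#include <stdio.h>

int
main(void)
{
    long    start_sec = 10, start_usec = 250000;
    long    end_sec = 13, end_usec = 730000;
    char    result[64];

    snprintf(result, sizeof(result), "elapsed %d.%02d sec",
             (int) (end_sec - start_sec),
             (int) (end_usec - start_usec) / 10000);
    printf("%s\n", result);     /* prints "elapsed 3.48 sec" */
    return 0;
}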
diff --git a/src/backend/utils/misc/ps_status.c b/src/backend/utils/misc/ps_status.c
index 1627cf49f73..af1421cd2f6 100644
--- a/src/backend/utils/misc/ps_status.c
+++ b/src/backend/utils/misc/ps_status.c
@@ -5,7 +5,7 @@
* to contain some useful information. Mechanism differs wildly across
* platforms.
*
- * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.24 2005/05/24 07:16:27 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.25 2005/10/15 02:49:36 momjian Exp $
*
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
* various details abducted from various places
@@ -85,7 +85,6 @@ extern char **environ;
#define PS_BUFFER_SIZE 256
static char ps_buffer[PS_BUFFER_SIZE];
static const size_t ps_buffer_size = PS_BUFFER_SIZE;
-
#else /* PS_USE_CLOBBER_ARGV */
static char *ps_buffer; /* will point to argv area */
static size_t ps_buffer_size; /* space determined at run time */
@@ -98,20 +97,22 @@ static int save_argc;
static char **save_argv;
#ifdef WIN32
- /*
- * Win32 does not support showing any changed arguments. To make it
- * at all possible to track which backend is doing what, we create
- * a named object that can be viewed with for example Process Explorer
- */
+
+ /*
+ * Win32 does not support showing any changed arguments. To make it at all
+ * possible to track which backend is doing what, we create a named object
+ * that can be viewed with for example Process Explorer
+ */
static HANDLE ident_handle = INVALID_HANDLE_VALUE;
-static void pgwin32_update_ident(char *ident)
+static void
+pgwin32_update_ident(char *ident)
{
- char name[PS_BUFFER_SIZE+32];
+ char name[PS_BUFFER_SIZE + 32];
if (ident_handle != INVALID_HANDLE_VALUE)
CloseHandle(ident_handle);
- sprintf(name,"pgident: %s",ident);
+ sprintf(name, "pgident: %s", ident);
ident_handle = CreateEvent(NULL,
TRUE,
@@ -130,7 +131,7 @@ static void pgwin32_update_ident(char *ident)
* environment strings may be moved, so this should be called before any code
* that might try to hang onto a getenv() result.)
*/
-char **
+char **
save_ps_display_args(int argc, char **argv)
{
save_argc = argc;
@@ -139,8 +140,8 @@ save_ps_display_args(int argc, char **argv)
#if defined(PS_USE_CLOBBER_ARGV)
/*
- * If we're going to overwrite the argv area, count the available
- * space. Also move the environment to make additional room.
+ * If we're going to overwrite the argv area, count the available space.
+ * Also move the environment to make additional room.
*/
{
char *end_of_area = NULL;
@@ -193,12 +194,12 @@ save_ps_display_args(int argc, char **argv)
* argument parsing purposes.
*
* (NB: do NOT think to remove the copying of argv[], even though
- * postmaster.c finishes looking at argv[] long before we ever
- * consider changing the ps display. On some platforms, getopt()
- * keeps pointers into the argv array, and will get horribly confused
- * when it is re-called to analyze a subprocess' argument string if
- * the argv storage has been clobbered meanwhile. Other platforms
- * have other dependencies on argv[].
+ * postmaster.c finishes looking at argv[] long before we ever consider
+ * changing the ps display. On some platforms, getopt() keeps pointers
+ * into the argv array, and will get horribly confused when it is
+ * re-called to analyze a subprocess' argument string if the argv storage
+ * has been clobbered meanwhile. Other platforms have other dependencies
+ * on argv[].
*/
{
char **new_argv;
@@ -220,8 +221,7 @@ save_ps_display_args(int argc, char **argv)
argv = new_argv;
}
-#endif /* PS_USE_CHANGE_ARGV or
- * PS_USE_CLOBBER_ARGV */
+#endif /* PS_USE_CHANGE_ARGV or PS_USE_CLOBBER_ARGV */
return argv;
}
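The comment block above stresses that argv[] has to be deep-copied before the ps display code may overwrite the original storage, because getopt() on some platforms keeps pointers into that storage. A minimal sketch of the idea; copy_argv is a name invented for this example, not the backend routine:

#include <stdlib.h>
#include <string.h>

static char **
copy_argv(int argc, char **argv)
{
    char  **new_argv = malloc((argc + 1) * sizeof(char *));
    int     i;

    if (new_argv == NULL)
        return NULL;
    for (i = 0; i < argc; i++)
        new_argv[i] = strdup(argv[i]);
    new_argv[argc] = NULL;
    return new_argv;
}

int
main(int argc, char **argv)
{
    char  **saved = copy_argv(argc, argv);

    /* parse options from "saved"; the original argv area may now be clobbered */
    return saved != NULL ? 0 : 1;
}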
@@ -278,8 +278,8 @@ init_ps_display(const char *username, const char *dbname,
#ifdef PS_USE_SETPROCTITLE
/*
- * apparently setproctitle() already adds a `progname:' prefix to the
- * ps line
+ * apparently setproctitle() already adds a `progname:' prefix to the ps
+ * line
*/
snprintf(ps_buffer, ps_buffer_size,
"%s %s %s ",
@@ -295,7 +295,6 @@ init_ps_display(const char *username, const char *dbname,
#ifdef WIN32
pgwin32_update_ident(ps_buffer);
#endif
-
#endif /* not PS_USE_NONE */
}
@@ -360,7 +359,6 @@ set_ps_display(const char *activity)
#ifdef WIN32
pgwin32_update_ident(ps_buffer);
#endif
-
#endif /* not PS_USE_NONE */
}
diff --git a/src/backend/utils/misc/superuser.c b/src/backend/utils/misc/superuser.c
index c9c17cef704..6eba2fb9354 100644
--- a/src/backend/utils/misc/superuser.c
+++ b/src/backend/utils/misc/superuser.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/superuser.c,v 1.33 2005/08/15 02:40:26 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/superuser.c,v 1.34 2005/10/15 02:49:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -32,9 +32,9 @@
* the status of the last requested roleid. The cache can be flushed
* at need by watching for cache update events on pg_authid.
*/
-static Oid last_roleid = InvalidOid; /* InvalidOid == cache not valid */
-static bool last_roleid_is_super = false;
-static bool roleid_callback_registered = false;
+static Oid last_roleid = InvalidOid; /* InvalidOid == cache not valid */
+static bool last_roleid_is_super = false;
+static bool roleid_callback_registered = false;
static void RoleidCallback(Datum arg, Oid relid);
diff --git a/src/backend/utils/mmgr/aset.c b/src/backend/utils/mmgr/aset.c
index 6c1b5f390da..70bcf778a14 100644
--- a/src/backend/utils/mmgr/aset.c
+++ b/src/backend/utils/mmgr/aset.c
@@ -11,7 +11,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/aset.c,v 1.63 2005/09/01 18:15:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/aset.c,v 1.64 2005/10/15 02:49:36 momjian Exp $
*
* NOTE:
* This is a new (Feb. 05, 1999) implementation of the allocation set
@@ -140,8 +140,7 @@ typedef struct AllocSetContext
/* Allocation parameters for this context: */
Size initBlockSize; /* initial block size */
Size maxBlockSize; /* maximum block size */
- AllocBlock keeper; /* if not NULL, keep this block over
- * resets */
+ AllocBlock keeper; /* if not NULL, keep this block over resets */
} AllocSetContext;
typedef AllocSetContext *AllocSet;
@@ -342,8 +341,8 @@ AllocSetContextCreate(MemoryContext parent,
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory"),
- errdetail("Failed while creating memory context \"%s\".",
- name)));
+ errdetail("Failed while creating memory context \"%s\".",
+ name)));
}
block->aset = context;
block->freeptr = ((char *) block) + ALLOC_BLOCKHDRSZ;
@@ -505,8 +504,8 @@ AllocSetAlloc(MemoryContext context, Size size)
AssertArg(AllocSetIsValid(set));
/*
- * If requested size exceeds maximum for chunks, allocate an entire
- * block for this request.
+ * If requested size exceeds maximum for chunks, allocate an entire block
+ * for this request.
*/
if (size > ALLOC_CHUNK_LIMIT)
{
@@ -536,8 +535,8 @@ AllocSetAlloc(MemoryContext context, Size size)
#endif
/*
- * Stick the new block underneath the active allocation block, so
- * that we don't lose the use of the space remaining therein.
+ * Stick the new block underneath the active allocation block, so that
+ * we don't lose the use of the space remaining therein.
*/
if (set->blocks != NULL)
{
@@ -558,8 +557,7 @@ AllocSetAlloc(MemoryContext context, Size size)
/*
* Request is small enough to be treated as a chunk. Look in the
- * corresponding free list to see if there is a free chunk we could
- * reuse.
+ * corresponding free list to see if there is a free chunk we could reuse.
*/
fidx = AllocSetFreeIndex(size);
priorfree = NULL;
@@ -571,8 +569,8 @@ AllocSetAlloc(MemoryContext context, Size size)
}
/*
- * If one is found, remove it from the free list, make it again a
- * member of the alloc set and return its data address.
+ * If one is found, remove it from the free list, make it again a member
+ * of the alloc set and return its data address.
*/
if (chunk != NULL)
{
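As an aside on the free lists the comments above keep referring to: each list holds chunks of one power-of-2 size, and a request maps to the smallest list whose chunks can hold it. A rough sketch of that mapping (the constant is an assumption, and this is not AllocSetFreeIndex() itself):

    #include <stddef.h>

    #define SKETCH_MINBITS 3            /* assumption: smallest chunk is 8 bytes */

    static int
    sketch_freelist_index(size_t size)
    {
        int     idx = 0;
        size_t  chunk = (size_t) 1 << SKETCH_MINBITS;

        /* climb to the first power-of-2 chunk size that can hold the request */
        while (chunk < size)
        {
            chunk <<= 1;
            idx++;
        }
        return idx;
    }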
@@ -604,8 +602,8 @@ AllocSetAlloc(MemoryContext context, Size size)
Assert(chunk_size >= size);
/*
- * If there is enough room in the active allocation block, we will put
- * the chunk into that block. Else must start a new one.
+ * If there is enough room in the active allocation block, we will put the
+ * chunk into that block. Else must start a new one.
*/
if ((block = set->blocks) != NULL)
{
@@ -614,16 +612,16 @@ AllocSetAlloc(MemoryContext context, Size size)
if (availspace < (chunk_size + ALLOC_CHUNKHDRSZ))
{
/*
- * The existing active (top) block does not have enough room
- * for the requested allocation, but it might still have a
- * useful amount of space in it. Once we push it down in the
- * block list, we'll never try to allocate more space from it.
- * So, before we do that, carve up its free space into chunks
- * that we can put on the set's freelists.
+ * The existing active (top) block does not have enough room for
+ * the requested allocation, but it might still have a useful
+ * amount of space in it. Once we push it down in the block list,
+ * we'll never try to allocate more space from it. So, before we
+ * do that, carve up its free space into chunks that we can put on
+ * the set's freelists.
*
* Because we can only get here when there's less than
- * ALLOC_CHUNK_LIMIT left in the block, this loop cannot
- * iterate more than ALLOCSET_NUM_FREELISTS-1 times.
+ * ALLOC_CHUNK_LIMIT left in the block, this loop cannot iterate
+ * more than ALLOCSET_NUM_FREELISTS-1 times.
*/
while (availspace >= ((1 << ALLOC_MINBITS) + ALLOC_CHUNKHDRSZ))
{
@@ -631,10 +629,9 @@ AllocSetAlloc(MemoryContext context, Size size)
int a_fidx = AllocSetFreeIndex(availchunk);
/*
- * In most cases, we'll get back the index of the next
- * larger freelist than the one we need to put this chunk
- * on. The exception is when availchunk is exactly a
- * power of 2.
+ * In most cases, we'll get back the index of the next larger
+ * freelist than the one we need to put this chunk on. The
+ * exception is when availchunk is exactly a power of 2.
*/
if (availchunk != (1 << (a_fidx + ALLOC_MINBITS)))
{
@@ -676,11 +673,11 @@ AllocSetAlloc(MemoryContext context, Size size)
else
{
/*
- * Use first power of 2 that is larger than previous block,
- * but not more than the allowed limit. (We don't simply
- * double the prior block size, because in some cases this
- * could be a funny size, eg if very first allocation was for
- * an odd-sized large chunk.)
+ * Use first power of 2 that is larger than previous block, but
+ * not more than the allowed limit. (We don't simply double the
+ * prior block size, because in some cases this could be a funny
+ * size, eg if very first allocation was for an odd-sized large
+ * chunk.)
*/
Size pblksize = set->blocks->endptr - ((char *) set->blocks);
@@ -692,8 +689,8 @@ AllocSetAlloc(MemoryContext context, Size size)
}
/*
- * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need
- * more space... but try to keep it a power of 2.
+ * If initBlockSize is less than ALLOC_CHUNK_LIMIT, we could need more
+ * space... but try to keep it a power of 2.
*/
required_size = chunk_size + ALLOC_BLOCKHDRSZ + ALLOC_CHUNKHDRSZ;
while (blksize < required_size)
@@ -703,9 +700,8 @@ AllocSetAlloc(MemoryContext context, Size size)
block = (AllocBlock) malloc(blksize);
/*
- * We could be asking for pretty big blocks here, so cope if
- * malloc fails. But give up if there's less than a meg or so
- * available...
+ * We could be asking for pretty big blocks here, so cope if malloc
+ * fails. But give up if there's less than a meg or so available...
*/
while (block == NULL && blksize > 1024 * 1024)
{
@@ -730,13 +726,13 @@ AllocSetAlloc(MemoryContext context, Size size)
block->endptr = ((char *) block) + blksize;
/*
- * If this is the first block of the set, make it the "keeper"
- * block. Formerly, a keeper block could only be created during
- * context creation, but allowing it to happen here lets us have
- * fast reset cycling even for contexts created with
- * minContextSize = 0; that way we don't have to force space to be
- * allocated in contexts that might never need any space. Don't
- * mark an oversize block as a keeper, however.
+ * If this is the first block of the set, make it the "keeper" block.
+ * Formerly, a keeper block could only be created during context
+ * creation, but allowing it to happen here lets us have fast reset
+ * cycling even for contexts created with minContextSize = 0; that way
+ * we don't have to force space to be allocated in contexts that might
+ * never need any space. Don't mark an oversize block as a keeper,
+ * however.
*/
if (set->blocks == NULL && blksize == set->initBlockSize)
{
@@ -870,8 +866,8 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
/*
* Chunk sizes are aligned to power of 2 in AllocSetAlloc(). Maybe the
- * allocated area already is >= the new size. (In particular, we
- * always fall out here if the requested size is a decrease.)
+ * allocated area already is >= the new size. (In particular, we always
+ * fall out here if the requested size is a decrease.)
*/
if (oldsize >= size)
{
@@ -887,9 +883,9 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
if (oldsize > ALLOC_CHUNK_LIMIT)
{
/*
- * The chunk must been allocated as a single-chunk block. Find
- * the containing block and use realloc() to make it bigger with
- * minimum space wastage.
+			 * The chunk must have been allocated as a single-chunk block. Find the
+ * containing block and use realloc() to make it bigger with minimum
+ * space wastage.
*/
AllocBlock block = set->blocks;
AllocBlock prevblock = NULL;
@@ -944,15 +940,15 @@ AllocSetRealloc(MemoryContext context, void *pointer, Size size)
else
{
/*
- * Small-chunk case. If the chunk is the last one in its block,
- * there might be enough free space after it that we can just
- * enlarge the chunk in-place. It's relatively painful to find
- * the containing block in the general case, but we can detect
- * last-ness quite cheaply for the typical case where the chunk is
- * in the active (topmost) allocation block. (At least with the
- * regression tests and code as of 1/2001, realloc'ing the last
- * chunk of a non-topmost block hardly ever happens, so it's not
- * worth scanning the block list to catch that case.)
+ * Small-chunk case. If the chunk is the last one in its block, there
+ * might be enough free space after it that we can just enlarge the
+ * chunk in-place. It's relatively painful to find the containing
+ * block in the general case, but we can detect last-ness quite
+ * cheaply for the typical case where the chunk is in the active
+ * (topmost) allocation block. (At least with the regression tests
+ * and code as of 1/2001, realloc'ing the last chunk of a non-topmost
+ * block hardly ever happens, so it's not worth scanning the block
+ * list to catch that case.)
*
* NOTE: must be careful not to create a chunk of a size that
* AllocSetAlloc would not create, else we'll get confused later.
@@ -1031,10 +1027,10 @@ AllocSetIsEmpty(MemoryContext context)
AllocSet set = (AllocSet) context;
/*
- * For now, we say "empty" only if the context is new or just reset.
- * We could examine the freelists to determine if all space has been
- * freed, but it's not really worth the trouble for present uses of
- * this functionality.
+ * For now, we say "empty" only if the context is new or just reset. We
+ * could examine the freelists to determine if all space has been freed,
+ * but it's not really worth the trouble for present uses of this
+ * functionality.
*/
if (set->isReset)
return true;
@@ -1073,7 +1069,7 @@ AllocSetStats(MemoryContext context)
}
}
fprintf(stderr,
- "%s: %ld total in %ld blocks; %ld free (%ld chunks); %ld used\n",
+ "%s: %ld total in %ld blocks; %ld free (%ld chunks); %ld used\n",
set->header.name, totalspace, nblocks, freespace, nchunks,
totalspace - freespace);
}
@@ -1144,9 +1140,9 @@ AllocSetCheck(MemoryContext context)
name, chunk, block);
/*
- * If chunk is allocated, check for correct aset pointer. (If
- * it's free, the aset is the freelist pointer, which we can't
- * check as easily...)
+ * If chunk is allocated, check for correct aset pointer. (If it's
+ * free, the aset is the freelist pointer, which we can't check as
+ * easily...)
*/
if (dsize > 0 && chunk->aset != (void *) set)
elog(WARNING, "problem in alloc set %s: bogus aset link in block %p, chunk %p",
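Taken together, the block-sizing comments in this file (pick the first power of 2 larger than the previous block, cap it at maxBlockSize, then keep doubling until an oversized request fits) amount to roughly the following; the standalone helper and its names are assumptions for illustration:

    #include <stddef.h>

    static size_t
    sketch_next_block_size(size_t prev_blksize, size_t max_blksize, size_t required)
    {
        size_t  blksize = 1;

        /* first power of 2 strictly larger than the previous block ... */
        while (blksize <= prev_blksize)
            blksize <<= 1;
        /* ... but never above the configured maximum */
        if (blksize > max_blksize)
            blksize = max_blksize;
        /* an oversized request may still not fit; keep doubling (stays a power of 2) */
        while (blksize < required)
            blksize <<= 1;
        return blksize;
    }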
diff --git a/src/backend/utils/mmgr/mcxt.c b/src/backend/utils/mmgr/mcxt.c
index 6d68e30f7eb..b6532730226 100644
--- a/src/backend/utils/mmgr/mcxt.c
+++ b/src/backend/utils/mmgr/mcxt.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/mcxt.c,v 1.55 2005/05/14 23:16:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/mcxt.c,v 1.56 2005/10/15 02:49:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -78,8 +78,8 @@ MemoryContextInit(void)
AssertState(TopMemoryContext == NULL);
/*
- * Initialize TopMemoryContext as an AllocSetContext with slow growth
- * rate --- we don't really expect much to be allocated in it.
+ * Initialize TopMemoryContext as an AllocSetContext with slow growth rate
+ * --- we don't really expect much to be allocated in it.
*
* (There is special-case code in MemoryContextCreate() for this call.)
*/
@@ -90,18 +90,18 @@ MemoryContextInit(void)
8 * 1024);
/*
- * Not having any other place to point CurrentMemoryContext, make it
- * point to TopMemoryContext. Caller should change this soon!
+ * Not having any other place to point CurrentMemoryContext, make it point
+ * to TopMemoryContext. Caller should change this soon!
*/
CurrentMemoryContext = TopMemoryContext;
/*
- * Initialize ErrorContext as an AllocSetContext with slow growth rate
- * --- we don't really expect much to be allocated in it. More to the
- * point, require it to contain at least 8K at all times. This is the
- * only case where retained memory in a context is *essential* --- we
- * want to be sure ErrorContext still has some memory even if we've
- * run out elsewhere!
+ * Initialize ErrorContext as an AllocSetContext with slow growth rate ---
+ * we don't really expect much to be allocated in it. More to the point,
+ * require it to contain at least 8K at all times. This is the only case
+ * where retained memory in a context is *essential* --- we want to be
+ * sure ErrorContext still has some memory even if we've run out
+ * elsewhere!
*/
ErrorContext = AllocSetContextCreate(TopMemoryContext,
"ErrorContext",
@@ -169,9 +169,9 @@ MemoryContextDelete(MemoryContext context)
MemoryContextDeleteChildren(context);
/*
- * We delink the context from its parent before deleting it, so that
- * if there's an error we won't have deleted/busted contexts still
- * attached to the context tree. Better a leak than a crash.
+ * We delink the context from its parent before deleting it, so that if
+ * there's an error we won't have deleted/busted contexts still attached
+ * to the context tree. Better a leak than a crash.
*/
if (context->parent)
{
@@ -208,8 +208,8 @@ MemoryContextDeleteChildren(MemoryContext context)
AssertArg(MemoryContextIsValid(context));
/*
- * MemoryContextDelete will delink the child from me, so just iterate
- * as long as there is a child.
+ * MemoryContextDelete will delink the child from me, so just iterate as
+ * long as there is a child.
*/
while (context->firstchild != NULL)
MemoryContextDelete(context->firstchild);
@@ -384,9 +384,9 @@ MemoryContextContains(MemoryContext context, void *pointer)
((char *) pointer - STANDARDCHUNKHEADERSIZE);
/*
- * If the context link doesn't match then we certainly have a
- * non-member chunk. Also check for a reasonable-looking size as
- * extra guard against being fooled by bogus pointers.
+ * If the context link doesn't match then we certainly have a non-member
+ * chunk. Also check for a reasonable-looking size as extra guard against
+ * being fooled by bogus pointers.
*/
if (header->context == context && AllocSizeIsValid(header->size))
return true;
@@ -640,7 +640,6 @@ MemoryContextSwitchTo(MemoryContext context)
CurrentMemoryContext = context;
return old;
}
-
#endif /* ! __GNUC__ */
/*
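For readers less familiar with the routines being reindented here, typical backend usage of this API looks roughly like the following; the context name and block sizes are arbitrary example values, not anything this commit prescribes:

    #include "postgres.h"
    #include "utils/memutils.h"

    static void
    sketch_context_usage(void)
    {
        MemoryContext mycxt;
        MemoryContext oldcxt;

        mycxt = AllocSetContextCreate(TopMemoryContext,
                                      "ExampleContext",
                                      0,                    /* minContextSize */
                                      8 * 1024,             /* initBlockSize */
                                      8 * 1024 * 1024);     /* maxBlockSize */
        oldcxt = MemoryContextSwitchTo(mycxt);
        /* ... palloc() allocations now come from mycxt ... */
        MemoryContextSwitchTo(oldcxt);
        MemoryContextDelete(mycxt);     /* deletes any children first, then mycxt */
    }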
diff --git a/src/backend/utils/mmgr/portalmem.c b/src/backend/utils/mmgr/portalmem.c
index b55a3430256..9866e12d68c 100644
--- a/src/backend/utils/mmgr/portalmem.c
+++ b/src/backend/utils/mmgr/portalmem.c
@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.81 2005/06/17 22:32:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.82 2005/10/15 02:49:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -272,8 +272,8 @@ PortalCreateHoldStore(Portal portal)
Assert(portal->holdStore == NULL);
/*
- * Create the memory context that is used for storage of the tuple
- * set. Note this is NOT a child of the portal's heap memory.
+ * Create the memory context that is used for storage of the tuple set.
+ * Note this is NOT a child of the portal's heap memory.
*/
portal->holdContext =
AllocSetContextCreate(PortalMemory,
@@ -305,10 +305,10 @@ PortalDrop(Portal portal, bool isTopCommit)
elog(ERROR, "cannot drop active portal");
/*
- * Remove portal from hash table. Because we do this first, we will
- * not come back to try to remove the portal again if there's any
- * error in the subsequent steps. Better to leak a little memory than
- * to get into an infinite error-recovery loop.
+ * Remove portal from hash table. Because we do this first, we will not
+ * come back to try to remove the portal again if there's any error in the
+ * subsequent steps. Better to leak a little memory than to get into an
+ * infinite error-recovery loop.
*/
PortalHashTableDelete(portal);
@@ -317,27 +317,27 @@ PortalDrop(Portal portal, bool isTopCommit)
(*portal->cleanup) (portal);
/*
- * Release any resources still attached to the portal. There are
- * several cases being covered here:
+ * Release any resources still attached to the portal. There are several
+ * cases being covered here:
*
- * Top transaction commit (indicated by isTopCommit): normally we should
- * do nothing here and let the regular end-of-transaction resource
- * releasing mechanism handle these resources too. However, if we
- * have a FAILED portal (eg, a cursor that got an error), we'd better
- * clean up its resources to avoid resource-leakage warning messages.
+ * Top transaction commit (indicated by isTopCommit): normally we should do
+ * nothing here and let the regular end-of-transaction resource releasing
+ * mechanism handle these resources too. However, if we have a FAILED
+ * portal (eg, a cursor that got an error), we'd better clean up its
+ * resources to avoid resource-leakage warning messages.
*
- * Sub transaction commit: never comes here at all, since we don't kill
- * any portals in AtSubCommit_Portals().
+ * Sub transaction commit: never comes here at all, since we don't kill any
+ * portals in AtSubCommit_Portals().
*
* Main or sub transaction abort: we will do nothing here because
* portal->resowner was already set NULL; the resources were already
* cleaned up in transaction abort.
*
- * Ordinary portal drop: must release resources. However, if the portal
- * is not FAILED then we do not release its locks. The locks become
- * the responsibility of the transaction's ResourceOwner (since it is
- * the parent of the portal's owner) and will be released when the
- * transaction eventually ends.
+ * Ordinary portal drop: must release resources. However, if the portal is
+ * not FAILED then we do not release its locks. The locks become the
+ * responsibility of the transaction's ResourceOwner (since it is the
+ * parent of the portal's owner) and will be released when the transaction
+ * eventually ends.
*/
if (portal->resowner &&
(!isTopCommit || portal->status == PORTAL_FAILED))
@@ -419,7 +419,7 @@ DropDependentPortals(MemoryContext queryContext)
bool
CommitHoldablePortals(void)
{
- bool result = false;
+ bool result = false;
HASH_SEQ_STATUS status;
PortalHashEnt *hentry;
@@ -435,27 +435,26 @@ CommitHoldablePortals(void)
portal->status == PORTAL_READY)
{
/*
- * We are exiting the transaction that created a holdable
- * cursor. Instead of dropping the portal, prepare it for
- * access by later transactions.
+ * We are exiting the transaction that created a holdable cursor.
+ * Instead of dropping the portal, prepare it for access by later
+ * transactions.
*
- * Note that PersistHoldablePortal() must release all resources
- * used by the portal that are local to the creating
- * transaction.
+ * Note that PersistHoldablePortal() must release all resources used
+ * by the portal that are local to the creating transaction.
*/
PortalCreateHoldStore(portal);
PersistHoldablePortal(portal);
/*
- * Any resources belonging to the portal will be released in
- * the upcoming transaction-wide cleanup; the portal will no
- * longer have its own resources.
+ * Any resources belonging to the portal will be released in the
+ * upcoming transaction-wide cleanup; the portal will no longer
+ * have its own resources.
*/
portal->resowner = NULL;
/*
- * Having successfully exported the holdable cursor, mark it
- * as not belonging to this transaction.
+ * Having successfully exported the holdable cursor, mark it as
+ * not belonging to this transaction.
*/
portal->createSubid = InvalidSubTransactionId;
@@ -480,7 +479,7 @@ CommitHoldablePortals(void)
bool
PrepareHoldablePortals(void)
{
- bool result = false;
+ bool result = false;
HASH_SEQ_STATUS status;
PortalHashEnt *hentry;
@@ -496,8 +495,8 @@ PrepareHoldablePortals(void)
portal->status == PORTAL_READY)
{
/*
- * We are exiting the transaction that created a holdable
- * cursor. Can't do PREPARE.
+ * We are exiting the transaction that created a holdable cursor.
+ * Can't do PREPARE.
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -527,8 +526,8 @@ AtCommit_Portals(void)
Portal portal = hentry->portal;
/*
- * Do not touch active portals --- this can only happen in the
- * case of a multi-transaction utility command, such as VACUUM.
+ * Do not touch active portals --- this can only happen in the case of
+ * a multi-transaction utility command, such as VACUUM.
*
* Note however that any resource owner attached to such a portal is
* still going to go away, so don't leave a dangling pointer.
@@ -579,8 +578,7 @@ AtAbort_Portals(void)
portal->status = PORTAL_FAILED;
/*
- * Do nothing else to cursors held over from a previous
- * transaction.
+ * Do nothing else to cursors held over from a previous transaction.
*/
if (portal->createSubid == InvalidSubTransactionId)
continue;
@@ -594,8 +592,8 @@ AtAbort_Portals(void)
/*
* Any resources belonging to the portal will be released in the
- * upcoming transaction-wide cleanup; they will be gone before we
- * run PortalDrop.
+ * upcoming transaction-wide cleanup; they will be gone before we run
+ * PortalDrop.
*/
portal->resowner = NULL;
}
@@ -686,11 +684,10 @@ AtSubAbort_Portals(SubTransactionId mySubid,
continue;
/*
- * Force any active portals of my own transaction into FAILED
- * state. This is mostly to ensure that a portal running a FETCH
- * will go FAILED if the underlying cursor fails. (Note we do NOT
- * want to do this to upper-level portals, since they may be able
- * to continue.)
+ * Force any active portals of my own transaction into FAILED state.
+ * This is mostly to ensure that a portal running a FETCH will go
+ * FAILED if the underlying cursor fails. (Note we do NOT want to do
+ * this to upper-level portals, since they may be able to continue.)
*
* This is only needed to dodge the sanity check in PortalDrop.
*/
@@ -701,11 +698,11 @@ AtSubAbort_Portals(SubTransactionId mySubid,
* If the portal is READY then allow it to survive into the parent
* transaction; otherwise shut it down.
*
- * Currently, we can't actually support that because the portal's
- * query might refer to objects created or changed in the failed
- * subtransaction, leading to crashes if execution is resumed.
- * So, even READY portals are deleted. It would be nice to detect
- * whether the query actually depends on any such object, instead.
+ * Currently, we can't actually support that because the portal's query
+ * might refer to objects created or changed in the failed
+ * subtransaction, leading to crashes if execution is resumed. So,
+ * even READY portals are deleted. It would be nice to detect whether
+ * the query actually depends on any such object, instead.
*/
#ifdef NOT_USED
if (portal->status == PORTAL_READY)
@@ -725,9 +722,9 @@ AtSubAbort_Portals(SubTransactionId mySubid,
}
/*
- * Any resources belonging to the portal will be released in
- * the upcoming transaction-wide cleanup; they will be gone
- * before we run PortalDrop.
+ * Any resources belonging to the portal will be released in the
+ * upcoming transaction-wide cleanup; they will be gone before we
+ * run PortalDrop.
*/
portal->resowner = NULL;
}
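The ordering argument in PortalDrop() above (unhook the portal from the hash table before doing anything that can fail) is a general error-recovery pattern; a hedged sketch with hypothetical names:

    /* All names here are hypothetical; only the ordering matters. */
    static void
    sketch_drop_entry(SketchTable *table, SketchEntry *entry)
    {
        /* Remove from the lookup structure FIRST: if cleanup errors out, a
         * retry cannot find the entry again and loop forever.  Better to
         * leak a little memory than to get stuck in error recovery. */
        sketch_table_delete(table, entry->name);
        sketch_release_resources(entry);    /* may raise an error */
        sketch_free(entry);                 /* reclaim the entry itself */
    }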
diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c
index 786652a757b..97933de820b 100644
--- a/src/backend/utils/resowner/resowner.c
+++ b/src/backend/utils/resowner/resowner.c
@@ -14,7 +14,7 @@
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/resowner/resowner.c,v 1.13 2005/08/08 19:17:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/resowner/resowner.c,v 1.14 2005/10/15 02:49:36 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -108,7 +108,7 @@ ResourceOwnerCreate(ResourceOwner parent, const char *name)
ResourceOwner owner;
owner = (ResourceOwner) MemoryContextAllocZero(TopMemoryContext,
- sizeof(ResourceOwnerData));
+ sizeof(ResourceOwnerData));
owner->name = name;
if (parent)
@@ -185,9 +185,9 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
ResourceOwnerReleaseInternal(child, phase, isCommit, isTopLevel);
/*
- * Make CurrentResourceOwner point to me, so that ReleaseBuffer etc
- * don't get confused. We needn't PG_TRY here because the outermost
- * level will fix it on error abort.
+ * Make CurrentResourceOwner point to me, so that ReleaseBuffer etc don't
+ * get confused. We needn't PG_TRY here because the outermost level will
+ * fix it on error abort.
*/
save = CurrentResourceOwner;
CurrentResourceOwner = owner;
@@ -195,16 +195,16 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
if (phase == RESOURCE_RELEASE_BEFORE_LOCKS)
{
/*
- * Release buffer pins. Note that ReleaseBuffer will
- * remove the buffer entry from my list, so I just have to
- * iterate till there are none.
+ * Release buffer pins. Note that ReleaseBuffer will remove the
+ * buffer entry from my list, so I just have to iterate till there are
+ * none.
*
- * During a commit, there shouldn't be any remaining pins ---
- * that would indicate failure to clean up the executor correctly ---
- * so issue warnings. In the abort case, just clean up quietly.
+ * During a commit, there shouldn't be any remaining pins --- that would
+ * indicate failure to clean up the executor correctly --- so issue
+ * warnings. In the abort case, just clean up quietly.
*
- * We are careful to do the releasing back-to-front, so as to
- * avoid O(N^2) behavior in ResourceOwnerForgetBuffer().
+ * We are careful to do the releasing back-to-front, so as to avoid
+ * O(N^2) behavior in ResourceOwnerForgetBuffer().
*/
while (owner->nbuffers > 0)
{
@@ -214,12 +214,12 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
}
/*
- * Release relcache references. Note that RelationClose will
- * remove the relref entry from my list, so I just have to
- * iterate till there are none.
+ * Release relcache references. Note that RelationClose will remove
+ * the relref entry from my list, so I just have to iterate till there
+ * are none.
*
- * As with buffer pins, warn if any are left at commit time,
- * and release back-to-front for speed.
+ * As with buffer pins, warn if any are left at commit time, and release
+ * back-to-front for speed.
*/
while (owner->nrelrefs > 0)
{
@@ -233,9 +233,9 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
if (isTopLevel)
{
/*
- * For a top-level xact we are going to release all locks (or
- * at least all non-session locks), so just do a single lmgr
- * call at the top of the recursion.
+ * For a top-level xact we are going to release all locks (or at
+ * least all non-session locks), so just do a single lmgr call at
+ * the top of the recursion.
*/
if (owner == TopTransactionResourceOwner)
ProcReleaseLocks(isCommit);
@@ -244,8 +244,8 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
{
/*
* Release locks retail. Note that if we are committing a
- * subtransaction, we do NOT release its locks yet, but
- * transfer them to the parent.
+ * subtransaction, we do NOT release its locks yet, but transfer
+ * them to the parent.
*/
Assert(owner->parent != NULL);
if (isCommit)
@@ -257,12 +257,12 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
else if (phase == RESOURCE_RELEASE_AFTER_LOCKS)
{
/*
- * Release catcache references. Note that ReleaseCatCache
- * will remove the catref entry from my list, so I just have
- * to iterate till there are none. Ditto for catcache lists.
+ * Release catcache references. Note that ReleaseCatCache will remove
+ * the catref entry from my list, so I just have to iterate till there
+ * are none. Ditto for catcache lists.
*
- * As with buffer pins, warn if any are left at commit time,
- * and release back-to-front for speed.
+ * As with buffer pins, warn if any are left at commit time, and release
+ * back-to-front for speed.
*/
while (owner->ncatrefs > 0)
{
@@ -309,16 +309,16 @@ ResourceOwnerDelete(ResourceOwner owner)
Assert(owner->nrelrefs == 0);
/*
- * Delete children. The recursive call will delink the child from me,
- * so just iterate as long as there is a child.
+ * Delete children. The recursive call will delink the child from me, so
+ * just iterate as long as there is a child.
*/
while (owner->firstchild != NULL)
ResourceOwnerDelete(owner->firstchild);
/*
* We delink the owner from its parent before deleting it, so that if
- * there's an error we won't have deleted/busted owners still attached
- * to the owner tree. Better a leak than a crash.
+ * there's an error we won't have deleted/busted owners still attached to
+ * the owner tree. Better a leak than a crash.
*/
ResourceOwnerNewParent(owner, NULL);
@@ -502,8 +502,8 @@ ResourceOwnerForgetBuffer(ResourceOwner owner, Buffer buffer)
/*
* Scan back-to-front because it's more likely we are releasing a
- * recently pinned buffer. This isn't always the case of course,
- * but it's the way to bet.
+ * recently pinned buffer. This isn't always the case of course, but
+ * it's the way to bet.
*/
for (i = nb1; i >= 0; i--)
{
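The "release back-to-front" remarks above depend on resources usually being forgotten in reverse order of being remembered; a small standalone sketch of that array bookkeeping (not the ResourceOwner code itself):

    #include <string.h>

    /* Forget one item from a remembered-items array.  Scanning from the end
     * finds recently remembered items immediately, so releasing everything
     * back-to-front stays roughly O(N) instead of O(N^2). */
    static void
    sketch_forget_item(int *items, int *nitems, int item)
    {
        int     i;

        for (i = *nitems - 1; i >= 0; i--)
        {
            if (items[i] == item)
            {
                memmove(&items[i], &items[i + 1],
                        (*nitems - i - 1) * sizeof(int));
                (*nitems)--;
                return;
            }
        }
    }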
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index e4066821de4..b8c760f4823 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -64,7 +64,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/logtape.c,v 1.15 2004/12/31 22:02:52 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/logtape.c,v 1.16 2005/10/15 02:49:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -91,10 +91,9 @@
typedef struct IndirectBlock
{
int nextSlot; /* next pointer slot to write or read */
- struct IndirectBlock *nextup; /* parent indirect level, or NULL
- * if top */
- long ptrs[BLOCKS_PER_INDIR_BLOCK]; /* indexes of contained
- * blocks */
+ struct IndirectBlock *nextup; /* parent indirect level, or NULL if
+ * top */
+ long ptrs[BLOCKS_PER_INDIR_BLOCK]; /* indexes of contained blocks */
} IndirectBlock;
/*
@@ -107,24 +106,23 @@ typedef struct LogicalTape
{
IndirectBlock *indirect; /* bottom of my indirect-block hierarchy */
bool writing; /* T while in write phase */
- bool frozen; /* T if blocks should not be freed when
- * read */
+ bool frozen; /* T if blocks should not be freed when read */
bool dirty; /* does buffer need to be written? */
/*
- * The total data volume in the logical tape is numFullBlocks * BLCKSZ
- * + lastBlockBytes. BUT: we do not update lastBlockBytes during
- * writing, only at completion of a write phase.
+ * The total data volume in the logical tape is numFullBlocks * BLCKSZ +
+ * lastBlockBytes. BUT: we do not update lastBlockBytes during writing,
+ * only at completion of a write phase.
*/
long numFullBlocks; /* number of complete blocks in log tape */
int lastBlockBytes; /* valid bytes in last (incomplete) block */
/*
* Buffer for current data block. Note we don't bother to store the
- * actual file block number of the data block (during the write phase
- * it hasn't been assigned yet, and during read we don't care
- * anymore). But we do need the relative block number so we can detect
- * end-of-tape while reading.
+ * actual file block number of the data block (during the write phase it
+ * hasn't been assigned yet, and during read we don't care anymore). But
+ * we do need the relative block number so we can detect end-of-tape while
+ * reading.
*/
long curBlockNumber; /* this block's logical blk# within tape */
int pos; /* next read/write position in buffer */
@@ -144,20 +142,18 @@ struct LogicalTapeSet
long nFileBlocks; /* # of blocks used in underlying file */
/*
- * We store the numbers of recycled-and-available blocks in
- * freeBlocks[]. When there are no such blocks, we extend the
- * underlying file. Note that the block numbers in freeBlocks are
- * always in *decreasing* order, so that removing the last entry gives
- * us the lowest free block.
+ * We store the numbers of recycled-and-available blocks in freeBlocks[].
+ * When there are no such blocks, we extend the underlying file. Note
+ * that the block numbers in freeBlocks are always in *decreasing* order,
+ * so that removing the last entry gives us the lowest free block.
*/
long *freeBlocks; /* resizable array */
int nFreeBlocks; /* # of currently free blocks */
- int freeBlocksLen; /* current allocated length of
- * freeBlocks[] */
+ int freeBlocksLen; /* current allocated length of freeBlocks[] */
/*
- * tapes[] is declared size 1 since C wants a fixed size, but actually
- * it is of length nTapes.
+ * tapes[] is declared size 1 since C wants a fixed size, but actually it
+ * is of length nTapes.
*/
int nTapes; /* # of logical tapes in set */
LogicalTape *tapes[1]; /* must be last in struct! */
@@ -232,9 +228,9 @@ static long
ltsGetFreeBlock(LogicalTapeSet *lts)
{
/*
- * If there are multiple free blocks, we select the one appearing last
- * in freeBlocks[]. If there are none, assign the next block at the
- * end of the file.
+ * If there are multiple free blocks, we select the one appearing last in
+ * freeBlocks[]. If there are none, assign the next block at the end of
+ * the file.
*/
if (lts->nFreeBlocks > 0)
return lts->freeBlocks[--lts->nFreeBlocks];
@@ -258,14 +254,14 @@ ltsReleaseBlock(LogicalTapeSet *lts, long blocknum)
{
lts->freeBlocksLen *= 2;
lts->freeBlocks = (long *) repalloc(lts->freeBlocks,
- lts->freeBlocksLen * sizeof(long));
+ lts->freeBlocksLen * sizeof(long));
}
/*
* Insert blocknum into array, preserving decreasing order (so that
- * ltsGetFreeBlock returns the lowest available block number). This
- * could get fairly slow if there were many free blocks, but we don't
- * expect there to be very many at one time.
+ * ltsGetFreeBlock returns the lowest available block number). This could
+ * get fairly slow if there were many free blocks, but we don't expect
+ * there to be very many at one time.
*/
ndx = lts->nFreeBlocks++;
ptr = lts->freeBlocks + ndx;
@@ -293,8 +289,8 @@ ltsRecordBlockNum(LogicalTapeSet *lts, IndirectBlock *indirect,
if (indirect->nextSlot >= BLOCKS_PER_INDIR_BLOCK)
{
/*
- * This indirect block is full, so dump it out and recursively
- * save its address in the next indirection level. Create a new
+ * This indirect block is full, so dump it out and recursively save
+ * its address in the next indirection level. Create a new
* indirection level if there wasn't one before.
*/
long indirblock = ltsGetFreeBlock(lts);
@@ -336,8 +332,8 @@ ltsRewindIndirectBlock(LogicalTapeSet *lts,
indirect->ptrs[indirect->nextSlot] = -1L;
/*
- * If block is not topmost, write it out, and recurse to obtain
- * address of first block in this hierarchy level. Read that one in.
+ * If block is not topmost, write it out, and recurse to obtain address of
+ * first block in this hierarchy level. Read that one in.
*/
if (indirect->nextup != NULL)
{
@@ -371,8 +367,8 @@ ltsRewindFrozenIndirectBlock(LogicalTapeSet *lts,
IndirectBlock *indirect)
{
/*
- * If block is not topmost, recurse to obtain address of first block
- * in this hierarchy level. Read that one in.
+ * If block is not topmost, recurse to obtain address of first block in
+ * this hierarchy level. Read that one in.
*/
if (indirect->nextup != NULL)
{
@@ -448,8 +444,8 @@ ltsRecallPrevBlockNum(LogicalTapeSet *lts,
ltsReadBlock(lts, indirblock, (void *) indirect->ptrs);
/*
- * The previous block would only have been written out if full, so
- * we need not search it for a -1 sentinel.
+ * The previous block would only have been written out if full, so we
+ * need not search it for a -1 sentinel.
*/
indirect->nextSlot = BLOCKS_PER_INDIR_BLOCK + 1;
}
@@ -471,8 +467,8 @@ LogicalTapeSetCreate(int ntapes)
int i;
/*
- * Create top-level struct. First LogicalTape pointer is already
- * counted in sizeof(LogicalTapeSet).
+ * Create top-level struct. First LogicalTape pointer is already counted
+ * in sizeof(LogicalTapeSet).
*/
Assert(ntapes > 0);
lts = (LogicalTapeSet *) palloc(sizeof(LogicalTapeSet) +
@@ -617,8 +613,8 @@ LogicalTapeRewind(LogicalTapeSet *lts, int tapenum, bool forWrite)
if (lt->writing)
{
/*
- * Completion of a write phase. Flush last partial data
- * block, flush any partial indirect blocks, rewind for normal
+ * Completion of a write phase. Flush last partial data block,
+ * flush any partial indirect blocks, rewind for normal
* (destructive) read.
*/
if (lt->dirty)
@@ -630,8 +626,8 @@ LogicalTapeRewind(LogicalTapeSet *lts, int tapenum, bool forWrite)
else
{
/*
- * This is only OK if tape is frozen; we rewind for (another)
- * read pass.
+ * This is only OK if tape is frozen; we rewind for (another) read
+ * pass.
*/
Assert(lt->frozen);
datablocknum = ltsRewindFrozenIndirectBlock(lts, lt->indirect);
@@ -656,8 +652,8 @@ LogicalTapeRewind(LogicalTapeSet *lts, int tapenum, bool forWrite)
*
* NOTE: we assume the caller has read the tape to the end; otherwise
* untouched data and indirect blocks will not have been freed. We
- * could add more code to free any unread blocks, but in current
- * usage of this module it'd be useless code.
+ * could add more code to free any unread blocks, but in current usage
+ * of this module it'd be useless code.
*/
IndirectBlock *ib,
*nextib;
@@ -757,8 +753,8 @@ LogicalTapeFreeze(LogicalTapeSet *lts, int tapenum)
Assert(lt->writing);
/*
- * Completion of a write phase. Flush last partial data block, flush
- * any partial indirect blocks, rewind for nondestructive read.
+ * Completion of a write phase. Flush last partial data block, flush any
+ * partial indirect blocks, rewind for nondestructive read.
*/
if (lt->dirty)
ltsDumpBuffer(lts, lt);
@@ -826,9 +822,9 @@ LogicalTapeBackspace(LogicalTapeSet *lts, int tapenum, size_t size)
return false; /* a seek too far... */
/*
- * OK, we need to back up nblocks blocks. This implementation would
- * be pretty inefficient for long seeks, but we really aren't
- * expecting that (a seek over one tuple is typical).
+ * OK, we need to back up nblocks blocks. This implementation would be
+ * pretty inefficient for long seeks, but we really aren't expecting that
+ * (a seek over one tuple is typical).
*/
while (nblocks-- > 0)
{
@@ -883,9 +879,9 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
return false;
/*
- * OK, advance or back up to the target block. This implementation
- * would be pretty inefficient for long seeks, but we really aren't
- * expecting that (a seek over one tuple is typical).
+ * OK, advance or back up to the target block. This implementation would
+ * be pretty inefficient for long seeks, but we really aren't expecting
+ * that (a seek over one tuple is typical).
*/
while (lt->curBlockNumber > blocknum)
{
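The free-block bookkeeping described above, with freeBlocks[] kept in decreasing order so that popping the last element always yields the lowest block number, can be sketched as follows; the helpers are simplified and assume the caller has already grown the array when necessary:

    /* Hand out the lowest recycled block if one exists, else extend the file. */
    static long
    sketch_get_free_block(long *free_blocks, int *nfree, long *n_file_blocks)
    {
        if (*nfree > 0)
            return free_blocks[--(*nfree)];     /* last entry == lowest block */
        return (*n_file_blocks)++;
    }

    /* Record a recycled block number, preserving decreasing order. */
    static void
    sketch_release_block(long *free_blocks, int *nfree, long blocknum)
    {
        int     i = (*nfree)++;

        while (i > 0 && free_blocks[i - 1] < blocknum)
        {
            free_blocks[i] = free_blocks[i - 1];    /* shift smaller entries toward the end */
            i--;
        }
        free_blocks[i] = blocknum;
    }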
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 53f2b546f46..2007d7a6949 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -78,7 +78,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.51 2005/10/03 22:55:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.52 2005/10/15 02:49:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -102,7 +102,7 @@
/* GUC variable */
#ifdef TRACE_SORT
-bool trace_sort = false;
+bool trace_sort = false;
#endif
@@ -112,8 +112,7 @@ bool trace_sort = false;
*/
typedef enum
{
- TSS_INITIAL, /* Loading tuples; still within memory
- * limit */
+ TSS_INITIAL, /* Loading tuples; still within memory limit */
TSS_BUILDRUNS, /* Loading tuples; writing to tape */
TSS_SORTEDINMEM, /* Sort completed entirely in memory */
TSS_SORTEDONTAPE, /* Sort completed, final run is on tape */
@@ -135,13 +134,12 @@ struct Tuplesortstate
TupSortStatus status; /* enumerated value as shown above */
bool randomAccess; /* did caller request random access? */
long availMem; /* remaining memory available, in bytes */
- LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp
- * file */
+ LogicalTapeSet *tapeset; /* logtape.c object for tapes in a temp file */
/*
- * These function pointers decouple the routines that must know what
- * kind of tuple we are sorting from the routines that don't need to
- * know it. They are set up by the tuplesort_begin_xxx routines.
+ * These function pointers decouple the routines that must know what kind
+ * of tuple we are sorting from the routines that don't need to know it.
+ * They are set up by the tuplesort_begin_xxx routines.
*
* Function to compare two tuples; result is per qsort() convention, ie:
*
@@ -150,83 +148,78 @@ struct Tuplesortstate
int (*comparetup) (Tuplesortstate *state, const void *a, const void *b);
/*
- * Function to copy a supplied input tuple into palloc'd space. (NB:
- * we assume that a single pfree() is enough to release the tuple
- * later, so the representation must be "flat" in one palloc chunk.)
- * state->availMem must be decreased by the amount of space used.
+ * Function to copy a supplied input tuple into palloc'd space. (NB: we
+ * assume that a single pfree() is enough to release the tuple later, so
+ * the representation must be "flat" in one palloc chunk.) state->availMem
+ * must be decreased by the amount of space used.
*/
void *(*copytup) (Tuplesortstate *state, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of
- * the tuple on tape need not be the same as it is in memory;
- * requirements on the tape representation are given below. After
- * writing the tuple, pfree() it, and increase state->availMem by the
- * amount of memory space thereby released.
+ * Function to write a stored tuple onto tape. The representation of the
+ * tuple on tape need not be the same as it is in memory; requirements on
+ * the tape representation are given below. After writing the tuple,
+ * pfree() it, and increase state->availMem by the amount of memory space
+ * thereby released.
*/
void (*writetup) (Tuplesortstate *state, int tapenum, void *tup);
/*
- * Function to read a stored tuple from tape back into memory. 'len'
- * is the already-read length of the stored tuple. Create and return
- * a palloc'd copy, and decrease state->availMem by the amount of
- * memory space consumed.
+ * Function to read a stored tuple from tape back into memory. 'len' is
+ * the already-read length of the stored tuple. Create and return a
+ * palloc'd copy, and decrease state->availMem by the amount of memory
+ * space consumed.
*/
void *(*readtup) (Tuplesortstate *state, int tapenum, unsigned int len);
/*
- * This array holds pointers to tuples in sort memory. If we are in
- * state INITIAL, the tuples are in no particular order; if we are in
- * state SORTEDINMEM, the tuples are in final sorted order; in states
- * BUILDRUNS and FINALMERGE, the tuples are organized in "heap" order
- * per Algorithm H. (Note that memtupcount only counts the tuples
- * that are part of the heap --- during merge passes, memtuples[]
- * entries beyond TAPERANGE are never in the heap and are used to hold
- * pre-read tuples.) In state SORTEDONTAPE, the array is not used.
+ * This array holds pointers to tuples in sort memory. If we are in state
+ * INITIAL, the tuples are in no particular order; if we are in state
+ * SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
+ * and FINALMERGE, the tuples are organized in "heap" order per Algorithm
+ * H. (Note that memtupcount only counts the tuples that are part of the
+ * heap --- during merge passes, memtuples[] entries beyond TAPERANGE are
+ * never in the heap and are used to hold pre-read tuples.) In state
+ * SORTEDONTAPE, the array is not used.
*/
void **memtuples; /* array of pointers to palloc'd tuples */
int memtupcount; /* number of tuples currently present */
int memtupsize; /* allocated length of memtuples array */
/*
- * While building initial runs, this array holds the run number for
- * each tuple in memtuples[]. During merge passes, we re-use it to
- * hold the input tape number that each tuple in the heap was read
- * from, or to hold the index of the next tuple pre-read from the same
- * tape in the case of pre-read entries. This array is never
- * allocated unless we need to use tapes. Whenever it is allocated,
- * it has the same length as memtuples[].
+ * While building initial runs, this array holds the run number for each
+ * tuple in memtuples[]. During merge passes, we re-use it to hold the
+ * input tape number that each tuple in the heap was read from, or to hold
+ * the index of the next tuple pre-read from the same tape in the case of
+ * pre-read entries. This array is never allocated unless we need to use
+ * tapes. Whenever it is allocated, it has the same length as
+ * memtuples[].
*/
- int *memtupindex; /* index value associated with
- * memtuples[i] */
+ int *memtupindex; /* index value associated with memtuples[i] */
/*
* While building initial runs, this is the current output run number
- * (starting at 0). Afterwards, it is the number of initial runs we
- * made.
+ * (starting at 0). Afterwards, it is the number of initial runs we made.
*/
int currentRun;
/*
- * These variables are only used during merge passes. mergeactive[i]
- * is true if we are reading an input run from (actual) tape number i
- * and have not yet exhausted that run. mergenext[i] is the memtuples
- * index of the next pre-read tuple (next to be loaded into the heap)
- * for tape i, or 0 if we are out of pre-read tuples. mergelast[i]
- * similarly points to the last pre-read tuple from each tape.
- * mergeavailmem[i] is the amount of unused space allocated for tape
- * i. mergefreelist and mergefirstfree keep track of unused locations
- * in the memtuples[] array. memtupindex[] links together pre-read
- * tuples for each tape as well as recycled locations in
- * mergefreelist. It is OK to use 0 as a null link in these lists,
- * because memtuples[0] is part of the merge heap and is never a
- * pre-read tuple.
+ * These variables are only used during merge passes. mergeactive[i] is
+ * true if we are reading an input run from (actual) tape number i and
+ * have not yet exhausted that run. mergenext[i] is the memtuples index
+ * of the next pre-read tuple (next to be loaded into the heap) for tape
+ * i, or 0 if we are out of pre-read tuples. mergelast[i] similarly
+ * points to the last pre-read tuple from each tape. mergeavailmem[i] is
+ * the amount of unused space allocated for tape i. mergefreelist and
+ * mergefirstfree keep track of unused locations in the memtuples[] array.
+ * memtupindex[] links together pre-read tuples for each tape as well as
+ * recycled locations in mergefreelist. It is OK to use 0 as a null link
+ * in these lists, because memtuples[0] is part of the merge heap and is
+ * never a pre-read tuple.
*/
bool mergeactive[MAXTAPES]; /* Active input run source? */
- int mergenext[MAXTAPES]; /* first preread tuple for each
- * source */
- int mergelast[MAXTAPES]; /* last preread tuple for each
- * source */
+ int mergenext[MAXTAPES]; /* first preread tuple for each source */
+ int mergelast[MAXTAPES]; /* last preread tuple for each source */
long mergeavailmem[MAXTAPES]; /* availMem for prereading
* tapes */
long spacePerTape; /* actual per-tape target usage */
@@ -240,17 +233,15 @@ struct Tuplesortstate
*/
int Level; /* Knuth's l */
int destTape; /* current output tape (Knuth's j, less 1) */
- int tp_fib[MAXTAPES]; /* Target Fibonacci run counts
- * (A[]) */
+ int tp_fib[MAXTAPES]; /* Target Fibonacci run counts (A[]) */
int tp_runs[MAXTAPES]; /* # of real runs on each tape */
- int tp_dummy[MAXTAPES]; /* # of dummy runs for each tape
- * (D[]) */
+ int tp_dummy[MAXTAPES]; /* # of dummy runs for each tape (D[]) */
int tp_tapenum[MAXTAPES]; /* Actual tape numbers (TAPE[]) */
/*
- * These variables are used after completion of sorting to keep track
- * of the next tuple to return. (In the tape case, the tape's current
- * read position is also critical state.)
+ * These variables are used after completion of sorting to keep track of
+ * the next tuple to return. (In the tape case, the tape's current read
+ * position is also critical state.)
*/
int result_tape; /* actual tape number of finished output */
int current; /* array index (only used if SORTEDINMEM) */
@@ -258,8 +249,7 @@ struct Tuplesortstate
/* markpos_xxx holds marked position for mark and restore */
long markpos_block; /* tape block# (only used if SORTEDONTAPE) */
- int markpos_offset; /* saved "current", or offset in tape
- * block */
+ int markpos_offset; /* saved "current", or offset in tape block */
bool markpos_eof; /* saved "eof_reached" */
/*
@@ -272,8 +262,8 @@ struct Tuplesortstate
SortFunctionKind *sortFnKinds;
/*
- * These variables are specific to the IndexTuple case; they are set
- * by tuplesort_begin_index and used only by the IndexTuple routines.
+ * These variables are specific to the IndexTuple case; they are set by
+ * tuplesort_begin_index and used only by the IndexTuple routines.
*/
Relation indexRel;
ScanKey indexScanKey;
@@ -458,8 +448,7 @@ tuplesort_begin_common(int workMem, bool randomAccess)
/* Algorithm D variables will be initialized by inittapes, if needed */
- state->result_tape = -1; /* flag that result tape has not been
- * formed */
+ state->result_tape = -1; /* flag that result tape has not been formed */
return state;
}
@@ -505,8 +494,8 @@ tuplesort_begin_heap(TupleDesc tupDesc,
&state->sortFnKinds[i]);
/*
- * We needn't fill in sk_strategy or sk_subtype since these
- * scankeys will never be passed to an index.
+ * We needn't fill in sk_strategy or sk_subtype since these scankeys
+ * will never be passed to an index.
*/
ScanKeyInit(&state->scanKeys[i],
attNums[i],
@@ -606,8 +595,7 @@ tuplesort_end(Tuplesortstate *state)
pfree(state->memtupindex);
/*
- * this stuff might better belong in a variant-specific shutdown
- * routine
+ * this stuff might better belong in a variant-specific shutdown routine
*/
if (state->scanKeys)
pfree(state->scanKeys);
@@ -724,16 +712,16 @@ puttuple_common(Tuplesortstate *state, void *tuple)
/*
* Insert the copied tuple into the heap, with run number
- * currentRun if it can go into the current run, else run
- * number currentRun+1. The tuple can go into the current run
- * if it is >= the first not-yet-output tuple. (Actually, it
- * could go into the current run if it is >= the most recently
- * output tuple ... but that would require keeping around the
- * tuple we last output, and it's simplest to let writetup
- * free each tuple as soon as it's written.)
+ * currentRun if it can go into the current run, else run number
+ * currentRun+1. The tuple can go into the current run if it is
+ * >= the first not-yet-output tuple. (Actually, it could go into
+ * the current run if it is >= the most recently output tuple ...
+ * but that would require keeping around the tuple we last output,
+ * and it's simplest to let writetup free each tuple as soon as
+ * it's written.)
*
- * Note there will always be at least one tuple in the heap at
- * this point; see dumptuples.
+ * Note there will always be at least one tuple in the heap at this
+ * point; see dumptuples.
*/
Assert(state->memtupcount > 0);
if (COMPARETUP(state, tuple, state->memtuples[0]) >= 0)
@@ -742,8 +730,7 @@ puttuple_common(Tuplesortstate *state, void *tuple)
tuplesort_heap_insert(state, tuple, state->currentRun + 1, true);
/*
- * If we are over the memory limit, dump tuples till we're
- * under.
+ * If we are over the memory limit, dump tuples till we're under.
*/
dumptuples(state, false);
break;
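The run-assignment rule in the comment above reduces to a single comparison against the top of the heap; a hedged sketch with hypothetical types (not the backend's actual structures):

    /* heap[0] is the smallest tuple not yet written to the current run. */
    static int
    sketch_choose_run(SketchSortState *st, const void *tuple)
    {
        if (sketch_compare_tuples(st, tuple, st->heap[0]) >= 0)
            return st->current_run;     /* can still be emitted in sorted order */
        return st->current_run + 1;     /* sorts before output already made; defer */
    }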
@@ -770,8 +757,8 @@ tuplesort_performsort(Tuplesortstate *state)
case TSS_INITIAL:
/*
- * We were able to accumulate all the tuples within the
- * allowed amount of memory. Just qsort 'em and we're done.
+ * We were able to accumulate all the tuples within the allowed
+ * amount of memory. Just qsort 'em and we're done.
*/
if (state->memtupcount > 1)
{
@@ -788,10 +775,10 @@ tuplesort_performsort(Tuplesortstate *state)
case TSS_BUILDRUNS:
/*
- * Finish tape-based sort. First, flush all tuples remaining
- * in memory out to tape; then merge until we have a single
- * remaining run (or, if !randomAccess, one run per tape).
- * Note that mergeruns sets the correct state->status.
+ * Finish tape-based sort. First, flush all tuples remaining in
+ * memory out to tape; then merge until we have a single remaining
+ * run (or, if !randomAccess, one run per tape). Note that
+ * mergeruns sets the correct state->status.
*/
dumptuples(state, true);
mergeruns(state);
@@ -880,16 +867,15 @@ tuplesort_gettuple(Tuplesortstate *state, bool forward,
/*
* Backward.
*
- * if all tuples are fetched already then we return last tuple,
- * else - tuple before last returned.
+		 * If all tuples have already been fetched, we return the last tuple;
+		 * otherwise, the tuple before the last one returned.
*/
if (state->eof_reached)
{
/*
- * Seek position is pointing just past the zero tuplen at
- * the end of file; back up to fetch last tuple's ending
- * length word. If seek fails we must have a completely
- * empty file.
+ * Seek position is pointing just past the zero tuplen at the
+ * end of file; back up to fetch last tuple's ending length
+ * word. If seek fails we must have a completely empty file.
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
@@ -900,9 +886,8 @@ tuplesort_gettuple(Tuplesortstate *state, bool forward,
else
{
/*
- * Back up and fetch previously-returned tuple's ending
- * length word. If seek fails, assume we are at start of
- * file.
+ * Back up and fetch previously-returned tuple's ending length
+ * word. If seek fails, assume we are at start of file.
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
@@ -915,17 +900,17 @@ tuplesort_gettuple(Tuplesortstate *state, bool forward,
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
- tuplen + 2 * sizeof(unsigned int)))
+ tuplen + 2 * sizeof(unsigned int)))
{
/*
- * If that fails, presumably the prev tuple is the
- * first in the file. Back up so that it becomes next
- * to read in forward direction (not obviously right,
- * but that is what in-memory case does).
+ * If that fails, presumably the prev tuple is the first
+ * in the file. Back up so that it becomes next to read
+ * in forward direction (not obviously right, but that is
+ * what in-memory case does).
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
- tuplen + sizeof(unsigned int)))
+ tuplen + sizeof(unsigned int)))
elog(ERROR, "bogus tuple length in backward scan");
return NULL;
}
@@ -934,9 +919,9 @@ tuplesort_gettuple(Tuplesortstate *state, bool forward,
tuplen = getlen(state, state->result_tape, false);
/*
- * Now we have the length of the prior tuple, back up and read
- * it. Note: READTUP expects we are positioned after the
- * initial length word of the tuple, so back up to that point.
+ * Now we have the length of the prior tuple, back up and read it.
+ * Note: READTUP expects we are positioned after the initial
+ * length word of the tuple, so back up to that point.
*/
if (!LogicalTapeBackspace(state->tapeset,
state->result_tape,
@@ -968,14 +953,12 @@ tuplesort_gettuple(Tuplesortstate *state, bool forward,
if ((tupIndex = state->mergenext[srcTape]) == 0)
{
/*
- * out of preloaded data on this tape, try to read
- * more
+ * out of preloaded data on this tape, try to read more
*/
mergepreread(state);
/*
- * if still no data, we've reached end of run on this
- * tape
+ * if still no data, we've reached end of run on this tape
*/
if ((tupIndex = state->mergenext[srcTape]) == 0)
return tup;
@@ -1062,12 +1045,12 @@ inittapes(Tuplesortstate *state)
USEMEM(state, GetMemoryChunkSpace(state->memtupindex));
/*
- * Convert the unsorted contents of memtuples[] into a heap. Each
- * tuple is marked as belonging to run number zero.
+ * Convert the unsorted contents of memtuples[] into a heap. Each tuple is
+ * marked as belonging to run number zero.
*
* NOTE: we pass false for checkIndex since there's no point in comparing
- * indexes in this step, even though we do intend the indexes to be
- * part of the sort key...
+ * indexes in this step, even though we do intend the indexes to be part
+ * of the sort key...
*/
ntuples = state->memtupcount;
state->memtupcount = 0; /* make the heap empty */
@@ -1150,8 +1133,8 @@ mergeruns(Tuplesortstate *state)
/*
* If we produced only one initial run (quite likely if the total data
- * volume is between 1X and 2X workMem), we can just use that tape as
- * the finished output, rather than doing a useless merge.
+ * volume is between 1X and 2X workMem), we can just use that tape as the
+ * finished output, rather than doing a useless merge.
*/
if (state->currentRun == 1)
{
@@ -1183,8 +1166,8 @@ mergeruns(Tuplesortstate *state)
}
/*
- * If we don't have to produce a materialized sorted tape,
- * quit as soon as we're down to one real/dummy run per tape.
+ * If we don't have to produce a materialized sorted tape, quit as
+ * soon as we're down to one real/dummy run per tape.
*/
if (!state->randomAccess && allOneRun)
{
@@ -1215,8 +1198,7 @@ mergeruns(Tuplesortstate *state)
state->tp_runs[TAPERANGE - 1] = 0;
/*
- * reassign tape units per step D6; note we no longer care about
- * A[]
+ * reassign tape units per step D6; note we no longer care about A[]
*/
svTape = state->tp_tapenum[TAPERANGE];
svDummy = state->tp_dummy[TAPERANGE];
@@ -1233,12 +1215,12 @@ mergeruns(Tuplesortstate *state)
}
/*
- * Done. Knuth says that the result is on TAPE[1], but since we
- * exited the loop without performing the last iteration of step D6,
- * we have not rearranged the tape unit assignment, and therefore the
- * result is on TAPE[T]. We need to do it this way so that we can
- * freeze the final output tape while rewinding it. The last
- * iteration of step D6 would be a waste of cycles anyway...
+ * Done. Knuth says that the result is on TAPE[1], but since we exited
+ * the loop without performing the last iteration of step D6, we have not
+ * rearranged the tape unit assignment, and therefore the result is on
+ * TAPE[T]. We need to do it this way so that we can freeze the final
+ * output tape while rewinding it. The last iteration of step D6 would be
+ * a waste of cycles anyway...
*/
state->result_tape = state->tp_tapenum[TAPERANGE];
LogicalTapeFreeze(state->tapeset, state->result_tape);
@@ -1262,16 +1244,15 @@ mergeonerun(Tuplesortstate *state)
spaceFreed;
/*
- * Start the merge by loading one tuple from each active source tape
- * into the heap. We can also decrease the input run/dummy run
- * counts.
+ * Start the merge by loading one tuple from each active source tape into
+ * the heap. We can also decrease the input run/dummy run counts.
*/
beginmerge(state);
/*
- * Execute merge by repeatedly extracting lowest tuple in heap,
- * writing it out, and replacing it with next tuple from same tape (if
- * there is another one).
+ * Execute merge by repeatedly extracting lowest tuple in heap, writing it
+ * out, and replacing it with next tuple from same tape (if there is
+ * another one).
*/
while (state->memtupcount > 0)
{
@@ -1304,8 +1285,8 @@ mergeonerun(Tuplesortstate *state)
}
/*
- * When the heap empties, we're done. Write an end-of-run marker on
- * the output tape, and increment its count of real runs.
+ * When the heap empties, we're done. Write an end-of-run marker on the
+ * output tape, and increment its count of real runs.
*/
markrunend(state, destTape);
state->tp_runs[TAPERANGE]++;
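
The loop just described is a classic heap-driven k-way merge: load one element per active source, repeatedly pop the smallest, emit it, and refill from the same source. A self-contained sketch with in-memory arrays standing in for the sorted input tapes (HeapItem, heap_insert and heap_remove_min are illustrative names, not tuplesort's):

    #include <stdio.h>

    #define NSOURCES 3

    typedef struct HeapItem
    {
        int value;   /* current element from that source */
        int src;     /* which source it came from */
    } HeapItem;

    static HeapItem heap[NSOURCES];
    static int      heapcount;

    static void heap_insert(HeapItem it)
    {
        int j = heapcount++;

        while (j > 0 && heap[(j - 1) / 2].value > it.value)
        {
            heap[j] = heap[(j - 1) / 2];      /* bubble the hole toward the root */
            j = (j - 1) / 2;
        }
        heap[j] = it;
    }

    static HeapItem heap_remove_min(void)
    {
        HeapItem min = heap[0];
        HeapItem last = heap[--heapcount];
        int      j = 0;

        for (;;)
        {
            int child = 2 * j + 1;

            if (child >= heapcount)
                break;
            if (child + 1 < heapcount && heap[child + 1].value < heap[child].value)
                child++;
            if (last.value <= heap[child].value)
                break;
            heap[j] = heap[child];            /* pull the smaller child up */
            j = child;
        }
        heap[j] = last;
        return min;
    }

    int main(void)
    {
        /* three pre-sorted "runs" standing in for sorted tapes */
        int runs[NSOURCES][4] = {{1, 4, 7, 10}, {2, 5, 8, 11}, {3, 6, 9, 12}};
        int next[NSOURCES] = {0, 0, 0};
        int i;

        /* load one element from each source, as the begin-merge step does */
        for (i = 0; i < NSOURCES; i++)
        {
            HeapItem it = {runs[i][0], i};

            next[i] = 1;
            heap_insert(it);
        }

        /* pop smallest, emit, refill from the same source if it has more */
        while (heapcount > 0)
        {
            HeapItem it = heap_remove_min();

            printf("%d ", it.value);
            if (next[it.src] < 4)
            {
                HeapItem repl = {runs[it.src][next[it.src]++], it.src};

                heap_insert(repl);
            }
        }
        printf("\n");                          /* prints 1 2 3 ... 12 in order */
        return 0;
    }
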
@@ -1341,8 +1322,7 @@ beginmerge(Tuplesortstate *state)
memset(state->mergelast, 0, sizeof(state->mergelast));
memset(state->mergeavailmem, 0, sizeof(state->mergeavailmem));
state->mergefreelist = 0; /* nothing in the freelist */
- state->mergefirstfree = MAXTAPES; /* first slot available for
- * preread */
+ state->mergefirstfree = MAXTAPES; /* first slot available for preread */
/* Adjust run counts and mark the active tapes */
activeTapes = 0;
@@ -1361,8 +1341,8 @@ beginmerge(Tuplesortstate *state)
}
/*
- * Initialize space allocation to let each active input tape have an
- * equal share of preread space.
+ * Initialize space allocation to let each active input tape have an equal
+ * share of preread space.
*/
Assert(activeTapes > 0);
state->spacePerTape = state->availMem / activeTapes;
@@ -1373,8 +1353,8 @@ beginmerge(Tuplesortstate *state)
}
/*
- * Preread as many tuples as possible (and at least one) from each
- * active tape
+ * Preread as many tuples as possible (and at least one) from each active
+ * tape
*/
mergepreread(state);
@@ -1432,8 +1412,8 @@ mergepreread(Tuplesortstate *state)
continue;
/*
- * Read tuples from this tape until it has used up its free
- * memory, but ensure that we have at least one.
+ * Read tuples from this tape until it has used up its free memory,
+ * but ensure that we have at least one.
*/
priorAvail = state->availMem;
state->availMem = state->mergeavailmem[srcTape];
@@ -1508,8 +1488,8 @@ dumptuples(Tuplesortstate *state, bool alltuples)
(LACKMEM(state) && state->memtupcount > 1))
{
/*
- * Dump the heap's frontmost entry, and sift up to remove it from
- * the heap.
+ * Dump the heap's frontmost entry, and sift up to remove it from the
+ * heap.
*/
Assert(state->memtupcount > 0);
WRITETUP(state, state->tp_tapenum[state->destTape],
@@ -1680,8 +1660,8 @@ tuplesort_heap_insert(Tuplesortstate *state, void *tuple,
memtupindex = state->memtupindex;
/*
- * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth
- * is using 1-based array indexes, not 0-based.
+ * Sift-up the new entry, per Knuth 5.2.3 exercise 16. Note that Knuth is
+ * using 1-based array indexes, not 0-based.
*/
j = state->memtupcount++;
while (j > 0)
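
A small self-contained illustration of the index translation the comment alludes to: Knuth states the algorithm for 1-based arrays, where the parent of node j is j/2; with the 0-based arrays used here, the parent of slot j is (j - 1) / 2 and the children of slot i are 2*i + 1 and 2*i + 2. sift_up_insert below is an illustrative stand-in, not the real tuplesort_heap_insert:

    #include <assert.h>
    #include <stdio.h>

    static int heap[16];
    static int heapcount;

    static void sift_up_insert(int value)
    {
        int j = heapcount++;          /* start at the first free slot */

        while (j > 0)
        {
            int parent = (j - 1) / 2; /* 1-based j/2 becomes 0-based (j-1)/2 */

            if (heap[parent] <= value)
                break;
            heap[j] = heap[parent];   /* move the larger parent down */
            j = parent;
        }
        heap[j] = value;
    }

    int main(void)
    {
        int values[] = {5, 3, 8, 1};
        int i;

        for (i = 0; i < 4; i++)
            sift_up_insert(values[i]);
        assert(heap[0] == 1);          /* smallest value has bubbled to the root */
        printf("root = %d\n", heap[0]);
        return 0;
    }
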
@@ -1805,12 +1785,12 @@ SelectSortFunction(Oid sortOperator,
Oid opclass = InvalidOid;
/*
- * Search pg_amop to see if the target operator is registered as the
- * "<" or ">" operator of any btree opclass. It's possible that it
- * might be registered both ways (eg, if someone were to build a
- * "reverse sort" opclass for some reason); prefer the "<" case if so.
- * If the operator is registered the same way in multiple opclasses,
- * assume we can use the associated comparator function from any one.
+ * Search pg_amop to see if the target operator is registered as the "<"
+ * or ">" operator of any btree opclass. It's possible that it might be
+ * registered both ways (eg, if someone were to build a "reverse sort"
+ * opclass for some reason); prefer the "<" case if so. If the operator is
+ * registered the same way in multiple opclasses, assume we can use the
+ * associated comparator function from any one.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(sortOperator),
@@ -1854,11 +1834,11 @@ SelectSortFunction(Oid sortOperator,
}
/*
- * Can't find a comparator, so use the operator as-is. Decide whether
- * it is forward or reverse sort by looking at its name (grotty, but
- * this only matters for deciding which end NULLs should get sorted
- * to). XXX possibly better idea: see whether its selectivity
- * function is scalargtcmp?
+ * Can't find a comparator, so use the operator as-is. Decide whether it
+ * is forward or reverse sort by looking at its name (grotty, but this
+ * only matters for deciding which end NULLs should get sorted to). XXX
+ * possibly better idea: see whether its selectivity function is
+ * scalargtcmp?
*/
tuple = SearchSysCache(OPEROID,
ObjectIdGetDatum(sortOperator),
@@ -2142,15 +2122,15 @@ comparetup_index(Tuplesortstate *state, const void *a, const void *b)
* If btree has asked us to enforce uniqueness, complain if two equal
* tuples are detected (unless there was at least one NULL field).
*
- * It is sufficient to make the test here, because if two tuples are
- * equal they *must* get compared at some stage of the sort ---
- * otherwise the sort algorithm wouldn't have checked whether one must
- * appear before the other.
+ * It is sufficient to make the test here, because if two tuples are equal
+ * they *must* get compared at some stage of the sort --- otherwise the
+ * sort algorithm wouldn't have checked whether one must appear before the
+ * other.
*
- * Some rather brain-dead implementations of qsort will sometimes call
- * the comparison routine to compare a value to itself. (At this
- * writing only QNX 4 is known to do such silly things.) Don't raise
- * a bogus error in that case.
+ * Some rather brain-dead implementations of qsort will sometimes call the
+ * comparison routine to compare a value to itself. (At this writing only
+ * QNX 4 is known to do such silly things.) Don't raise a bogus error in
+ * that case.
*/
if (state->enforceUnique && !equal_hasnull && tuple1 != tuple2)
ereport(ERROR,
@@ -2159,10 +2139,10 @@ comparetup_index(Tuplesortstate *state, const void *a, const void *b)
errdetail("Table contains duplicated values.")));
/*
- * If key values are equal, we sort on ItemPointer. This does not
- * affect validity of the finished index, but it offers cheap
- * insurance against performance problems with bad qsort
- * implementations that have trouble with large numbers of equal keys.
+ * If key values are equal, we sort on ItemPointer. This does not affect
+ * validity of the finished index, but it offers cheap insurance against
+ * performance problems with bad qsort implementations that have trouble
+ * with large numbers of equal keys.
*/
{
BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
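
The behaviour described above can be shown with an ordinary qsort comparator over plain structs (Row, row_cmp and enforce_unique are illustrative names; the NULL-field exemption mentioned in the comment is omitted): equal keys raise a duplicate complaint only when the two operands are genuinely different elements, guarding against qsort implementations that compare an element with itself, and otherwise fall back to a block/offset tiebreak so equal keys still have a deterministic order.

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Row
    {
        int      key;       /* stand-in for the indexed column */
        unsigned block;     /* stand-in for the item pointer's block number */
        unsigned offset;    /* stand-in for the item pointer's offset */
    } Row;

    static int enforce_unique = 1;

    static int row_cmp(const void *a, const void *b)
    {
        const Row *ra = (const Row *) a;
        const Row *rb = (const Row *) b;

        if (ra->key != rb->key)
            return (ra->key < rb->key) ? -1 : 1;

        /* equal keys: complain, but never for a self-comparison */
        if (enforce_unique && ra != rb)
        {
            fprintf(stderr, "could not create unique index: key %d duplicated\n",
                    ra->key);
            exit(1);
        }

        /* tiebreak on physical position so equal keys still sort stably */
        if (ra->block != rb->block)
            return (ra->block < rb->block) ? -1 : 1;
        if (ra->offset != rb->offset)
            return (ra->offset < rb->offset) ? -1 : 1;
        return 0;
    }

    int main(void)
    {
        Row rows[] = {{3, 10, 2}, {1, 11, 5}, {2, 10, 7}};

        qsort(rows, 3, sizeof(Row), row_cmp);
        printf("%d %d %d\n", rows[0].key, rows[1].key, rows[2].key);  /* 1 2 3 */
        return 0;
    }
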
diff --git a/src/backend/utils/sort/tuplestore.c b/src/backend/utils/sort/tuplestore.c
index 1c00e06371f..d409121418e 100644
--- a/src/backend/utils/sort/tuplestore.c
+++ b/src/backend/utils/sort/tuplestore.c
@@ -36,7 +36,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.22 2005/05/06 17:24:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.23 2005/10/15 02:49:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -72,41 +72,41 @@ struct Tuplestorestate
BufFile *myfile; /* underlying file, or NULL if none */
/*
- * These function pointers decouple the routines that must know what
- * kind of tuple we are handling from the routines that don't need to
- * know it. They are set up by the tuplestore_begin_xxx routines.
+ * These function pointers decouple the routines that must know what kind
+ * of tuple we are handling from the routines that don't need to know it.
+ * They are set up by the tuplestore_begin_xxx routines.
*
- * (Although tuplestore.c currently only supports heap tuples, I've
- * copied this part of tuplesort.c so that extension to other kinds of
- * objects will be easy if it's ever needed.)
+ * (Although tuplestore.c currently only supports heap tuples, I've copied
+ * this part of tuplesort.c so that extension to other kinds of objects
+ * will be easy if it's ever needed.)
*
* Function to copy a supplied input tuple into palloc'd space. (NB: we
- * assume that a single pfree() is enough to release the tuple later,
- * so the representation must be "flat" in one palloc chunk.)
- * state->availMem must be decreased by the amount of space used.
+ * assume that a single pfree() is enough to release the tuple later, so
+ * the representation must be "flat" in one palloc chunk.) state->availMem
+ * must be decreased by the amount of space used.
*/
void *(*copytup) (Tuplestorestate *state, void *tup);
/*
- * Function to write a stored tuple onto tape. The representation of
- * the tuple on tape need not be the same as it is in memory;
- * requirements on the tape representation are given below. After
- * writing the tuple, pfree() it, and increase state->availMem by the
- * amount of memory space thereby released.
+ * Function to write a stored tuple onto tape. The representation of the
+ * tuple on tape need not be the same as it is in memory; requirements on
+ * the tape representation are given below. After writing the tuple,
+ * pfree() it, and increase state->availMem by the amount of memory space
+ * thereby released.
*/
void (*writetup) (Tuplestorestate *state, void *tup);
/*
- * Function to read a stored tuple from tape back into memory. 'len'
- * is the already-read length of the stored tuple. Create and return
- * a palloc'd copy, and decrease state->availMem by the amount of
- * memory space consumed.
+ * Function to read a stored tuple from tape back into memory. 'len' is
+ * the already-read length of the stored tuple. Create and return a
+ * palloc'd copy, and decrease state->availMem by the amount of memory
+ * space consumed.
*/
void *(*readtup) (Tuplestorestate *state, unsigned int len);
/*
- * This array holds pointers to tuples in memory if we are in state
- * INMEM. In states WRITEFILE and READFILE it's not used.
+ * This array holds pointers to tuples in memory if we are in state INMEM.
+ * In states WRITEFILE and READFILE it's not used.
*/
void **memtuples; /* array of pointers to palloc'd tuples */
int memtupcount; /* number of tuples currently present */
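
A compact sketch of the decoupling these function pointers buy, with illustrative names (ObjectStore, string_copy, string_write, string_read): the generic spooling routine works entirely through the hooks and never learns what kind of object it is handling.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct ObjectStore
    {
        void *(*copyobj) (const void *obj);     /* copy into freshly allocated space */
        void  (*writeobj) (FILE *f, void *obj); /* write to "tape" and free */
        void *(*readobj) (FILE *f);             /* read back into memory */
    } ObjectStore;

    /* One concrete implementation: NUL-terminated strings. */
    static void *string_copy(const void *obj)
    {
        size_t n = strlen((const char *) obj) + 1;
        char  *copy = malloc(n);

        memcpy(copy, obj, n);
        return copy;
    }

    static void string_write(FILE *f, void *obj)
    {
        unsigned int len = (unsigned int) strlen((char *) obj);

        fwrite(&len, sizeof(len), 1, f);        /* length word, then payload */
        fwrite(obj, 1, len, f);
        free(obj);
    }

    static void *string_read(FILE *f)
    {
        unsigned int len;
        char        *s;

        if (fread(&len, sizeof(len), 1, f) != 1)
            return NULL;
        s = malloc(len + 1);
        fread(s, 1, len, f);
        s[len] = '\0';
        return s;
    }

    /* Generic code: spools an object to file and reads it back, with no idea
     * that the object happens to be a string. */
    static void *roundtrip(const ObjectStore *store, const void *obj, FILE *f)
    {
        void *copy = store->copyobj(obj);

        store->writeobj(f, copy);
        rewind(f);
        return store->readobj(f);
    }

    int main(void)
    {
        ObjectStore strings = {string_copy, string_write, string_read};
        FILE       *f = tmpfile();
        char       *back = roundtrip(&strings, "hello", f);

        printf("%s\n", back);                    /* hello */
        free(back);
        fclose(f);
        return 0;
    }
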
@@ -115,17 +115,17 @@ struct Tuplestorestate
/*
* These variables are used to keep track of the current position.
*
- * In state WRITEFILE, the current file seek position is the write point,
- * and the read position is remembered in readpos_xxx; in state
- * READFILE, the current file seek position is the read point, and the
- * write position is remembered in writepos_xxx. (The write position
- * is the same as EOF, but since BufFileSeek doesn't currently
- * implement SEEK_END, we have to remember it explicitly.)
+ * In state WRITEFILE, the current file seek position is the write point, and
+ * the read position is remembered in readpos_xxx; in state READFILE, the
+ * current file seek position is the read point, and the write position is
+ * remembered in writepos_xxx. (The write position is the same as EOF,
+ * but since BufFileSeek doesn't currently implement SEEK_END, we have to
+ * remember it explicitly.)
*
- * Special case: if we are in WRITEFILE state and eof_reached is true,
- * then the read position is implicitly equal to the write position
- * (and hence to the file seek position); this way we need not update
- * the readpos_xxx variables on each write.
+ * Special case: if we are in WRITEFILE state and eof_reached is true, then
+ * the read position is implicitly equal to the write position (and hence
+ * to the file seek position); this way we need not update the readpos_xxx
+ * variables on each write.
*/
bool eof_reached; /* read reached EOF (always valid) */
int current; /* next array index (valid if INMEM) */
@@ -429,7 +429,7 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
&state->writepos_file, &state->writepos_offset);
if (!state->eof_reached)
if (BufFileSeek(state->myfile,
- state->readpos_file, state->readpos_offset,
+ state->readpos_file, state->readpos_offset,
SEEK_SET) != 0)
elog(ERROR, "seek failed");
state->status = TSS_READFILE;
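
A standalone sketch of the single-seek-pointer bookkeeping described in the struct comment and exercised by the state switch just shown, using stdio offsets in place of BufFile block/offset pairs (Spool, spool_put and spool_get are illustrative names): whichever position the real file pointer currently serves, the other one is remembered in a variable and restored when the state flips.

    #include <stdio.h>

    typedef enum { WRITING, READING } SpoolState;

    typedef struct Spool
    {
        FILE      *f;
        SpoolState status;
        long       readpos;     /* remembered while status == WRITING */
        long       writepos;    /* remembered while status == READING */
    } Spool;

    static void spool_put(Spool *sp, int value)
    {
        if (sp->status == READING)
        {
            sp->readpos = ftell(sp->f);          /* remember where reading stopped */
            fseek(sp->f, sp->writepos, SEEK_SET);
            sp->status = WRITING;
        }
        fwrite(&value, sizeof(value), 1, sp->f);
    }

    static int spool_get(Spool *sp, int *value)
    {
        if (sp->status == WRITING)
        {
            sp->writepos = ftell(sp->f);         /* remember the write point (EOF) */
            fseek(sp->f, sp->readpos, SEEK_SET);
            sp->status = READING;
        }
        return fread(value, sizeof(*value), 1, sp->f) == 1;
    }

    int main(void)
    {
        Spool sp = {tmpfile(), WRITING, 0, 0};
        int   v;

        spool_put(&sp, 1);
        spool_put(&sp, 2);
        while (spool_get(&sp, &v))
            printf("%d\n", v);                   /* 1 then 2 */
        spool_put(&sp, 3);                       /* switch back to writing at EOF */
        fclose(sp.f);
        return 0;
    }
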
@@ -454,11 +454,11 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
/*
* Backward.
*
- * if all tuples are fetched already then we return last tuple,
- * else - tuple before last returned.
+ * if all tuples are fetched already then we return last tuple, else
+ * - tuple before last returned.
*
- * Back up to fetch previously-returned tuple's ending length
- * word. If seek fails, assume we are at start of file.
+ * Back up to fetch previously-returned tuple's ending length word.
+ * If seek fails, assume we are at start of file.
*/
if (BufFileSeek(state->myfile, 0, -(long) sizeof(unsigned int),
SEEK_CUR) != 0)
@@ -476,17 +476,17 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
* Back up to get ending length word of tuple before it.
*/
if (BufFileSeek(state->myfile, 0,
- -(long) (tuplen + 2 * sizeof(unsigned int)),
+ -(long) (tuplen + 2 * sizeof(unsigned int)),
SEEK_CUR) != 0)
{
/*
- * If that fails, presumably the prev tuple is the
- * first in the file. Back up so that it becomes next
- * to read in forward direction (not obviously right,
- * but that is what in-memory case does).
+ * If that fails, presumably the prev tuple is the first
+ * in the file. Back up so that it becomes next to read
+ * in forward direction (not obviously right, but that is
+ * what in-memory case does).
*/
if (BufFileSeek(state->myfile, 0,
- -(long) (tuplen + sizeof(unsigned int)),
+ -(long) (tuplen + sizeof(unsigned int)),
SEEK_CUR) != 0)
elog(ERROR, "bogus tuple length in backward scan");
return NULL;
@@ -495,9 +495,9 @@ tuplestore_gettuple(Tuplestorestate *state, bool forward,
}
/*
- * Now we have the length of the prior tuple, back up and read
- * it. Note: READTUP expects we are positioned after the
- * initial length word of the tuple, so back up to that point.
+ * Now we have the length of the prior tuple, back up and read it.
+ * Note: READTUP expects we are positioned after the initial
+ * length word of the tuple, so back up to that point.
*/
if (BufFileSeek(state->myfile, 0,
-(long) tuplen,
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
index f8dcf43b64d..fa6bd4a3c58 100644
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -21,7 +21,7 @@
* TransactionIdDidCommit will both return true. If we check only
* TransactionIdDidCommit, we could consider a tuple committed when a
* later GetSnapshotData call will still think the originating transaction
- * is in progress, which leads to application-level inconsistency. The
+ * is in progress, which leads to application-level inconsistency. The
* upshot is that we gotta check TransactionIdIsInProgress first in all
* code paths, except for a few cases where we are looking at
* subtransactions of our own main transaction and so there can't be any
@@ -32,7 +32,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.90 2005/08/20 00:39:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.91 2005/10/15 02:49:37 momjian Exp $
*
*-------------------------------------------------------------------------
*/
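
The ordering requirement spelled out in the header comment above can be demonstrated with a toy model (running[] and clog_committed[] are stand-ins, not PostgreSQL structures): commit first flips the commit-status bit and only afterwards leaves the set of running transactions, so a checker that asks "committed?" before "in progress?" can call a tuple committed while concurrent snapshots still treat its inserting transaction as running.

    #include <stdio.h>

    #define MAXXID 16

    static int running[MAXXID];         /* toy proc array: is xid still running? */
    static int clog_committed[MAXXID];  /* toy clog: has xid committed? */

    static const char *
    classify(unsigned xid)
    {
        /* correct order: the in-progress check comes first */
        if (running[xid])
            return "in progress";
        if (clog_committed[xid])
            return "committed";
        return "aborted or crashed";
    }

    int main(void)
    {
        unsigned xid = 7;

        running[xid] = 1;
        printf("before commit: %s\n", classify(xid));   /* in progress */

        clog_committed[xid] = 1;   /* step 1 of commit: status recorded ... */
        /* ... a concurrent backend looking now finds both tests true */
        printf("mid-commit:    %s\n", classify(xid));   /* still "in progress" */

        running[xid] = 0;          /* step 2: leaves the running set */
        printf("after commit:  %s\n", classify(xid));   /* committed */
        return 0;
    }
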
@@ -559,8 +559,7 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
else if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(tuple)))
{
if (HeapTupleHeaderGetCmin(tuple) >= curcid)
- return HeapTupleInvisible; /* inserted after scan
- * started */
+ return HeapTupleInvisible; /* inserted after scan started */
if (tuple->t_infomask & HEAP_XMAX_INVALID) /* xid invalid */
return HeapTupleMayBeUpdated;
@@ -581,11 +580,9 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
Assert(TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmax(tuple)));
if (HeapTupleHeaderGetCmax(tuple) >= curcid)
- return HeapTupleSelfUpdated; /* updated after scan
- * started */
+ return HeapTupleSelfUpdated; /* updated after scan started */
else
- return HeapTupleInvisible; /* updated before scan
- * started */
+ return HeapTupleInvisible; /* updated before scan started */
}
else if (TransactionIdIsInProgress(HeapTupleHeaderGetXmin(tuple)))
return HeapTupleInvisible;
@@ -632,8 +629,7 @@ HeapTupleSatisfiesUpdate(HeapTupleHeader tuple, CommandId curcid,
if (tuple->t_infomask & HEAP_IS_LOCKED)
return HeapTupleMayBeUpdated;
if (HeapTupleHeaderGetCmax(tuple) >= curcid)
- return HeapTupleSelfUpdated; /* updated after scan
- * started */
+ return HeapTupleSelfUpdated; /* updated after scan started */
else
return HeapTupleInvisible; /* updated before scan started */
}
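
The command-id tests in the branches above reduce to simple comparisons against the scan's current command id. A self-contained sketch with illustrative types (MyTuple and this CommandId typedef are stand-ins): work done by the same or a later command than the scan stays invisible, and a deletion by a later command reports "self updated" rather than hiding the row outright.

    #include <stdio.h>

    typedef unsigned int CommandId;

    typedef enum { VISIBLE, INVISIBLE, SELF_UPDATED } Verdict;

    typedef struct MyTuple
    {
        CommandId cmin;     /* command that inserted it */
        int       deleted;  /* has our own transaction deleted it? */
        CommandId cmax;     /* command that deleted it, if so */
    } MyTuple;

    static Verdict
    check_own_tuple(const MyTuple *tup, CommandId curcid)
    {
        if (tup->cmin >= curcid)
            return INVISIBLE;           /* inserted after this scan started */
        if (!tup->deleted)
            return VISIBLE;
        if (tup->cmax >= curcid)
            return SELF_UPDATED;        /* deleted after this scan started */
        return INVISIBLE;               /* deleted before this scan started */
    }

    int main(void)
    {
        MyTuple t = {2, 1, 5};          /* inserted by command 2, deleted by command 5 */

        printf("%d\n", check_own_tuple(&t, 1));   /* INVISIBLE: not yet inserted */
        printf("%d\n", check_own_tuple(&t, 4));   /* SELF_UPDATED: delete came later */
        printf("%d\n", check_own_tuple(&t, 7));   /* INVISIBLE: already deleted */
        return 0;
    }
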
@@ -945,12 +941,12 @@ HeapTupleSatisfiesSnapshot(HeapTupleHeader tuple, Snapshot snapshot,
* By here, the inserting transaction has committed - have to check
* when...
*
- * Note that the provided snapshot contains only top-level XIDs, so we
- * have to convert a subxact XID to its parent for comparison.
- * However, we can make first-pass range checks with the given XID,
- * because a subxact with XID < xmin has surely also got a parent with
- * XID < xmin, while one with XID >= xmax must belong to a parent that
- * was not yet committed at the time of this snapshot.
+ * Note that the provided snapshot contains only top-level XIDs, so we have
+ * to convert a subxact XID to its parent for comparison. However, we can
+ * make first-pass range checks with the given XID, because a subxact with
+ * XID < xmin has surely also got a parent with XID < xmin, while one with
+ * XID >= xmax must belong to a parent that was not yet committed at the
+ * time of this snapshot.
*/
if (TransactionIdFollowsOrEquals(HeapTupleHeaderGetXmin(tuple),
snapshot->xmin))
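
The range-check shortcut described above can be sketched with a toy snapshot (the Snapshot fields, parent[] table and in_snapshot helper are simplified stand-ins; XID wraparound and the separate did-it-actually-commit check are ignored): only XIDs falling between the snapshot's xmin and xmax ever need the subtransaction-to-parent conversion.

    #include <stdio.h>

    typedef unsigned int Xid;

    #define MAXXID 32

    static Xid parent[MAXXID];          /* toy subtransaction -> parent table */

    typedef struct Snapshot
    {
        Xid xmin;                       /* every top-level XID < xmin is finished */
        Xid xmax;                       /* every XID >= xmax was still in the future */
        Xid running[4];                 /* top-level XIDs in progress at snapshot time */
        int nrunning;
    } Snapshot;

    static int
    in_snapshot(Xid xid, const Snapshot *s)
    {
        int i;

        for (i = 0; i < s->nrunning; i++)
            if (s->running[i] == xid)
                return 1;
        return 0;
    }

    /* Was the inserting (sub)transaction committed as of this snapshot? */
    static int
    xmin_committed_in_snapshot(Xid xmin, const Snapshot *s)
    {
        if (xmin < s->xmin)
            return 1;                   /* range check: the parent must also be < xmin */
        if (xmin >= s->xmax)
            return 0;                   /* range check: the parent can't have committed yet */
        /* only the in-between range needs the parent conversion */
        return !in_snapshot(parent[xmin], s);
    }

    int main(void)
    {
        Snapshot snap = {10, 20, {12}, 1};

        parent[11] = 11;
        parent[12] = 12;
        parent[15] = 12;                /* xid 15 is a subxact of running xid 12 */
        parent[16] = 11;                /* xid 16 is a subxact of finished xid 11 */

        printf("%d\n", xmin_committed_in_snapshot(5, &snap));    /* 1: below xmin */
        printf("%d\n", xmin_committed_in_snapshot(25, &snap));   /* 0: at/after xmax */
        printf("%d\n", xmin_committed_in_snapshot(15, &snap));   /* 0: parent 12 still running */
        printf("%d\n", xmin_committed_in_snapshot(16, &snap));   /* 1: parent 11 finished */
        return 0;
    }
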
@@ -1074,8 +1070,8 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
/*
* Has inserting transaction committed?
*
- * If the inserting transaction aborted, then the tuple was never visible
- * to any other transaction, so we can delete it immediately.
+ * If the inserting transaction aborted, then the tuple was never visible to
+ * any other transaction, so we can delete it immediately.
*/
if (!(tuple->t_infomask & HEAP_XMIN_COMMITTED))
{
@@ -1135,8 +1131,7 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
else
{
/*
- * Not in Progress, Not Committed, so either Aborted or
- * crashed
+ * Not in Progress, Not Committed, so either Aborted or crashed
*/
tuple->t_infomask |= HEAP_XMIN_INVALID;
SetBufferCommitInfoNeedsSave(buffer);
@@ -1147,8 +1142,8 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
}
/*
- * Okay, the inserter committed, so it was good at some point. Now
- * what about the deleting transaction?
+ * Okay, the inserter committed, so it was good at some point. Now what
+ * about the deleting transaction?
*/
if (tuple->t_infomask & HEAP_XMAX_INVALID)
return HEAPTUPLE_LIVE;
@@ -1156,10 +1151,10 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
if (tuple->t_infomask & HEAP_IS_LOCKED)
{
/*
- * "Deleting" xact really only locked it, so the tuple
- * is live in any case. However, we must make sure that either
- * XMAX_COMMITTED or XMAX_INVALID gets set once the xact is gone;
- * otherwise it is unsafe to recycle CLOG status after vacuuming.
+ * "Deleting" xact really only locked it, so the tuple is live in any
+ * case. However, we must make sure that either XMAX_COMMITTED or
+ * XMAX_INVALID gets set once the xact is gone; otherwise it is unsafe
+ * to recycle CLOG status after vacuuming.
*/
if (!(tuple->t_infomask & HEAP_XMAX_COMMITTED))
{
@@ -1175,9 +1170,9 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
}
/*
- * We don't really care whether xmax did commit, abort or
- * crash. We know that xmax did lock the tuple, but
- * it did not and will never actually update it.
+ * We don't really care whether xmax did commit, abort or crash.
+ * We know that xmax did lock the tuple, but it did not and will
+ * never actually update it.
*/
tuple->t_infomask |= HEAP_XMAX_INVALID;
SetBufferCommitInfoNeedsSave(buffer);
@@ -1204,8 +1199,7 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
else
{
/*
- * Not in Progress, Not Committed, so either Aborted or
- * crashed
+ * Not in Progress, Not Committed, so either Aborted or crashed
*/
tuple->t_infomask |= HEAP_XMAX_INVALID;
SetBufferCommitInfoNeedsSave(buffer);
@@ -1223,10 +1217,10 @@ HeapTupleSatisfiesVacuum(HeapTupleHeader tuple, TransactionId OldestXmin,
HeapTupleHeaderGetXmax(tuple)))
{
/*
- * Inserter also deleted it, so it was never visible to anyone
- * else. However, we can only remove it early if it's not an
- * updated tuple; else its parent tuple is linking to it via t_ctid,
- * and this tuple mustn't go away before the parent does.
+ * Inserter also deleted it, so it was never visible to anyone else.
+ * However, we can only remove it early if it's not an updated tuple;
+ * else its parent tuple is linking to it via t_ctid, and this tuple
+ * mustn't go away before the parent does.
*/
if (!(tuple->t_infomask & HEAP_UPDATED))
return HEAPTUPLE_DEAD;
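
Taken together, the cases above form a small decision tree. A compressed, self-contained sketch of that structure (ToyTuple and vacuum_verdict are illustrative names; hint-bit maintenance, the OldestXmin arithmetic and in-progress deleters are simplified away):

    #include <stdio.h>

    typedef enum { XACT_IN_PROGRESS, XACT_COMMITTED, XACT_ABORTED } XactStatus;
    typedef enum { TUPLE_LIVE, TUPLE_DEAD, TUPLE_RECENTLY_DEAD,
                   TUPLE_INSERT_IN_PROGRESS } Verdict;

    typedef struct ToyTuple
    {
        XactStatus xmin_status;          /* fate of the inserting transaction */
        int        has_xmax;             /* was it deleted (or merely locked)? */
        int        xmax_is_lock_only;    /* "deleting" xact only locked it */
        XactStatus xmax_status;          /* fate of the deleting transaction */
        int        deleter_is_inserter;  /* same xact both inserted and deleted it */
        int        was_updated;          /* a newer version links to it via t_ctid */
        int        deleter_may_be_visible; /* stands in for the OldestXmin test */
    } ToyTuple;

    static Verdict
    vacuum_verdict(const ToyTuple *t)
    {
        if (t->xmin_status == XACT_ABORTED)
            return TUPLE_DEAD;              /* never visible to anyone: reap now */
        if (t->xmin_status == XACT_IN_PROGRESS)
            return TUPLE_INSERT_IN_PROGRESS;

        /* inserter committed, so the tuple was good at some point */
        if (!t->has_xmax || t->xmax_is_lock_only)
            return TUPLE_LIVE;              /* a locker never deletes anything */
        if (t->xmax_status != XACT_COMMITTED)
            return TUPLE_LIVE;              /* delete aborted (or unresolved): keep it */

        if (t->deleter_is_inserter && !t->was_updated)
            return TUPLE_DEAD;              /* nobody else ever saw it */
        if (t->deleter_may_be_visible)
            return TUPLE_RECENTLY_DEAD;     /* some snapshot may still need it */
        return TUPLE_DEAD;
    }

    int main(void)
    {
        ToyTuple aborted_insert = {XACT_ABORTED,   0, 0, XACT_ABORTED,   0, 0, 0};
        ToyTuple locked_only    = {XACT_COMMITTED, 1, 1, XACT_COMMITTED, 0, 0, 0};
        ToyTuple self_deleted   = {XACT_COMMITTED, 1, 0, XACT_COMMITTED, 1, 0, 0};

        printf("%d %d %d\n",
               vacuum_verdict(&aborted_insert),   /* TUPLE_DEAD */
               vacuum_verdict(&locked_only),      /* TUPLE_LIVE */
               vacuum_verdict(&self_deleted));    /* TUPLE_DEAD */
        return 0;
    }
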