Diffstat (limited to 'src/backend/commands')
-rw-r--r--  src/backend/commands/analyze.c       4
-rw-r--r--  src/backend/commands/cluster.c       6
-rw-r--r--  src/backend/commands/indexcmds.c     7
-rw-r--r--  src/backend/commands/sequence.c     12
-rw-r--r--  src/backend/commands/tablecmds.c    29
-rw-r--r--  src/backend/commands/tsearchcmds.c   1
-rw-r--r--  src/backend/commands/typecmds.c      2
-rw-r--r--  src/backend/commands/vacuum.c       30
-rw-r--r--  src/backend/commands/vacuumlazy.c   22
-rw-r--r--  src/backend/commands/variable.c      6
10 files changed, 62 insertions, 57 deletions
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index fa84989fc6f..57188bc25a7 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -566,7 +566,7 @@ do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, bool inh)
}
/*
- * Report ANALYZE to the stats collector, too. However, if doing
+ * Report ANALYZE to the stats collector, too. However, if doing
* inherited stats we shouldn't report, because the stats collector only
* tracks per-table stats.
*/
@@ -1231,7 +1231,7 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
/*
- * Estimate total numbers of rows in relation. For live rows, use
+ * Estimate total numbers of rows in relation. For live rows, use
* vac_estimate_reltuples; for dead rows, we have no source of old
* information, so we have to assume the density is the same in unseen
* pages as in the pages we scanned.
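The hunk above only rewraps the comment, but the estimate it describes is easy to state concretely. A standalone sketch of the dead-row extrapolation (illustrative names, not the actual acquire_sample_rows() code): live rows go through vac_estimate_reltuples, while dead rows are simply scaled by the fraction of pages sampled.

#include <math.h>

/* Illustrative sketch only, not the real analyze.c code: with no prior
 * information about dead rows, assume the unscanned pages have the same
 * dead-tuple density as the pages the sampler actually visited. */
static double
extrapolate_dead_rows(double dead_in_sample, double scanned_pages,
                      double total_pages)
{
    if (scanned_pages <= 0)
        return 0.0;             /* nothing scanned, nothing to extrapolate */
    return floor(dead_in_sample / scanned_pages * total_pages + 0.5);
}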
diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c
index 0ab3a8bcfae..c020dc2e4ec 100644
--- a/src/backend/commands/cluster.c
+++ b/src/backend/commands/cluster.c
@@ -762,12 +762,12 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
/*
* If the OldHeap has a toast table, get lock on the toast table to keep
- * it from being vacuumed. This is needed because autovacuum processes
+ * it from being vacuumed. This is needed because autovacuum processes
* toast tables independently of their main tables, with no lock on the
- * latter. If an autovacuum were to start on the toast table after we
+ * latter. If an autovacuum were to start on the toast table after we
* compute our OldestXmin below, it would use a later OldestXmin, and then
* possibly remove as DEAD toast tuples belonging to main tuples we think
- * are only RECENTLY_DEAD. Then we'd fail while trying to copy those
+ * are only RECENTLY_DEAD. Then we'd fail while trying to copy those
* tuples.
*
* We don't need to open the toast relation here, just lock it. The lock
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 479db2c3f11..b7c021d943a 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -185,14 +185,15 @@ DefineIndex(RangeVar *heapRelation,
rel->rd_rel->relkind != RELKIND_UNCATALOGED)
{
if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
+
/*
- * Custom error message for FOREIGN TABLE since the term is
- * close to a regular table and can confuse the user.
+ * Custom error message for FOREIGN TABLE since the term is close
+ * to a regular table and can confuse the user.
*/
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot create index on foreign table \"%s\"",
- heapRelation->relname)));
+ heapRelation->relname)));
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
diff --git a/src/backend/commands/sequence.c b/src/backend/commands/sequence.c
index 383690270b8..be04177a2ee 100644
--- a/src/backend/commands/sequence.c
+++ b/src/backend/commands/sequence.c
@@ -1077,12 +1077,12 @@ read_info(SeqTable elm, Relation rel, Buffer *buf)
tuple.t_data = (HeapTupleHeader) PageGetItem(page, lp);
/*
- * Previous releases of Postgres neglected to prevent SELECT FOR UPDATE
- * on a sequence, which would leave a non-frozen XID in the sequence
- * tuple's xmax, which eventually leads to clog access failures or worse.
- * If we see this has happened, clean up after it. We treat this like a
- * hint bit update, ie, don't bother to WAL-log it, since we can certainly
- * do this again if the update gets lost.
+ * Previous releases of Postgres neglected to prevent SELECT FOR UPDATE on
+ * a sequence, which would leave a non-frozen XID in the sequence tuple's
+ * xmax, which eventually leads to clog access failures or worse. If we
+ * see this has happened, clean up after it. We treat this like a hint
+ * bit update, ie, don't bother to WAL-log it, since we can certainly do
+ * this again if the update gets lost.
*/
if (HeapTupleHeaderGetXmax(tuple.t_data) != InvalidTransactionId)
{
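For context, the cleanup this comment describes amounts to resetting the stray xmax in place and dirtying the buffer without WAL-logging it, the same way a hint-bit update is handled. A hedged sketch of that fix-up using PostgreSQL's tuple-header macros; the exact statements in read_info() may differ:

    /* Sketch only, not the commit's code: clear the leftover xmax and
     * mark the buffer dirty without emitting WAL, like a hint bit. */
    HeapTupleHeaderSetXmax(tuple.t_data, InvalidTransactionId);
    tuple.t_data->t_infomask &= ~HEAP_XMAX_COMMITTED;
    tuple.t_data->t_infomask |= HEAP_XMAX_INVALID;
    SetBufferCommitInfoNeedsSave(*buf);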
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 6279f2bf9a5..2c9f855f531 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -2679,7 +2679,8 @@ AlterTableGetLockLevel(List *cmds)
* These subcommands affect implicit row type conversion. They
* have affects similar to CREATE/DROP CAST on queries. We
* don't provide for invalidating parse trees as a result of
- * such changes. Do avoid concurrent pg_class updates, though.
+ * such changes. Do avoid concurrent pg_class updates,
+ * though.
*/
case AT_AddOf:
case AT_DropOf:
@@ -2946,7 +2947,7 @@ ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
case AT_DisableRule:
case AT_DropInherit: /* NO INHERIT */
case AT_AddOf: /* OF */
- case AT_DropOf: /* NOT OF */
+ case AT_DropOf: /* NOT OF */
ATSimplePermissions(rel, ATT_TABLE);
/* These commands never recurse */
/* No command-specific prep needed */
@@ -4067,7 +4068,7 @@ find_typed_table_dependencies(Oid typeOid, const char *typeName, DropBehavior be
*
* Check whether a type is suitable for CREATE TABLE OF/ALTER TABLE OF. If it
* isn't suitable, throw an error. Currently, we require that the type
- * originated with CREATE TYPE AS. We could support any row type, but doing so
+ * originated with CREATE TYPE AS. We could support any row type, but doing so
* would require handling a number of extra corner cases in the DDL commands.
*/
void
@@ -4083,6 +4084,7 @@ check_of_type(HeapTuple typetuple)
Assert(OidIsValid(typ->typrelid));
typeRelation = relation_open(typ->typrelid, AccessShareLock);
typeOk = (typeRelation->rd_rel->relkind == RELKIND_COMPOSITE_TYPE);
+
/*
* Close the parent rel, but keep our AccessShareLock on it until xact
* commit. That will prevent someone else from deleting or ALTERing
@@ -7406,8 +7408,8 @@ ATExecChangeOwner(Oid relationOid, Oid newOwnerId, bool recursing, LOCKMODE lock
default:
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("\"%s\" is not a table, view, sequence, or foreign table",
- NameStr(tuple_class->relname))));
+ errmsg("\"%s\" is not a table, view, sequence, or foreign table",
+ NameStr(tuple_class->relname))));
}
/*
@@ -8603,7 +8605,7 @@ ATExecDropInherit(Relation rel, RangeVar *parent, LOCKMODE lockmode)
* Drop the dependency created by StoreCatalogInheritance1 (CREATE TABLE
* INHERITS/ALTER TABLE INHERIT -- refclassid will be RelationRelationId) or
* heap_create_with_catalog (CREATE TABLE OF/ALTER TABLE OF -- refclassid will
- * be TypeRelationId). There's no convenient way to do this, so go trawling
+ * be TypeRelationId). There's no convenient way to do this, so go trawling
* through pg_depend.
*/
static void
@@ -8730,8 +8732,8 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
if (strncmp(table_attname, type_attname, NAMEDATALEN) != 0)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("table has column \"%s\" where type requires \"%s\"",
- table_attname, type_attname)));
+ errmsg("table has column \"%s\" where type requires \"%s\"",
+ table_attname, type_attname)));
/* Compare type. */
if (table_attr->atttypid != type_attr->atttypid ||
@@ -8739,8 +8741,8 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
table_attr->attcollation != type_attr->attcollation)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("table \"%s\" has different type for column \"%s\"",
- RelationGetRelationName(rel), type_attname)));
+ errmsg("table \"%s\" has different type for column \"%s\"",
+ RelationGetRelationName(rel), type_attname)));
}
DecrTupleDescRefCount(typeTupleDesc);
@@ -8748,6 +8750,7 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
for (; table_attno <= tableTupleDesc->natts; table_attno++)
{
Form_pg_attribute table_attr = tableTupleDesc->attrs[table_attno - 1];
+
if (!table_attr->attisdropped)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
@@ -8785,7 +8788,7 @@ ATExecAddOf(Relation rel, const TypeName *ofTypename, LOCKMODE lockmode)
/*
* ALTER TABLE NOT OF
*
- * Detach a typed table from its originating type. Just clear reloftype and
+ * Detach a typed table from its originating type. Just clear reloftype and
* remove the dependency.
*/
static void
@@ -8802,8 +8805,8 @@ ATExecDropOf(Relation rel, LOCKMODE lockmode)
RelationGetRelationName(rel))));
/*
- * We don't bother to check ownership of the type --- ownership of the table
- * is presumed enough rights. No lock required on the type, either.
+ * We don't bother to check ownership of the type --- ownership of the
+ * table is presumed enough rights. No lock required on the type, either.
*/
drop_parent_dependency(relid, TypeRelationId, rel->rd_rel->reloftype);
diff --git a/src/backend/commands/tsearchcmds.c b/src/backend/commands/tsearchcmds.c
index d08c9bbbc5c..3355eaafda2 100644
--- a/src/backend/commands/tsearchcmds.c
+++ b/src/backend/commands/tsearchcmds.c
@@ -96,6 +96,7 @@ get_ts_parser_func(DefElem *defel, int attnum)
break;
case Anum_pg_ts_parser_prslextype:
nargs = 1;
+
/*
* Note: because the lextype method returns type internal, it must
* have an internal-type argument for security reasons. The
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index f8eb5bc4a65..66c11de6723 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -1069,7 +1069,7 @@ DefineDomain(CreateDomainStmt *stmt)
basetypeMod, /* typeMod value */
typNDims, /* Array dimensions for base type */
typNotNull, /* Type NOT NULL */
- domaincoll); /* type's collation */
+ domaincoll); /* type's collation */
/*
* Process constraints which refer to the domain ID returned by TypeCreate
diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c
index 224c34f6e78..5cbf3a04f80 100644
--- a/src/backend/commands/vacuum.c
+++ b/src/backend/commands/vacuum.c
@@ -459,7 +459,7 @@ vacuum_set_xid_limits(int freeze_min_age,
* If we scanned the whole relation then we should just use the count of
* live tuples seen; but if we did not, we should not trust the count
* unreservedly, especially not in VACUUM, which may have scanned a quite
- * nonrandom subset of the table. When we have only partial information,
+ * nonrandom subset of the table. When we have only partial information,
* we take the old value of pg_class.reltuples as a measurement of the
* tuple density in the unscanned pages.
*
@@ -471,7 +471,7 @@ vac_estimate_reltuples(Relation relation, bool is_analyze,
BlockNumber scanned_pages,
double scanned_tuples)
{
- BlockNumber old_rel_pages = relation->rd_rel->relpages;
+ BlockNumber old_rel_pages = relation->rd_rel->relpages;
double old_rel_tuples = relation->rd_rel->reltuples;
double old_density;
double new_density;
@@ -483,8 +483,8 @@ vac_estimate_reltuples(Relation relation, bool is_analyze,
return scanned_tuples;
/*
- * If scanned_pages is zero but total_pages isn't, keep the existing
- * value of reltuples.
+ * If scanned_pages is zero but total_pages isn't, keep the existing value
+ * of reltuples.
*/
if (scanned_pages == 0)
return old_rel_tuples;
@@ -498,23 +498,23 @@ vac_estimate_reltuples(Relation relation, bool is_analyze,
/*
* Okay, we've covered the corner cases. The normal calculation is to
- * convert the old measurement to a density (tuples per page), then
- * update the density using an exponential-moving-average approach,
- * and finally compute reltuples as updated_density * total_pages.
+ * convert the old measurement to a density (tuples per page), then update
+ * the density using an exponential-moving-average approach, and finally
+ * compute reltuples as updated_density * total_pages.
*
- * For ANALYZE, the moving average multiplier is just the fraction of
- * the table's pages we scanned. This is equivalent to assuming
- * that the tuple density in the unscanned pages didn't change. Of
- * course, it probably did, if the new density measurement is different.
- * But over repeated cycles, the value of reltuples will converge towards
- * the correct value, if repeated measurements show the same new density.
+ * For ANALYZE, the moving average multiplier is just the fraction of the
+ * table's pages we scanned. This is equivalent to assuming that the
+ * tuple density in the unscanned pages didn't change. Of course, it
+ * probably did, if the new density measurement is different. But over
+ * repeated cycles, the value of reltuples will converge towards the
+ * correct value, if repeated measurements show the same new density.
*
* For VACUUM, the situation is a bit different: we have looked at a
* nonrandom sample of pages, but we know for certain that the pages we
* didn't look at are precisely the ones that haven't changed lately.
* Thus, there is a reasonable argument for doing exactly the same thing
- * as for the ANALYZE case, that is use the old density measurement as
- * the value for the unscanned pages.
+ * as for the ANALYZE case, that is use the old density measurement as the
+ * value for the unscanned pages.
*
* This logic could probably use further refinement.
*/
diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c
index ce5fa180662..ccc586f12ef 100644
--- a/src/backend/commands/vacuumlazy.c
+++ b/src/backend/commands/vacuumlazy.c
@@ -86,7 +86,7 @@ typedef struct LVRelStats
/* Overall statistics about rel */
BlockNumber rel_pages; /* total number of pages */
BlockNumber scanned_pages; /* number of pages we examined */
- double scanned_tuples; /* counts only tuples on scanned pages */
+ double scanned_tuples; /* counts only tuples on scanned pages */
double old_rel_tuples; /* previous value of pg_class.reltuples */
double new_rel_tuples; /* new estimated total # of tuples */
BlockNumber pages_removed;
@@ -211,7 +211,7 @@ lazy_vacuum_rel(Relation onerel, VacuumStmt *vacstmt,
vac_update_relstats(onerel,
vacrelstats->rel_pages, vacrelstats->new_rel_tuples,
vacrelstats->hasindex,
- (vacrelstats->scanned_pages < vacrelstats->rel_pages) ?
+ (vacrelstats->scanned_pages < vacrelstats->rel_pages) ?
InvalidTransactionId :
FreezeLimit);
@@ -341,9 +341,9 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* of pages.
*
* Before entering the main loop, establish the invariant that
- * next_not_all_visible_block is the next block number >= blkno that's
- * not all-visible according to the visibility map, or nblocks if there's
- * no such block. Also, we set up the skipping_all_visible_blocks flag,
+ * next_not_all_visible_block is the next block number >= blkno that's not
+ * all-visible according to the visibility map, or nblocks if there's no
+ * such block. Also, we set up the skipping_all_visible_blocks flag,
* which is needed because we need hysteresis in the decision: once we've
* started skipping blocks, we may as well skip everything up to the next
* not-all-visible block.
@@ -804,7 +804,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
/* now we can compute the new value for pg_class.reltuples */
vacrelstats->new_rel_tuples = vac_estimate_reltuples(onerel, false,
nblocks,
- vacrelstats->scanned_pages,
+ vacrelstats->scanned_pages,
num_tuples);
/* If any tuples need to be deleted, perform final vacuum cycle */
@@ -1082,11 +1082,11 @@ lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats)
if (new_rel_pages != old_rel_pages)
{
/*
- * Note: we intentionally don't update vacrelstats->rel_pages with
- * the new rel size here. If we did, it would amount to assuming that
- * the new pages are empty, which is unlikely. Leaving the numbers
- * alone amounts to assuming that the new pages have the same tuple
- * density as existing ones, which is less unlikely.
+ * Note: we intentionally don't update vacrelstats->rel_pages with the
+ * new rel size here. If we did, it would amount to assuming that the
+ * new pages are empty, which is unlikely. Leaving the numbers alone
+ * amounts to assuming that the new pages have the same tuple density
+ * as existing ones, which is less unlikely.
*/
UnlockRelation(onerel, AccessExclusiveLock);
return;
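The block-skipping comment reflowed in lazy_scan_heap() above describes a small state machine; a standalone sketch of it may make the hysteresis easier to see. This is illustrative only: all_visible() is a fake stand-in for a visibility-map probe, and SKIP_THRESHOLD plays the role of SKIP_PAGES_THRESHOLD.

#define SKIP_THRESHOLD 32

static int  all_visible(int blkno) { return blkno % 100 != 0; } /* fake VM lookup */
static void scan_block(int blkno)  { (void) blkno; }            /* per-page work */

static void
scan_heap_sketch(int nblocks)
{
    int         next_not_all_visible_block;
    int         skipping_all_visible_blocks;
    int         blkno;

    /* Invariant: next_not_all_visible_block is the next block >= blkno
     * that is not all-visible, or nblocks if there is no such block. */
    for (next_not_all_visible_block = 0;
         next_not_all_visible_block < nblocks &&
         all_visible(next_not_all_visible_block);
         next_not_all_visible_block++)
        ;
    skipping_all_visible_blocks = (next_not_all_visible_block >= SKIP_THRESHOLD);

    for (blkno = 0; blkno < nblocks; blkno++)
    {
        if (blkno == next_not_all_visible_block)
        {
            /* This block must be scanned; re-establish the invariant. */
            for (next_not_all_visible_block++;
                 next_not_all_visible_block < nblocks &&
                 all_visible(next_not_all_visible_block);
                 next_not_all_visible_block++)
                ;
            skipping_all_visible_blocks =
                (next_not_all_visible_block - blkno > SKIP_THRESHOLD);
        }
        else if (skipping_all_visible_blocks)
            continue;           /* hysteresis: keep skipping this run */

        scan_block(blkno);
    }
}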
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index 9efd20f2bcf..8550869db3d 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -807,9 +807,9 @@ check_client_encoding(char **newval, void **extra, GucSource source)
*
* XXX Although canonicalizing seems like a good idea in the abstract, it
* breaks pre-9.1 JDBC drivers, which expect that if they send "UNICODE"
- * as the client_encoding setting then it will read back the same way.
- * As a workaround, don't replace the string if it's "UNICODE". Remove
- * that hack when pre-9.1 JDBC drivers are no longer in use.
+ * as the client_encoding setting then it will read back the same way. As
+ * a workaround, don't replace the string if it's "UNICODE". Remove that
+ * hack when pre-9.1 JDBC drivers are no longer in use.
*/
if (strcmp(*newval, canonical_name) != 0 &&
strcmp(*newval, "UNICODE") != 0)